Operating on local files from the map and reduce methods


Inside the map and reduce methods you can operate directly on local files, reading from or writing to the local file system. Keep in mind, however, that tasks run distributed across the cluster, so such reads and writes go to the local disk of whichever node happens to execute the task.
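As a minimal sketch of the idea (this snippet is not from the original post; the class name and the /tmp/task-trace.log path are made up for illustration), a mapper can append a marker line to a node-local file with plain java.io:

    import java.io.FileWriter;
    import java.io.IOException;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;

    // Sketch: each map task appends to a file on the local disk of the
    // node it runs on. /tmp/task-trace.log is a hypothetical path.
    public class LocalFileMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
        @Override
        protected void setup(Context context) throws IOException {
            // Plain java.io writes to the executing node's local file
            // system, not to HDFS.
            FileWriter w = new FileWriter("/tmp/task-trace.log", true); // append mode
            w.write("task " + context.getTaskAttemptID() + " started\n");
            w.close();
        }
    }

Because each task writes locally, the trace file ends up scattered across the nodes rather than collected in one place.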

Note: after writing the MapReduce program, you must package it into a jar and submit it on the command line. Previously, when I wrote data to the local file system, no data was ever generated, so I assumed that map and reduce cannot write to the local file system. In fact, that is not the case. My mistake was compiling and running the program directly in Eclipse on the master node; since the slave nodes did not have the code that existed on the master, the tasks executing there produced nothing.
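For reference, packaging and submission might look like the following; the jar name, the bin/ output directory, and the main class name are assumptions based on the code below, so substitute your own:

    # package the compiled classes into a jar (bin/ is a hypothetical build output dir)
    jar cf wordcountzkl.jar -C bin .
    # submit it to the cluster so every node receives the code
    hadoop jar wordcountzkl.jar org.apache.hadoop.examples.WordCountZKL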

The code below writes a log file to the local file system from within map. After it runs successfully, a loginfo file appears in the /home/hadoop directory of each node that executed a task.

    package org.apache.hadoop.examples;

    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.util.StringTokenizer;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hadoop.mapreduce.Reducer;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    import org.apache.hadoop.util.GenericOptionsParser;

    /*
     * Author: zhankunlin 2010-8-16
     */
    public class WordCountZKL {

        // Helper that appends begin/end markers to a file on the LOCAL file
        // system of whichever machine executes the call.
        public static class LogInfo {
            public static String logFile = "/home/hadoop/loginfo";

            public static void begin(String region, String taskId) {
                try {
                    FileOutputStream out = new FileOutputStream(logFile, true); // append mode
                    out.write((region + " " + taskId + " begin\n").getBytes());
                    out.close();
                } catch (IOException e) {
                    // ignore: logging must not break the task
                }
            }

            public static void end(String region, String taskId) {
                try {
                    FileOutputStream out = new FileOutputStream(logFile, true);
                    out.write((region + " " + taskId + " end\n").getBytes());
                    out.close();
                } catch (IOException e) {
                    // ignore
                }
            }
        }

        /*
         * The standard per-record mapper and reducer, kept here (commented
         * out) for comparison with the run()-style variants below.
        public static class WordCountMapper extends
                Mapper<Object, Text, Text, IntWritable> {

            private final static IntWritable one = new IntWritable(1);
            private Text word = new Text();

            public void map(Object key, Text value, Context context)
                    throws IOException, InterruptedException {
                StringTokenizer itr = new StringTokenizer(value.toString());
                while (itr.hasMoreTokens()) {
                    word.set(itr.nextToken());
                    context.write(word, one);
                }
            }
        }

        public static class WordCountReducer extends
                Reducer<Text, IntWritable, Text, IntWritable> {

            private IntWritable result = new IntWritable();

            public void reduce(Text key, Iterable<IntWritable> values,
                    Context context) throws IOException, InterruptedException {
                int sum = 0;
                for (IntWritable val : values) {
                    sum += val.get();
                }
                result.set(sum);
                context.write(key, result);
            }
        }
        */

        public static class WordCountMapperZKL extends
                Mapper<Object, Text, Text, IntWritable> {

            private final static IntWritable one = new IntWritable(1);
            private Text word = new Text();

            // Unlike the standard per-record map(), this variant consumes the
            // whole input stream itself (it is invoked once from run() below).
            public void map(Context context) throws IOException,
                    InterruptedException {
                // this loginfo entry is written on the slave node running the task
                LogInfo.begin("map", context.getTaskAttemptID().getTaskID().toString());
                while (context.nextKeyValue()) {
                    Text value = (Text) context.getCurrentValue();
                    StringTokenizer itr = new StringTokenizer(value.toString());
                    while (itr.hasMoreTokens()) {
                        word.set(itr.nextToken());
                        context.write(word, one);
                    }
                }
                LogInfo.end("map", context.getTaskAttemptID().getTaskID().toString());
            }

            /**
             * Expert users can override this method for more complete control
             * over the execution of the mapper.
             */
            @Override
            public void run(Context context) throws IOException, InterruptedException {
                setup(context);
                map(context);
                cleanup(context);
            }
        }

        public static class WordCountReducerZKL extends
                Reducer<Text, IntWritable, Text, IntWritable> {

            private IntWritable result = new IntWritable();

            // Same idea as the mapper: one call handles every key.
            public void reduce(Context context) throws IOException, InterruptedException {
                while (context.nextKey()) {
                    Text key = context.getCurrentKey();
                    Iterable<IntWritable> values = context.getValues();
                    int sum = 0;
                    for (IntWritable val : values) {
                        sum += val.get();
                    }
                    result.set(sum);
                    context.write(key, result);
                }
            }

            /**
             * Advanced application writers can use the
             * {@link #run(org.apache.hadoop.mapreduce.Reducer.Context)} method
             * to control how the reduce task works.
             */
            @Override
            public void run(Context context) throws IOException, InterruptedException {
                setup(context);
                reduce(context);
                cleanup(context);
            }
        }

        @SuppressWarnings("deprecation")
        public static void main(String[] args) throws Exception {

            LogInfo.begin("job", "job_1"); // this loginfo entry is written on the master node

            Configuration conf = new Configuration();

            String[] inputPars = {"wcinzkl", "wcoutzkl"};
            String[] otherArgs = new GenericOptionsParser(conf, inputPars)
                    .getRemainingArgs();

            // Delete the output directory if it already exists, instead of
            // letting the job fail with FileAlreadyExistsException.
            Path outputPaths = new Path(otherArgs[1]);
            FileSystem fs = FileSystem.get(conf);
            if (fs.exists(outputPaths)) {
                FileStatus fsStatus = fs.getFileStatus(outputPaths);
                if (fsStatus.isDir()) // only to exercise the HDFS API; not strictly necessary
                    fs.delete(outputPaths, true);
                else
                    fs.delete(outputPaths, false); // true would also work
                System.out.println("output directory \"" + outputPaths
                        + "\" already exists, deleting it first");
            }

            Job job = new Job(conf, "word count zkl");
            job.setJarByClass(WordCountZKL.class);
            job.setMapperClass(WordCountMapperZKL.class);
            job.setCombinerClass(WordCountReducerZKL.class);
            job.setReducerClass(WordCountReducerZKL.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(IntWritable.class);
            FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
            FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
            System.out.println("job " + job.getJobName() + " (" + job.getJobID()
                    + ") finished? " + job.waitForCompletion(true));
            LogInfo.end("job", "job_1");
        }
    }
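Note the run(Context) overrides above: instead of the framework invoking map() once per record (or reduce() once per key), each task hands the whole Context to a single map() or reduce() call, which then drives the iteration itself via nextKeyValue() and nextKey(). This mirrors what the default Mapper.run() and Reducer.run() do internally, and it is what lets LogInfo.begin() and LogInfo.end() bracket the entire task rather than a single record.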

 
