Execute the map or reduce method only once per map or reduce task


By default, the map method is called once for each input <key, value> pair, and the reduce method once for each distinct key. However, you can write applications in which each map or reduce task executes its map or reduce method only once. Before writing such a program, you should understand how the map and reduce methods are driven by the MapReduce framework.
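For reference, this is roughly what the default run method of Mapper looks like in the new org.apache.hadoop.mapreduce API as of hadoop-0.20.1 (later releases additionally wrap the loop in try/finally so that cleanup always runs; if you override run yourself, consider doing the same):

// Default Mapper.run() (hadoop-0.20.1): map() is invoked once
// for every input <key, value> pair of the task's split.
public void run(Context context) throws IOException, InterruptedException {
    setup(context);
    while (context.nextKeyValue()) {
        map(context.getCurrentKey(), context.getCurrentValue(), context);
    }
    cleanup(context);
}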

In fact, you only need to override the run method in the Mapper and Reducer subclasses of the user program, moving the loop that traverses each input <key, value> pair inside a single call to the map or reduce function.
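The default Reducer.run() is analogous: reduce() is called once per key. A sketch of it, as found in hadoop-0.20.1:

// Default Reducer.run() (hadoop-0.20.1): reduce() is invoked
// once for every distinct key of the task's input.
public void run(Context context) throws IOException, InterruptedException {
    setup(context);
    while (context.nextKey()) {
        reduce(context.getCurrentKey(), context.getValues(), context);
    }
    cleanup(context);
}

The trick in the code below is simply to replace the framework's per-pair (or per-key) loop with a single call to a map or reduce overload that does the looping itself.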

To run: package the code into a JAR and submit it to the cluster.
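For example (the JAR name here is hypothetical; use whatever name you give your build, and note that main() below hard-codes its input and output paths): hadoop jar wordcountzkl.jar org.apache.hadoop.examples.WordCountZKL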

The complete code, adapted from the WordCount example that ships with hadoop-0.20.1, is as follows:

package org.apache.hadoop.examples;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
// Needed only if the commented-out throw in main() is used:
import org.apache.hadoop.mapred.FileAlreadyExistsException;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

/*
 * Author: zhankunlin 2010-8-16
 */
public class WordCountZKL {

    public static class WordCountMapperZKL extends
            Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        // This overload is called exactly once per map task; it walks
        // over all input <key, value> pairs itself.
        public void map(Context context) throws IOException,
                InterruptedException {
            while (context.nextKeyValue()) { // loop over every input pair here
                Object key = context.getCurrentKey(); // not used by word count
                Text value = (Text) context.getCurrentValue();
                StringTokenizer itr = new StringTokenizer(value.toString());
                while (itr.hasMoreTokens()) {
                    word.set(itr.nextToken());
                    context.write(word, one);
                }
            }
        }

        /**
         * Expert users can override this method for more complete control
         * over the execution of the mapper.
         * @param context
         * @throws IOException
         */
        @Override
        public void run(Context context) throws IOException,
                InterruptedException {
            setup(context);
            map(context); // changed: call map() only once per task
            cleanup(context);
        }
    }

    public static class WordCountReducerZKL extends
            Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable result = new IntWritable();

        // This overload is called exactly once per reduce task; it walks
        // over all keys itself.
        public void reduce(Context context) throws IOException,
                InterruptedException {
            while (context.nextKey()) { // loop over every key here
                Text key = context.getCurrentKey();
                Iterable<IntWritable> values = context.getValues();
                int sum = 0;
                for (IntWritable val : values) {
                    sum += val.get();
                }
                result.set(sum);
                context.write(key, result);
            }
        }

        /**
         * Advanced application writers can use the
         * {@link #run(org.apache.hadoop.mapreduce.Reducer.Context)} method
         * to control how the reduce task works.
         */
        @Override
        public void run(Context context) throws IOException,
                InterruptedException {
            setup(context);
            reduce(context); // changed: call reduce() only once per task
            cleanup(context);
        }
    }

    @SuppressWarnings("deprecation")
    public static void main(String[] args) throws Exception {

        Configuration conf = new Configuration();
        /*
         * String[] otherArgs = new GenericOptionsParser(conf, args)
         *         .getRemainingArgs();
         * if (otherArgs.length != 2) {
         *     System.err.println("Usage: wordcount <in> <out>");
         *     System.exit(2);
         * }
         */

        String[] inputPars = { "wcinzkl", "wcoutzkl" };
        String[] otherArgs = new GenericOptionsParser(conf, inputPars)
                .getRemainingArgs();

        Path outputPaths = new Path(otherArgs[1]);
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(outputPaths)) { // see the code of the exists() method
            // throw new FileAlreadyExistsException("output directory "
            //         + outputPaths + " already exists");
            FileStatus fsStatus = fs.getFileStatus(outputPaths);
            if (fsStatus.isDir()) // only to exercise the HDFS API; this branch is not strictly necessary
                fs.delete(outputPaths, true);
            else
                fs.delete(outputPaths, false); // true also works here
            System.out.println("output directory \"" + outputPaths
                    + "\" already exists, deleting it first");
        }

        Job job = new Job(conf, "word count zkl");
        job.setJarByClass(WordCountZKL.class);
        job.setMapperClass(WordCountMapperZKL.class);
        job.setCombinerClass(WordCountReducerZKL.class);
        job.setReducerClass(WordCountReducerZKL.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.out.println("job " + job.getJobName() + " (" + job.getJobID()
                + ") finished? " + job.waitForCompletion(true));
    }
}
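One way to convince yourself that the method now runs only once per task is to increment a counter at the top of the overloaded map method. A minimal sketch, assuming the made-up counter group "debug" and counter name "map-invocations" (both hypothetical, for illustration only):

// Hypothetical verification: after the job completes, this counter should
// equal the number of map tasks rather than the number of input records.
public void map(Context context) throws IOException, InterruptedException {
    context.getCounter("debug", "map-invocations").increment(1);
    while (context.nextKeyValue()) {
        Text value = (Text) context.getCurrentValue();
        StringTokenizer itr = new StringTokenizer(value.toString());
        while (itr.hasMoreTokens()) {
            word.set(itr.nextToken());
            context.write(word, one);
        }
    }
}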
