The MapReduce process is divided into two stages: the map stage and the reduce stage. Suppose you want to count the number of occurrences of every word in a given file.
In the map stage, each input line is split on commas into words, and each word is emitted with an initial count of 1 (Hadoop automatically groups all occurrences of the same word together before the reduce stage).
In the reduce stage, the counts for each word are summed, and the word together with its total frequency is written to the output.
The code is as follows:
Package COM. CLQ. hadoop2; import Org. apache. hadoop. io. intwritable; import Org. apache. hadoop. io. longwritable; import Org. apache. hadoop. io. text; import Org. apache. hadoop. mapreduce. mapper; public class mymapper extends mapper <longwritable, text, text, intwritable> {final text key2 = new text (); // value2 indicates the number of times a word appears in the row. Final intwritable value2 = new intwritable (1 ); // key indicates the starting position of the text line // value indicates the protected void map (longwritable key, text value, context) of the text line throws Java. io. ioexception, interruptedexception {final string [] splited = value. tostring (). split (","); For (string word: splited) {key2.set (Word); // write key2 and value2 to context. write (key2, value2 );}}}
Package COM. CLQ. hadoop2; import Org. apache. hadoop. io. intwritable; import Org. apache. hadoop. io. text; import Org. apache. hadoop. mapreduce. reducer; public class myreducer extends reducer <text, intwritable, text, intwritable> {// value3 indicates the total number of times a word appears. Final intwritable value3 = new intwritable (0 ); /*** key indicates the set context object of 1 output by the Map Method */protected void reduce (Text key, Java. lang. iterable <intwritable> values, context) throws Java. io. ioexception, interruptedexception {int sum = 0; For (intwritable count: values) {sum + = count. get () ;}// run here. sum indicates the total number of times the word appears. // key3 indicates the word, which is the final output keyfinal text key3 = key; // value3 indicates the total number of times a word appears, which is the final output valuevalue3.set (SUM); context. write (key3, value3 );}}
package com.clq.hadoop2;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

/**
 * Driver for the word-count MapReduce job: configures input/output paths,
 * the mapper and reducer classes, and the key/value types, then submits
 * the job and waits for it to finish.
 */
public class MapperReducer {

    public static void main(String[] args)
            throws IOException, InterruptedException, ClassNotFoundException {
        // Input and output paths on HDFS.
        // NOTE(review): hostname/port taken from the original text — adjust
        // to your cluster's NameNode address.
        final String INPUT_PATH = "hdfs://ubuntu:9000/input";
        final String OUTPUT_PATH = "hdfs://ubuntu:9000/output";

        // Job object encapsulating the whole MapReduce run.
        final Job job = new Job(new Configuration(), "mapperReducer");
        // Ship the jar containing this class so the cluster can run the job.
        job.setJarByClass(MapperReducer.class);

        FileInputFormat.setInputPaths(job, INPUT_PATH);
        FileOutputFormat.setOutputPath(job, new Path(OUTPUT_PATH));

        // Custom mapper and its output key/value types.
        job.setMapperClass(MyMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // Custom reducer.
        job.setReducerClass(MyReducer.class);
        // BUG FIX: the original comment promised to set the reducer's output
        // key/value types but never did — declare them explicitly here.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // Plain-text line input and tab-separated key/value output.
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        job.waitForCompletion(true);
    }
}
This demonstrates how to write a MapReduce program on Hadoop that counts the number of occurrences of each word in a text file.