Let's again use the word count example from before.
Custom Mapper Class
import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// A custom mapper class in the old API must extend MapReduceBase
// and implement the Mapper interface
public class JMapper extends MapReduceBase implements
        Mapper<LongWritable, Text, Text, LongWritable> {

    @Override
    public void map(LongWritable key, Text value,
            OutputCollector<Text, LongWritable> collector, Reporter reporter)
            throws IOException {
        String[] ss = value.toString().split("\t");
        for (String s : ss) {
            // Use collector.collect instead of context.write
            collector.collect(new Text(s), new LongWritable(1));
        }
    }
}
Custom Reducer Class
import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

// A custom reducer class in the old API must extend MapReduceBase
// and implement the Reducer interface
public class JReducer extends MapReduceBase implements
        Reducer<Text, LongWritable, Text, LongWritable> {

    @Override
    public void reduce(Text key, Iterator<LongWritable> values,
            OutputCollector<Text, LongWritable> collector, Reporter reporter)
            throws IOException {
        long sum = 0;
        // values is an Iterator rather than an Iterable, so it cannot be
        // used in a foreach loop; use a while loop instead
        while (values.hasNext()) {
            sum += values.next().get();
        }
        collector.collect(key, new LongWritable(sum));
    }
}
The driver class that submits the job: JSubmit
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;

public class JSubmit {
    public static void main(String[] args) throws IOException,
            URISyntaxException, InterruptedException, ClassNotFoundException {
        Path outPath = new Path("hdfs://localhost:9000/out");
        Path inPath = new Path("/home/hadoop/word");
        Configuration conf = new Configuration();
        // Delete the output directory if it already exists
        FileSystem fs = FileSystem.get(new URI("hdfs://localhost:9000"), conf);
        if (fs.exists(outPath)) {
            fs.delete(outPath, true);
        }
        // Use JobConf instead of Job
        JobConf job = new JobConf(conf, JSubmit.class);
        FileInputFormat.setInputPaths(job, inPath);
        job.setInputFormat(TextInputFormat.class);
        job.setMapperClass(JMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);
        job.setReducerClass(JReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        FileOutputFormat.setOutputPath(job, outPath);
        job.setOutputFormat(TextOutputFormat.class);
        // Use JobClient.runJob instead of job.waitForCompletion
        JobClient.runJob(job);
    }
}
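To make the data flow concrete, here is a hypothetical run (the input contents below are invented for illustration). Suppose the input file /home/hadoop/word contains one line of tab-separated words:

hello	world	hello

Each map call splits its line on tabs and emits a (word, 1) pair per token, and the reducer sums the counts for each key. Since TextOutputFormat joins key and value with a tab, the result in hdfs://localhost:9000/out/part-00000 would look like:

hello	2
world	1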
As you can see, the old API is not all that different from the new one; only a few classes are swapped out. Note that the class names in the old API are identical to those in the new API. Only the packages differ: the old API lives in org.apache.hadoop.mapred, while the new API lives in org.apache.hadoop.mapreduce.
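For comparison, here is a minimal sketch of the same job written against the new API (this assumes Hadoop 2.x or later for Job.getInstance; the class names JSubmitNew, NewMapper, and NewReducer are hypothetical). Mapper and Reducer keep their names but move packages, the reducer receives its values as an Iterable instead of an Iterator, and JobClient.runJob is replaced by job.waitForCompletion:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class JSubmitNew {

    // New-API mapper: extends the Mapper class and writes through Context
    public static class NewMapper extends
            Mapper<LongWritable, Text, Text, LongWritable> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            for (String s : value.toString().split("\t")) {
                context.write(new Text(s), new LongWritable(1));
            }
        }
    }

    // New-API reducer: values arrive as an Iterable, so foreach works
    public static class NewReducer extends
            Reducer<Text, LongWritable, Text, LongWritable> {
        @Override
        protected void reduce(Text key, Iterable<LongWritable> values,
                Context context) throws IOException, InterruptedException {
            long sum = 0;
            for (LongWritable v : values) {
                sum += v.get();
            }
            context.write(key, new LongWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Job replaces JobConf
        Job job = Job.getInstance(conf, "wordcount");
        job.setJarByClass(JSubmitNew.class);
        job.setMapperClass(NewMapper.class);
        job.setReducerClass(NewReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        FileInputFormat.setInputPaths(job, new Path("/home/hadoop/word"));
        FileOutputFormat.setOutputPath(job,
                new Path("hdfs://localhost:9000/out"));
        // waitForCompletion replaces JobClient.runJob
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}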