MapReduce Important Component: the RecordReader Component [repost]

(1) How records are read from an input split: each time a record is read, the RecordReader class is called (a sketch of this driving loop follows the list below);
(2) The system default RecordReader is LineRecordReader, which is what TextInputFormat uses; the RecordReader of SequenceFileInputFormat is SequenceFileRecordReader;
(3) LineRecordReader uses the byte offset of each line within the file as the map key and the contents of the line as the map value;
(4) Application scenario: customizing how each record is read, or customizing the type of the key, for example when you want the key to be the file path or file name rather than the line's offset within the file.
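To make the RecordReader's role concrete, here is a minimal sketch of how the framework drives it. It is essentially the run() loop of org.apache.hadoop.mapreduce.Mapper; the mapper's Context delegates each of these calls to the RecordReader that the InputFormat created for the current split:

// Simplified sketch of Mapper.run(): the framework pulls records
// from the RecordReader (via the Context) and calls map() once per record.
public void run(Context context) throws IOException, InterruptedException {
    setup(context);
    while (context.nextKeyValue()) {          // delegates to RecordReader.nextKeyValue()
        map(context.getCurrentKey(),          // delegates to RecordReader.getCurrentKey()
            context.getCurrentValue(),        // delegates to RecordReader.getCurrentValue()
            context);
    }
    cleanup(context);
}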
Writing a custom RecordReader:
(1) Inherit the abstract class RecordReader and implement your own RecordReader;
(2) Implement a custom InputFormat class, overriding the createRecordReader() method of InputFormat so that it returns an instance of your custom RecordReader;
(3) Call job.setInputFormatClass() to set the custom InputFormat class.
For a reference implementation, see the org.apache.hadoop.mapreduce.lib.input.TextInputFormat class.

RecordReader example:
Application scenario:
Data: a text file with one number per line: 10, 20, 30, 40, 50, 60, 70
Requirement: compute the sum of the odd-numbered lines and the sum of the even-numbered lines separately:
odd-line total: 10 + 30 + 50 + 70 = 160
even-line total: 20 + 40 + 60 = 120

Create a new project TestRecordReader with the package com.recordreader.

Source code MyMapper.java:

package com.recordreader;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class MyMapper extends Mapper<LongWritable, Text, LongWritable, Text> {

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // The key is the line number produced by MyRecordReader;
        // pass the pair through unchanged.
        context.write(key, value);
    }
}

Source code MyPartitioner.java:

package com.recordreader;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

public class MyPartitioner extends Partitioner<LongWritable, Text> {

    @Override
    public int getPartition(LongWritable key, Text value, int numPartitions) {
        // Even line numbers go to partition 1, odd line numbers to partition 0.
        // The key is rewritten to 0 or 1 so that each reducer sees one group.
        if (key.get() % 2 == 0) {
            key.set(1);
            return 1;
        } else {
            key.set(0);
            return 0;
        }
    }
}

Source code MyReducer.java:

package com.recordreader;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class MyReducer extends Reducer<LongWritable, Text, Text, IntWritable> {

    @Override
    protected void reduce(LongWritable key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        int sum = 0;
        for (Text val : values) {
            sum += Integer.parseInt(val.toString());
        }
        Text writeKey = new Text();
        IntWritable writeValue = new IntWritable();
        if (key.get() == 0) {
            writeKey.set("odd:");
        } else {
            writeKey.set("even:");
        }
        writeValue.set(sum);
        context.write(writeKey, writeValue);
    }
}
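Before reading MyRecordReader itself, note how the pieces fit together. MyRecordReader (next listing) emits a 1-based line number as the key instead of the usual byte offset. MyPartitioner routes odd-numbered lines to partition 0 and even-numbered lines to partition 1, rewriting the key to 0 or 1 so that each reducer receives its lines as a single group. MyReducer then sums that group and labels the result "odd:" or "even:". Finally, MyFileInputFormat disables splitting, so line numbers are counted over the whole file rather than restarting in each split.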
Source code MyRecordReader.java:

package com.recordreader;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.util.LineReader;

public class MyRecordReader extends RecordReader<LongWritable, Text> {

    private long start;
    private long end;
    private long pos;
    private FSDataInputStream fin = null;
    private LongWritable key = null;
    private Text value = null;
    private LineReader reader = null;

    @Override
    public void initialize(InputSplit inputSplit, TaskAttemptContext context)
            throws IOException, InterruptedException {
        FileSplit fileSplit = (FileSplit) inputSplit;
        start = fileSplit.getStart();
        end = start + fileSplit.getLength();
        Configuration conf = context.getConfiguration();
        Path path = fileSplit.getPath();
        FileSystem fs = path.getFileSystem(conf);
        fin = fs.open(path);
        fin.seek(start);
        reader = new LineReader(fin);
        pos = 1; // line numbers are 1-based
    }

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        if (key == null) {
            key = new LongWritable();
        }
        key.set(pos);
        if (value == null) {
            value = new Text();
        }
        if (reader.readLine(value) == 0) {
            return false; // end of input
        }
        pos++;
        return true;
    }

    @Override
    public LongWritable getCurrentKey() throws IOException, InterruptedException {
        return key;
    }

    @Override
    public Text getCurrentValue() throws IOException, InterruptedException {
        return value;
    }

    @Override
    public float getProgress() throws IOException, InterruptedException {
        return 0; // progress reporting is not implemented in this example
    }

    @Override
    public void close() throws IOException {
        fin.close();
    }
}

Source code MyFileInputFormat.java:

package com.recordreader;

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class MyFileInputFormat extends FileInputFormat<LongWritable, Text> {

    @Override
    public RecordReader<LongWritable, Text> createRecordReader(InputSplit split,
            TaskAttemptContext context) throws IOException, InterruptedException {
        return new MyRecordReader();
    }

    @Override
    protected boolean isSplitable(JobContext context, Path filename) {
        // Keep each file in a single split so line numbers are global.
        return false;
    }
}

Source code TestRecordReader.java:

package com.recordreader;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class TestRecordReader {

    public static void main(String[] args)
            throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: TestRecordReader <in> <out>");
            System.exit(2);
        }
        Job job = new Job(conf, "test record reader");
        job.setJarByClass(TestRecordReader.class);
        job.setMapperClass(MyMapper.class);
        job.setReducerClass(MyReducer.class);
        job.setPartitionerClass(MyPartitioner.class);
        job.setNumReduceTasks(2); // one reducer for odd lines, one for even lines
        job.setInputFormatClass(MyFileInputFormat.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
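To try the job end to end, package the classes into a jar and submit it to Hadoop. The jar name and paths below are illustrative assumptions, not part of the original project:

hadoop jar testrecordreader.jar com.recordreader.TestRecordReader <in> <out>

Here <in> is a directory containing the seven-line input file and <out> must not exist yet. Because partition 0 collects the odd-numbered lines and partition 1 the even-numbered lines, the two reduce tasks should produce the following output files (TextOutputFormat separates key and value with a tab):

part-r-00000: odd:  160
part-r-00001: even: 120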