Important MapReduce component: the RecordReader

Source: Internet
Author: User

The RecordReader component determines:

(1) how a record is read from an input split; the RecordReader is invoked for every record that is read;

(2) the framework's default RecordReader is LineRecordReader, which is what TextInputFormat uses, while SequenceFileInputFormat's RecordReader is SequenceFileRecordReader;

(3) LineRecordReader uses the byte offset at which each line starts as the map key and the content of the line as the map value; for example, if the first two lines of a file are "10" and "20", it produces the pairs (0, "10") and (3, "20"), because the second line begins at byte offset 3;

(4) application scenario: you can customize how each record is read, and you can also customize the type of the key, for example when the key you want is the file path or file name rather than the line's offset within the file.
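To make scenario (4) concrete, here is a minimal sketch of a RecordReader that emits the file name, rather than an offset, as the key of every record. It is not part of the example below, and the class name FileNameRecordReader is illustrative:

package com.recordreader;

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.util.LineReader;

// Hypothetical sketch: every record of a split uses the file name as its key.
public class FileNameRecordReader extends RecordReader<Text, Text> {
    private Text key = new Text();
    private Text value = new Text();
    private FSDataInputStream fin = null;
    private LineReader reader = null;

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context)
            throws IOException, InterruptedException {
        FileSplit fileSplit = (FileSplit) split;
        Path path = fileSplit.getPath();
        // Set the key once: all records of this split share the file name.
        key.set(path.getName());
        FileSystem fs = path.getFileSystem(context.getConfiguration());
        fin = fs.open(path);
        fin.seek(fileSplit.getStart());
        reader = new LineReader(fin);
    }

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        // readLine() returns 0 at end of stream; the key never changes.
        return reader.readLine(value) > 0;
    }

    @Override
    public Text getCurrentKey() { return key; }

    @Override
    public Text getCurrentValue() { return value; }

    @Override
    public float getProgress() { return 0; }

    @Override
    public void close() throws IOException {
        fin.close();
    }
}

A matching InputFormat would return this reader from createRecordReader(), exactly as described in the steps below.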

Custom RecordReader:

(1) inherit the abstract class RecordReader and implement your own reader;

(2) implement a custom InputFormat class and override its createRecordReader() method, returning an instance of the custom RecordReader;

(3) call job.setInputFormatClass() to set the custom InputFormat. For a reference implementation, see the source of the org.apache.hadoop.mapreduce.lib.input.TextInputFormat class.

RecordReader example

Application scenario. The input data holds one number per line:

10
20
30
40
50
60
70

Requirement: compute the sum of the odd-numbered lines and the sum of the even-numbered lines separately:

odd lines: 10 + 30 + 50 + 70 = 160
even lines: 20 + 40 + 60 = 120

Create a new project TestRecordReader with the package com.recordreader.

Source code MyMapper.java:

package com.recordreader;

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class MyMapper extends Mapper<LongWritable, Text, LongWritable, Text> {

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // The key is the 1-based line number produced by MyRecordReader;
        // pass each (lineNumber, lineContent) pair straight through.
        context.write(key, value);
    }
}

Source code MyPartitioner.java:

package com.recordreader;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

public class MyPartitioner extends Partitioner<LongWritable, Text> {

    @Override
    public int getPartition(LongWritable key, Text value, int numPartitions) {
        // Route even-numbered lines to reducer 1 and odd-numbered lines to
        // reducer 0, rewriting the key so each reducer sees a single key (0 or 1).
        if (key.get() % 2 == 0) {
            key.set(1);
            return 1;
        } else {
            key.set(0);
            return 0;
        }
    }
}

Source code MyReducer.java:

package com.recordreader;

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class MyReducer extends Reducer<LongWritable, Text, Text, IntWritable> {

    @Override
    protected void reduce(LongWritable key, Iterable<Text> value, Context context)
            throws IOException, InterruptedException {
        int sum = 0;
        for (Text val : value) {
            sum += Integer.parseInt(val.toString());
        }
        Text write_key = new Text();
        IntWritable write_value = new IntWritable();
        // Key 0 carries the odd-numbered lines, key 1 the even-numbered ones
        // (see MyPartitioner).
        if (key.get() == 0)
            write_key.set("odd:");
        else
            write_key.set("even:");
        write_value.set(sum);
        context.write(write_key, write_value);
    }
}

Source code MyRecordReader.java:

package com.recordreader;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.util.LineReader;

public class MyRecordReader extends RecordReader<LongWritable, Text> {
    private long start;
    private long end;
    private long pos;
    private FSDataInputStream fin = null;
    private LongWritable key = null;
    private Text value = null;
    private LineReader reader = null;

    @Override
    public void initialize(InputSplit inputSplit, TaskAttemptContext context)
            throws IOException, InterruptedException {
        FileSplit fileSplit = (FileSplit) inputSplit;
        start = fileSplit.getStart();
        end = start + fileSplit.getLength();
        Configuration conf = context.getConfiguration();
        Path path = fileSplit.getPath();
        FileSystem fs = path.getFileSystem(conf);
        fin = fs.open(path);
        fin.seek(start);
        reader = new LineReader(fin);
        // Start counting at 1 so the key is the 1-based line number,
        // not the byte offset that LineRecordReader would use.
        pos = 1;
    }

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        if (key == null)
            key = new LongWritable();
        key.set(pos);
        if (value == null)
            value = new Text();
        if (reader.readLine(value) == 0)
            return false;
        pos++;
        return true;
    }

    @Override
    public LongWritable getCurrentKey() throws IOException, InterruptedException {
        return key;
    }

    @Override
    public Text getCurrentValue() throws IOException, InterruptedException {
        return value;
    }

    @Override
    public float getProgress() throws IOException, InterruptedException {
        return 0;
    }

    @Override
    public void close() throws IOException {
        fin.close();
    }
}
Source code MyFileInputFormat.java:

package com.recordreader;

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class MyFileInputFormat extends FileInputFormat<LongWritable, Text> {

    @Override
    public RecordReader<LongWritable, Text> createRecordReader(InputSplit split,
            TaskAttemptContext context) throws IOException, InterruptedException {
        return new MyRecordReader();
    }

    @Override
    protected boolean isSplitable(JobContext context, Path filename) {
        // Keep each file in a single split so the line numbers counted by
        // MyRecordReader are correct across the whole file.
        return false;
    }
}

Source code TestRecordReader.java:

package com.recordreader;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class TestRecordReader {

    public static void main(String[] args) throws IOException,
            ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: wordcount <in> <out>");
            System.exit(2);
        }
        Job job = new Job(conf, "word count");
        job.setJarByClass(TestRecordReader.class);
        job.setMapperClass(MyMapper.class);
        job.setReducerClass(MyReducer.class);
        job.setPartitionerClass(MyPartitioner.class);
        // Two reducers: one for the odd-line sum, one for the even-line sum.
        job.setNumReduceTasks(2);
        job.setInputFormatClass(MyFileInputFormat.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
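To run the example, pack the classes into a jar (the jar name and HDFS paths below are illustrative) and submit it with the input and output arguments that GenericOptionsParser leaves in otherArgs:

hadoop jar recordreader.jar com.recordreader.TestRecordReader /input /output

With the seven-line sample data above, reducer 0 should write "odd:	160" to part-r-00000 and reducer 1 should write "even:	120" to part-r-00001, since each reducer produces its own output file.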
