package org.apache.hadoop.examples;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
public class WordCount {
  public static class TokenizerMapper
      extends Mapper<Object, Text, Text, IntWritable> {
    /*
     * IntWritable is a Hadoop class that wraps the Java int type; its
     * constructors are public IntWritable(int value) and public IntWritable().
     * So new IntWritable(1) creates an instance holding the value 1, analogous
     * to Integer i = new Integer(1) in plain Java. IntWritable implements the
     * Writable interface, Hadoop's serialization format: to pass an object
     * between processes or persist it to disk, the object must first be
     * serialized to a byte stream, and deserialized again when it is received
     * or read back. (A round-trip sketch follows the listing.)
     */
    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    // Split each input line into whitespace-delimited tokens and emit a
    // (word, 1) pair for every token.
    public void map(Object key, Text value, Context context)
        throws IOException, InterruptedException {
      StringTokenizer itr = new StringTokenizer(value.toString());
      while (itr.hasMoreTokens()) {
        word.set(itr.nextToken());
        context.write(word, one);
      }
    }
  }
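  // IntSumReducer serves as both the combiner and the reducer below: it sums
  // the per-word counts emitted by the mappers.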
  public static class IntSumReducer
      extends Reducer<Text, IntWritable, Text, IntWritable> {
    private IntWritable result = new IntWritable();

    public void reduce(Text key, Iterable<IntWritable> values, Context context)
        throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable val : values) {
        sum += val.get();
      }
      result.set(sum);
      context.write(key, result);
    }
  }
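  // Driver: parse the generic Hadoop options, configure the job, and submit
  // it, exiting with 0 on success and 1 on failure.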
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length != 2) {
      System.err.println("Usage: wordcount <in> <out>");
      System.exit(2);
    }
    Job job = new Job(conf, "word count");
    job.setJarByClass(WordCount.class);
    job.setMapperClass(TokenizerMapper.class);
    job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
    FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
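To make the serialization point in the mapper comment concrete, here is a minimal, self-contained sketch of the Writable round-trip Hadoop performs when it ships an IntWritable between processes. The class name and the byte-array streams are illustrative assumptions for this sketch; WordCount itself never does this by hand, the framework does it during the shuffle.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import org.apache.hadoop.io.IntWritable;

// Illustrative sketch: serialize an IntWritable to bytes and restore it.
public class WritableRoundTrip {
  public static void main(String[] args) throws Exception {
    IntWritable one = new IntWritable(1);

    // Serialize: Writable.write(DataOutput) turns the object into a byte stream.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    one.write(new DataOutputStream(bytes));

    // Deserialize: Writable.readFields(DataInput) restores the value from bytes.
    IntWritable restored = new IntWritable();
    restored.readFields(
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

    System.out.println(restored.get());  // prints 1
  }
}

In a real run the same mechanism carries the (word, 1) pairs from the mappers to the reducers; the two remaining command-line arguments become the job's input and output paths.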