Submitting a Hadoop job with the old-version Java API

Tags: java api hadoop
We reuse the word-count example from before.

The custom Mapper class:
import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// A custom Mapper in the old API must extend MapReduceBase and implement the Mapper interface
public class JMapper extends MapReduceBase implements
        Mapper<LongWritable, Text, Text, LongWritable> {

    @Override
    public void map(LongWritable key, Text value,
            OutputCollector<Text, LongWritable> collector, Reporter reporter)
            throws IOException {
        String[] ss = value.toString().split("\t");
        for (String s : ss) {
            // use collector.collect instead of context.write
            collector.collect(new Text(s), new LongWritable(1));
        }
    }
}
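For comparison, here is a minimal sketch of what the same mapper might look like with the new API (the class name NewApiMapper is made up for illustration): output goes through the Context object instead of an OutputCollector.

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Hypothetical new-API counterpart, shown only to illustrate the differences
public class NewApiMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        for (String s : value.toString().split("\t")) {
            // the new API writes through the Context instead of an OutputCollector
            context.write(new Text(s), new LongWritable(1));
        }
    }
}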
The custom Reducer class:
import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

// A custom Reducer in the old API must extend MapReduceBase and implement the Reducer interface
public class JReducer extends MapReduceBase implements
        Reducer<Text, LongWritable, Text, LongWritable> {

    @Override
    public void reduce(Text key, Iterator<LongWritable> value,
            OutputCollector<Text, LongWritable> collector, Reporter reporter)
            throws IOException {
        long sum = 0;
        // the values arrive as an Iterator rather than an Iterable,
        // so a while loop replaces the for-each loop
        while (value.hasNext()) {
            sum += value.next().get();
        }
        collector.collect(key, new LongWritable(sum));
    }
}
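Again for comparison, a sketch of the new-API reducer (the class name NewApiReducer is invented for illustration): the values come in as an Iterable, so a for-each loop works there.

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Hypothetical new-API counterpart, shown only to illustrate the differences
public class NewApiReducer extends Reducer<Text, LongWritable, Text, LongWritable> {

    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context)
            throws IOException, InterruptedException {
        long sum = 0;
        // Iterable values allow a plain for-each loop
        for (LongWritable v : values) {
            sum += v.get();
        }
        context.write(key, new LongWritable(sum));
    }
}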
The driver class JSubmit that submits the job:
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;

public class JSubmit {
    public static void main(String[] args) throws IOException,
            URISyntaxException, InterruptedException, ClassNotFoundException {
        Path outPath = new Path("hdfs://localhost:9000/out");
        Path inPath = new Path("/home/hadoop/word");
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://localhost:9000"), conf);
        // delete the output directory if it already exists
        if (fs.exists(outPath)) {
            fs.delete(outPath, true);
        }

        // use JobConf instead of Job
        JobConf job = new JobConf(conf, JSubmit.class);

        FileInputFormat.setInputPaths(job, inPath);
        job.setInputFormat(TextInputFormat.class);

        job.setMapperClass(JMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);

        job.setReducerClass(JReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        FileOutputFormat.setOutputPath(job, outPath);
        job.setOutputFormat(TextOutputFormat.class);

        // use JobClient.runJob instead of job.waitForCompletion
        JobClient.runJob(job);
    }
}
As you can see, the old-version API is not very different; only a handful of classes are swapped out. Note that although many of the old-API classes share exactly the same names as their new-API counterparts, they live in different packages: the old-API classes are all under org.apache.hadoop.mapred, while the new-API ones are under org.apache.hadoop.mapreduce. A sketch of the new-API driver is shown below for comparison.
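The following is a minimal sketch of the new-API driver under the same assumptions as above (NewApiSubmit, NewApiMapper and NewApiReducer are made-up names; input and output paths are reused from JSubmit): it configures the job through Job rather than JobConf and submits with waitForCompletion rather than JobClient.runJob.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class NewApiSubmit {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // the new API configures the job through Job instead of JobConf
        // (on very old releases use new Job(conf, "wordcount") instead of Job.getInstance)
        Job job = Job.getInstance(conf, "wordcount");
        job.setJarByClass(NewApiSubmit.class);

        job.setMapperClass(NewApiMapper.class);
        job.setReducerClass(NewApiReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        FileInputFormat.setInputPaths(job, new Path("/home/hadoop/word"));
        FileOutputFormat.setOutputPath(job, new Path("hdfs://localhost:9000/out"));

        // the new API submits with waitForCompletion instead of JobClient.runJob
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}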