Big Data Technology: Auxiliary Sorting and Secondary Sorting Case (GroupingComparator)
1) Requirements
We have the following order data:
Order ID | Product ID | Transaction amount
0000001  | Pdt_01     | 222.8
0000001  | Pdt_05     | 25.8
0000002  | Pdt_03     | 522.8
0000002  | Pdt_04     | 122.4
0000002  | Pdt_05     | 722.4
0000003  | Pdt_01     | 222.8
0000003  | Pdt_02     | 33.8
Now we need to find the most expensive item in every order.
2) Input data: GroupingComparator.txt (tab-separated fields: order ID, product ID, transaction amount)
0000001	Pdt_01	222.8
0000002	Pdt_05	722.4
0000001	Pdt_05	25.8
0000003	Pdt_01	222.8
0000003	Pdt_02	33.8
0000002	Pdt_03	522.8
0000002	Pdt_04	122.4
Expected output, one file per reduce task (records go to the reducer numbered order ID % 3; each file holds the order ID and that order's maximum amount):

part-r-00000:  3  222.8
part-r-00001:  1  222.8
part-r-00002:  2  722.4
3) Analysis
(1) Use a composite key made of the order ID and the transaction amount. In the map stage every order record is read into this key; the records are partitioned by order ID, sorted by amount in descending order within each order, and sent to reduce.
(2) On the reduce side, use a GroupingComparator to gather all key-value pairs with the same order ID into one group; the first key of each group is then the maximum amount for that order.
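For example, after the map-side sort the reduce() call that handles order 0000002 receives its three records as one group, already sorted by amount: (0000002, 722.4), (0000002, 522.8), (0000002, 122.4). The first key of the group, 722.4, is the required maximum for that order.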
4) Implementation
Define the order information class OrderBean
package com.xyg.mapreduce.order;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

public class OrderBean implements WritableComparable<OrderBean> {

    private int order_id;   // order ID
    private double price;   // price

    public OrderBean() {
        super();
    }

    public OrderBean(int order_id, double price) {
        super();
        this.order_id = order_id;
        this.price = price;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(order_id);
        out.writeDouble(price);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        order_id = in.readInt();
        price = in.readDouble();
    }

    @Override
    public String toString() {
        return order_id + "\t" + price;
    }

    public int getOrder_id() {
        return order_id;
    }

    public void setOrder_id(int order_id) {
        this.order_id = order_id;
    }

    public double getPrice() {
        return price;
    }

    public void setPrice(double price) {
        this.price = price;
    }

    // Compare two orders: order ID ascending, then price in descending order
    @Override
    public int compareTo(OrderBean o) {
        int result;
        if (order_id > o.getOrder_id()) {
            result = 1;
        } else if (order_id < o.getOrder_id()) {
            result = -1;
        } else {
            // Same order ID: higher price sorts first
            result = price > o.getPrice() ? -1 : 1;
        }
        return result;
    }
}
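As a quick local sanity check of the compareTo() contract, sorting a few beans should yield ascending order IDs and, within one order, descending prices. This class is a hypothetical sketch and not part of the job; it only assumes the OrderBean above is on the classpath.

package com.xyg.mapreduce.order;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Hypothetical local check, not part of the MapReduce job
public class OrderBeanSortCheck {

    public static void main(String[] args) {
        List<OrderBean> beans = new ArrayList<>(Arrays.asList(
                new OrderBean(2, 522.8),
                new OrderBean(2, 722.4),
                new OrderBean(1, 25.8),
                new OrderBean(1, 222.8)));

        // Sort with OrderBean.compareTo(): order ID ascending, price descending
        Collections.sort(beans);

        // Expected output: 1 222.8, 1 25.8, 2 722.4, 2 522.8
        for (OrderBean bean : beans) {
            System.out.println(bean);
        }
    }
}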
Write the OrderMapper class
package com.xyg.mapreduce.order;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class OrderMapper extends Mapper<LongWritable, Text, OrderBean, NullWritable> {

    OrderBean k = new OrderBean();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {

        // 1 Read one line
        String line = value.toString();

        // 2 Split on tabs: order ID, product ID, transaction amount
        String[] fields = line.split("\t");

        // 3 Fill the composite key (the product ID in fields[1] is not needed)
        k.setOrder_id(Integer.parseInt(fields[0]));
        k.setPrice(Double.parseDouble(fields[2]));

        // 4 Emit the key with a NullWritable value
        context.write(k, NullWritable.get());
    }
}
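Note that a single OrderBean instance k is reused across map() calls. This is safe because context.write() serializes the key into the shuffle buffer immediately, so later changes to k do not affect records that have already been written.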
Write the OrderReducer class
package com.xyg.mapreduce.order;

import java.io.IOException;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Reducer;

public class OrderReducer extends Reducer<OrderBean, NullWritable, OrderBean, NullWritable> {

    @Override
    protected void reduce(OrderBean key, Iterable<NullWritable> values, Context context)
            throws IOException, InterruptedException {
        // The first key of each group is the highest-priced record of the order
        context.write(key, NullWritable.get());
    }
}
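Because the map-side sort places the highest amount first within every order and the grouping comparator delivers all records of an order in a single reduce() call, writing the key once per call emits exactly one line per order: its maximum amount. If you ever need the other records of the group, keep in mind that Hadoop reuses the key object while iterating over values, so the key's fields are refreshed for each value in the group.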
Write the OrderDriver class
package com.xyg.mapreduce.order;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class OrderDriver {

    public static void main(String[] args) throws Exception {

        // 1 Get configuration information
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 2 Set the jar load path
        job.setJarByClass(OrderDriver.class);

        // 3 Set the map and reduce classes
        job.setMapperClass(OrderMapper.class);
        job.setReducerClass(OrderReducer.class);

        // 4 Set the map output key and value types
        job.setMapOutputKeyClass(OrderBean.class);
        job.setMapOutputValueClass(NullWritable.class);

        // 5 Set the final output key and value types
        job.setOutputKeyClass(OrderBean.class);
        job.setOutputValueClass(NullWritable.class);

        // 6 Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 7 Set the grouping comparator for the reduce side
        job.setGroupingComparatorClass(OrderGroupingComparator.class);

        // 8 Set the partitioner
        job.setPartitionerClass(OrderPartitioner.class);

        // 9 Set the number of reduce tasks
        job.setNumReduceTasks(3);

        // 10 Submit the job
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
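The job can then be packaged and submitted in the usual way; the jar name and HDFS paths below are placeholders, not part of the original example:

hadoop jar order.jar com.xyg.mapreduce.order.OrderDriver /input/GroupingComparator.txt /output/order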
Write the OrderPartitioner class
package com.xyg.mapreduce.order;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Partitioner;

public class OrderPartitioner extends Partitioner<OrderBean, NullWritable> {

    @Override
    public int getPartition(OrderBean key, NullWritable value, int numReduceTasks) {
        // Partition by order ID so that all records of one order reach the same reducer
        return (key.getOrder_id() & Integer.MAX_VALUE) % numReduceTasks;
    }
}
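With job.setNumReduceTasks(3), order 0000001 goes to partition 1 % 3 = 1 (part-r-00001), order 0000002 to 2 % 3 = 2 (part-r-00002), and order 0000003 to 3 % 3 = 0 (part-r-00000), which is how the expected output in section 2 ends up spread across three files.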
Write the OrderGroupingComparator class
package com.xyg.mapreduce.order;

import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;

public class OrderGroupingComparator extends WritableComparator {

    protected OrderGroupingComparator() {
        super(OrderBean.class, true);
    }

    @SuppressWarnings("rawtypes")
    @Override
    public int compare(WritableComparable a, WritableComparable b) {
        OrderBean aBean = (OrderBean) a;
        OrderBean bBean = (OrderBean) b;

        // Group only by order ID; the price is ignored here
        int result;
        if (aBean.getOrder_id() > bBean.getOrder_id()) {
            result = 1;
        } else if (aBean.getOrder_id() < bBean.getOrder_id()) {
            result = -1;
        } else {
            result = 0;
        }
        return result;
    }
}
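Unlike OrderBean.compareTo(), this compare() looks only at the order ID, so two keys from the same order are treated as equal during grouping and the framework feeds all of an order's records, already sorted by price in descending order, into one reduce() call. The super(OrderBean.class, true) call tells WritableComparator to create OrderBean instances so that serialized keys can be deserialized and compared.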