Preface
HBase extends the MapReduce API so that MapReduce jobs can read from and write to HTable data directly. The example below reads an HBase table as the job's source and summarizes the results into MySQL.
Example: MapReduce reading with HBase as the source
package hbase;

import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class ExampleHbaseToMysqlMapreduce {

    public static void main(String[] args) throws Exception {
        // HBase configuration and source table
        Configuration config = HBaseConfiguration.create();
        String tableName = "flws";

        // Scan a bounded row-key range, reading only the cf:AH column
        Scan scan = new Scan();
        scan.setStartRow(Bytes.toBytes("5768014"));
        scan.setStopRow(Bytes.toBytes("5768888"));
        scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("AH"));
        scan.setCaching(500);
        scan.setCacheBlocks(false); // don't pollute the block cache with scan data

        // Job definition
        Job job = new Job(config, "ExampleHbaseMapreduce");
        job.setJarByClass(ExampleHbaseToMysqlMapreduce.class);

        // Map phase: TableMapReduceUtil wires the scan into the mapper
        TableMapReduceUtil.initTableMapperJob(tableName, scan,
                MyMapper.class, Text.class, Text.class, job);

        // Reduce phase: output goes to MySQL via JDBC, so no Hadoop output format
        job.setReducerClass(MyReducer.class);
        job.setOutputFormatClass(NullOutputFormat.class);
        job.setNumReduceTasks(5);

        boolean b = job.waitForCompletion(true);
        if (!b) {
            throw new Exception("error with job!");
        }
    }

    public static class MyMapper extends TableMapper<Text, Text> {

        public void map(ImmutableBytesWritable row, Result value, Context context)
                throws IOException, InterruptedException {
            // Emit the row key and the cf:AH cell value
            context.write(new Text(row.get()),
                    new Text(value.getValue(Bytes.toBytes("cf"), Bytes.toBytes("AH"))));
        }
    }

    public static class MyReducer extends TableReducer<Text, Text, Text> {

        private Connection conn = null;

        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            String driver = "com.mysql.jdbc.Driver";
            String url = "jdbc:mysql://172.16.35.242/judgment?useUnicode=true"
                    + "&characterEncoding=gbk&zeroDateTimeBehavior=convertToNull";
            try {
                Class.forName(driver);
            } catch (ClassNotFoundException e) {
                e.printStackTrace();
            }
            try {
                conn = DriverManager.getConnection(url, "root", "root");
            } catch (SQLException e) {
                e.printStackTrace();
            }
            super.setup(context);
        }

        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // Concatenate all cf:AH values for this row key
            StringBuffer sb = new StringBuffer();
            for (Text text : values) {
                sb.append(text.toString());
            }
            try {
                Statement st = conn.createStatement();
                st.executeUpdate("insert into test_mapreduce (id, ah) values ("
                        + Integer.valueOf(key.toString()) + ", '" + sb.toString() + "')");
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }

        @Override
        protected void cleanup(Context context) throws IOException, InterruptedException {
            try {
                conn.close();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
    }
}
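A note on deployment, which the original article does not spell out: because the reducer talks to MySQL directly, the MySQL JDBC driver jar must be visible to the reduce tasks on the cluster, not only to the client submitting the job. One common approach is to bundle the driver into the job jar; another is TableMapReduceUtil.addDependencyJars(job.getConfiguration(), com.mysql.jdbc.Driver.class), which ships the jar containing that class along with the job.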
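One caveat about the reducer above: the INSERT is assembled by string concatenation, so any quote character in the cf:AH value breaks the SQL. Below is a minimal sketch of the same reducer using java.sql.PreparedStatement instead; the class name MyPreparedReducer is made up for illustration, and it assumes the same test_mapreduce (id, ah) table and connection settings as the example (add import java.sql.PreparedStatement; to the imports).

    // Sketch only: a parameterized variant of MyReducer, not the original author's code
    public static class MyPreparedReducer extends TableReducer<Text, Text, Text> {

        private Connection conn = null;
        private PreparedStatement ps = null;

        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            try {
                Class.forName("com.mysql.jdbc.Driver");
                conn = DriverManager.getConnection(
                        "jdbc:mysql://172.16.35.242/judgment?useUnicode=true&characterEncoding=gbk",
                        "root", "root");
                // One reusable statement; values are bound, never concatenated
                ps = conn.prepareStatement(
                        "insert into test_mapreduce (id, ah) values (?, ?)");
            } catch (ClassNotFoundException | SQLException e) {
                throw new IOException("could not open MySQL connection", e);
            }
        }

        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            StringBuilder sb = new StringBuilder();
            for (Text text : values) {
                sb.append(text.toString());
            }
            try {
                ps.setInt(1, Integer.parseInt(key.toString()));
                ps.setString(2, sb.toString());
                ps.executeUpdate();
            } catch (SQLException e) {
                throw new IOException("insert failed for key " + key, e);
            }
        }

        @Override
        protected void cleanup(Context context) throws IOException, InterruptedException {
            try {
                if (ps != null) ps.close();
                if (conn != null) conn.close();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
    }

Since the job uses NullOutputFormat and never writes through the MapReduce context, the reducer could just as well extend the plain org.apache.hadoop.mapreduce.Reducer; TableReducer is kept here only to mirror the original code.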
Original article: "MapReduce reads HBase and summarizes it to an RDBMS". Thanks to the original author for sharing.