1. MySQL
-- Create a database
CREATE DATABASE logs;
-- Use it
USE logs;
-- Create a table
CREATE TABLE weblogs (
    MD5 VARCHAR(32),
    URL VARCHAR(64),
    Request_date DATE,
    Request_time TIME,
    IP VARCHAR(15)
);
-- Load data from an external text file
LOAD DATA INFILE '/path/weblogs_entries.txt' INTO TABLE weblogs FIELDS TERMINATED BY '\t' LINES TERMINATED BY '\r\n';
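For illustration, the following shell command appends one made-up record to the input file, showing the tab-delimited, CRLF-terminated layout the LOAD DATA statement above expects (the path and all field values are placeholders):
printf 'aaaa1111bbbb2222cccc3333dddd4444\t/index.html\t2012-05-10\t21:25:44\t127.0.0.1\r\n' >> /path/weblogs_entries.txt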
-- Query the table
SELECT * FROM weblogs;
-- Import the MySQL data into HDFS with Sqoop
sqoop import -m 1 --connect jdbc:mysql://hadoop:3306/logs --username root --password root --table weblogs --target-dir /data/weblogs/import
--target-dir specifies the HDFS directory where the imported data is stored.
By default, Sqoop splits the imported data by the table's primary key; each mapper produces a separate output file, so the number of mappers controls the number of output files and should be kept in check. If the table has no primary key, you must specify -m or --split-by to tell Sqoop how to split the data. The default field delimiter in the imported files is ','; a different delimiter can be specified with --fields-terminated-by '\t'.
sqoop import -m 1 --connect jdbc:mysql://hadoop:3306/logs --username root --password root --table weblogs --target-dir /data/weblogs/import --fields-terminated-by '\t'
sqoop import --connect jdbc:mysql://hadoop:3306/logs --username root --password root --table weblogs --split-by MD5 --target-dir /data/weblogs/import
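To confirm the import and see the per-mapper output, the target directory can be inspected in HDFS (a quick sketch, assuming the /data/weblogs/import directory used above):
# Each mapper writes its own part-m-NNNNN file under the target directory
hadoop fs -ls /data/weblogs/import
# Peek at the first part file to check the records and the field delimiter
hadoop fs -cat /data/weblogs/import/part-m-00000 | head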
--as-avrodatafile imports the data as an Avro data file
--as-sequencefile imports the data as a SequenceFile
-z or --compress compresses the data during import; the default codec is gzip
--compression-codec <codec class> uses any compression codec supported by Hadoop
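As an illustration of combining these options (a sketch, not part of the original steps: the target directory is hypothetical, and the Snappy codec class must be available on the cluster):
sqoop import -m 1 --connect jdbc:mysql://hadoop:3306/logs --username root --password root --table weblogs --as-sequencefile --compress --compression-codec org.apache.hadoop.io.compress.SnappyCodec --target-dir /data/weblogs/import_seq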
--direct instructs Sqoop to use the database's own bulk import/export tool (mysqldump for MySQL), which is faster. You must use -m to specify the number of mappers; otherwise, relying on --split-by causes an error (mysqldump terminates with status 2).
sqoop import --direct -m 1 --connect jdbc:mysql://hadoop:3306/test --username root --password root --table weblogs --target-dir /data/weblogs/person
-- Check which users can connect from which hosts
USE mysql;
SELECT host, user FROM user;
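With --direct, the map tasks invoke mysqldump on the cluster nodes themselves, so mysqldump must be installed there and the MySQL server must accept connections from each of those hosts. If the listing above does not show the Hadoop nodes, a grant along these lines opens access (a sketch using MySQL 5.x syntax; the '%' host pattern and the credentials are placeholders and should be narrowed for a real cluster):
mysql -u root -p -e "GRANT ALL PRIVILEGES ON logs.* TO 'root'@'%' IDENTIFIED BY 'root'; FLUSH PRIVILEGES;"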