Hadoop Eco-Building (3 Nodes) - 07. Hive Configuration


# http://archive.apache.org/dist/hive/hive-2.1.1/

# ================================================================== Installing Hive
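If the tarball is not already on the node, it can be fetched from the archive URL above first (one option; any Apache mirror works, and the mysql-connector jar is assumed to have come from the earlier MySQL step of this series):

# hypothetical fetch into the home directory; adjust the target directory as needed
wget http://archive.apache.org/dist/hive/hive-2.1.1/apache-hive-2.1.1-bin.tar.gz -P ~/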

tar -zxvf apache-hive-2.1.1-bin.tar.gz -C /usr/local
mv /usr/local/apache-hive-2.1.1-bin /usr/local/hive-2.1.1
rm -r ~/apache-hive-2.1.1-bin.tar.gz
cp ~/mysql-connector-java-5.1.46.jar /usr/local/hive-2.1.1/lib/
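A quick sanity check that the unpack and the JDBC-driver copy worked (not part of the original walkthrough):

# the hive launcher and the MySQL connector should both be in place
ls /usr/local/hive-2.1.1/bin/hive
ls /usr/local/hive-2.1.1/lib | grep mysql-connector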

# Configure environment variables
# ================================================================== node1 node2 node3

vi /etc/profile

# add the following below the line "export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL"
export JAVA_HOME=/usr/java/jdk1.8.0_111
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native

# ==================================================================node1

# Make the environment variables take effect
source /etc/profile
# View the configuration result
echo $HIVE_HOME

mkdir -p $HIVE_HOME/data/hive
mkdir -p $HIVE_HOME/data/hive/operation_logs
mkdir -p $HIVE_HOME/data/resources

# Configure Hive
cp $HIVE_HOME/conf/hive-env.sh.template $HIVE_HOME/conf/hive-env.sh
cp $HIVE_HOME/conf/hive-default.xml.template $HIVE_HOME/conf/hive-site.xml
cp $HIVE_HOME/conf/hive-exec-log4j2.properties.template $HIVE_HOME/conf/hive-exec-log4j2.properties
cp $HIVE_HOME/conf/hive-log4j2.properties.template $HIVE_HOME/conf/hive-log4j2.properties
# In hive-site.xml, replace ${system:java.io.tmpdir}/${system:user.name} with the local path /usr/local/hive-2.1.1/data/hive
# Replace ${system:java.io.tmpdir}/${hive.session.id}_resources with the local path /usr/local/hive-2.1.1/data/resources
# Replace ${system:java.io.tmpdir}/${system:user.name}/operation_logs with the local path /usr/local/hive-2.1.1/data/hive/operation_logs

vi $HIVE_HOME/conf/hive-site.xml

# Press ESC, then enter:
:%s#${system:java.io.tmpdir}/${system:user.name}#/usr/local/hive-2.1.1/data/hive#

:%s#${system:java.io.tmpdir}/${hive.session.id}_resources#/usr/local/hive-2.1.1/data/resources#
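The same two substitutions can also be done non-interactively, e.g. with sed (a sketch, assuming GNU sed; keep a backup of hive-site.xml before running it):

# same replacements as the vi commands above, applied in the same order
sed -i 's#${system:java.io.tmpdir}/${system:user.name}#/usr/local/hive-2.1.1/data/hive#g' $HIVE_HOME/conf/hive-site.xml
sed -i 's#${system:java.io.tmpdir}/${hive.session.id}_resources#/usr/local/hive-2.1.1/data/resources#g' $HIVE_HOME/conf/hive-site.xml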

# Press ESC, type /hive.exec.scratchdir to search, then press Insert at the <value> to edit it

<property>
    <name>hive.exec.scratchdir</name>
    <value>/hive/tmp</value>
</property>
<property>
    <name>hive.metastore.warehouse.dir</name>
    <value>/hive/warehouse</value>
</property>
<!-- connect to MySQL's hive database via JDBC -->
<property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://node1:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false</value>
</property>
<!-- JDBC driver for MySQL -->
<property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
</property>
<!-- MySQL user name -->
<property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>hive</value>
</property>
<!-- MySQL user password -->
<property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>hive-123</value>
</property>
<!-- HA for HiveServer2 -->
<property>
    <name>hive.server2.support.dynamic.service.discovery</name>
    <value>true</value>
</property>
<property>
    <name>hive.zookeeper.quorum</name>
    <value>node1:2181,node2:2181,node3:2181</value>
</property>
<!-- Hive's web UI (HWI) is not configured for now -->
<property>
    <name>hive.hwi.war.file</name>
    <value>/usr/local/hive-2.1.1/lib/hive-hwi-2.1.1.jar</value>
</property>
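The JDBC settings above assume that the MySQL database hive and the account hive / hive-123 already exist (set up in the earlier MySQL step of this series). If not, a minimal sketch for MySQL 5.x (createDatabaseIfNotExist=true in the URL can also create the database itself):

mysql -u root -p
> CREATE DATABASE IF NOT EXISTS hive;
> GRANT ALL PRIVILEGES ON hive.* TO 'hive'@'%' IDENTIFIED BY 'hive-123';
> FLUSH PRIVILEGES;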
# Copy the necessary jar packages from hbase/lib to hive/lib
cp $HBASE_HOME/lib/hbase-client-1.2.4.jar $HBASE_HOME/lib/hbase-common-1.2.4.jar $HIVE_HOME/lib

# Sync the jline version between Hive and Hadoop
cp $HIVE_HOME/lib/jline-2.12.jar $HADOOP_HOME/share/hadoop/yarn/lib
# View the versions present
# cd $HADOOP_HOME/share/hadoop/yarn/lib
# find ./ -name "*jline*jar"
# Remove the lower-version jline 0.9
# rm jline-0.9.94.jar

# Copy the JDK tools.jar to hive/lib
cp $JAVA_HOME/lib/tools.jar $HIVE_HOME/lib
# rm -f $HIVE_HOME/lib/log4j-slf4j-impl-2.4.1.jar

vi $HIVE_HOME/conf/hive-env.sh
HADOOP_HOME=/usr/local/hadoop/hadoop-2.7.6
export HIVE_HOME=/usr/local/hive-2.1.1
export HIVE_CONF_DIR=/usr/local/hive-2.1.1/conf
export HIVE_AUX_JARS_PATH=/usr/local/hive-2.1.1/lib
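A quick check that the jar shuffling took effect (a sanity check, not in the original): only the 2.x jline should remain on the YARN side, and the HBase and JDK jars should now be visible to Hive.

find $HADOOP_HOME/share/hadoop/yarn/lib -name "*jline*jar"
ls $HIVE_HOME/lib | grep -E 'hbase-client|hbase-common|tools'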

# If hadoop.proxyuser.root.groups was not configured in Hadoop earlier, configure it now

# ================================================================== node1 node2 node3
# If you get a permission error, add the following configuration to Hadoop's core-site.xml:
vi $HADOOP_HOME/etc/hadoop/core-site.xml

<property>
    <name>hadoop.proxyuser.root.groups</name>
    <value>*</value>
</property>
<property>
    <name>hadoop.proxyuser.root.hosts</name>
    <value>*</value>
</property>

# ================================================================== node1
# Refresh the superuser group configuration as the superuser
yarn rmadmin -refreshSuperUserGroupsConfiguration
hdfs dfsadmin -refreshSuperUserGroupsConfiguration

# ================================================================== node1 node2
# If NameNode HA is in use, run this on both the active and standby NameNodes
hdfs dfsadmin -fs hdfs://appcluster -refreshSuperUserGroupsConfiguration
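After the refresh, the proxyuser values can be read back to confirm they took effect (a quick check, not part of the original steps):

hdfs getconf -confKey hadoop.proxyuser.root.hosts
hdfs getconf -confKey hadoop.proxyuser.root.groups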

# ================================================================== node2 node3

# Make the environment variables take effect
source /etc/profile
# View the configuration result
echo $HIVE_HOME

# ==================================================================node1

$HIVE_HOME/bin/schematool -initSchema -dbType mysql
scp -r $HIVE_HOME node2:/usr/local/
scp -r $HIVE_HOME node3:/usr/local/
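To confirm the initialization succeeded, schematool can also report the metastore schema version (a quick check, not part of the original walkthrough):

$HIVE_HOME/bin/schematool -dbType mysql -info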

# Start

# ================================================================== node1 node2 node3
# Start ZooKeeper
zkServer.sh start
zkServer.sh status

# ================================================================== node1
# Start all Hadoop processes
$HADOOP_HOME/sbin/start-all.sh
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# ================================================================== node2
$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# ================================================================== node1
# To implement HA (high availability) for HBase
$HBASE_HOME/bin/hbase-daemon.sh start master
# Start HBase (start-hbase.sh)
$HBASE_HOME/bin/start-hbase.sh

# ================================================================== node2
# Enable HBase HA
$HBASE_HOME/bin/hbase-daemon.sh start master

# ================================================================== node1
$HIVE_HOME/bin/hiveserver2

# ================================================================== node2
$HIVE_HOME/bin/hiveserver2

# ================================================================== node1
zkCli.sh
ls /hiveserver2
# e.g. [serverUri=node1:10000;version=2.1.1;sequence=0000000000]
get /hiveserver2/serverUri=node1:10000;version=2.1.1;sequence=0000000000

$HIVE_HOME/bin/beeline -u "jdbc:hive2://node1:2181,node2:2181,node3:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2" root 123456
# $HIVE_HOME/bin/beeline
# > !connect jdbc:hive2://node1:2181,node2:2181,node3:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2 root "123456"

> create external table user_info (user_id int comment 'userID', user_name string comment 'userName') row format delimited fields terminated by '\t' lines terminated by '\n';
> show tables;

mkdir /root/hive
vi /root/hive/user_info.txt
# columns below are separated by a tab ('\t')
1001	zhangsan
1002	lisi
1003	wangwu

> load data local inpath '/root/hive/user_info.txt' into table user_info;
> select * from user_info;
> quit;

hdfs dfs -ls /
hdfs dfs -ls /hive/warehouse
hdfs dfs -cat /hive/warehouse/user_info/user_info.txt

hadoop fs -mkdir /hive_input_data
vi /root/hive/user_info.txt
1001	zhangsan
1002	lisi
1003	wangwu
1004	liuliu
1005	qiqi
hadoop fs -put /root/hive/user_info.txt /hive_input_data
hdfs dfs -ls /hive_input_data
# hdfs dfs -chmod 777 /hive_input_data

> select * from user_info;
> load data inpath '/hive_input_data/user_info.txt' overwrite into table user_info;
> select * from user_info;

# ================================================================== node1
# Stop the processes that were started
$HBASE_HOME/bin/stop-hbase.sh
$HADOOP_HOME/sbin/stop-all.sh

# ================================================================== node1 node2 node3
# Stop ZooKeeper
zkServer.sh stop

# ================================================================== node1
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc

# ================================================================== node2
$HADOOP_HOME/sbin/yarn-daemon.sh stop resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc

shutdown -h now
# snapshot hive_hiveserver2 cluster
