Hadoop Ecosystem Build (3 nodes) - 12. RabbitMQ Configuration

Source: Internet
Author: User
Tags: rabbitmq

# installation requires related packages
# ==================================================================node1 Node2 Node3

# node1, node2, node3: install build prerequisites for OpenSSL / Erlang.
# NOTE(review): source text read "zlin-devel" — corrected to zlib-devel.
yum install -y gcc gcc-c++ zlib zlib-devel perl ncurses-devel

# Install OpenSSL
# ==================================================================node1

# node1: distribute the OpenSSL source archive to the other nodes.
scp -r ~/openssl-1.1.0g.tar.gz node2:~/
scp -r ~/openssl-1.1.0g.tar.gz node3:~/

# ==================================================================node1 Node2 Node3

# node1, node2, node3: build and install OpenSSL from source.
tar -zvxf ~/openssl-1.1.0g.tar.gz
cd openssl-1.1.0g
./config --prefix=/usr/local/openssl
# Modify the Makefile so the library is built as position-independent
# code (needed so Erlang can link against this OpenSSL build):
vi Makefile
#   change:  CFLAG= -DOPENSSL_THREADS ...
#   to:      CFLAG= -fPIC -DOPENSSL_THREADS ...
make && make install
# Clean up the sources.
rm -rf ~/openssl-1.1.0g.tar.gz
rm -rf ~/openssl-1.1.0g

# Install Erlang
# ==================================================================node1

# node1: distribute the Erlang/OTP source archive to the other nodes.
scp -r ~/otp_src_20.3.tar.gz node2:~/
scp -r ~/otp_src_20.3.tar.gz node3:~/

# ==================================================================node1 Node2 Node3

# node1, node2, node3: build and install Erlang/OTP, then register it in
# the shared environment (/etc/profile).
cd ~
tar -xf ~/otp_src_20.3.tar.gz
mkdir /usr/local/erlang
cd otp_src_20.3
# Configure the install prefix; skip the Java bindings and point the
# build at the OpenSSL installed in the previous step.
./configure --prefix=/usr/local/erlang --without-javac --with-ssl=/usr/local/openssl
make && make install
# Verify the install result.
ls /usr/local/erlang
rm -rf ~/otp_src_20.3.tar.gz
rm -rf ~/otp_src_20.3
# Environment variables: add the following below the existing
# "export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL" line.
vi /etc/profile
export JAVA_HOME=/usr/java/jdk1.8.0_111
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export SCALA_HOME=/usr/local/scala-2.12.4
export KAFKA_HOME=/usr/local/kafka_2.12-0.10.2.1
export FLUME_HOME=/usr/local/flume-1.8.0
export SPARK_HOME=/usr/local/spark-2.3.0
export STORM_HOME=/usr/local/storm-1.1.0
export ERLANG_HOME=/usr/local/erlang
export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin:$FLUME_HOME/bin:$SPARK_HOME/bin:$SPARK_HOME/sbin:$STORM_HOME/bin:$ERLANG_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
# Make the environment variables effective and verify.
cd ~
source /etc/profile
echo $ERLANG_HOME
cd $ERLANG_HOME
erl
# Exit the Erlang shell with:  halt().

# Install RABBITMQ

# ==================================================================node1
# Unzip the RABBITMQ, the official package is the XZ compression pack, so you need to use the XZ command

# node1: the official RabbitMQ package is an .xz archive, so decompress
# it with xz first, then unpack the resulting .tar with tar.
cd ~
xz -d ~/rabbitmq-server-generic-unix-3.7.5.tar.xz
tar -xvf rabbitmq-server-generic-unix-3.7.5.tar -C /usr/local
rm -rf ~/rabbitmq-server-generic-unix-3.7.5.tar

# Environment variables
# ==================================================================node1 Node2 Node3

# node1, node2, node3: add RABBITMQ_HOME to /etc/profile, below the
# existing "export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL"
# line (the other exports repeat the earlier cluster configuration).
vi /etc/profile
export JAVA_HOME=/usr/java/jdk1.8.0_111
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export SCALA_HOME=/usr/local/scala-2.12.4
export KAFKA_HOME=/usr/local/kafka_2.12-0.10.2.1
export FLUME_HOME=/usr/local/flume-1.8.0
export SPARK_HOME=/usr/local/spark-2.3.0
export STORM_HOME=/usr/local/storm-1.1.0
export ERLANG_HOME=/usr/local/erlang
export RABBITMQ_HOME=/usr/local/rabbitmq_server-3.7.5
export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin:$FLUME_HOME/bin:$SPARK_HOME/bin:$SPARK_HOME/sbin:$STORM_HOME/bin:$ERLANG_HOME/bin:$RABBITMQ_HOME/ebin:$RABBITMQ_HOME/sbin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native

# ==================================================================node1

# node1: make the environment variables effective, verify, then copy the
# RabbitMQ installation to the other nodes.
source /etc/profile
echo $RABBITMQ_HOME
scp -r $RABBITMQ_HOME node2:/usr/local/
scp -r $RABBITMQ_HOME node3:/usr/local/

# ==================================================================node2 Node3

# node2, node3: make the environment variables effective and verify.
source /etc/profile
echo $RABBITMQ_HOME

# High concurrency settings

# ==================================================================node1 Node2 Node3

# node1, node2, node3: raise the open-file / file-descriptor limits for
# high-concurrency support.
# System-wide limit:
vi /etc/sysctl.conf
#   fs.file-max = 100000
# Make the setting effective and verify.
sysctl -p
sysctl fs.file-max
# Per-user limits:
vi /etc/security/limits.conf
#   * soft nofile 65536
#   * hard nofile 65536
# Power off and take a snapshot of RabbitMQ before clustering.
shutdown -h now

# ==================================================================node1 Node2 Node3

# node1, node2, node3: launch RabbitMQ in the background.
rabbitmq-server -detached
# Install the web management interface plug-in.
rabbitmq-plugins enable rabbitmq_management
# Security: add a user (user name admin, password rabbitmq*123456).
rabbitmqctl add_user admin rabbitmq*123456
# Set the user role (make admin an administrator).
rabbitmqctl set_user_tags admin administrator
# Set user rights (give admin configure, write, and read permissions on
# the default vhost "/").
rabbitmqctl set_permissions -p / admin ".*" ".*" ".*"
# Delete the default guest user. NOTE: since 3.3.0 RabbitMQ forbids
# guest/guest logins from anywhere except localhost.
rabbitmqctl delete_user guest
# Review and confirm the users, status, and admin's permissions.
rabbitmqctl list_users
rabbitmqctl status
rabbitmqctl list_user_permissions admin
# Browser: http://node1:15672/  (admin / rabbitmq*123456)
# Browser: http://node2:15672/  (admin / rabbitmq*123456)

# cluster configuration

# ================================================================== node1
# The Erlang cookie must be identical on every node: nodes use it to
# decide whether they may communicate with each other.
find / -name ".erlang.cookie"
ll /root/.erlang.cookie
chmod 600 /root/.erlang.cookie
# Write a shared cookie value and confirm it.
echo -n "Rabbitmqerlangcookie" > /root/.erlang.cookie
cat /root/.erlang.cookie
# ================================================================== node2, node3
chmod 600 /root/.erlang.cookie
# ================================================================== node1
# Copy the cookie to the other nodes.
scp /root/.erlang.cookie node2:/root/.erlang.cookie
scp /root/.erlang.cookie node3:/root/.erlang.cookie
# ================================================================== node1, node2, node3
reboot
rabbitmq-server -detached
# Join the cluster. --ram creates a RAM node; omit it for a disk node.
# A RabbitMQ cluster needs at least one disk node.
# ================================================================== node1
# NOTE(review): the scraped source also showed a "join_cluster --ram"
# on node1 (target redacted to "[EMAIL PROTECTED]"); a node cannot join
# itself, so node1 is reset and restarted as the cluster seed here —
# confirm against the original tutorial.
rabbitmqctl stop_app
rabbitmqctl reset
rabbitmqctl start_app
rabbitmqctl status
# ================================================================== node2
# Join node1 as a RAM node (redacted address reconstructed as rabbit@node1).
rabbitmqctl stop_app
rabbitmqctl reset
rabbitmqctl join_cluster --ram rabbit@node1
rabbitmqctl start_app
rabbitmqctl status
# ================================================================== node3
# Join node1 as a disk node.
rabbitmqctl stop_app
rabbitmqctl reset
rabbitmqctl join_cluster rabbit@node1
rabbitmqctl start_app
rabbitmqctl status
# ================================================================== node1
# Set the mirroring policy: mirror every queue ("^") to all nodes with
# automatic synchronization.
rabbitmqctl set_policy ha-all "^" '{"ha-mode":"all","ha-sync-mode":"automatic"}'
rabbitmqctl cluster_status
# ================================================================== node1
# "rabbitmqctl reset" wiped the users, so re-create the admin account
# (user name admin, password rabbitmq*123456).
rabbitmqctl list_users
rabbitmqctl add_user admin rabbitmq*123456
rabbitmqctl set_user_tags admin administrator
rabbitmqctl set_permissions -p / admin ".*" ".*" ".*"
# Delete the guest user. NOTE: since 3.3.0 RabbitMQ forbids guest/guest
# logins from anywhere except localhost.
rabbitmqctl delete_user guest
# ================================================================== node1, node2, node3
# Review and confirm the users and cluster status.
rabbitmqctl list_users
rabbitmqctl cluster_status
# Browser: http://node1:15672/  (admin / rabbitmq*123456)
# Browser: http://node2:15672/  (admin / rabbitmq*123456)
rabbitmqctl stop
shutdown -h now
# Take a snapshot of RabbitMQ.

Hadoop Ecosystem Build (3 nodes) - 12. RabbitMQ Configuration

Contact Us

The content source of this page is from the Internet, which doesn't represent Alibaba Cloud's opinion; products and services mentioned on this page don't have any relationship with Alibaba Cloud. If the content of the page is confusing, please write us an email; we will handle the problem within 5 days after receiving your email.

If you find any instances of plagiarism from the community, please send an email to: info-contact@alibabacloud.com and provide relevant evidence. A staff member will contact you within 5 working days.

A Free Trial That Lets You Build Big!

Start building with 50+ products and up to 12 months usage for Elastic Compute Service

  • Sales Support

    1 on 1 presale consultation

  • After-Sales Support

    24/7 Technical Support 6 Free Tickets per Quarter Faster Response

  • Alibaba Cloud offers highly flexible support services tailored to meet your exact needs.