1.重命名 配置hosts
# --- On the master node: rename it, then edit /etc/hosts ---
hostnamectl set-hostname master
# 'bash' opens a new shell so the prompt shows the new hostname
bash
vi /etc/hosts
# --- On slave1 ---
hostnamectl set-hostname slave1
bash
vi /etc/hosts
# --- On slave2 ---
hostnamectl set-hostname slave2
bash
vi /etc/hosts
输入以下内容(将 IP 替换为各节点的实际 IP 地址)
IP master
IP slave1
IP slave2
2.设置时间
tzselect
依次输入 5、9、1、1(分别对应 Asia → China → Beijing Time → 确认;具体编号以 tzselect 菜单实际显示为准)
写入
# Environment appended to /etc/profile for the whole cluster stack.
# Fixes vs. the original notes:
#  - all *_HOME variables are defined BEFORE they are used in PATH
#    (the original referenced $HADOOP_HOME in PATH before defining it);
#  - the original 'export PATH=SCALA_HOME/bin' was missing '$' AND discarded
#    the existing PATH, which would make every command unavailable;
#  - CLASSPATH was assigned twice (JAVA then SCALA), the second assignment
#    silently overwrote the first — they are merged into one value here.
TZ='Asia/Shanghai'; export TZ

export JAVA_HOME=/usr/java/jdk1.8.0_221
export HADOOP_HOME=/usr/hadoop/hadoop-2.7.7
export HIVE_HOME=/usr/hive/apache-hive-2.3.4-bin
export ZOOKEEPER_HOME=/usr/zookeeper/zookeeper-3.4.14
export SCALA_HOME=/usr/scala/scala-2.11.11
export HBASE_HOME=/usr/hbase/hbase-1.2.4
export SPARK_HOME=/usr/spark/spark-2.4.3-bin-hadoop2.7

# Merged classpath: JDK libs first, then Scala libs
export CLASSPATH=$JAVA_HOME/lib/:$SCALA_HOME/lib/

# Append every component's bin (and Hadoop's sbin) to the existing PATH
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HIVE_HOME/bin:$ZOOKEEPER_HOME/bin:$SCALA_HOME/bin:$HBASE_HOME/bin:$SPARK_HOME/bin
生效
# Apply the new environment variables to the current shell session
source /etc/profile
3.启动ntp
vim /etc/ntp.conf
跳转到第 21 行,删除该处原有的 4 行默认 server 配置
写入
# Use the local clock (127.127.1.0 pseudo-address) as the reference,
# so slaves can sync from master even without internet access
server 127.127.1.0
# stratum 10 = low priority; only used when no better time source exists
fudge 127.127.1.0 stratum 10
退出并启动
# Restart ntpd so the new server/fudge lines take effect
systemctl restart ntpd.service
4.制作定时任务在slave1和slave2中
# Open this user's crontab for editing (on slave1 and slave2)
crontab -e
写入
# Every 30 minutes between 10:00 and 17:59, sync this node's clock from master
*/30 10-17 * * * /usr/sbin/ntpdate master
5.配置免密登录
# Generate an RSA key pair (press Enter at every prompt for defaults / empty passphrase)
ssh-keygen
# Copy the public key to each node. The bare 'yes' lines below are answers
# typed at the interactive host-key confirmation prompt, not shell commands.
ssh-copy-id -i .ssh/id_rsa.pub master
yes
ssh-copy-id -i .ssh/id_rsa.pub localhost
ssh-copy-id -i .ssh/id_rsa.pub slave1
yes
ssh-copy-id -i .ssh/id_rsa.pub slave2
yes
6.执行以下内容
master中执行
# On master: push the updated /etc/profile to both slaves
scp /etc/profile root@slave1:/etc/
scp /etc/profile root@slave2:/etc/
在slave1,slave2中生效
# On slave1 and slave2: load the distributed environment variables
source /etc/profile
四 HIVE
4.1
1.在slave2中
# NOTE(review): 'disable' only turns off autostart at boot; mysqld is still
# started manually below — confirm 'disable' (vs. 'enable') is intended.
systemctl disable mysqld
systemctl start mysqld
# MySQL 5.7 writes a random initial root password to the log on first start
grep "temporary password" /var/log/mysqld.log
# Log in with that temporary password
mysql -uroot -p
输入密码进入mysql
写入
-- Relax password validation so a short, simple password is accepted
set global validate_password_policy=0;
set global validate_password_length=4;
-- Set the root password used later in hive-site.xml
alter user 'root'@'localhost' identified by '123456';
4.2
1.安装配置环境
# Create the install directory and unpack Hive 2.3.4 into it
mkdir -p /usr/hive
tar -zxvf /usr/package277/apache-hive-2.3.4-bin.tar.gz -C /usr/hive/
2.修改环境
# NOTE(review): hive-env.sh may need to be created from hive-env.sh.template first
cd $HIVE_HOME/conf && vim hive-env.sh
写入
# Point Hive at the Hadoop install, Hive's own conf dir, and its aux-jar dir
export HADOOP_HOME=/usr/hadoop/hadoop-2.7.7
export HIVE_CONF_DIR=/usr/hive/apache-hive-2.3.4-bin/conf
export HIVE_AUX_JARS_PATH=/usr/hive/apache-hive-2.3.4-bin/lib
3.在master中
# Copy Hive's jline-2.12 into YARN's lib dir so Hive and Hadoop use the same
# jline version (presumably to avoid the known jline conflict — verify)
cp $HIVE_HOME/lib/jline-2.12.jar $HADOOP_HOME/share/hadoop/yarn/lib/
# Ship the whole Hive install to slave1, which will run the metastore service
scp -r /usr/hive root@slave1:/usr/
scp $HIVE_HOME/lib/jline-2.12.jar root@slave1:$HADOOP_HOME/share/hadoop/yarn/lib/
4.3
在slave1中
# JDBC driver Hive needs to reach the MySQL metastore database on slave2
cp /usr/package277/mysql-connector-java-5.1.47-bin.jar /usr/hive/apache-hive-2.3.4-bin/lib/
cd $HIVE_HOME/conf && vim hive-site.xml
空白添加
<configuration>
  <!-- HDFS directory where Hive-managed table data is stored -->
  <property>
    <name>hive.metastore.warehouse.dir</name>
    <value>/user/hive_remote/warehouse</value>
  </property>
  <!-- JDBC driver class for the MySQL-backed metastore -->
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <!-- FIX: a raw '&' is illegal in XML and makes Hive fail to parse this
       file; it must be escaped as '&amp;' -->
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://slave2:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false</value>
  </property>
  <!-- Credentials set earlier on slave2 ('alter user ... identified by') -->
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>root</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>123456</value>
  </property>
</configuration>
4.4 在master中
# On master: create the client-side hive-site.xml
cd $HIVE_HOME/conf && vim hive-site.xml
空白添加
<configuration>
<!-- Warehouse path; matches the value configured on slave1 -->
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/user/hive_remote/warehouse</value>
</property>
<!-- This node acts as a metastore client, not a local metastore -->
<property>
<name>hive.metastore.local</name>
<value>false</value>
</property>
<!-- Connect to the metastore service on slave1 (thrift port 9083) -->
<property>
<name>hive.metastore.uris</name>
<value>thrift://slave1:9083</value>
</property>
</configuration>
4.5
# Initialize the metastore schema in MySQL.
# NOTE(review): run on slave1 — the node whose hive-site.xml holds the JDBC
# connection settings — confirm against the deployment plan.
schematool -dbType mysql -initSchema