1. 修改主机名
hostnamectl set-hostname ...
立即生效:bash
2. 关闭防火墙
systemctl stop firewalld
3. 配置hosts文件
4. 时间同步
tzselect(三台)
下载ntp: yum install -y ntp
master: vim /etc/ntp.conf
server 127.127.1.0
fudge 127.127.1.0 stratum 10
:wq
master: /bin/systemctl restart ntpd.service(重启ntp服务, 三台)
ntpdate master(三台)
5. ssh免密:
/usr/sbin/sshd
ssh-keygen
master:(本机免密)
cd /root/.ssh
cp id_rsa.pub authorized_keys
chmod 600 authorized_keys
ssh-copy-id -i /root/.ssh/id_rsa.pub master/slave1/slave2
6. jdk文件:
master:
cd /usr/java
wget+网址
解压: tar -zxvf jdk -C /usr/java
vim /etc/profile:
export JAVA_HOME=/usr/java/jdk1.8.0_171
export CLASSPATH=$JAVA_HOME/lib/
export PATH=$PATH:$JAVA_HOME/bin
export PATH JAVA_HOME CLASSPATH
source /etc/profile
java -version
scp -r /usr/java root@slave1:/usr/java
slave1&&slave2:
export JAVA_HOME=/usr/java/jdk1.8.0_171
export CLASSPATH=$JAVA_HOME/lib/
export PATH=$PATH:$JAVA_HOME/bin
export PATH JAVA_HOME CLASSPATH
7. 安装zookeeper
master:
mkdir /usr/zookeeper
cd /usr/zookeeper
wget+网址
cd /usr/zookeeper/zookeeper…
mkdir zkdata
mkdir zkdatalog
cd conf
cp zoo_sample.cfg zoo.cfg
vim zoo.cfg:
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/zookeeper/zookeeper-3.4.10/zkdata
clientPort=2181
dataLogDir=/usr/zookeeper/zookeeper-3.4.10/zkdatalog
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
:wq
cd /usr/zookeeper/zookeeper-3.4.10/zkdata
vim myid:
1
:wq
scp -r /usr/zookeeper root@slave1:/usr/zookeeper/
scp -r /usr/zookeeper root@slave2:/usr/zookeeper/
slave1:
cd /usr/zookeeper/zookeeper…/zkdata
vim myid:
2
:wq
slave2:
cd /usr/zookeeper/zookeeper…/zkdata
vim myid:
3
:wq
vim /etc/profile (三台):
export ZOOKEEPER_HOME=/usr/zookeeper/zookeeper…
export PATH=$PATH:$ZOOKEEPER_HOME/bin
:wq
source /etc/profile
启动:(三台)
cd /usr/zookeeper
bin/zkServer.sh start # 启动
bin/zkServer.sh status #查看状态
8. HADOOP安装
master:
mkdir cd wget+网址
vim /etc/profile:(三台)
export HADOOP_HOME=/usr/hadoop/hadoop…
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
export CLASSPATH=$CLASSPATH:$HADOOP_HOME/lib
:wq
source /etc/profile
cd $HADOOP_HOME/etc/hadoop
vim hadoop-env.sh
# 修改java环境变量
export JAVA_HOME=/usr/java/jdk…
:wq
vim core-site.xml
<property>
<name>fs.default.name</name>
<value>hdfs://master:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/usr/hadoop/hadoop-2.7.3/hdfs/tmp</value>
<description>A base for other temporarydirectories.</description>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
<property>
<name>fs.checkpoint.period</name>
<value>60</value>
</property>
<property>
<name>fs.checkpoint.size</name>
<value>67108864</value>
</property>
:wq
cp mapred-site.xml.template mapred-site.xml
vim mapred-site.xml
<property>
<!--指定Mapreduce运行在yarn上-->
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
:wq
vim yarn-site.xml
<!--指定ResourceManager 的地址-->
<property>
<name>yarn.resourcemanager.address</name>
<value>master:18040</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>master:18030</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>master:18088</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>master:18025</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>master:18141</value>
</property>
<!--指定reducer获取数据的方式-->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
:wq
vim hdfs-site.xml
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/usr/hadoop/hadoop-2.7.3/hdfs/name</value>
<final>true</final>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/usr/hadoop/hadoop-2.7.3/hdfs/data</value>
<final>true</final>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>master:9001</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.permissions</name>
<value>false</value>
</property>
:wq
vim slaves
slave1
slave2
:wq
vim master
master
:wq
scp -r /usr/hadoop root@slave1:/usr/hadoop
scp -r /usr/hadoop root@slave2:/usr/hadoop
namenode: hadoop namenode -format
cd /usr/hadoop/hadoop-2.7.3
sbin/start-all.sh
slave1:jps
slave2:jps
9. 安装HBASE
mkdir cd wget+…
cd /usr/hbase/hbase…/conf
vim hbase-env.sh
export HBASE_MANAGES_ZK=false
export JAVA_HOME=/usr/java/jdk1.8.0_171
export HBASE_CLASSPATH=/usr/hadoop/hadoop-2.7.3/etc/hadoop
:wq
vim hbase-site.xml
<property>
<name>hbase.rootdir</name>
<value>hdfs://master:9000/hbase</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.master</name>
<value>hdfs://master:6000</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>master,slave1,slave2</value>
</property>
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/usr/zookeeper/zookeeper-3.4.10</value>
</property>
:wq
拷贝hadoop配置:
cp /usr/hadoop/hadoop…/etc/hadoop/hdfs-site.xml /usr/hbase/hbase…/conf
cp /usr/hadoop/hadoop…/etc/hadoop/core-site.xml /usr/hbase/hbase…/conf
分发
配置环境变量:(三台)
vim /etc/profile
export HBASE_HOME=/usr/hbase/hbase…
export PATH=$PATH:$HBASE_HOME/bin
:wq
source /etc/profile
10. 安装HIVE
a. master:mkdir cd wget+… tar
scp -r /usr/hive/apache-hive… root@slave1:/usr/hive
修改环境变量:
vim /etc/profile
export HIVE_HOME=/usr/hive/apache-hive…
export PATH=$PATH:$HIVE_HOME/bin
:wq
source /etc/profile
保留高版本jline jar包
cp /usr/hive/apache-hive…/lib/jline…jar /usr/hadoop/hadoop…/share/hadoop/yarn/lib/
配置相关文件
cd /usr/hive/apache-hive…/conf
vim hive-site.xml
<configuration>
<!-- Hive产生的元数据存放位置-->
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/user/hive_remote/warehouse</value>
</property>
<!--使用本地服务连接Hive,默认为true-->
<property>
<name>hive.metastore.local</name>
<value>false</value>
</property>
<!--连接服务器-->
<property>
<name>hive.metastore.uris</name>
<value>thrift://slave1:9083</value>
</property>
</configuration>
:wq
修改hive-env.sh中HADOOP_HOME环境变量:
HADOOP_HOME=/usr/hadoop/hadoop…
等slave1配置完成启动
hive server(slave1)
bin/hive --service metastore
hive client(master)
bin/hive
测试是否启动成功:
hive>show databases
创建数据库:
hive>create database hive_db
b. slave1:
修改环境变量:
vim /etc/profile
export HIVE_HOME=/usr/hive/apache-hive…
export PATH=$PATH:$HIVE_HOME/bin
:wq
source /etc/profile
将Mysql的依赖包放在Hive的lib目录里:
cd /usr/hive/apache-hive…/lib
wget+…
作为服务器配置hive
cd $HIVE_HOME/conf
cp hive-env.sh.template hive-env.sh
vim hive-env.sh
HADOOP_HOME=/usr/hadoop/hadoop…
:wq
vim hive-site.xml
<configuration>
<!-- Hive产生的元数据存放位置-->
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/user/hive_remote/warehouse</value>
</property>
<!--数据库连接JDBC的URL地址-->
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://slave2:3306/hive?createDatabaseIfNotExist=true</value>
</property>
<!--数据库连接driver,即 MySQL驱动-->
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<!-- MySQL数据库用户名-->
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
</property>
<!-- MySQL数据库密码-->
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>123456</value>
</property>
<property>
<name>hive.metastore.schema.verification</name>
<value>false</value>
</property>
<property>
<name>datanucleus.schema.autoCreateAll</name>
<value>true</value>
</property>
</configuration>
:wq
hive server(slave1)
bin/hive --service metastore
hive client(master)
bin/hive
c. slave2安装MySQL server
yum -y install mysql-community-server
2.启动服务
重载所有修改过的配置文件: systemctl daemon-reload
开启服务: systemctl start mysqld
开机自启: systemctl enable mysqld
3.登陆 MySQL
安装完毕后,MySQL会在/var/log/mysqld.log这个文件中会自动生成一个随机的密码,获取
得这个随机密码,以用于登录MySQL数据库:
获取初始密码:grep "temporary password" /var/log/mysqld.log
登陆MySQL:mysql -uroot -p(注意中英文)
4.MySQL密码安全策略设置
设置密码强度为低级:set global validate_password_policy=0;
设置密码长度: set global validate_password_length=4;
修改本地密码: alter user 'root'@'localhost' identified by '123456';
退出: \q
linux分布式搭建 java hadoop zookeeper mysql hive
最新推荐文章于 2022-12-27 21:29:36 发布