大数据环境搭建

大数据环境搭建

一、虚拟机配置

1、关闭防火墙
# 把SELINUX的值设为disabled
[root@master ~]# vim /etc/selinux/config
SELINUX=disabled

停止firewall
[root@master ~]# systemctl stop firewalld.service

禁止firewall开机启动
[root@master ~]# systemctl disable firewalld.service 
2、配置节点网络
[root@master ~]# vi /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=6eb68369-bc2b-4489-aa2a-98c0de02e445
DEVICE=ens33
ONBOOT=yes
IPADDR=192.168.128.160
GATEWAY=192.168.128.2
NETMASK=255.255.255.0
DNS1=8.8.8.8
3、重启网络
service network restart
4、安装软件
yum install vim zip openssh-server openssh-clients
5、修改每个节点主机名,添加各节点映射
# 在其他两个子节点的hostname处分别填slave1和slave2
[root@master ~]# vim /etc/hostname
master

[root@master ~]# vim /etc/hosts
192.168.128.160 master
192.168.128.161 slave1
192.168.128.162 slave2
#三个节点都要做
6、配置节点间ssh免密登录
#生成公钥与私钥对
[root@master ~]# ssh-keygen -t rsa
# 执行上面这条命令后,连续敲三次回车(全部使用默认值即可)
(注:如果提示 not found,可能是ssh没有安装,输入
    yum install openssh-clients安装openssh)
   
# 拷贝本密钥到三个节点上
[root@master ~]# ssh-copy-id -i /root/.ssh/id_rsa.pub master
[root@master ~]# ssh-copy-id -i /root/.ssh/id_rsa.pub slave1
[root@master ~]# ssh-copy-id -i /root/.ssh/id_rsa.pub slave2

# master节点上做完后,再在其他两个节点上重复上述操作
7、配置时间同步服务

​ 1)检查ntp服务是否安装

[root@master ~]# rpm -qa|grep ntp
ntp-4.2.6p5-29.el7.centos.2.x86_64
ntpdate-4.2.6p5-29.el7.centos.2.x86_64

没有安装就yum -y install ntp安装 

​ 2)修改ntp配置文件


[root@master ~]# vi /etc/ntp.conf

修改文件如下
将下面这一行的注释去掉(授权该网段上的机器可以从本机查询和同步时间)
#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
改为
restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
(注:网段应与实际集群网段一致,本文集群在192.168.128.0网段,应相应写成192.168.128.0)

将下面几行注释掉(集群在局域网中,不使用互联网上的时间服务器)
server 0.centos.pool.ntp.org iburst
server 1.centos.pool.ntp.org iburst
server 2.centos.pool.ntp.org iburst
server 3.centos.pool.ntp.org iburst
改为
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst

添加
server 127.127.1.0
fudge 127.127.1.0 stratum 10

​ 3)修改/etc/sysconfig/ntpd文件

[root@master ~]# vim /etc/sysconfig/ntpd
添加
SYNC_HWCLOCK=yes

​ 4)重启ntpd服务

[root@master ~]# service ntpd status
[root@master ~]# service ntpd start

​ 5)设置ntpd服务开机启动

[root@master ~]# chkconfig ntpd on

​ 6)其他节点配置

#在其他机器配置10分钟与时间服务器同步一次
[root@slave1 ~]# crontab -e

编写定时任务如下:

*/10 * * * * /usr/sbin/ntpdate master
8、安装Java
# 我们所有的环境配置包都放到/usr/local/src/software下
[root@master ~]# cd /usr/local/src/software/
[root@master software]# tar -zxvf jdk-8u121-linux-x64.gz -C /usr/local/src/
# 配置环境变量,在profile文件最后添加java的环境变量
[root@master software]# vim /root/.bash_profile 
#java
export JAVA_HOME=/usr/local/src/jdk1.8.0_121
export PATH=$PATH:$JAVA_HOME/bin

[root@master software]# source /root/.bash_profile
[root@master software]# java -version
java version "1.8.0_121"
Java(TM) SE Runtime Environment (build 1.8.0_121-b13)
Java HotSpot(TM) 64-Bit Server VM (build 25.121-b13, mixed mode)

# 在其他两个节点上重复上述操作
# 或者也可以发送jdk到其他两台节点上
[root@master src]# scp -r jdk1.8.0_121/ root@slave1:/usr/local/src/
[root@master src]# scp -r jdk1.8.0_121/ root@slave2:/usr/local/src/
9、安装Scala
#解压
[root@master ~]# cd /usr/local/src/software/
[root@master software]# tar -zxvf scala-2.11.11.tgz -C /usr/local/src/

#配置环境变量
export SCALA_HOME=/usr/local/src/scala-2.11.11
export PATH=$PATH:$SCALA_HOME/bin

二、安装Hadoop

1、解压
[root@master software]# tar -zxvf hadoop-2.6.0.tar.gz -C /usr/local/src/
2、配置环境变量
[root@master software]# vim /root/.bash_profile
#hadoop
export HADOOP_HOME=/usr/local/src/hadoop-2.6.0
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
[root@master software]# source /root/.bash_profile
3、修改配置文件
1)core-site.xml
<configuration>

<property>
<name>fs.defaultFS</name>
<value>hdfs://master:9000</value>
</property>

<property>
<name>hadoop.tmp.dir</name>
<value>/usr/local/src/hadoop-2.6.0/tmp</value>
</property>

</configuration>
2)hdfs-site.xml
<configuration>

<property>
<name>dfs.replication</name>
<value>3</value>
</property>

<property>
<name>dfs.namenode.secondary.http-address</name>
<value>slave1:50090</value>
</property>

<property>
<name>dfs.data.dir</name>
<value>/usr/local/src/hadoop-2.6.0/tmp/data</value>
</property>

<property>
<name>dfs.name.dir</name>
<value>/usr/local/src/hadoop-2.6.0/tmp/name</value>
</property>

<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>

</configuration>
3)yarn-site.xml
<configuration>

<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>

<property>
<name>yarn.resourcemanager.hostname</name>
<value>master</value>
</property>

</configuration>
4)mapred-site.xml
<configuration>

<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>

<property>
<name>mapreduce.jobhistory.address</name>
<value>master:10020</value>
</property>

<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>master:19888</value>
</property>

</configuration>                        
5)hadoop-env.sh
export JAVA_HOME=/usr/local/src/jdk1.8.0_121
6)yarn-env.sh
export JAVA_HOME=/usr/local/src/jdk1.8.0_121
7)slaves
master
slave1
slave2
4、分发文件
[root@master src]# scp -r hadoop-2.6.0/ slave1:/usr/local/src/
[root@master src]# scp -r hadoop-2.6.0/ slave2:/usr/local/src
5、格式化namenode
[root@master hadoop-2.6.0]# bin/hdfs namenode -format

三、安装zookeeper

1、解压
[root@master software]# tar -zxvf zookeeper-3.4.5.tar.gz -C /usr/local/src/
[root@master software]# cd ..
[root@master src]# mv zookeeper-3.4.5/ zookeeper/
2、配置环境变量
[root@master software]# vim /root/.bash_profile
#zookeeper
export ZOOKEEPER_HOME=/usr/local/src/zookeeper
export PATH=$PATH:$ZOOKEEPER_HOME/bin

[root@master software]# source /root/.bash_profile

# 在其他两个节点上重复上述操作
3、修改配置文件
我们在zookeeper的目录下能看到一个conf的文件夹,进入把zoo_sample.cfg重命名

mv zoo_sample.cfg zoo.cfg
然后进入zoo.cfg



vim zoo.cfg
在文件中修改datadir的路径和添加server的配置
如下:
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial 
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between 
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just 
# example sakes.
# 1修改dataDir的路径(与后面创建myid文件的目录保持一致)
dataDir=/usr/local/src/zookeeper/data

# the port at which the clients will connect
clientPort=2181
#
# Be sure to read the maintenance section of the 
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
# 2添加
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888


在dataDir指定的目录(/usr/local/src/zookeeper/data)下创建myid文件,写入本机的server编号(master写1,slave1写2,slave2写3),如:
[root@master ~]# mkdir -p /usr/local/src/zookeeper/data
[root@master ~]# cd /usr/local/src/zookeeper/data/
[root@master data]# touch myid
[root@master data]# echo 1 > myid

四、配置Hadoop高可用

1、core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://myhadoop</value>
</property>

<property>
<name>hadoop.tmp.dir</name>
<value>/usr/local/src/hadoop-2.6.0/tmp</value>
</property>

<property>
<name>ha.zookeeper.quorum</name>
<value>master:2181,slave1:2181,slave2:2181</value>
</property>

</configuration>
2、hdfs-site.xml
<configuration>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>

<property>
<name>dfs.name.dir</name>
<value>/usr/local/src/hadoop-2.6.0/tmp/name</value>
</property>

<property>
<name>dfs.data.dir</name>
<value>/usr/local/src/hadoop-2.6.0/tmp/data</value>
</property>


<property>
<name>dfs.nameservices</name>
<value>myhadoop</value>
</property>

<property>
<name>dfs.ha.namenodes.myhadoop</name>
<value>nn1,nn2</value>
</property>

<property>
<name>dfs.namenode.rpc-address.myhadoop.nn1</name>
<value>master:9000</value>
</property>

<property>
<name>dfs.namenode.rpc-address.myhadoop.nn2</name>
<value>slave1:9000</value>
</property>

<property>
<name>dfs.namenode.http-address.myhadoop.nn1</name>
<value>master:50070</value>
</property>

<property>
<name>dfs.namenode.http-address.myhadoop.nn2</name>
<value>slave1:50070</value>
</property>

<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://master:8485;slave1:8485;slave2:8485/myhadoop</value>
</property>

<property>
<name>dfs.journalnode.edits.dir</name>
<value>/usr/local/src/hadoop-2.6.0/tmp/jn</value>
</property>

<property>
<name>dfs.ha.fencing.methods</name>
<value>shell(/bin/true)</value>
</property>

<property>
<name>dfs.ha.fencing.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>

<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>

<property>
<name>dfs.client.failover.proxy.provider.myhadoop</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>

<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>

</configuration>

3、yarn-site.xml
<configuration>

<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>

<!--启用resourcemanager ha-->
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>

<!--声明两台resourcemanager的地址-->
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>cluster-yarn1</value>
</property>

<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>

<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>master</value>
</property>

<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>slave1</value>
</property>

<!--指定zookeeper集群的地址-->
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>master:2181,slave1:2181,slave2:2181</value>
</property>

 <!--启用自动恢复-->
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>

<!--指定resourcemanager的状态信息存储在zookeeper集群-->
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>

</configuration>
4、mapred-site.xml
<configuration>

<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>

<property>
<name>mapreduce.jobhistory.address</name>
<value>master:10020</value>
</property>

<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>master:19888</value>
</property>

</configuration>                        
5、hadoop-env.sh
export JAVA_HOME=/usr/local/src/jdk1.8.0_121
6、yarn-env.sh
export JAVA_HOME=/usr/local/src/jdk1.8.0_121
7、slaves
master
slave1
slave2
格式化namenode
[root@master hadoop-2.6.0]# bin/hdfs namenode -format
启动namenode
sbin/hadoop-daemon.sh start namenode
启动成功后,在slave1上进行元数据的同步
bin/hdfs namenode -bootstrapStandby
把master上的NameNode关闭,关闭master上的NameNode后,我们还需要初始化zookeeper的状态
bin/hdfs zkfc -formatZK
启动所有节点
sbin/start-all.sh
启动jobhistoryserver
sbin/mr-jobhistory-daemon.sh start historyserver 
在slave1上启动resourcemanager
sbin/yarn-daemon.sh start resourcemanager

四、配置Hadoop3.x

1、core-site.xml
<configuration>

<property>
       <name>fs.defaultFS</name>
       <value>hdfs://master:9000</value>
</property>

<property>
       <name>hadoop.tmp.dir</name>
       <value>/usr/local/src/hadoop-3.1.3/tmp</value>
</property>

</configuration>
2、hdfs-site.xml
<configuration>

<property>
<name>dfs.replication</name>
<value>2</value>
</property>

<property>
<name>dfs.namenode.name.dir</name>
<value>/usr/local/src/hadoop-3.1.3/tmp/name</value>
</property>

<property>
<name>dfs.datanode.data.dir</name>
<value>/usr/local/src/hadoop-3.1.3/tmp/data</value>
</property>
    
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>master:9001</value>
</property>

<property>
<name>dfs.http.address</name>
<value>master:50070</value>
</property>

</configuration>
3、yarn-site.xml
<configuration>

<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>

<property>
<name>yarn.resourcemanager.address</name>
<value>master:8032</value>
</property>

<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>master:8030</value>
</property>

<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>master:8031</value>
</property>

<property>
<name>yarn.resourcemanager.admin.address</name>
<value>master:8033</value>
</property>

<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>master:8088</value>
</property>

</configuration>
4、mapred-site.xml
<configuration>

<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>

<property>
<name>mapred.job.tracker.http.address</name>
<value>master:50030</value>
</property>

<property>
<name>mapred.task.tracker.http.address</name>
<value>master:50060</value>
</property>

<property>
<name>mapreduce.application.classpath</name>
<value>
/usr/local/src/hadoop-3.1.3/etc/hadoop,
/usr/local/src/hadoop-3.1.3/share/hadoop/common/*,
/usr/local/src/hadoop-3.1.3/share/hadoop/common/lib/*,
/usr/local/src/hadoop-3.1.3/share/hadoop/hdfs/*,
/usr/local/src/hadoop-3.1.3/share/hadoop/hdfs/lib/*,
/usr/local/src/hadoop-3.1.3/share/hadoop/mapreduce/*,
/usr/local/src/hadoop-3.1.3/share/hadoop/mapreduce/lib/*,
/usr/local/src/hadoop-3.1.3/share/hadoop/yarn/*,
/usr/local/src/hadoop-3.1.3/share/hadoop/yarn/lib/*
</value>
</property>

</configuration>
5、hadoop-env.sh
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_JOURNALNODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export JAVA_HOME=/usr/local/src/jdk1.8.0_121
6、workers
slave1
slave2
格式化namenode
[root@master hadoop-3.1.3]# bin/hdfs namenode -format

五、hive安装

1、解压
# 注意:Hive只需要在master节点上安装配置

[root@master ~]# cd /usr/local/src/software/
[root@master software]# tar -zxvf apache-hive-1.2.1-bin.tar.gz -C /usr/local/src/
[root@master software]# cd ..
[root@master src]# mv apache-hive-1.2.1-bin/ hive
2、安装MySQL

hive它有自己的内置数据库derby,但是hive 使用derby 数据库存在不支持多个连接的问题,所以我们一般会使用mysql来代替hive的元数据库

2_1、卸载Linux自带的MariaDB
[root@master ~]# rpm -qa | grep mariadb
mariadb-libs-5.5.52-2.el7.x86_64
然后卸载
[root@master ~]# rpm -e --nodeps mariadb-libs-5.5.52-2.el7.x86_64
2_2、安装MySQL
[root@master 5.7.18]#rpm -ivh mysql-community-common-5.7.18-1.el7.x86_64.rpm 

[root@master 5.7.18]# rpm -ivh mysql-community-libs-5.7.18-1.el7.x86_64.rpm
[root@master 5.7.18]# rpm -ivh mysql-community-client-5.7.18-1.el7.x86_64.rpm
[root@master 5.7.18]# rpm -ivh mysql-community-server-5.7.18-1.el7.x86_64.rpm

安装之后查看安装情况
[root@master 5.7.18]# rpm -qa | grep mysql
mysql-community-common-5.7.18-1.el7.x86_64
mysql-community-client-5.7.18-1.el7.x86_64
mysql-community-libs-5.7.18-1.el7.x86_64
mysql-community-server-5.7.18-1.el7.x86_64
2_3、获取初始密码

初始密码在/var/log/mysqld.log这个文件里
输入命令:grep 'temporary password' /var/log/mysqld.log,可以直接获取密码。(注:密码为冒号后面的所有字符!)

2_4、修改MySQL密码
2_4_1、vim /etc/my.cnf
#skip-grant-tables
2_4_2设置密码安全策略,密码强度为low

set global validate_password_policy=0;

2_4_3设置密码安全策略,密码长度为4

set global validate_password_length=4;

2_4_4、设置MySQL本地root密码为123456

alter user 'root'@'localhost' identified by '123456';

	设置所有客户都可以连接数据库
	update mysql.user set host='%' where user='root';

	create user 'root'@'%' identified by '123456';
	grant all privileges on *.* to 'root'@'%' with grant option;
2_4_5、进入MySQL的客户端然后进行授权

grant all privileges on *.* to 'root'@'%' identified by '123456' with grant option;

2_4_6、刷新

flush privileges;

3、复制mysql的驱动包到hive的lib目录下。
同时我们还需要去网上下载mysql的驱动包(mysql-connector-java-5.1.26.jar)把这个驱动包放置在hive目录下的lib目录下。
4、hive-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>

<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://master:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false</value>
<description>JDBC connect string for a JDBC metastore</description>
</property>

<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
<description>Driver class name for a JDBC metastore</description>
</property>

<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
<description>username to use against metastore database</description>
</property>

<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>123456(填写mysql的用户密码,这里是root用户的密码)</value>
</property>

<property>
<name>hive.exec.scratchdir</name>
<value>/usr/local/src/hive/tmp</value>
</property>

<property>
<name>hive.exec.local.scratchdir</name>
<value>/usr/local/src/hive/tmp/local</value>
</property>

<property>
<name>hive.downloaded.resources.dir</name>
<value>/usr/local/src/hive/tmp/resources</value>
</property>

</configuration>
5、hive-env.sh
export HADOOP_HOME=/usr/local/src/hadoop-2.6.0
export JAVA_HOME=/usr/local/src/jdk1.8.0_121
6、初始化hive元数据库(mysql)
[root@master hive]# schematool -initSchema -dbType mysql
7、解决jline-0.9.94.jar包冲突
如果没成功的话.可能是hadoop目录下的**/share/hadoop/yarn/lib/** 中的jline-0.9.94.jar文件太老了,我们需要将hive的lib中的jline-2.12.jar放到hadoop的/share/hadoop/yarn/lib/中,同时将老版本的jline删除

另外注:如果hive的初始化失败,可能是在mysql中没赋权的原因,需要在mysql中配置

mysql > GRANT ALL PRIVILEGES ON *.* TO 'root'@'%'IDENTIFIED BY '123456' WITH GRANT OPTION;

六、安装spark

1、解压
[root@master ~]# cd /usr/local/src/software/
[root@master software]# tar -zxvf spark-2.0.0-bin-hadoop2.6.tgz -C /usr/local/src/
2、修改配置文件(搭建HA)
2_1、spark-env.sh
[root@master spark-2.0.0]# cd conf/
[root@master conf]# mv spark-env.sh.template spark-env.sh
[root@master conf]# vim spark-env.sh 

export JAVA_HOME=/usr/local/src/jdk1.8.0_121
export SPARK_MASTER_PORT=7077
export HADOOP_HOME=/usr/local/src/hadoop-2.6.0
export HADOOP_CONF_DIR=/usr/local/src/hadoop-2.6.0/etc/hadoop
export SCALA_HOME=/usr/local/src/scala-2.11.11
export SPARK_DAEMON_JAVA_OPTS="-Dspark.deploy.recoveryMode=ZOOKEEPER -Dspark.deploy.zookeeper.url=master:2181,slave1:2181,slave2:2181 -Dspark.deploy.zookeeper.dir=/spark"
2_2、slaves
[root@master conf]# mv slaves.template slaves
[root@master conf]# vim slaves

master
slave1
slave2

七、安装hbase

1、解压
[root@master ~]# cd /usr/local/src/software/
[root@master software]# tar -zxvf hbase-1.2.0-bin.tar.gz -C /usr/local/src/
2、修改配置文件
2_1、hbase-env.sh
export JAVA_HOME=/usr/local/src/jdk1.8.0_121
export HBASE_LOG_DIR=${HBASE_HOME}/logs
export HBASE_MANAGES_ZK=false
export HBASE_PID_DIR=/usr/local/src/hbase-1.2.0/pid
2_2、 hbase-site.xml
<configuration>
<property>
    <name>hbase.rootdir</name>
    <value>hdfs://master:9000/hbase</value>
  </property>
    
<property> 
    <name>hbase.cluster.distributed</name> 
    <value>true</value>
  </property>
  
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>master,slave1,slave2</value>
  </property>
  
  <property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>/usr/local/src/zookeeper/data</value>
  </property>
  
  <property>
    <name>hbase.tmp.dir</name>
    <value>/usr/local/src/hbase-1.2.0/data</value>
  </property>
  
  <property>
    <name>hbase.master</name>
    <value>hdfs://master:60000</value>
  </property>
  
  <property>
    <name>hbase.master.info.port</name>
    <value>16010</value>
  </property>
  
  <property>
    <name>hbase.regionserver.info.port</name>
    <value>16030</value>
  </property>
</configuration>
2_3 、regionservers
master
slave1
slave2

八、安装sqoop

1、解压
# 注意:Sqoop只需要在master节点上安装配置

[root@master ~]# cd /usr/local/src/software/
[root@master software]# tar -zxvf sqoop-1.4.6.bin__hadoop-2.0.4-alpha.tar.gz -C /usr/local/src/
[root@master src]# mv sqoop-1.4.6.bin__hadoop-2.0.4-alpha/ sqoop-1.4.6
2、修改配置文件,sqoop-env.sh
[root@master sqoop-1.4.6]# cd conf/
[root@master conf]# mv sqoop-env-template.sh sqoop-env.sh 

[root@master conf]# vim sqoop-env.sh 

#Set path to where bin/hadoop is available
export HADOOP_COMMON_HOME=/usr/local/src/hadoop-2.6.0

#Set path to where hadoop-*-core.jar is available
export HADOOP_MAPRED_HOME=/usr/local/src/hadoop-2.6.0

#set the path to where bin/hbase is available
export HBASE_HOME=/usr/local/src/hbase-1.2.0

#Set the path to where bin/hive is available
export HIVE_HOME=/usr/local/src/hive

#Set the path for where zookeper config dir is
export ZOOCFGDIR=/usr/local/src/zookeeper/conf
3、复制mysql的驱动包到sqoop的lib目录下

在修改完配置文件后,把mysql的jar包上传的sqoop的lib目录下

也就是在安装hive时使用的jar包

九、安装storm

1、解压
[root@master src]# cd software/
[root@master software]# tar -zxvf apache-storm-1.0.4.tar.gz -C /usr/local/src/
[root@master software]# cd ..
[root@master src]# mv apache-storm-1.0.4/ storm-1.0.4/
2、修改配置文件,storm.yaml
[root@master storm-1.0.4]# cd conf/
[root@master conf]# vim storm.yaml 

storm.zookeeper.servers:
     - "master"
     - "slave1"
     - "slave2"
storm.local.dir: "/usr/local/src/storm-1.0.4/localdir"
storm.zookeeper.port: 2181
nimbus.seeds: ["master"]
ui.host: 0.0.0.0
ui.port: 8080
supervisor.slots.ports:
      - 6700
      - 6701
      - 6702

storm.zookeeper.servers这个参数中写的master,slave1,slave2和zookpeeper配置文件中写的必须一致

storm.local.dir: 这个参数是storm使用的本地文件系统目录(必须存在并且storm进程可读写)

这个可以自己配置,自己选择的文件目录

提一下,细心的读者可能这里发现了ui.port端口是8080端口,而上文中spark的网页端默认端口也是8080,两者会冲突,这里我们可以修改spark的网页端口,可以参考这篇文章https://www.iteye.com/blog/daizj-2227382

我在这里把之前的spark端口改为8081

3、创建本地文件系统目录
[root@master storm-1.0.4]# mkdir localdir

4、

5、启动

#master上启动nimbus和ui进程
[root@master storm-1.0.4]# storm nimbus &
[1] 8089
[root@master storm-1.0.4]#  storm ui &
[2] 8126
[root@master storm-1.0.4]# 
#在slave1和slave2下启动supervisor进程

[root@slave1 storm-1.0.4]#  storm supervisor &
[1] 4865


[root@slave2 storm-1.0.4]# storm supervisor &
[1] 3555

十、安装kafka

1、解压
[root@master ~]# cd /usr/local/src/software/
[root@master software]# tar -zxvf kafka_2.11-1.0.0.tgz  -C /usr/local/src/
2、修改配置文件
[root@master kafka_2.11-1.0.0]# cd config/
[root@master config]# vim server.properties 

broker.id=0
listeners=PLAINTEXT://192.168.128.160:9092
advertised.listeners=PLAINTEXT://192.168.128.160:9092
zookeeper.connect=192.168.128.160:2181,192.168.128.161:2181,192.168.128.162:2181



# 而在另外两个节点上,对server.properties要有几处修改
# broker.id 分别修改成: 1 和 2
# listeners 在ip那里分别修改成子节点对应的,即 PLAINTEXT://192.168.128.161:9092 和 PLAINTEXT://192.168.128.162:9092
# advertised.listeners 也在ip那里分别修改成子节点对应的,即 PLAINTEXT://192.168.128.161:9092 和 PLAINTEXT://192.168.128.162:9092
# zookeeper.connect 不需要修改

十一、安装flume

1、解压
[root@master software]# tar -zxvf apache-flume-1.6.0-bin.tar.gz -C /usr/local/src/
[root@master src]# mv apache-flume-1.6.0-bin flume-1.6.0
2、修改配置文件flume-env.sh
[root@master flume-1.6.0]# cd conf/
[root@master conf]# mv flume-env.sh.template flume-env.sh
[root@master conf]# vim flume-env.sh

export JAVA_HOME=/usr/local/src/jdk1.8.0_121