Hadoop + Spark + Scala + ZooKeeper + HBase

Set the hostname

vim /etc/sysconfig/network
HOSTNAME=master

Copy the file from master to each slave, then change the name to the slave's hostname

scp -r /etc/sysconfig/network root@slave1:/etc/sysconfig/network

Set the slave's hostname

hostnamectl set-hostname slave1
reboot -f (reboot so the new hostname takes effect)

Add LAN name mappings for master (master node) and slave1 (slave node)

vim /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.136.133 master
192.168.136.134 slave1
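
After saving, it is worth confirming that each node can resolve and reach the other; a quick check (not part of the original steps):

-- on master
ping -c 1 slave1
-- on slave1
ping -c 1 master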

JDK

Download the JDK

Download JDK 1.8

Remove the bundled OpenJDK; the Oracle JDK will be installed in its place.

List the installed OpenJDK packages with rpm

[root@master hadoop]# rpm -qa |grep java
java-1.8.0-openjdk-headless-1.8.0.242.b08-1.el7.x86_64
javapackages-tools-3.4.1-11.el7.noarch
java-1.8.0-openjdk-1.8.0.242.b08-1.el7.x86_64
python-javapackages-3.4.1-11.el7.noarch
tzdata-java-2019c-1.el7.noarch

Remove each of them with rpm -e --nodeps <package-name>

rpm -e --nodeps java-1.8.0-openjdk-headless-1.8.0.242.b08-1.el7.x86_64
rpm -e --nodeps java-1.8.0-openjdk-1.8.0.242.b08-1.el7.x86_64

After removing OpenJDK

[root@master hadoop]# rpm -qa |grep java
javapackages-tools-3.4.1-11.el7.noarch
python-javapackages-3.4.1-11.el7.noarch
tzdata-java-2019c-1.el7.noarch

Install the JDK (Oracle)

Transfer the archive to the Linux host with Xftp

mkdir /opt/jdk
cd /opt/jdk

-- upload the installation package

tar -zxvf jdk-8u261-linux-x64.tar.gz
mv jdk1.8.0_261 jdk1.8

Configure environment variables

vim /etc/profile
-- append at the bottom of the file
export JAVA_HOME=/opt/jdk/jdk1.8
export CLASSPATH=${JAVA_HOME}/lib/tools.jar:${JAVA_HOME}/lib/dt.jar
export PATH=${PATH}:${JAVA_HOME}/bin

Reload the configuration

source /etc/profile

Test with java -version

[root@master jdk]# java -version
java version "1.8.0_261"
Java(TM) SE Runtime Environment (build 1.8.0_261-b12)
Java HotSpot(TM) 64-Bit Server VM (build 25.261-b12, mixed mode)

Have every new terminal reload the configuration automatically

vim /etc/bashrc
-- append at the bottom of the file
source /etc/profile
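
To confirm that a brand-new shell really picks the variables up, one quick check (this assumes the stock CentOS ~/.bashrc, which sources /etc/bashrc):

bash -ic 'echo ${JAVA_HOME}'
-- should print /opt/jdk/jdk1.8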

Hadoop

Download Hadoop

Download Hadoop 2.10.1

Install Hadoop

mkdir /opt/hadoop
cd /opt/hadoop

-- upload the installation package

tar -zxvf hadoop-2.10.1.tar.gz

Rename the Hadoop installation directory

mv hadoop-2.10.1 hadoop2.10

Configure environment variables

vim /etc/profile
-- append at the bottom of the file
export HADOOP_HOME=/opt/hadoop/hadoop2.10
export PATH=${PATH}:${JAVA_HOME}/bin:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin

Reload the configuration

source /etc/profile

Test with hadoop version

[root@master dbt]# hadoop version
Hadoop 2.10.1
Subversion https://github.com/apache/hadoop -r 1827467c9a56f133025f28557bfc2c562d78e816
Compiled by centos on 2020-09-14T13:17Z
Compiled with protoc 2.5.0
From source with checksum 3114edef868f1f3824e7d0f68be03650
This command was run using /opt/hadoop/hadoop2.10/share/hadoop/common/hadoop-common-2.10.1.jar

Edit the Hadoop configuration files

source /etc/profile
cd ${HADOOP_HOME}/etc/hadoop

core-site.xml

vim ${HADOOP_HOME}/etc/hadoop/core-site.xml
<configuration>
	<property>
			<name>hadoop.tmp.dir</name>
			<value>/root/hadoop/tmp</value>
			<description>temporary directory for Hadoop</description>
	</property>
	<property>
		<name>fs.defaultFS</name>
		<value>hdfs://master:9000</value>
	</property>
	<property>
			<name>io.file.buffer.size</name>
			<value>131072</value>
	</property>
</configuration>
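
Once saved, a quick sanity check that Hadoop parses the file as intended:

hdfs getconf -confKey fs.defaultFS
-- should print hdfs://master:9000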

hadoop-env.sh (hard-code the JDK location; Hadoop does not reliably pick JAVA_HOME up from the environment)

vim ${HADOOP_HOME}/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/opt/jdk/jdk1.8

hdfs-site.xml

vim ${HADOOP_HOME}/etc/hadoop/hdfs-site.xml
<configuration>
	<property>
		<name>dfs.replication</name>
		<value>2</value>
	</property>
	<!-- NameNode storage directory -->
	<property>
		<name>dfs.namenode.name.dir</name>
		<value>file:/root/hadoop/name</value>
	</property>
	<!-- DataNode storage directory -->
	<property>
			<name>dfs.datanode.data.dir</name>
			<value>file:/root/hadoop/data</value>
	</property>
	<!--<property>
			<name>dfs.namenode.secondary.http-address</name>
			<value>master:9001</value>
	</property>
	<property>
			<name>dfs.webhdfs.enabled</name>
			<value>true</value>
	</property>
	<property>
			<name>dfs.permissions.enabled</name>
			<value>false</value>
	</property>-->
	<!-- max number of files a DataNode serves concurrently -->
	<!--<property>
	  <name>dfs.datanode.max.xcievers</name>
	  <value>4096</value>
	</property>-->

</configuration>

mapred-site.xml

-- if mapred-site.xml does not exist, create it from the .template copy
mv ${HADOOP_HOME}/etc/hadoop/mapred-site.xml.template ${HADOOP_HOME}/etc/hadoop/mapred-site.xml
vim ${HADOOP_HOME}/etc/hadoop/mapred-site.xml
<configuration>
	<property>
		<name>mapreduce.framework.name</name>
		<value>yarn</value>
	</property>
	<!-- JobHistory server host and ports (optional) -->
	<!--
	<property>
		<name>mapreduce.jobhistory.address</name>
		
		<value>master:10020</value>
	</property>
	<property>
		<name>mapreduce.jobhistory.webapp.address</name>
		<value>master:19888</value>
	</property>-->
</configuration>

slaves (list the worker nodes, one per line)

vim ${HADOOP_HOME}/etc/hadoop/slaves
slave1

yarn-site.xml

vim ${HADOOP_HOME}/etc/hadoop/yarn-site.xml
<configuration>

<!-- Site specific YARN configuration properties -->
	<property>
        <name>yarn.resourcemanager.hostname</name>
        <value>master</value>
   </property>
   <property>
        <name>yarn.resourcemanager.address</name>
        <value>${yarn.resourcemanager.hostname}:8032</value>
   </property>
   <property>
        <description>The address of the scheduler interface.</description>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>${yarn.resourcemanager.hostname}:8030</value>
   </property>
   <property>
        <description>The http address of the RM web application.</description>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>${yarn.resourcemanager.hostname}:8088</value>
   </property>
   <property>
        <description>The https address of the RM web application.</description>
        <name>yarn.resourcemanager.webapp.https.address</name>
        <value>${yarn.resourcemanager.hostname}:8090</value>
   </property>
   <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>${yarn.resourcemanager.hostname}:8031</value>
   </property>
   <property>
        <description>The address of the RM admin interface.</description>
        <name>yarn.resourcemanager.admin.address</name>
        <value>${yarn.resourcemanager.hostname}:8033</value>
   </property>
   <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
   </property>
   <property>
        <name>yarn.scheduler.maximum-allocation-mb</name>
        <value>8182</value>
        <description>Maximum allocation per container request, in MB (YARN's default is 8192).</description>
   </property>
   <property>
        <name>yarn.nodemanager.vmem-pmem-ratio</name>
        <value>2.1</value>
   </property>
   <property>
        <name>yarn.nodemanager.resource.memory-mb</name>
        <value>2048</value>
	</property>
   <property>
        <name>yarn.nodemanager.vmem-check-enabled</name>
        <value>false</value>
	</property>

</configuration>

Rename start-all.sh and stop-all.sh; their names clash with Spark's scripts

mv ${HADOOP_HOME}/sbin/start-all.sh ${HADOOP_HOME}/sbin/hadoop-start-all.sh 
mv ${HADOOP_HOME}/sbin/stop-all.sh ${HADOOP_HOME}/sbin/hadoop-stop-all.sh 

Copy the installation to the slave

scp -r /opt/hadoop root@slave1:/opt

Initialize HDFS on master

hdfs namenode -format

Start Hadoop on master

hadoop-start-all.sh
-- answer yes to any prompts
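
The yes prompts are SSH host-key confirmations: the start script logs into every node over SSH. If passwordless SSH from master to the slaves is not set up yet (the steps above assume it is), a typical setup looks like this:

-- on master, accept the defaults at every prompt
ssh-keygen -t rsa
ssh-copy-id root@master
ssh-copy-id root@slave1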

Verify startup (use the IP or a hostname)

http://<ip>:50070 opens the HDFS web UI
http://<ip>:8088 opens the YARN web UI
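
A simple read/write smoke test against HDFS (illustrative; the /test path is arbitrary):

hdfs dfs -mkdir /test
hdfs dfs -put ${HADOOP_HOME}/etc/hadoop/core-site.xml /test
hdfs dfs -ls /test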

Scala

Download Scala

Download Scala 2.11.8

Install Scala

mkdir /opt/scala
cd /opt/scala

-- upload the installation package

tar -zxvf scala-2.11.8.tgz

Rename the Scala installation directory

mv scala-2.11.8 scala2.11

Configure environment variables

vim /etc/profile
-- append at the bottom of the file
export SCALA_HOME=/opt/scala/scala2.11
export PATH=${PATH}:${JAVA_HOME}/bin:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:${SCALA_HOME}/bin

Reload the configuration

source /etc/profile

Test

[root@master bin]# scala
Welcome to Scala 2.11.8 (Java HotSpot(TM) 64-Bit Server VM, Java 1.8.0_261).
Type in expressions for evaluation. Or try :help.
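
You can also evaluate a one-off expression without entering the REPL, e.g.:

scala -e 'println((1 to 5).map(_ * 2).sum)'
-- prints 30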

Copy the installation to the slave

scp -r /opt/scala root@slave1:/opt

Spark

Download Spark

Download Spark 2.4.7

Install Spark

mkdir /opt/spark
cd /opt/spark

-- upload the installation package

tar -zxvf spark-2.4.7-bin-hadoop2.7.tgz

Rename the Spark installation directory

mv spark-2.4.7-bin-hadoop2.7 spark2.4

Configure environment variables

vim /etc/profile
-- append at the bottom of the file
export SPARK_HOME=/opt/spark/spark2.4
export PATH=${PATH}:${JAVA_HOME}/bin:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:${SCALA_HOME}/bin:${SPARK_HOME}/bin:${SPARK_HOME}/sbin

Reload the configuration

source /etc/profile

Test

spark-shell

Edit the configuration files

spark-env.sh

-- if spark-env.sh does not exist, create it from the template
mv ${SPARK_HOME}/conf/spark-env.sh.template ${SPARK_HOME}/conf/spark-env.sh

vim ${SPARK_HOME}/conf/spark-env.sh
-- append at the bottom of the file
export JAVA_HOME=/opt/jdk/jdk1.8
export SCALA_HOME=/opt/scala/scala2.11
export SPARK_HOME=/opt/spark/spark2.4
export HADOOP_HOME=/opt/hadoop/hadoop2.10
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export SPARK_MASTER_IP=master
export SPARK_EXECUTOR_MEMORY=4G

slaves

-- if slaves does not exist, create it from the template
mv ${SPARK_HOME}/conf/slaves.template ${SPARK_HOME}/conf/slaves

vim ${SPARK_HOME}/conf/slaves
-- delete localhost and add:
slave1

Rename Spark's start-all.sh and stop-all.sh (the other side of the name clash with Hadoop)

mv ${SPARK_HOME}/sbin/start-all.sh ${SPARK_HOME}/sbin/spark-start-all.sh 
mv ${SPARK_HOME}/sbin/stop-all.sh ${SPARK_HOME}/sbin/spark-stop-all.sh 

Copy the installation to the slave

scp -r /opt/spark root@slave1:/opt

Start Spark

spark-start-all.sh

Test

-- master node
[root@master sbin]# jps
56643 SecondaryNameNode
56803 ResourceManager
62505 Master
56428 NameNode

-- slave1 node
[root@slave1 opt]# jps
62562 Worker
62645 Jps
59080 NodeManager
58955 DataNode

The master node runs a Master process; the slave runs a Worker.

http://<ip>:8080 opens the Spark web UI
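
To confirm that jobs actually run on the cluster (not just that the UI is up), submit the bundled SparkPi example; this assumes the default standalone master port 7077:

spark-submit --class org.apache.spark.examples.SparkPi --master spark://master:7077 ${SPARK_HOME}/examples/jars/spark-examples_2.11-2.4.7.jar 10
-- look for a line like "Pi is roughly 3.14..." in the output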

ZooKeeper

Download ZooKeeper

Download ZooKeeper 3.5.8

Install ZooKeeper

mkdir /opt/zookeeper
cd /opt/zookeeper

-- upload the installation package

tar -zxvf apache-zookeeper-3.5.8-bin.tar.gz

Rename the ZooKeeper installation directory

mv apache-zookeeper-3.5.8-bin zookeeper3.5

Configure environment variables

vim /etc/profile
-- append at the bottom of the file
export ZK_HOME=/opt/zookeeper/zookeeper3.5
export PATH=${PATH}:${JAVA_HOME}/bin:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:${SCALA_HOME}/bin:${SPARK_HOME}/bin:${SPARK_HOME}/sbin:${ZK_HOME}/bin

Reload the configuration

source /etc/profile

Create the data and dataLog directories

mkdir ${ZK_HOME}/../data
mkdir ${ZK_HOME}/../dataLog

Create the myid file

touch ${ZK_HOME}/../data/myid
vim ${ZK_HOME}/../data/myid
-- file content:
1
-- on master
[root@master conf]# cat ${ZK_HOME}/../data/myid
1
-- on slave1
[root@slave1 opt]# cat ${ZK_HOME}/../data/myid
2

master uses the value 1
slave1 uses the value 2

Edit zoo.cfg

-- if zoo.cfg does not exist, create it from zoo_sample.cfg
mv ${ZK_HOME}/conf/zoo_sample.cfg ${ZK_HOME}/conf/zoo.cfg

vim ${ZK_HOME}/conf/zoo.cfg
-- add (the sample's default dataDir gets replaced):
dataDir=/opt/zookeeper/data
dataLogDir=/opt/zookeeper/dataLog
server.1=master:2888:3888
server.2=slave1:2888:3888

The digit after server. must match the value in that host's data/myid file.

Copy the installation to the slave

scp -r /opt/zookeeper root@slave1:/opt
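
Note that this scp also copies master's myid (value 1) to slave1, so overwrite it there with 2, for example:

ssh root@slave1 'echo 2 > /opt/zookeeper/data/myid'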

Start

-- run on both master and slave1
zkServer.sh start

-- if slave1 reports that the command is not found,
-- copy the environment file from master to slave1:
scp -r /etc/profile root@slave1:/etc/profile
-- then reload the configuration on slave1:
source /etc/profile

Test

master should report Mode: follower
slave1 should report Mode: leader

zkServer.sh status
-- on master
[root@master conf]# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /opt/zookeeper/zookeeper3.5/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost.
Mode: follower

-- on slave1
[root@slave1 opt]# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /opt/zookeeper/zookeeper3.5/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost.
Mode: leader
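
You can also talk to the ensemble directly with the bundled CLI; a quick check:

zkCli.sh -server master:2181 ls /
-- a fresh ensemble prints [zookeeper]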

HBase

Download HBase

Download HBase 2.2.6

Install HBase

mkdir /opt/hbase
cd /opt/hbase

-- upload the installation package

tar -zxvf hbase-2.2.6-bin.tar.gz

Rename the HBase installation directory

mv hbase-2.2.6 hbase2.2

Configure environment variables

vim /etc/profile
-- append at the bottom of the file
export HBASE_HOME=/opt/hbase/hbase2.2
export PATH=${PATH}:${JAVA_HOME}/bin:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:${SCALA_HOME}/bin:${SPARK_HOME}/bin:${SPARK_HOME}/sbin:${ZK_HOME}/bin:${HBASE_HOME}/bin

Reload the configuration

source /etc/profile

Edit the configuration files

hbase-env.sh

vim ${HBASE_HOME}/conf/hbase-env.sh
-- add:
export JAVA_HOME=/opt/jdk/jdk1.8
export HADOOP_HOME=/opt/hadoop/hadoop2.10
export HBASE_HOME=/opt/hbase/hbase2.2
export HBASE_CLASSPATH=/opt/hadoop/hadoop2.10/etc/hadoop
export HBASE_PID_DIR=/root/hbase/pids
export HBASE_MANAGES_ZK=false

HBASE_MANAGES_ZK=false tells HBase to use the external ZooKeeper rather than its bundled one.

hbase-site.xml

vim ${HBASE_HOME}/conf/hbase-site.xml
<configuration>
  <property>
	<name>hbase.rootdir</name>
	<value>hdfs://master:9000/hbase</value>
	<description>The directory shared by region servers.</description>
  </property>
  <!-- ZooKeeper client port -->
  <property>
	<name>hbase.zookeeper.property.clientPort</name>
	<value>2181</value>
  </property>
  <!-- ZooKeeper session timeout (ms) -->
  <property>
	<name>zookeeper.session.timeout</name>
	<value>120000</value>
  </property>
  <!-- tolerated clock skew (ms), to avoid errors when server clocks drift -->
  <property>
	<name>hbase.master.maxclockskew</name>
	<value>150000</value>
  </property>
  <!-- ZooKeeper quorum hosts -->
  <property>
	<name>hbase.zookeeper.quorum</name>
	<value>master,slave1</value>
  </property>
  <!-- local temporary directory -->
  <property>
	<name>hbase.tmp.dir</name>
	<value>/root/hbase/tmp</value>
  </property>
  <!-- true = distributed mode -->
  <property>
	<name>hbase.cluster.distributed</name>
	<value>true</value>
  </property>
  <!-- HBase master address -->
  <property>
    <name>hbase.master</name>
    <value>master:60000</value>
  </property>
  <!-- keeps the HMaster from exiting shortly after startup -->
  <property>
	<name>hbase.unsafe.stream.capability.enforce</name>
	<value>false</value>
  </property>
</configuration>

regionservers

vim ${HBASE_HOME}/conf/regionservers
-- delete localhost and add:
slave1

Copy the installation to the slave

scp -r /opt/hbase root@slave1:/opt

Start (on master)

start-hbase.sh

Test

master should show an HMaster process; slave1 an HRegionServer.

--master
[root@master conf]# jps
56643 SecondaryNameNode
56803 ResourceManager
64279 QuorumPeerMain
62505 Master
65531 Jps
56428 NameNode
65421 HMaster

--slave1
[root@slave1 opt]# jps
62562 Worker
64726 Jps
59080 NodeManager
58955 DataNode
64460 HRegionServer
63677 QuorumPeerMain

http://<ip>:16010 opens the HBase web UI
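
A quick HBase shell smoke test (illustrative; the table name smoke is arbitrary):

echo -e "create 'smoke','cf'\nlist\ndisable 'smoke'\ndrop 'smoke'" | hbase shell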
