Big Data Platform Setup

Disable the firewall on all nodes

systemctl stop firewalld
systemctl disable firewalld

Distribute SSH keys

ssh-keygen -f <key file> -t rsa -P ""
ssh-copy-id -i <key file> root@localhost
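For a three-node cluster the same two commands can be run concretely as below (a minimal sketch, assuming the hosts are named master, slave1 and slave2 as in the rest of these notes):

# Generate a default RSA key pair and copy the public key to every node
ssh-keygen -t rsa -P "" -f /root/.ssh/id_rsa
for host in master slave1 slave2; do
  ssh-copy-id -i /root/.ssh/id_rsa.pub root@$host
done
# Verify passwordless login
ssh slave1 hostname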

Install the JDK

#Remove OpenJDK first

rpm -qa | grep openjdk
rpm -e --nodeps $(rpm -qa | grep openjdk)

#Extract and configure environment variables
tar -xvf <archive> -C <target directory>
tar -xzf /h3cu/jdk-8u144-linux-x64.tar.gz -C /usr/local/src/

#Rename the extracted directory

mv /usr/local/src/jdk1.8.0_144 /usr/local/src/java

#Set environment variables
Per-user:
vi /root/.bash_profile
export JAVA_HOME=/usr/local/src/java
export PATH=$PATH:$JAVA_HOME/bin
System-wide:
vi /etc/profile
export JAVA_HOME=/usr/local/src/java
export PATH=$PATH:$JAVA_HOME/bin
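After editing, reload the profile and confirm that the JDK on the PATH is the one just installed:

source /root/.bash_profile   # or: source /etc/profile
java -version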

Install Hadoop

#Extract the archive
tar -xzf /h3cu/hadoop-2.7.1.tar.gz -C /usr/local/src/
#Rename the extracted directory
mv /usr/local/src/hadoop-2.7.1 /usr/local/src/hadoop

#Configure Hadoop environment variables
vi /root/.bash_profile 
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
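Reload the profile and verify that the hadoop command resolves:

source /root/.bash_profile
hadoop version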

#Configure hadoop-env.sh
vi /usr/local/src/hadoop/etc/hadoop/hadoop-env.sh 
export JAVA_HOME=/usr/local/src/java

#Configure core-site.xml
vi /usr/local/src/hadoop/etc/hadoop/core-site.xml
<property>
<name>fs.defaultFS</name>
<value>hdfs://master:9000</value>
</property>
<property>
<!-- Hadoop temporary file path -->
<name>hadoop.tmp.dir</name>
<value>/usr/local/src/hadoop/dfs/tmp</value>
</property>
mkdir -p /usr/local/src/hadoop/dfs/tmp

#Configure hdfs-site.xml
vi /usr/local/src/hadoop/etc/hadoop/hdfs-site.xml
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>/usr/local/src/hadoop/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/usr/local/src/hadoop/dfs/data</value>
</property>

#Configure mapred-site.xml
cp /usr/local/src/hadoop/etc/hadoop/mapred-site.xml.template /usr/local/src/hadoop/etc/hadoop/mapred-site.xml
vi /usr/local/src/hadoop/etc/hadoop/mapred-site.xml
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>

#Configure yarn-site.xml
vi /usr/local/src/hadoop/etc/hadoop/yarn-site.xml
<property>
<!-- Comma-separated list of auxiliary services (required) -->
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<!-- Whether to enforce physical memory limits on containers; default is true -->
<name>yarn.nodemanager.pmem-check-enabled</name>
<value>false</value>
</property>
<property>
<!-- Whether to enforce virtual memory limits on containers; default is true -->
<name>yarn.nodemanager.vmem-check-enabled</name>
<value>false</value>
</property>

#Configure slaves
vi /usr/local/src/hadoop/etc/hadoop/slaves
master
slave1
slave2

#Distribute the Hadoop directory and the environment file to slave1 and slave2

[root@master ~]# scp -r /usr/local/src/hadoop slave1:/usr/local/src/
[root@master ~]# scp -r /usr/local/src/hadoop slave2:/usr/local/src/
[root@master ~]# scp /root/.bash_profile slave1:/root
[root@master ~]# scp /root/.bash_profile slave2:/root

#Format the NameNode
hdfs namenode -format

#Start the Hadoop cluster and check the daemons
start-all.sh
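To confirm the cluster is healthy, check the daemons on each node and the HDFS report (expected: NameNode, SecondaryNameNode and ResourceManager on master, plus DataNode and NodeManager on every node listed in slaves):

jps
hdfs dfsadmin -report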

Deploy the Hive component

#Configure Hive environment variables (current user only)
vi /root/.bash_profile 
export HIVE_HOME=/usr/local/src/hive
export PATH=$PATH:$HIVE_HOME/bin
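These notes assume the Hive distribution has already been unpacked to /usr/local/src/hive. If not, the same tar/mv pattern used above applies; the archive name here is only an assumption, adjust it to the actual file:

tar -xzf /h3cu/apache-hive-2.3.4-bin.tar.gz -C /usr/local/src/   # hypothetical archive name
mv /usr/local/src/apache-hive-2.3.4-bin /usr/local/src/hive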

#Extract the MySQL Connector/J driver package
tar -xzf /h3cu/mysql-connector-java-5.1.27.tar.gz -C /usr/local/src/
cp /usr/local/src/mysql-connector-java-5.1.27/mysql-connector-java-5.1.27-bin.jar /usr/local/src/hive/lib

#Configure hive-site.xml
vi /usr/local/src/hive/conf/hive-site.xml
<property>
  <!-- JDBC connection URL (required) -->
  <name>javax.jdo.option.ConnectionURL</name>
  <value>jdbc:mysql://master:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false</value>
</property>
<property>
  <!-- JDBC driver class (required) -->
  <name>javax.jdo.option.ConnectionDriverName</name>
  <value>com.mysql.jdbc.Driver</value>
</property>
<property>
  <!-- Database username (required) -->
  <name>javax.jdo.option.ConnectionUserName</name>
  <value>root</value>
</property>
<property>
  <!-- Database password (required) -->
  <name>javax.jdo.option.ConnectionPassword</name>
  <value>password</value>
</property>

#Initialize the Hive metastore schema
schematool -dbType mysql -initSchema
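A quick sanity check after initialization (assuming MySQL is reachable at master:3306 with the credentials configured above):

hive -e "show databases;"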

Configure Kafka

Standalone (single broker)

vi /root/.bash_profile
export KAFKA_HOME=/usr/local/src/kafka
export PATH=$PATH:$KAFKA_HOME/bin

#Edit the broker configuration
vi /usr/local/src/kafka/config/server.properties
broker.id=0
# Starts at 0; every broker must have a unique broker.id (like ZooKeeper's myid),
# e.g. 1, 2, 3 for a three-broker cluster.
host.name=master

log.dirs=/usr/local/src/kafka/logs

#ZooKeeper connection string
zookeeper.connect=master:2181

#Edit the bundled ZooKeeper configuration
vi /usr/local/src/kafka/config/zookeeper.properties
dataDir=/usr/local/src/kafka/zookeeper
# Start both daemons in the background
zookeeper-server-start.sh -daemon /usr/local/src/kafka/config/zookeeper.properties
kafka-server-start.sh -daemon /usr/local/src/kafka/config/server.properties
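A quick smoke test once both daemons are up, using the CLI tools shipped with Kafka 2.x:

kafka-topics.sh --create --zookeeper master:2181 --replication-factor 1 --partitions 1 --topic test
kafka-topics.sh --list --zookeeper master:2181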

--------------------------------------------------------------------------------------
ZooKeeper
vi /root/.bash_profile

export ZOOKEEPER_HOME=/usr/local/src/zookeeper
export PATH=$PATH:$ZOOKEEPER_HOME/bin

1.4 Configure zoo.cfg
[root@master ~]#

cp /usr/local/src/zookeeper/conf/zoo_sample.cfg /usr/local/src/zookeeper/conf/zoo.cfg
vi /usr/local/src/zookeeper/conf/zoo.cfg

Configuration:

dataDir=/usr/local/src/zookeeper/data
dataLogDir=/usr/local/src/zookeeper/logs
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888

1.5 Configure the myid file
[root@master ~]#
mkdir /usr/local/src/zookeeper/data
echo "1" > /usr/local/src/zookeeper/data/myid
source /root/.bash_profile
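With the ZooKeeper directory distributed and each node's myid set, start the service on every node and check the election:

zkServer.sh start
zkServer.sh status   # one node should report leader, the others follower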

Deploy the Sqoop component

vi /root/.bash_profile 
export SQOOP_HOME=/usr/local/src/sqoop
export PATH=$PATH:$SQOOP_HOME/bin

cp /usr/local/src/mysql-connector-java-5.1.27/mysql-connector-java-5.1.27-bin.jar /usr/local/src/sqoop/lib/

#Configure sqoop-env.sh
cp /usr/local/src/sqoop/conf/sqoop-env-template.sh /usr/local/src/sqoop/conf/sqoop-env.sh
vi /usr/local/src/sqoop/conf/sqoop-env.sh
export HADOOP_COMMON_HOME=/usr/local/src/hadoop
export HADOOP_MAPRED_HOME=/usr/local/src/hadoop
export HIVE_HOME=/usr/local/src/hive
#Test the connection
sqoop list-databases --connect jdbc:mysql://master:3306 --username root --password password
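A sketch of an actual import job, assuming a MySQL database named test containing a table named student (both names are hypothetical):

sqoop import --connect jdbc:mysql://master:3306/test --username root --password password \
  --table student --target-dir /user/root/student -m 1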

Configure Spark

vi /root/.bash_profile
export SPARK_HOME=/usr/local/src/spark
export PATH=$PATH:$SPARK_HOME/sbin:$SPARK_HOME/bin

vi /usr/local/src/spark/conf/spark-env.sh
export JAVA_HOME=/usr/local/src/java
export SPARK_MASTER_IP=master
export YARN_CONF_DIR=/usr/local/src/hadoop/etc/hadoop
export HADOOP_CONF_DIR=/usr/local/src/hadoop/etc/hadoop

vi /usr/local/src/spark/conf/slaves
master
slave1
slave2

#Start Spark (standalone mode)
/usr/local/src/spark/sbin/start-all.sh   # full path avoids clashing with Hadoop's start-all.sh

bin/spark-submit \
--class org.apache.spark.examples.SparkPi \
--master spark://master:7077 \
/usr/local/src/spark/examples/jars/spark-examples_2.11-2.1.1.jar 10

#on yarn: 
start-yarn.sh

bin/spark-submit \
--class org.apache.spark.examples.SparkPi \
--master yarn \
--deploy-mode cluster \
/usr/local/src/spark/examples/jars/spark-examples_2.11-2.1.1.jar 10
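In cluster deploy mode the Pi result goes to the driver container's log rather than the local console; it can be retrieved with the YARN CLI once the job finishes (replace <application id> with the id printed by spark-submit):

yarn logs -applicationId <application id> | grep "Pi is roughly"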

Configure Flink

vi /root/.bash_profile
export FLINK_HOME=/usr/local/src/flink
export PATH=$PATH:$FLINK_HOME/bin

cp /chinaskills/flink-shaded-hadoop-2-uber-2.7.5-10.0.jar /usr/local/src/flink/lib/

vi /usr/local/src/flink/conf/flink-conf.yaml
jobmanager.rpc.address: master

vi /usr/local/src/flink/conf/masters
<hostname>:8081

vi /usr/local/src/flink/conf/slaves
master
slave1
slave2

export HADOOP_CONF_DIR=/usr/local/src/hadoop/etc/hadoop
export HADOOP_CLASSPATH=$(hadoop classpath)

start-cluster.sh

flink run -c wordcount.StreamWordCount -p 2 \
/root/target/FlinkProject-1.0-SNAPSHOT-jar-with-dependencies.jar \
--host 192.168.222.201 --port 7777

flink run -m yarn-cluster -e yarn-per-job \
/opt/flink/examples/batch/WordCount.jar
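Running jobs can then be listed from the CLI (or through the JobManager web UI on port 8081):

flink list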

Flume

# The agent name must be unique; a host can run multiple agents
a2.sources = r1
a2.sinks = s1
a2.channels = c1

#source
a2.sources.r1.type = exec
a2.sources.r1.command = tail -F /opt/hadoop/logs/hadoop-root-secondarynamenode-master.log

#sinks
a2.sinks.s1.type = hdfs
a2.sinks.s1.hdfs.path =  hdfs://master:9000/tmp/flumes

#channels
a2.channels.c1.type = memory

#Bind the source and sink to the channel
a2.sources.r1.channels = c1
a2.sinks.s1.channel = c1
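Assuming this agent definition is saved as conf/hdfs-agent.conf (the file name is an assumption), it is started the same way as the netcat example further below:

flume-ng agent -c conf -f conf/hdfs-agent.conf -n a2 -Dflume.root.logger=INFO,console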

______________________________________
a1.sources = r1
a1.sinks = k1
a1.channels = c1

# Describe/configure the source
a1.sources.r1.type = netcat
# Hostname/IP to bind to
a1.sources.r1.bind = master
# Port to listen on
a1.sources.r1.port = 44444

# Describe the sink
a1.sinks.k1.type = logger

# Use a channel which buffers events in memory
a1.channels.c1.type = memory

# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1

flume-ng agent -c conf -f conf/flumelog.conf -n a1 -Dflume.root.logger=INFO,console
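To exercise the netcat source, open a second terminal and send a line to the bound port (assuming the nc utility is installed); the event should then appear in the agent's logger output:

nc master 44444
hello flume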