Maintaining Big Data Components

I. HBase component

1. Upload the HBase package, extract it, and rename the directory to HBase
2. Edit the environment variables

vim /etc/profile

export HBASE_HOME=/usr/local/src/HBase
export PATH=$PATH:$HBASE_HOME/bin
[root@master ~]# cd /usr/local/src/HBase/conf/
[root@master conf]# vim hbase-env.sh
export JAVA_HOME=/usr/local/src/jdk
export HADOOP_HOME=/usr/local/src/hadoop
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export HBASE_MANAGES_ZK=false
export HBASE_LOG_DIR=${HBASE_HOME}/logs
export HBASE_PID_DIR=${HBASE_HOME}/pid
3. Edit hbase-site.xml
[root@master conf]# vim hbase-site.xml 	
<configuration>
<property>
        <name>hbase.rootdir</name>
        <value>hdfs://master:9000/HBase</value>
</property>
<property>
        <name>hbase.master.info.port</name>
        <value>16010</value>			(use 16010 here, not the old 60010)
</property>
<property>
        <name>hbase.zookeeper.property.clientPort</name>
        <value>2181</value>
</property>
<property>
        <name>hbase.tmp.dir</name>
        <value>/usr/local/src/data/hbase/tmp</value>
</property>
<property>
        <name>zookeeper.session.timeout</name>
        <value>120000</value>
</property>
<property>
        <name>hbase.cluster.distributed</name>
        <value>true</value>
</property>
<property>
        <name>hbase.zookeeper.quorum</name>
        <value>master,slave1,slave2</value>
</property>
<property>
        <name>hbase.zookeeper.property.dataDir</name>
        <value>/usr/local/src/data/hbase/tmp/zookeeper-hbase</value>
</property>
</configuration>
4. Configure the region servers
[root@master conf]# vim regionservers
slave1
slave2

[root@master hadoop]# cp ./core-site.xml /usr/local/src/HBase/conf/
[root@master hadoop]# cp ./hdfs-site.xml /usr/local/src/HBase/conf/
5. Distribute the directories
[root@master ~]# scp /etc/profile slave1:/etc/
profile                                                     100% 2301   370.8KB/s   00:00
[root@master ~]# scp /etc/profile slave2:/etc/
profile                                                     100% 2301   939.7KB/s   00:00
[root@master ~]# source /etc/profile	(run on all three nodes)
[root@master ~]# scp -r /usr/local/src/HBase/ slave1:/usr/local/src/
[root@master ~]# scp -r /usr/local/src/HBase/ slave2:/usr/local/src/
6. Start the HBase cluster

[root@master ~]# /usr/local/src/HBase/bin/start-hbase.sh

Troubleshooting notes: (① HMaster is running but HRegionServer will not start, or ② it starts and then dies shortly after, with nothing useful in the logs)

① Edit hbase-env.sh: when running on JDK 8+, simply comment out the JDK 7-era PermSize options (do this on all three nodes)

[root@master conf]# vim hbase-env.sh


② Enable time synchronization (a hidden gotcha: nothing says it is required, but every node should have it; once enabled, everything suddenly worked)

yum -y install ntpdate
ntpdate cn.pool.ntp.org

[root@master ~]# /usr/local/src/HBase/bin/start-hbase.sh


create 'Student',{NAME=>'StuInfo',VERSIONS=>'3'},'Grades'
put 'Student', '0001', 'StuInfo:Name', 'Tom Green', 1
put 'Student', '0001', 'StuInfo:Age', '18'
put 'Student', '0001', 'StuInfo:Sex', 'Male'
put 'Student', '0001', 'Grades:BigData', '80'
put 'Student', '0001', 'Grades:Computer', '90'
put 'Student', '0001', 'Grades:Math', '85'
put 'Student', '0002', 'StuInfo:Name', 'Amy'
put 'Student', '0002', 'StuInfo:Age', '19'
put 'Student', '0002', 'Grades:BigData', '95'
put 'Student', '0002', 'Grades:Math', '89'
put 'Student', '0003', 'StuInfo:Name', 'Allen'
put 'Student', '0003', 'StuInfo:Age', '19'
put 'Student', '0003', 'StuInfo:Sex', 'Male'
put 'Student', '0003', 'StuInfo:Class', '02'
put 'Student', '0003', 'Grades:BigData', '90'
put 'Student', '0003', 'Grades:Math', '88' 
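
To spot-check the rows just inserted, a few read commands can be run in the same HBase shell session (a quick verification sketch; the exact output depends on your timestamps):

scan 'Student'
get 'Student', '0001'
get 'Student', '0001', {COLUMN => 'StuInfo:Name', VERSIONS => 3}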

II. Hive component

(1) Hive installation and configuration
1. Upload and extract the package, then configure the environment variables
#set Hive
export HIVE_HOME=/usr/local/src/hive
export PATH=$HIVE_HOME/bin:$PATH
export HIVE_CONF_DIR=$HIVE_HOME/conf
[root@master hive]# source /etc/profile
2. Install and start MySQL
[root@master hive]# rpm -qa |grep mariadb
mariadb-libs-5.5.60-1.el7_5.x86_64		(remove it)
[root@master hive]# rpm -e --nodeps mariadb-libs-5.5.60-1.el7_5.x86_64

[root@master mysql-5.7.18]# rpm -ivh mysql-community-*
warning: mysql-community-client-5.7.18-1.el7.x86_64.rpm: Header V3 DSA/SHA1 Signature, key ID 5072e1f5: NOKEY
Preparing...                          ################################# [100%]
Updating / installing...
   1:mysql-community-common-5.7.18-1.e################################# [ 25%]
   2:mysql-community-libs-5.7.18-1.el7################################# [ 50%]
   3:mysql-community-client-5.7.18-1.e################################# [ 75%]
   4:mysql-community-server-5.7.18-1.e################################# [100%]

[root@master ~]# rpm -qa |grep mysql
mysql-community-client-5.7.18-1.el7.x86_64
mysql-community-common-5.7.18-1.el7.x86_64
mysql-community-libs-5.7.18-1.el7.x86_64
mysql-community-server-5.7.18-1.el7.x86_64

[root@master ~]# vim /etc/my.cnf
[root@master ~]# cat /etc/my.cnf|tail -5	(lines that were added)
default-storage-engine=innodb
innodb_file_per_table
collation-server=utf8_general_ci
init-connect='SET NAMES utf8'
character-set-server=utf8

[root@master ~]# systemctl start mysqld
[root@master ~]# systemctl status mysqld

3. Initialize and configure MySQL
[root@master ~]# cat /var/log/mysqld.log |grep password
2022-04-26T08:52:45.731775Z 1 [Note] A temporary password is generated for root@localhost: d+y5ntHUwGB!
[root@master ~]# mysql_secure_installation
Password123$
Password123$
No
No
No
No
Yes
[root@master ~]# mysql -uroot -pPassword123$
mysql> create database hive_db;
mysql> create user hive identified by 'Password123$';
mysql> grant all privileges on *.* to 'hive'@'%' identified by 'Password123$' with grant option;
mysql> grant all privileges on *.* to 'root'@'%' identified by 'Password123$' with grant option;
mysql> flush privileges;

4. Configure the Hive parameters
Upload or edit hive-site.xml
[root@master conf]# hadoop fs -mkdir -p /user/hive/warehouse
[root@master conf]# hadoop fs -chmod g+w /user/hive/warehouse

[root@master conf]# vim hive-env.sh			(add the following)
export JAVA_HOME=/usr/local/src/jdk
export HADOOP_HOME=/usr/local/src/hadoop
export HIVE_CONF_DIR=/usr/local/src/hive/conf
export HIVE_AUX_JARS_PATH=/usr/local/src/hive/lib

[root@master ~]# cp /opt/software/mysql-connector-java-5.1.46.jar /usr/local/src/hive/lib/
5. Connect to MySQL (with the Hadoop cluster and MySQL running)
[root@master ~]# schematool -initSchema -dbType mysql

[root@master ~]# hive
hive> show databases;
hive> create database hive_test_db;
hive> use hive_test_db;
hive> create table t_user(id int,name string);
hive> show tables;
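
As an optional sanity check (assuming a Hive version with INSERT ... VALUES support, i.e. 0.14 or later), write one row and read it back; the job it triggers also confirms the Hadoop cluster is reachable:

hive> insert into t_user values(1,'tom');
hive> select * from t_user;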
6. Check in MySQL (all tables created by Hive are recorded in the TBLS table of hive_db)
mysql> use hive_db;
Database changed
mysql> select * from TBLS;
+--------+-------------+-------+------------------+-------+-----------+-------+----------+---------------+--------------------+--------------------+
| TBL_ID | CREATE_TIME | DB_ID | LAST_ACCESS_TIME | OWNER | RETENTION | SD_ID | TBL_NAME | TBL_TYPE      | VIEW_EXPANDED_TEXT | VIEW_ORIGINAL_TEXT |
+--------+-------------+-------+------------------+-------+-----------+-------+----------+---------------+--------------------+--------------------+
|      1 |  1650971581 |     2 |                0 | root  |         0 |     1 | t_user   | MANAGED_TABLE | NULL               | NULL               |
+--------+-------------+-------+------------------+-------+-----------+-------+----------+---------------+--------------------+--------------------+
1 row in set (0.00 sec)



(2) Remote access to Hive via the Beeline CLI
1. Stop the services before configuring

[root@master ~]# /usr/local/src/hadoop/sbin/stop-all.sh

[root@master ~]# vim /usr/local/src/hadoop/etc/hadoop/core-site.xml

		<property>
                <name>hadoop.proxyuser.root.hosts</name>	(root or hadoop)
                <value>*</value>
        </property>
        <property>
                <name>hadoop.proxyuser.root.groups</name>	(root or hadoop)
                <value>*</value>		
        </property>
2. Start the services
[root@master ~]# /usr/local/src/hadoop/sbin/start-all.sh
[root@master ~]# hiveserver2
31303 RunJar	(jps now shows one extra RunJar process)

3. Distribute to the nodes that need access
[root@master hadoop]# nohup hiveserver2 &	(runs hiveserver2 in the background; press Enter to get the prompt back)
(Copy Hive to whichever node will connect remotely and do the steps below there, e.g. slave1)
[root@master ~]# scp -r /usr/local/src/hive/ slave1:/usr/local/src/
[root@slave1 ~]# vim ~/.bashrc

#set hive
export HIVE_HOME=/usr/local/src/hive
export HIVE_CONF_DIR=${HIVE_HOME}/conf
export PATH=${HIVE_HOME}/bin:$PATH

[root@slave1 ~]# source ~/.bashrc
[root@slave1 ~]# beeline -u jdbc:hive2://192.168.135.10:10000 -n root
Troubleshooting note: the user given after -n must be the proxy user configured in step 1 above (root in my case)



(3) Hive database operations
[root@master ~]# hive
hive> create database school;
hive> create database if not exists school;
hive> show databases;
hive> use school;
hive> alter database school set dbproperties('creater'='myname');
hive> desc database extended school;
hive> drop database school;
hive> show databases;
(4) Hive table operations

use hive_test_db;
CREATE TABLE ufodata(sighted STRING, reported STRING, sighting_location STRING,
shape STRING, duration STRING, description STRING COMMENT 'Free text description')
COMMENT 'The UFO data set.';
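
To put data into the table one would normally load a file; a minimal sketch, assuming a local file /root/ufo.tsv exists (hypothetical path) and that its field delimiter matches the table's (the default is \001, since no ROW FORMAT was declared above):

hive> LOAD DATA LOCAL INPATH '/root/ufo.tsv' INTO TABLE ufodata;
hive> SELECT COUNT(*) FROM ufodata;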

III. ZooKeeper component

1. ZooKeeper shell operations
[root@master ~]# /usr/local/src/zookeeper/bin/zkCli.sh  # start the CLI
ls /    stat /    or ls2 /	(newer ZooKeeper versions use get -s)	# inspect
create /zknode "aa"	# create a znode
set /zknode1 "zknode1_data"	# set a znode's data
delete /zknode1	# delete a znode
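
Two more zkCli commands worth knowing, since ephemeral and sequential nodes come up in the distributed-lock discussion below (node names here are arbitrary examples):

create -e /zknode/eph "tmp"	# ephemeral node, deleted automatically when the session ends
create -s /zknode/seq- "x"	# sequential node, ZooKeeper appends an increasing counter to the name
get /zknode	# read the data back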
2. Leader election
Startup order 1 3 2  ->  3 becomes leader
Startup order 1 2 3  ->  2 becomes leader
Startup order 3 1 2  ->  3 becomes leader
As the servers start, each votes for the node with the larger myid seen so far; once a node collects votes from more than half of the ensemble, it becomes the leader.

zxid: the transaction id. The node holding the newest data (the largest zxid) wins, and the other nodes sync to it.
epoch: the election era. A follower sends its epoch/zxid to the leader, which returns the largest value plus one, and the follower updates its own epoch. During an election, nodes compare epoch first, then zxid, and finally myid.
3. ZooKeeper use cases
a. Service registration and discovery
	When a new service is added to a distributed system, registering it in ZooKeeper with a Znode plus a Watcher makes the service easy to manage.
b. Distributed locks
	To keep multiple processes in a distributed environment from interfering with one another, ZooKeeper's ephemeral sequential nodes can implement a distributed lock that makes the processes run in order.
c. Configuration management
	Core configuration files can be handed over to ZooKeeper; when a configuration changes, ZooKeeper pushes the update to every node in the cluster.

Do not use ZooKeeper to store business data.
CAP theorem for distributed systems:
Consistency: every node returns the same data at any point in time
Availability
Partition tolerance
A Hadoop HA cluster registers several services with ZooKeeper; ZooKeeper's own high availability instead comes from re-electing a leader among its 2n+1 nodes. Roughly, it is the difference between calling in another worker process and choosing a new leader.
ZooKeeper's data model is an inverted tree, like the Linux file system.
A znode must not exceed 1 MB; oversized znodes hurt performance.
4. Components of a ZooKeeper ACL
a. Scheme (authorization mode)
IP, Host, Auth, Digest, World, Super
b. ID (the object being authorized)
c. Permission
Create, Read, Write, Delete, Admin  (crwda)
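
A minimal digest/auth example in zkCli (the user name and password are made up for illustration):

addauth digest user1:pass1	# authenticate this session with the digest scheme
create /acl_node "data"
setAcl /acl_node auth:user1:pass1:crwda	# grant all five permissions to the authenticated user
getAcl /acl_node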

IV. Maintaining the ETL components

(1) Sqoop

MySQL, Hadoop, the JDK, Hive and HBase must be in place, and the services started first.

1. Upload and extract the package, rename it, configure the environment variables, and put the required JARs into lib
[root@master software]# tar -zxvf sqoop-1.4.7.tar.gz -C /usr/local/src/
[root@master src]# mv sqoop-1.4.7/ sqoop
[root@master conf]# cp sqoop-env-template.sh sqoop-env.sh	(then edit it)

#Set path to where bin/hadoop is available
export HADOOP_COMMON_HOME=/usr/local/src/hadoop

#Set path to where hadoop-*-core.jar is available
export HADOOP_MAPRED_HOME=/usr/local/src/hadoop

#set the path to where bin/hbase is available
export HBASE_HOME=/usr/local/src/HBase

#Set the path to where bin/hive is available
export HIVE_HOME=/usr/local/src/hive

#Set the path for where zookeper config dir is
export ZOOCFGDIR=/usr/local/src/zookeeper/conf
[root@master ~]# vim /etc/profile
#set sqoop
export SQOOP_HOME=/usr/local/src/sqoop
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:${SQOOP_HOME}/bin
export CLASSPATH=.:$JAVA_HOME/lib:${SQOOP_HOME}/lib
[root@master ~]# source /etc/profile
[root@master ~]# cp /usr/local/src/hive/lib/* /usr/local/src/sqoop/lib/
Also upload mysql-connector-java-5.1.46.jar and sqoop-1.4.6.jar into /usr/local/src/sqoop/lib/.
[root@master ~]# sqoop version	(an error here means the Sqoop JAR is missing from lib)
2. Test the connection to MySQL
[root@master ~]# sqoop list-databases --connect jdbc:mysql://master:3306/ --username root --password Password123$


3. Importing data

① Remove the leftover directory on HDFS, then create the table in the local MySQL

[root@master ~]# hdfs dfs -rm -r -f /aaaa

Write the following SQL statements into a .sql file:
create database company;
create table company.staff(id int(4) primary key not null auto_increment, name varchar(255));
insert into company.staff(name) values('Male');
insert into company.staff(name) values('Female');
insert into company.staff(name) values('Male');
insert into company.staff(name) values('Female');
insert into company.staff(name) values('Male');
insert into company.staff(name) values('Female');
insert into company.staff(name) values('Male');
insert into company.staff(name) values('Female');
Log in to MySQL and run mysql> source /home/wzh01/mysqlToHdfs.sql to execute the statements and create the table.

② Full import

[root@master ~]# sqoop import --connect jdbc:mysql://master:3306/company --username root --password Password123$ --table staff --target-dir /company --delete-target-dir --num-mappers 1 --fields-terminated-by "\t" --split-by id

③ Partial (query-based) import

[root@master ~]# sqoop import --connect jdbc:mysql://master:3306/company --username root --password Password123$ --target-dir /company --delete-target-dir --num-mappers 1 --fields-terminated-by "\t" --query 'select id,name from staff where id<=4 and $CONDITIONS' --split-by id
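
Either import can be checked directly on HDFS; with --num-mappers 1 there is a single output part file (the same file the export step below reads back):

[root@master ~]# hdfs dfs -ls /company
[root@master ~]# hdfs dfs -cat /company/part-m-00000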
4. Exporting data

① Truncate the table data while keeping the structure

mysql> use company
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A

Database changed
mysql> show tables;
+-------------------+
| Tables_in_company |
+-------------------+
| staff             |
+-------------------+
1 row in set (0.00 sec)

mysql> select * from staff;
+----+--------+
| id | name   |
+----+--------+
|  1 | Male   |
|  2 | Female |
|  3 | Male   |
|  4 | Female |
|  5 | Male   |
|  6 | Female |
|  7 | Male   |
|  8 | Female |
+----+--------+
8 rows in set (0.00 sec)
mysql> truncate staff;
Query OK, 0 rows affected (0.02 sec)

mysql> select * from staff;
Empty set (0.00 sec)

② Export back to the local MySQL

[root@master ~]# sqoop export --connect jdbc:mysql://master:3306/company --username root --password Password123$ --table staff --num-mappers 1 --export-dir /company/part-m-00000 --input-fields-terminated-by "\t"
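
To confirm the export landed, query MySQL again with the same credentials as above:

[root@master ~]# mysql -uroot -pPassword123$ -e "select * from company.staff;"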
(2) Flume
1. Upload and extract the package, rename it, and configure the environment variables
[root@master ~]# vim /etc/profile
#set flume
export FLUME_HOME=/usr/local/src/flume
export FLUME_CONF_DIR=$FLUME_HOME/conf
export PATH=$PATH:$FLUME_HOME/bin
[root@master ~]# source /etc/profile

[root@master conf]# cp flume-env.sh.template flume-env.sh
[root@master conf]# vim flume-env.sh
export JAVA_HOME=/usr/local/src/jdk	(changed line)

[root@master conf]# flume-ng version
Flume 1.9.0
Source code repository: https://git-wip-us.apache.org/repos/asf/flume.git
Revision: d4fcab4f501d41597bc616921329a4339f73585e
Compiled by fszabo on Mon Dec 17 20:45:25 CET 2018
From source with checksum 35db629a3bda49d23e9b3690c80737f9
Error fix: (screenshot omitted)

2. netcat as the source

① Install nc with yum and upload the netcat Flume configuration file

Put the custom configuration file under /usr/local/src/flume/conf/ (any path works; it just has to be passed to flume-ng when the agent is started)

[root@master conf]# vim flume-nc-conf.properties
#source, channel, sink
a1.sources = r1
a1.sinks = k1
a1.channels = c1

#source configuration
a1.sources.r1.type = netcat
a1.sources.r1.bind = master
a1.sources.r1.port = 44444

#sink description
a1.sinks.k1.type = logger

#channel configuration; a memory channel's main advantage is speed
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

#bind the source to the channel and the sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1

② Using flume-nc-conf.properties

a. Start the Flume agent
[root@master flume]# flume-ng agent -c /usr/local/src/flume/conf/ -n a1 -f /usr/local/src/flume/conf/flume-nc-conf.properties -Dflume.root.logger=INFO,console
In a new terminal window:
[root@master conf]# lsof -i:44444
COMMAND   PID USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
java    36375 root  656u  IPv6 204198      0t0  TCP master:cognex-dataman (LISTEN)
b. Send data to the listening port
[root@master conf]# nc master 44444
hello wangzihao
OK
aaaaaaaa
OK
The other window logs the incoming events in real time.
3. avro as the source

① Upload the avro configuration file

[root@master conf]# vim flume-avro-conf.properties
#define an agent; its name can be anything (here a1)
#declare the agent's sources (s1), sinks (k1) and channels (c1)
#the component names themselves can also be chosen freely
a1.sources = s1
a1.sinks = k1
a1.channels = c1

#configure the source
a1.sources.s1.type = avro
a1.sources.s1.bind = 192.168.135.10
a1.sources.s1.port = 6666

#configure the channel
a1.channels.c1.type = memory

#configure the sink
a1.sinks.k1.type = logger

#bind the source and the sink to the channel
a1.sources.s1.channels = c1
a1.sinks.k1.channel = c1

② Using flume-avro-conf.properties

[root@master ~]# flume-ng agent -c /usr/local/src/flume/conf/ -n a1 -f /usr/local/src/flume/conf/flume-avro-conf.properties -Dflume.root.logger=INFO,console


[root@master ~]# flume-ng avro-client -c /usr/local/src/flume/conf/ -H 192.168.135.10 -p 6666 --dirname /test
[root@master test]# ls
a.txtfileSuffix  b.txtfileSuffix  c.txtfileSuffix
[root@master test]# cat *
123
456
789
4. HDFS as the sink

① Upload the HDFS sink configuration file

[root@master conf]# vim flume-memory-hdfs.properties
# collect data from a port and write it to HDFS
# Agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1

# Sources
# a1.sources.r1
# source type / bind address / port
a1.sources.r1.type = netcat
a1.sources.r1.bind = master
a1.sources.r1.port = 44444

# Sinks
# a1.sinks.k1
a1.sinks.k1.type = hdfs

# HDFS output path; a sub-directory is created per date
a1.sinks.k1.hdfs.path = /flume/logs/%Y-%m-%d/%H-%M-%S

# prefix for the files Flume creates on HDFS
a1.sinks.k1.hdfs.filePrefix = logs_%Y-%m-%d

# the following three parameters control how Flume rolls files on HDFS
# a new HDFS file is started as soon as any one of the three conditions is met
# roll interval in seconds; 0 disables time-based rolling
a1.sinks.k1.hdfs.rollInterval = 10

# maximum size threshold for rolling; for an hdfs sink this is best set to a multiple of the HDFS block size
# this exercise uses Hadoop 2.7.7, whose default block size is 128 MB (older releases used 64 MB)
# in bytes; 0 disables size-based rolling
a1.sinks.k1.hdfs.rollSize = 134217700
# maximum number of Events per rolled file
# usually set to 0 (disabled), unless there is a strict requirement and the Event size is under your control
a1.sinks.k1.hdfs.rollCount = 0
# number of Events flushed to HDFS per batch (a flush also happens after a timeout, so a full batch is not required)
a1.sinks.k1.hdfs.batchSize = 100
# HDFS file format; only SequenceFile / DataStream / CompressedStream are supported
# CompressedStream must be combined with hdfs.codeC to choose the compression codec
# SequenceFile writes HDFS SequenceFiles
# DataStream writes the data uncompressed
a1.sinks.k1.hdfs.fileType = DataStream
# the next three parameters, together with the escape sequences (%y %m %d %H %M %S, etc.), set the minimum granularity of time-based directory rotation
# enable rounding-down of the timestamp
# with rounding enabled, directories are created per rounded time bucket
# rather than one directory per minute as configured above

a1.sinks.k1.hdfs.round = true
# multiple of the rounding unit
a1.sinks.k1.hdfs.roundValue = 30
# unit used when rounding down
a1.sinks.k1.hdfs.roundUnit = second
# whether to use the local timestamp; default false (i.e. use the timestamp from the Event header)
# the Event header is empty in this exercise, so the local timestamp must be used
a1.sinks.k1.hdfs.useLocalTimeStamp = true
# Channels
# a1's channel c1 uses memory as the buffer / maximum number of buffered Events / Events per transaction
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# Bind
# note: a source can be bound to multiple channels, but a sink can only be bound to a single channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1

② Using flume-memory-hdfs.properties

[root@master conf]# flume-ng agent -c /usr/local/src/flume/conf/ -n a1 -f /usr/local/src/flume/conf/flume-memory-hdfs.properties  -Dflume.root.logger=INFO,console
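
After sending a few lines from another window with nc master 44444 (as in step 2), the rolled files show up under the dated directories configured in hdfs.path; a quick check (the paths below simply follow that pattern):

[root@master ~]# hdfs dfs -ls -R /flume/logs
[root@master ~]# hdfs dfs -cat /flume/logs/*/*/logs_*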


(3) Kafka
1. Upload and extract the package, then configure it
[root@master kafka]# vim /usr/local/src/kafka/config/server.properties
broker.id=0
zookeeper.connect=master,slave1,slave2
[root@master ~]# scp -r /usr/local/src/kafka/ slave1:/usr/local/src/
[root@master ~]# scp -r /usr/local/src/kafka/ slave2:/usr/local/src/
[root@slave1 ~]# vim /usr/local/src/kafka/config/server.properties
broker.id=1
[root@slave2 ~]# vim /usr/local/src/kafka/config/server.properties
broker.id=2
2. Start Kafka on all three nodes (start ZooKeeper first)
[root@master ~]# /usr/local/src/kafka/bin/kafka-server-start.sh -daemon /usr/local/src/kafka/config/server.properties
[root@master ~]# jps
48756 Jps
47322 QuorumPeerMain
48687 Kafka
[root@slave1 config]# /usr/local/src/kafka/bin/kafka-server-start.sh -daemon  /usr/local/src/kafka/config/server.properties
[root@slave2 config]# /usr/local/src/kafka/bin/kafka-server-start.sh -daemon  /usr/local/src/kafka/config/server.properties

3. Verify the Kafka deployment
[root@master ~]# /usr/local/src/kafka/bin/kafka-topics.sh  --create --zookeeper master:2181,slave1:2181,slave2:2181 --replication-factor 3 --topic test --partitions 1
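
The topic can be listed and described with the same script (using the old ZooKeeper-based flags, matching the commands used in this section):

[root@master ~]# /usr/local/src/kafka/bin/kafka-topics.sh --list --zookeeper master:2181,slave1:2181,slave2:2181
[root@master ~]# /usr/local/src/kafka/bin/kafka-topics.sh --describe --zookeeper master:2181,slave1:2181,slave2:2181 --topic test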

① Start a consumer on slave1

[root@slave1 ~]# /usr/local/src/kafka/bin/kafka-console-consumer.sh  --zookeeper  master:2181,slave1:2181,slave2:2181  --topic test --from-beginning
Using the ConsoleConsumer with old consumer is deprecated and will be removed in a future major release. Consider using the new consumer by passing [bootstrap-server] instead of [zookeeper].

② Start a producer on master and send messages; the consumer receives them

[root@master bin]# /usr/local/src/kafka/bin/kafka-console-producer.sh --broker-list  master:9092,slave1:9092,slave2:9092 --topic test
>hello wwww
>this is wanzihao


4. Flume + Kafka

① Make sure the JDK, the ZooKeeper cluster, the Kafka cluster and Flume are all installed on the VMs

[root@master ~]# vim /usr/local/src/flume/conf/flume-kafka.conf
a1.sources = source1
a1.sinks = k1
a1.channels = c1

a1.sources.source1.type = exec
a1.sources.source1.command = tail -F /root/access.log

# Describe the sink

a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
a1.sinks.k1.topic = test
a1.sinks.k1.brokerList = master:9092,slave1:9092,slave2:9092
a1.sinks.k1.requiredAcks = 1
a1.sinks.k1.batchSize = 20
a1.sinks.k1.channel = c1

# Use a channel which buffers events in memory

a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel

a1.sources.source1.channels = c1
a1.sinks.k1.channel = c1

② Create the access.log file

[root@master ~]# touch access.log

③ Start a consumer on slave1

[root@slave1 ~]# /usr/local/src/kafka/bin/kafka-console-consumer.sh  --zookeeper  master:2181,slave1:2181,slave2:2181  --topic test
Using the ConsoleConsumer with old consumer is deprecated and will be removed in a future major release. Consider using the new consumer by passing [bootstrap-server] instead of [zookeeper].

④ Start Flume on the machine where it is installed

[root@master ~]# flume-ng agent -c /usr/local/src/flume/conf/ -n a1 -f /usr/local/src/flume/conf/flume-kafka.conf  -Dflume.root.logger=INFO,console
In a new window, append messages to access.log:
[root@master ~]# echo "123456" >> access.log
[root@master ~]# echo "wwwwww" >> access.log

⑤ The messages appear on the consumer side (screenshot omitted)


5. Flume consumes data from Kafka and writes it to HDFS

① Edit the configuration file and start the agent (start the Hadoop cluster first with start-all.sh)

[root@master ~]# vim /usr/local/src/flume/conf/kafkaToFlume.conf
## components
a1.sources=r1
a1.channels=c1
a1.sinks=k1

## source
a1.sources.r1.type = org.apache.flume.source.kafka.KafkaSource
a1.sources.r1.batchSize = 5000
a1.sources.r1.batchDurationMillis = 2000
a1.sources.r1.kafka.bootstrap.servers = master:9092,slave1:9092,slave2:9092
a1.sources.r1.kafka.topics=test

## channel
a1.channels.c1.type=memory
a1.channels.c1.capacity=100000
a1.channels.c1.transactionCapacity=10000

## sink
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = /test/%Y-%m-%d
a1.sinks.k1.hdfs.filePrefix = cp-api-

## timestamp rounding: enable / value / unit
a1.sinks.k1.hdfs.round = true
a1.sinks.k1.hdfs.roundValue = 10
a1.sinks.k1.hdfs.roundUnit = second

## file rolling: interval (seconds) / size (bytes) / event count
a1.sinks.k1.hdfs.rollInterval = 60
a1.sinks.k1.hdfs.rollSize = 134217728
a1.sinks.k1.hdfs.rollCount = 0

## compression format
a1.sinks.k1.hdfs.fileType = CompressedStream 
a1.sinks.k1.hdfs.codeC = snappy

## wiring
a1.sources.r1.channels = c1
a1.sinks.k1.channel= c1
[root@master ~]# flume-ng agent -c /usr/local/src/flume/conf/ -n a1 -f /usr/local/src/flume/conf/kafkaToFlume.conf  -Dflume.root.logger=INFO,console

② Start a producer

[root@slave1 ~]# /usr/local/src/kafka/bin/kafka-console-producer.sh --broker-list  master:9092,slave1:9092,slave2:9092 --topic test
>hello wangzihao

③ View the file contents on HDFS (screenshot omitted)

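
A command-line check of the same thing (the sink writes snappy-compressed files, so hdfs dfs -text is used instead of -cat; this assumes the Hadoop native snappy libraries are available):

[root@master ~]# hdfs dfs -ls -R /test
[root@master ~]# hdfs dfs -text /test/*/cp-api-*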

V. Spark component

(1) The Scala language
1. Install the package and configure the environment variables
#set scala
export SCALA_HOME=/usr/local/src/scala
export PATH=$PATH:$SCALA_HOME/bin
2. Basic Scala syntax

① Values and variables

val x=1+1
var z:Array[Int]=Array(1,2,3,4)
z.length
z.head z.tail z(0)
scala> z.isEmpty
res18: Boolean = false
scala> z.contains(1)
res19: Boolean = true
scala> z.contains(5)
res20: Boolean = false

② Paste mode

scala> :paste
// Entering paste mode (ctrl-D to finish)			(define a function)

object Test{
def addInt(a:Int,b:Int):Int={
var sum:Int=0
sum=a+b
sum
}
}

// Exiting paste mode, now interpreting.

defined object Test
Call the function			(Shift+Delete to delete)
scala> Test.addInt(5,3)
res5: Int = 8

③ A function that returns the larger of two numbers

scala> :paste
// Entering paste mode (ctrl-D to finish)

object Max{
def maxtwo(a:Int,b:Int):Int={
if (a>b) a else b
}
}

// Exiting paste mode, now interpreting.

defined object Max

scala> Max.maxtwo(1,3)
res0: Int = 3

④ Anonymous functions

scala> val addInt=(x:Int,y:Int)=>x+y
addInt: (Int, Int) => Int = <function2>

scala> addInt(1,3)
res4: Int = 4

Or:
scala> val addInt=(_:Int)+(_:Int)
addInt: (Int, Int) => Int = <function2>

scala> addInt(3,5)
res0: Int = 8

⑤ Lists

scala> val fruit:List[String]=List("apple","pear","banana")
fruit: List[String] = List(apple, pear, banana)

scala> fruit(0)
res1: String = apple

scala> fruit.take(2)
res2: List[String] = List(apple, pear)
scala> fruit.tail
res7: List[String] = List(pear, banana)

scala> val set:Set[Int]=Set(1,1,2,3,4)
set: Set[Int] = Set(1, 2, 3, 4)

⑥ Maps

scala> val person:Map[String,Int]=Map("tom"->21,"lucy"->22,"amy"->33)
person: Map[String,Int] = Map(tom -> 21, lucy -> 22, amy -> 33)

scala> person.values
res8: Iterable[Int] = MapLike(21, 22, 33)

scala> person.keys
res9: Iterable[String] = Set(tom, lucy, amy)

⑦ Tuples (a tuple can hold values of different types, up to 22 elements)

scala> val t=(1,3,14,"aaa",List("a","b","c"))
t: (Int, Int, Int, String, List[String]) = (1,3,14,aaa,List(a, b, c))

scala> val t=new Tuple4(1,3.14,"a","f")
t: (Int, Double, String, String) = (1,3.14,a,f)

scala> t._1
res0: Int = 1

scala> t._2
res1: Double = 3.14
3. Functions

①map

scala> val num=List(1,2,3,4,5)
num: List[Int] = List(1, 2, 3, 4, 5)

scala> num.map(x=>x*x)
res4: List[Int] = List(1, 4, 9, 16, 25)

②foreach

scala> num.foreach(x=>print(x*x))
149162536

③filter

scala> num.filter(x=>x%2==1)
res6: List[Int] = List(1, 3, 5)

④ flatten: turn a two-dimensional (nested) list into a flat one

scala> val list=List(List(1,2,3),List(4,5,6))
list: List[List[Int]] = List(List(1, 2, 3), List(4, 5, 6))

scala> list.flatten
res0: List[Int] = List(1, 2, 3, 4, 5, 6)

scala> list.flatMap(x=>x.map(_*2))
res11: List[Int] = List(2, 4, 6, 8, 10, 12)

⑤groupBy

scala> num.groupBy(x=>x%2==0)
res0: scala.collection.immutable.Map[Boolean,List[Int]] = Map(false -> List(1, 3, 5), true -> List(2, 4, 6))

scala> list.flatMap(x=>x.groupBy(x=>x%2==0))
res6: List[(Boolean, List[Int])] = List((false,List(1, 3)), (true,List(2)), (false,List(5)), (true,List(4, 6)))
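
Putting these operators together, a small word count over a plain Scala list in the REPL (a sketch; the sample strings are made up):

scala> val lines=List("a b a","b c")
scala> lines.flatMap(_.split(" ")).groupBy(x=>x).map(kv=>(kv._1,kv._2.length))
// expected result: a -> 2, b -> 2, c -> 1 (the Map's display order may differ)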

(2) Using Spark
1. Upload the package and configure the environment variables
export SPARK_HOME=/usr/local/src/spark
export PATH=$PATH:$SPARK_HOME/bin
2. Running a JAR with spark-submit

Four deployment modes: local, standalone cluster, and YARN in client or cluster mode.

[root@master ~]# /usr/local/src/spark/bin/spark-submit --class org.apache.spark.examples.SparkPi --master local[*] --driver-memory 512M --executor-memory 512M --executor-cores 1 /usr/local/src/spark/examples/jars/spark-examples_2.11-2.1.1.jar 40
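
For comparison, submitting the same example to YARN in client mode would look roughly like this (a sketch, not run above; it assumes the YARN cluster is up and HADOOP_CONF_DIR points at the Hadoop configuration):

[root@master ~]# /usr/local/src/spark/bin/spark-submit --class org.apache.spark.examples.SparkPi --master yarn --deploy-mode client --driver-memory 512M --executor-memory 512M --executor-cores 1 /usr/local/src/spark/examples/jars/spark-examples_2.11-2.1.1.jar 40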
3. The Spark shell
[root@master ~]# /usr/local/src/spark/bin/spark-shell

scala> val input=sc.textFile("/root/test.txt")
input: org.apache.spark.rdd.RDD[String] = /root/test.txt MapPartitionsRDD[1] at textFile at <console>:24

scala> input.count
res0: Long = 2

scala> input.flatMap(_.split(" ")).collect
res0: Array[String] = Array(aaaaa, wangzihao)

scala> input.flatMap(_.split(" ")).map(x=>(x,1)).collect
res1: Array[(String, Int)] = Array((aaaaa,1), (wangzihao,1))

scala> input.flatMap(_.split(" ")).map(x=>(x,1)).reduceByKey((x,y)=>x+y).collect
res4: Array[(String, Int)] = Array((wangzihao,1), (aaaaa,1))

scala> ss.flatMap(_.split(",")).map(x=>x.toInt).sum
res27: Double = 55.0
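
The ss RDD used in the last line is not defined earlier in the transcript; presumably it held comma-separated number strings summing to 55, e.g. created like this (an assumed reconstruction):

scala> val ss=sc.parallelize(List("1,2,3,4,5","6,7,8,9,10"))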
