win10+xshell6+xftp6+vmware18+centos7+hadoop3.2.0+hive+hbase+zookeeper+kafka+flink+idea

##Network configuration
/etc/sysconfig/network-scripts/ifcfg-ens33

TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="dhcp"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens33"
UUID="e2ae4b99-f4ac-4c58-901a-61a9362c81fb"
DEVICE="ens33"
ONBOOT="yes"
IPADDR="192.168.116.130"
PREFIX="24"
GATEWAY="192.168.116.2"
DNS1="192.168.116.2"
IPV6_PRIVACY="no"
PEERDNS="no"
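#After editing ifcfg-ens33, restart the network service and verify the address (CentOS 7):
systemctl restart network
ip addr show ens33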

##Hosts configuration (/etc/hosts)
192.168.116.130 localhost
192.168.116.130 centos
##Environment variables
export JAVA_HOME=/usr/local/jdk1.8.0_201
export HADOOP_HOME=/usr/local/hadoop-3.2.0
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export HIVE_HOME=/usr/local/hive-2.3.4
export HIVE_CONF_DIR=/usr/local/hive-2.3.4/conf
export CLASSPATH=.:${JAVA_HOME}/lib/dt.jar:${JAVA_HOME}/lib/tools.jar
export PATH="${JAVA_HOME}/bin:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:${HIVE_HOME}/bin:$PATH"
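#These exports typically go in /etc/profile (or ~/.bashrc); a quick check that they took effect:
source /etc/profile
java -version
hadoop version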
##SSH=====START=========================================================================
#SSH passwordless login
ps -e | grep sshd
systemctl start sshd
ssh-keygen -t rsa -P ""
cd ~/.ssh
cat id_rsa.pub >> authorized_keys
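#If key-based login still prompts for a password, directory permissions are the usual cause; a quick fix and check:
chmod 700 ~/.ssh && chmod 600 ~/.ssh/authorized_keys
ssh localhost hostname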
##SSH=====END===========================================================================
##HADOOP=====START======================================================================
#hadoop-env.sh
export JAVA_HOME=/usr/local/jdk1.8.0_201
#core-site.xml
<property>
    <name>fs.defaultFS</name>
    <value>hdfs://192.168.116.130:9000</value>
</property>
#mapred-site.xml
<!-- Legacy MRv1 job tracker address; ignored when running on YARN -->
<property>
    <name>mapred.job.tracker</name>
    <value>localhost:9001</value>
</property>
<!-- Tell the MapReduce framework to use YARN -->
<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>
#hdfs-site.xml
<property>
    <name>dfs.namenode.name.dir</name>
    <value>/usr/local/hadoop-3.2.0/namenode</value>
</property>
<property>
    <name>dfs.datanode.data.dir</name>
    <value>/usr/local/hadoop-3.2.0/datanode</value>
</property>
<property>
    <name>dfs.replication</name>
    <value>1</value>
</property>
<property>
  <name>dfs.namenode.http-address</name>
  <value>0.0.0.0:50070</value>
</property>
#yarn-site.xml
<!-- Reducers fetch map output via mapreduce_shuffle -->
<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>

#Add the following variables at the top of start-dfs.sh and stop-dfs.sh
HDFS_DATANODE_USER=root
HADOOP_SECURE_DN_USER=hdfs
HDFS_NAMENODE_USER=root
HDFS_SECONDARYNAMENODE_USER=root

#Also add the following at the top of start-yarn.sh and stop-yarn.sh:
YARN_RESOURCEMANAGER_USER=root
HADOOP_SECURE_DN_USER=yarn
YARN_NODEMANAGER_USER=root

#Format HDFS (you may first need to rm -rf everything under the namenode and datanode directories)
bin/hdfs namenode -format
#Start/stop HDFS (make sure passwordless SSH works first)
./sbin/start-dfs.sh
./sbin/stop-dfs.sh
#Start/stop YARN
./sbin/start-yarn.sh
./sbin/stop-yarn.sh
#HDFS web UI
http://192.168.116.130:50070/dfshealth.html#tab-overview
#YARN web UI
http://192.168.116.130:8088/cluster
#Run the Hadoop wordcount example
./bin/hadoop jar /usr/local/hadoop-3.2.0/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.2.0.jar wordcount hdfs://192.168.116.130:9000/words.txt hdfs://192.168.116.130:9000/out
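#The wordcount job assumes /words.txt exists in HDFS and /out does not; a minimal sketch to prepare the input and inspect the result (file contents are only an illustration):
echo "hello hadoop hello flink" > /tmp/words.txt
./bin/hdfs dfs -put /tmp/words.txt /words.txt
./bin/hdfs dfs -cat /out/part-r-00000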
##HADOOP=====END========================================================================
##HIVE=====START==========================================================================
#Create hive-site.xml from the bundled template
cp conf/hive-default.xml.template conf/hive-site.xml
$HADOOP_HOME/bin/hadoop fs -mkdir -p /user/hive/warehouse
$HADOOP_HOME/bin/hadoop fs -chmod 777 /user/hive/warehouse

$HADOOP_HOME/bin/hadoop fs -mkdir -p /tmp/hive/
$HADOOP_HOME/bin/hadoop fs -chmod 777 /tmp/hive
#Check that the directories were created
$HADOOP_HOME/bin/hadoop fs -ls /user/hive/
$HADOOP_HOME/bin/hadoop fs -ls /tmp/

In hive-site.xml, replace every ${system:java.io.tmpdir} with a Hive temp directory,
and replace every ${system:user.name} with root (see the sed sketch below).
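#A minimal sketch of doing those replacements with sed (the temp directory /usr/local/hive-2.3.4/tmp is an assumption):
mkdir -p /usr/local/hive-2.3.4/tmp
sed -i 's#${system:java.io.tmpdir}#/usr/local/hive-2.3.4/tmp#g' /usr/local/hive-2.3.4/conf/hive-site.xml
sed -i 's#${system:user.name}#root#g' /usr/local/hive-2.3.4/conf/hive-site.xml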

#Metastore database URL
<name>javax.jdo.option.ConnectionURL</name>  
<value>jdbc:mysql://192.168.32.54:3306/wangsong-test?createDatabaseIfNotExist=true</value>
#JDBC driver
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
#Database user
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
#Database password
<name>javax.jdo.option.ConnectionPassword</name>
<value>zdsoft</value>
#Enforce metastore schema consistency: when enabled, Hive verifies that the schema version stored in the metastore matches the version of the Hive jars and disables automatic schema migration, so upgrades must be done by hand; when disabled, a version mismatch only produces a warning. Default: false.
<name>hive.metastore.schema.verification</name>
<value>false</value>
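#Hive also needs the MySQL JDBC driver on its classpath; a minimal sketch (jar location and version are assumptions):
cp /usr/local/mysql-connector-java-5.1.46.jar /usr/local/hive-2.3.4/lib/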

#hive-env.sh configuration
export HADOOP_HOME=/usr/local/hadoop-3.2.0
export HIVE_CONF_DIR=/usr/local/hive-2.3.4/conf
export HIVE_AUX_JARS_PATH=/usr/local/hive-2.3.4/lib

#Initialize the metastore database:
cd /usr/local/hive-2.3.4/bin
schematool -initSchema -dbType mysql
#Start Hive
./hive
#Smoke test
show functions;
#Create a database
create database db_hive_edu;
#Create a table
use db_hive_edu;
create table student(id int, name string) row format delimited fields terminated by '\t';
#Load data into the table
load data local inpath '/usr/local/student.txt' into table db_hive_edu.student;
#Query the table
select * from student;
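#The load above expects a tab-separated file at /usr/local/student.txt; a minimal sketch of creating one (the rows are only an illustration):
printf '1\tzhangsan\n2\tlisi\n' > /usr/local/student.txt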
##HIVE=====END===========================================================================
##HBASE=====START========================================================================
#hbase-env.sh
export JAVA_HOME=/usr/local/jdk1.8.0_201/
#true: use the ZooKeeper instance bundled with HBase; false: use an external ZooKeeper
export HBASE_MANAGES_ZK=true
#hbase-site.xml
<property>
<name>hbase.rootdir</name>
<value>file:///usr/local/hbase-2.0.4</value>
</property>
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/usr/local/zookeeper-3.4.12</value>
</property>
#Start HBase
./bin/start-hbase.sh
#Stop HBase
./bin/stop-hbase.sh
#Enter the HBase shell
./bin/hbase shell
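#A quick smoke test from inside the shell (table and column family names are only an illustration):
#  create 'test', 'cf'
#  put 'test', 'row1', 'cf:a', 'value1'
#  scan 'test'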
##HBASE=====END===========================================================================
##ZOOKEEPER=====START===================================================================
cp zoo_sample.cfg zoo.cfg
mkdir data
#zoo.cfg
dataDir=/usr/local/zookeeper-3.4.12/data
1. Start the ZK server:    zkServer.sh start
2. Check ZK server status: zkServer.sh status
3. Stop the ZK server:     zkServer.sh stop
4. Restart the ZK server:  zkServer.sh restart
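#Quick health check after starting (assumes nc is installed; expected reply: imok)
echo ruok | nc 127.0.0.1 2181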
##ZOOKEEPER=====END====================================================================
##KAFKA=====START=========================================================================
#server.properties
broker.id=1
#Start Kafka
bin/kafka-server-start.sh -daemon config/server.properties
#Stop Kafka
bin/kafka-server-stop.sh
#Create a test topic
bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic topicname
#Expected output: Created topic "topicname".
#Describe the test topic
bin/kafka-topics.sh --zookeeper localhost:2181 --describe --topic topicname
#List topics
./bin/kafka-topics.sh --list --zookeeper 192.168.116.130:2181
#Delete topics
./bin/kafka-topics.sh --delete --zookeeper 192.168.116.130:2181 --topic topic1,topic2
#Publish messages to the test topic
bin/kafka-console-producer.sh --broker-list localhost:9092 --topic topicname
#Consume messages from the topic (for the JDBC connector below, topic name = topic.prefix + table.whitelist)
bin/kafka-console-consumer.sh --bootstrap-server=localhost:9092 --topic topicname --from-beginning

#Connecting Kafka to MySQL (Kafka Connect JDBC source)
Add the connector jars to libs:
1. mysql-connector-java-5.1.46.jar
2. all jars from confluentinc-kafka-connect-jdbc-5.1.2
Create quickstart-mysql.properties with the following content:
tasks.max=1
name=mysql-connector
topic.prefix=test
table.whitelist=accounts,ages
mode=timestamp+incrementing
incrementing.column.name=id
timestamp.column.name=update_time
connector.class=io.confluent.connect.jdbc.JdbcSourceConnector
connection.url=jdbc:mysql://192.168.32.54:3306/wangsong-test
connection.user=root
connection.password=zdsoft
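#The timestamp+incrementing mode above expects each whitelisted table to have an auto-increment id and an update_time timestamp column; a minimal sketch of a matching table (columns beyond those two are only an illustration):
mysql -h192.168.32.54 -uroot -p -D wangsong-test -e "CREATE TABLE IF NOT EXISTS accounts (id INT AUTO_INCREMENT PRIMARY KEY, name VARCHAR(64), update_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP)"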

#Or create the connector through the Connect REST API:
echo '{"name":"mysql-connector","config":{"connector.class":"JdbcSourceConnector","connection.url":"jdbc:mysql://192.168.32.54:3306/wangsong-test","connection.user":"root","connection.password":"zdsoft","mode":"timestamp","table.whitelist":"accounts","validate.non.null":false,"timestamp.column.name":"id","topic.prefix":"test-"}}' | curl -X POST -d @- http://192.168.116.130:8083/connectors --header "Content-Type:application/json"

#Start Kafka Connect (standalone / distributed)
1. bin/connect-standalone.sh config/connect-standalone.properties config/quickstart-mysql.properties
2. bin/connect-distributed.sh config/connect-distributed.properties  (in distributed mode connectors are created through the REST API as shown above, not from a properties file)
#List installed connector plugins and configured connectors
1.http://192.168.116.130:8083/connector-plugins
2.http://192.168.116.130:8083/connectors
#Query and delete connectors
1.curl -X GET  192.168.116.130:8083/connectors
2.curl -X DELETE 192.168.116.130:8083/connectors/mysql-connector
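#Check the connector's status via the Connect REST API
curl -X GET 192.168.116.130:8083/connectors/mysql-connector/status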
##KAFKA=====END============================================================================

##FLINK=====START===========================================================================
#Start Flink
bin/start-cluster.sh  
#http://192.168.116.130:8081
#Check the logs
tail log/flink-root-standalonesession-0-localhost.log
#Stop Flink
bin/stop-cluster.sh

#Flink consuming from Kafka:
#Add these dependencies to the Maven project (pom.xml)
<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-java</artifactId>
    <version>1.7.2</version>
</dependency>
<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-streaming-java_2.12</artifactId>
    <version>1.7.2</version>
</dependency>
<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-connector-kafka-0.10_2.12</artifactId>
    <version>1.7.2</version>
</dependency>
#Main class
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;
import java.util.Properties;

public class ReadFromKafka {
  public static void main(String[] args) throws Exception {
    // Set up the streaming execution environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    // Kafka consumer properties (broker address from the Kafka setup above)
    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "192.168.116.130:9092");
    properties.setProperty("group.id", "consumer");
    // Consume the "testaccounts" topic (topic.prefix + table name from the JDBC connector) as plain strings
    DataStream<String> stream = env.addSource(new FlinkKafkaConsumer010<String>("testaccounts", new SimpleStringSchema(), properties));
    // Prefix each record and print it to stdout (visible in the TaskManager log)
    stream.map(new MapFunction<String, String>() {
      public String map(String value) throws Exception {
        return "Stream Value: " + value;
      }
    }).print();
    // Submit the job
    env.execute();
  }
}
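#A minimal sketch of packaging and submitting the job to the cluster started above (the jar name is an assumption):
mvn clean package
./bin/flink run -c ReadFromKafka target/read-from-kafka-1.0.jar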
##FLINK=====END================================================================================
