# Hadoop 2.7.2 install location (original had this as a bare path, which
# would fail with "is a directory / command not found" if executed):
# /home/deploy/hadoop-2.7.2

# Install the coursier launcher (Scala toolchain installer), make it
# executable, run its setup, then install the Scala 2.13.8 compiler & runner.
curl -fL https://github.com/coursier/launchers/raw/master/cs-x86_64-pc-linux.gz | gzip -d > cs && chmod +x cs && ./cs setup
cs install scala:2.13.8 scalac:2.13.8
# 启动hadoop — start Hadoop
# Reference commands: start and stop the Hadoop 2.7.2 cluster (HDFS + YARN).
hadoop272_home=/home/deploy/hadoop-2.7.2
"${hadoop272_home}/sbin/start-all.sh"
"${hadoop272_home}/sbin/stop-all.sh"
# 启动hive — start Hive (requires Hadoop to be running first)
# Bring up Hadoop first — Hive needs HDFS/YARN available.
# NOTE(review): duplicates the start-all.sh above; presumably repeated so
# this Hive section is self-contained for copy-paste.
/home/deploy/hadoop-2.7.2/sbin/start-all.sh
# Launch the interactive Hive 2.3.3 CLI (blocks the terminal).
/home/deploy/apache-hive-2.3.3-bin/bin/hive
# 启动 spark — start Spark
/home/deploy/spark-2.4.3-bin-hadoop2.7/sbin/start-all.sh
# 访问地址 — access URLs:
# HDFS NameNode web UI:      http://10.10.10.66:50070/dfshealth.html#tab-overview
# YARN ResourceManager UI:   http://10.10.10.66:8088/cluster
# Access URL on port 8081. NOTE(review): the original label said "启动hive"
# (start hive), which looks like a copy-paste error — this is a URL, not a
# start command, and 8081 is typically a Spark worker or Flink web UI
# (or a typo for the 8088 YARN UI above). Verify which service this is.
# http://10.10.10.66:8081/cluster
# Run Kafdrop (Kafka web UI) against the broker at 10.10.10.99:9092.
# --add-opens is required on newer JDKs (9+) so Kafdrop can reflectively
# access sun.nio.ch internals. Web UI listens on 9000, Spring management
# (actuator) endpoints on 9002. Foreground process — blocks the terminal.
java --add-opens=java.base/sun.nio.ch=ALL-UNNAMED \
-jar kafdrop.jar \
--kafka.brokerConnect=10.10.10.99:9092 --server.port=9000 --management.server.port=9002
# Run Dinky 0.6.4 (Flink 1.15 SQL dev platform) in Docker.
# Web UI: host port 9999 -> container 8888. Backed by the MySQL "dlink"
# database on 10.10.10.99:3306; plugins/lib/jar directories are mounted
# from the host so custom connectors and jobs survive container restarts.
# NOTE(review): DB credentials are hardcoded on the command line (visible in
# shell history and `docker inspect`) — consider --env-file instead.
docker run -itd --restart=always --name=dinky -p 9999:8888 \
-e spring.datasource.url='jdbc:mysql://10.10.10.99:3306/dlink?useUnicode=true&characterEncoding=UTF-8&autoReconnect=true&useSSL=false&zeroDateTimeBehavior=convertToNull&serverTimezone=Asia/Shanghai&allowPublicKeyRetrieval=true' \
-e spring.datasource.username=root \
-e spring.datasource.password=liebe \
-v /home/demo/dinky/plugins:/opt/dinky/plugins \
-v /home/demo/dinky/lib:/opt/dinky/lib \
-v /home/demo/dinky/jar:/opt/dinky/jar \
registry.cn-beijing.aliyuncs.com/yue-open/dinky:0.6.4-flink1.15
# 环境变量与挂载点 — environment variables and mount points:
docker run -d --restart=always -v /home/demo/mysql/data:/var/lib/mysql -v /home/demo/mysql/config:/etc/mysql/conf.d -e MYSQL_ROOT_PASSWORD=liebe -p 3306:3306 --name mysql mysql:8.0.17
# 启动hadoop — start Hadoop (3.3.3)
# Reference commands for the Hadoop 3.3.3 cluster: start it, stop it, then
# list running JVM processes with jps to verify which daemons are up.
hadoop333_home=/home/hadoop-3.3.3
"${hadoop333_home}/sbin/start-all.sh"
"${hadoop333_home}/sbin/stop-all.sh"
jps
# 启动spark — start Spark
# 配置bin环境变量 — configure the bin PATH environment variable first
# Start and stop the Spark 3.3.0 standalone cluster.
spark330_home=/home/spark-3.3.0-bin-hadoop3
"${spark330_home}/sbin/start-all.sh"
"${spark330_home}/sbin/stop-all.sh"
# Start ZooKeeper, then the Kafka 3.2.1 broker. The original ran both in the
# foreground, so when executed as a script the broker line was never reached
# (zookeeper-server-start.sh blocks). -daemon backgrounds each service,
# equivalent to running them in two separate shells.
kafka_home=/home/kafka_2.12-3.2.1
"${kafka_home}/bin/zookeeper-server-start.sh" -daemon "${kafka_home}/config/zookeeper.properties"
"${kafka_home}/bin/kafka-server-start.sh" -daemon "${kafka_home}/config/server.properties"
# Submit the KafkaSparkEsDemo job to YARN in client mode with small
# resources (512m driver and executor, 1 core per executor).
/home/spark-3.3.0-bin-hadoop3/bin/spark-submit --class org.example.KafkaSparkEsDemo --master yarn --deploy-mode client --driver-memory 512m --executor-memory 512m --executor-cores 1 spark-demo.jar
# Same submission, but with JDWP remote debugging enabled on the driver:
# suspend=y makes the driver JVM WAIT for a debugger to attach on port 8899
# before it starts running — the job will appear hung until you attach.
/home/spark-3.3.0-bin-hadoop3/bin/spark-submit --class org.example.KafkaSparkEsDemo --master yarn --deploy-mode client --driver-memory 512m --executor-memory 512m --executor-cores 1 --driver-java-options "-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8899" spark-demo.jar