1. Environment Setup
1.1. Create the deploy user and configure passwordless sudo
useradd hadoop
echo "Hadoop#149" | passwd --stdin hadoop
# Grant the deploy user passwordless sudo
sed -i '$ahadoop ALL=(ALL) NOPASSWD: ALL' /etc/sudoers
sed -i 's/Defaults    requiretty/#Defaults    requiretty/g' /etc/sudoers
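A quick sanity check (not part of the official procedure) that the sudoers rule took effect, assuming the hadoop user was created as above:
# -n makes sudo fail instead of prompting if a password would still be required
su - hadoop -c 'sudo -n true && echo "passwordless sudo OK"'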
1.2. Configure passwordless SSH
# Switch to the deploy user and set up passwordless SSH login to the local machine
su - hadoop
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
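To verify, an SSH round trip should complete without a password prompt (assuming node03, the hostname used throughout this guide, resolves to this machine; the first connection may still ask to confirm the host key):
ssh node03 "echo ssh OK"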
1.3. Disable firewalld and SELinux
# Stop and disable the firewall:
systemctl stop firewalld
systemctl status firewalld
systemctl disable firewalld
# Change SELINUX from enforcing to disabled:
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
# Check SELinux status:
sestatus
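The config-file change only takes effect after a reboot; to stop enforcement immediately for the current session as well:
# switch SELinux to permissive mode right away (lasts until reboot, after which the config file applies)
setenforce 0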
1.4. Initialize the database
Execute the following in MySQL:
-- on MySQL 8 these variables are named validate_password.policy / validate_password.length
set global validate_password_policy=0;
set global validate_password_length=1;
CREATE DATABASE dolphinscheduler DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
-- MySQL 8 no longer accepts IDENTIFIED BY inside GRANT, so create the users first
CREATE USER 'ds'@'%' IDENTIFIED BY 'Changxin*8';
CREATE USER 'ds'@'localhost' IDENTIFIED BY 'Changxin*8';
GRANT ALL PRIVILEGES ON dolphinscheduler.* TO 'ds'@'%';
GRANT ALL PRIVILEGES ON dolphinscheduler.* TO 'ds'@'localhost';
flush privileges;
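A quick connectivity check with the mysql client (assuming the MySQL server runs on node03, matching the JDBC URL used in 3.2):
# should list the dolphinscheduler database if the user and grants are correct
mysql -h node03 -u ds -p'Changxin*8' -e 'SHOW DATABASES;'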
2. Download
wget https://mirrors.tuna.tsinghua.edu.cn/apache/dolphinscheduler/2.0.3/apache-dolphinscheduler-2.0.3-bin.tar.gz --no-check-certificate
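Optionally verify the download; mirrors generally do not host checksum files, so compare against the .sha512 published by Apache (archived releases live under archive.apache.org/dist/dolphinscheduler/2.0.3/):
sha512sum apache-dolphinscheduler-2.0.3-bin.tar.gz
# compare the printed digest against the official .sha512 file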
3. Deployment
3.1. Create the installation directory and grant ownership
# Create the directory
mkdir -p /opt/dolphin
# Grant ownership to the deploy user
chown -R hadoop:hadoop /opt/dolphin
# Place the downloaded tarball in /opt/dolphin, then extract it
cd /opt/dolphin
tar -zxvf apache-dolphinscheduler-2.0.3-bin.tar.gz -C /opt/dolphin
# Change ownership so the deploy user can operate on the extracted directory
chown -R hadoop:hadoop apache-dolphinscheduler-2.0.3-bin
3.2. Edit application-mysql.yaml
Note: the datasource below connects as root; the ds user created in 1.4 would work here as well.
spring:
  datasource:
    driver-class-name: com.mysql.cj.jdbc.Driver
    url: jdbc:mysql://node03:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
    username: root
    password: Changxin*8
    hikari:
      connection-test-query: select 1
      minimum-idle: 5
      auto-commit: true
      validation-timeout: 3000
      pool-name: DolphinScheduler
      maximum-pool-size: 50
      connection-timeout: 30000
      idle-timeout: 600000
      leak-detection-threshold: 0
      initialization-fail-timeout: 1
3.3. Add the MySQL 8 driver
# Manually place the mysql-connector-java driver jar in the lib directory
mysql-connector-java-8.0.27.jar
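For example (assuming the jar has already been downloaded to the current directory; its Maven Central coordinates are mysql:mysql-connector-java:8.0.27):
cp mysql-connector-java-8.0.27.jar /opt/dolphin/apache-dolphinscheduler-2.0.3-bin/lib/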
3.4. Configure conf/config/install_config.conf
# ---------------------------------------------------------
# INSTALL MACHINE
# ---------------------------------------------------------
# A comma separated list of machine hostname or IP would be installed DolphinScheduler,
# including master, worker, api, alert. If you want to deploy in pseudo-distributed
# mode, just write a pseudo-distributed hostname
# Example for hostnames: ips="ds1,ds2,ds3,ds4,ds5", Example for IPs: ips="192.168.8.1,192.168.8.2,192.168.8.3,192.168.8.4,192.168.8.5"
ips="node03"
# Port of SSH protocol, default value is 22. For now we only support same port in all `ips` machine
# modify it if you use different ssh port
sshPort="22"
# A comma separated list of machine hostname or IP would be installed Master server, it
# must be a subset of configuration `ips`.
# Example for hostnames: masters="ds1,ds2", Example for IPs: masters="192.168.8.1,192.168.8.2"
masters="node03"
# A comma separated list of machine <hostname>:<workerGroup> or <IP>:<workerGroup>.All hostname or IP must be a
# subset of configuration `ips`, And workerGroup have default value as `default`, but we recommend you declare behind the hosts
# Example for hostnames: workers="ds1:default,ds2:default,ds3:default", Example for IPs: workers="192.168.8.1:default,192.168.8.2:default,192.168.8.3:default"
workers="node03:default"
# A comma separated list of machine hostname or IP would be installed Alert server, it
# must be a subset of configuration `ips`.
# Example for hostname: alertServer="ds3", Example for IP: alertServer="192.168.8.3"
alertServer="node03"
# A comma separated list of machine hostname or IP would be installed API server, it
# must be a subset of configuration `ips`.
# Example for hostname: apiServers="ds1", Example for IP: apiServers="192.168.8.1"
apiServers="node03"
# A comma separated list of machine hostname or IP would be installed Python gateway server, it
# must be a subset of configuration `ips`.
# Example for hostname: pythonGatewayServers="ds1", Example for IP: pythonGatewayServers="192.168.8.1"
pythonGatewayServers="node03"
# The directory to install DolphinScheduler for all machine we config above. It will automatically be created by `install.sh` script if not exists.
# Do not set this configuration same as the current path (pwd)
installPath="/opt/module/dolphinscheduler"
# The user to deploy DolphinScheduler for all machine we config above. For now user must create by yourself before running `install.sh`
# script. The user needs to have sudo privileges and permissions to operate hdfs. If hdfs is enabled then the root directory needs
# to be created by this user
deployUser="hadoop"
# The directory to store local data for all machine we config above. Make sure user `deployUser` have permissions to read and write this directory.
dataBasedirPath="/opt/module/dolphinscheduler/tmp"
# ---------------------------------------------------------
# DolphinScheduler ENV
# ---------------------------------------------------------
# JAVA_HOME, we recommend use same JAVA_HOME in all machine you going to install DolphinScheduler
# and this configuration only support one parameter so far.
javaHome="/usr/java/jdk1.8.0_202-amd64"
# DolphinScheduler API service port, also this is your DolphinScheduler UI component's URL port, default value is 12345
apiServerPort="12345"
# ---------------------------------------------------------
# Database
# NOTICE: If database value has special characters, such as `.*[]^${}\+?|()@#&`, Please add prefix `\` for escaping.
# ---------------------------------------------------------
# The type for the metadata database
# Supported values: `postgresql`, `mysql`, `h2`.
DATABASE_TYPE=${DATABASE_TYPE:-"mysql"}
# Spring datasource url, following <HOST>:<PORT>/<database>?<parameter> format. If you are using mysql, you could use the jdbc
# string jdbc:mysql://127.0.0.1:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8 as an example
SPRING_DATASOURCE_URL=${SPRING_DATASOURCE_URL:-"jdbc:mysql://node03:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8"}
# Spring datasource username
SPRING_DATASOURCE_USERNAME=${SPRING_DATASOURCE_USERNAME:-"root"}
# Spring datasource password
SPRING_DATASOURCE_PASSWORD=${SPRING_DATASOURCE_PASSWORD:-"Changxin*8"}
# ---------------------------------------------------------
# Registry Server
# ---------------------------------------------------------
# Registry Server plugin name, should be a substring of `registryPluginDir`, DolphinScheduler use this for verifying configuration consistency
registryPluginName="zookeeper"
# Registry Server address.
registryServers="node03:2181"
# Registry Namespace
registryNamespace="dolphinscheduler"
# ---------------------------------------------------------
# Worker Task Server
# ---------------------------------------------------------
# Worker Task Server plugin dir. DolphinScheduler will find and load the worker task plugin jar package from this dir.
taskPluginDir="lib/plugin/task"
# resource storage type: HDFS, S3, NONE
resourceStorageType="HDFS"
# resource store on HDFS/S3 path, resource file will store to this hdfs path, self configuration, please make sure the directory exists on hdfs and has read write permissions. "/dolphinscheduler" is recommended
resourceUploadPath="/dolphinscheduler"
# if resourceStorageType is HDFS, set defaultFS to the namenode address; for HA, put core-site.xml and hdfs-site.xml in the conf directory.
# if S3, set the S3 address, for example: s3a://dolphinscheduler
# Note: for S3, be sure to create the root directory /dolphinscheduler
defaultFS="hdfs://node03:9000"
# if resourceStorageType is S3, the following three configuration is required, otherwise please ignore
s3Endpoint="http://192.168.xx.xx:9010"
s3AccessKey="xxxxxxxxxx"
s3SecretKey="xxxxxxxxxx"
# resourcemanager port, the default value is 8088 if not specified
resourceManagerHttpAddressPort="9088"
# if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single node, keep this value empty
yarnHaIps="node03"
# if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single node, you only need to replace 'yarnIp1' to actual resourcemanager hostname
singleYarnIp="node03"
# who has permission to create directory under HDFS/S3 root path
# Note: if kerberos is enabled, please config hdfsRootUser=
hdfsRootUser="hadoop"
# kerberos config
# whether kerberos starts, if kerberos starts, following four items need to config, otherwise please ignore
kerberosStartUp="false"
# kdc krb5 config file path
krb5ConfPath="$installPath/conf/krb5.conf"
# keytab username; note that the @ sign must be escaped with \\
keytabUserName="hdfs-mycluster\\@ESZ.COM"
# username keytab path
keytabPath="$installPath/conf/hdfs.headless.keytab"
# kerberos expire time, the unit is hour
kerberosExpireTime="2"
# use sudo or not
sudoEnable="true"
# worker tenant auto create
workerTenantAutoCreate="false"
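Since registryServers points at node03:2181, it is worth confirming the registry is reachable before installing; a minimal probe, assuming nc is installed and (on ZooKeeper 3.5+) ruok is present in 4lw.commands.whitelist:
# a healthy ZooKeeper answers "imok"
echo ruok | nc node03 2181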
3.5. Edit env/dolphinscheduler_env.sh
export HADOOP_HOME=/opt/module/hadoop-3.1.4
export HADOOP_CONF_DIR=/opt/module/hadoop-3.1.4/etc/hadoop
#export SPARK_HOME1=/opt/soft/spark1
export SPARK_HOME2=/opt/module/spark-3.1.2
export PYTHON_HOME=/usr/bin/python
export JAVA_HOME=/usr/java/jdk1.8.0_202-amd64
export HIVE_HOME=/opt/module/hive-3.1.2
export FLINK_HOME=/opt/module/flink-1.13.5
export DATAX_HOME=/opt/module/datax
export PATH=$HADOOP_HOME/bin:$SPARK_HOME2/bin:$PYTHON_HOME/bin:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$DATAX_HOME/bin:$PATH
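A quick sanity check that every home directory configured above actually exists on this host (paths are the ones set in this file):
for d in "$HADOOP_HOME" "$SPARK_HOME2" "$JAVA_HOME" "$HIVE_HOME" "$FLINK_HOME" "$DATAX_HOME"; do
  [ -d "$d" ] || echo "missing: $d"
done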
3.6. Edit conf/common.properties
# user data local directory path, please make sure the directory exists and have read write permissions
data.basedir.path=/opt/module/dolphinscheduler/tmp
# resource storage type: HDFS, S3, NONE
resource.storage.type=HDFS
# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
resource.upload.path=/dolphinscheduler
# whether to startup kerberos
hadoop.security.authentication.startup.state=false
# java.security.krb5.conf path
java.security.krb5.conf.path=/opt/module/dolphinscheduler/conf/krb5.conf
# login user from keytab username
login.user.keytab.username=hdfs-mycluster@ESZ.COM
# login user from keytab path
login.user.keytab.path=/opt/module/dolphinscheduler/conf/hdfs.headless.keytab
# kerberos expire time, the unit is hour
kerberos.expire.time=2
# resource view suffixs
#resource.view.suffixs=txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js
# if resource.storage.type=HDFS, the user must have the permission to create directories under the HDFS root path
hdfs.root.user=hadoop
# if resource.storage.type=S3, the value like: s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
fs.defaultFS=hdfs://node03:9000
# if resource.storage.type=S3, s3 endpoint
fs.s3a.endpoint=http://192.168.xx.xx:9010
# if resource.storage.type=S3, s3 access key
fs.s3a.access.key=xxxxxxxxxx
# if resource.storage.type=S3, s3 secret key
fs.s3a.secret.key=xxxxxxxxxx
# resourcemanager port, the default value is 8088 if not specified
resource.manager.httpaddress.port=9088
# if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
yarn.resourcemanager.ha.rm.ids=node03
# if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname
yarn.application.status.address=http://node03:%s/ws/v1/cluster/apps/%s
# job history status url when application number threshold is reached(default 10000, maybe it was set to 1000)
yarn.job.history.status.address=http://node03:19888/ws/v1/history/mapreduce/jobs/%s
# datasource encryption enable
datasource.encryption.enable=false
# datasource encryption salt
datasource.encryption.salt=!@#$%^&*
# Whether hive SQL is executed in the same session
support.hive.oneSession=false
# use sudo or not, if set true, executing user is tenant user and deploy user needs sudo permissions; if set false, executing user is the deploy user and doesn't need sudo permissions
sudo.enable=true
# network interface preferred like eth0, default: empty
#dolphin.scheduler.network.interface.preferred=
# network IP gets priority, default: inner outer
#dolphin.scheduler.network.priority.strategy=default
# system env path
#dolphinscheduler.env.path=env/dolphinscheduler_env.sh
# development state
development.state=false
#datasource.plugin.dir config
datasource.plugin.dir=lib/plugin/datasource
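As the comments above note, resource.upload.path must already exist on HDFS with read/write access for the deploy user; a quick way to prepare it (run as a user with HDFS superuser rights, e.g. hadoop):
# create the resource root and hand it to the deploy user
hdfs dfs -mkdir -p /dolphinscheduler
hdfs dfs -chown -R hadoop:hadoop /dolphinscheduler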
3.7. Run the table-creation and seed-data script under the script directory
# If the last line of the log reads "create DolphinScheduler success", the import succeeded; check that the corresponding tables were created in the metadata database
sh script/create-dolphinscheduler.sh
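To confirm, list a few of the generated tables (DolphinScheduler table names carry the t_ds_ prefix):
mysql -h node03 -u ds -p'Changxin*8' dolphinscheduler -e 'SHOW TABLES;' | head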
3.8. Edit bin/dolphinscheduler-daemon.sh and script/dolphinscheduler-daemon.sh
Both files contain the same service dispatch block; apply the identical heap settings (HEAP_OPTS) in each:
if [ "$command" = "api-server" ]; then
LOG_FILE="-Dlogging.config=classpath:logback-api.xml"
CLASS=org.apache.dolphinscheduler.api.ApiApplicationServer
HEAP_OPTS="-Xms1g -Xmx1g -Xmn512m"
export DOLPHINSCHEDULER_OPTS="$HEAP_OPTS $DOLPHINSCHEDULER_OPTS $API_SERVER_OPTS"
export SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE},api,${DATABASE_TYPE}"
elif [ "$command" = "master-server" ]; then
LOG_FILE="-Dlogging.config=classpath:logback-master.xml"
CLASS=org.apache.dolphinscheduler.server.master.MasterServer
HEAP_OPTS="-Xms1g -Xmx1g -Xmn512m"
export DOLPHINSCHEDULER_OPTS="$HEAP_OPTS $DOLPHINSCHEDULER_OPTS $MASTER_SERVER_OPTS"
export SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE},master,${DATABASE_TYPE}"
elif [ "$command" = "worker-server" ]; then
LOG_FILE="-Dlogging.config=classpath:logback-worker.xml"
CLASS=org.apache.dolphinscheduler.server.worker.WorkerServer
HEAP_OPTS="-Xms1g -Xmx1g -Xmn512m"
export DOLPHINSCHEDULER_OPTS="$HEAP_OPTS $DOLPHINSCHEDULER_OPTS $WORKER_SERVER_OPTS"
export SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE},worker,${DATABASE_TYPE}"
elif [ "$command" = "alert-server" ]; then
LOG_FILE="-Dlogback.configurationFile=conf/logback-alert.xml"
CLASS=org.apache.dolphinscheduler.alert.AlertServer
HEAP_OPTS="-Xms1g -Xmx1g -Xmn512m"
export DOLPHINSCHEDULER_OPTS="$HEAP_OPTS $DOLPHINSCHEDULER_OPTS $ALERT_SERVER_OPTS"
export SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE},alert,${DATABASE_TYPE}"
elif [ "$command" = "logger-server" ]; then
CLASS=org.apache.dolphinscheduler.server.log.LoggerServer
HEAP_OPTS="-Xms1g -Xmx1g -Xmn512m"
export DOLPHINSCHEDULER_OPTS="$HEAP_OPTS $DOLPHINSCHEDULER_OPTS $LOGGER_SERVER_OPTS"
elif [ "$command" = "standalone-server" ]; then
CLASS=org.apache.dolphinscheduler.server.StandaloneServer
export SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE},standalone,${DATABASE_TYPE}"
elif [ "$command" = "python-gateway-server" ]; then
CLASS=org.apache.dolphinscheduler.server.PythonGatewayServer
export SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE},python-gateway,${DATABASE_TYPE}"
else
echo "Error: No command named '$command' was found."
exit 1
fi
3.9. Switch to the deploy user hadoop and run the one-click deployment script
su - hadoop
sh install.sh
3.10. Services started
After install.sh completes, the following services should be running (a quick jps check follows the list):
ApiApplicationServer
WorkerServer
LoggerServer
AlertServer
MasterServer
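All five should show up in jps (shipped with the JDK):
jps | grep -E 'ApiApplicationServer|WorkerServer|LoggerServer|AlertServer|MasterServer'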
3.11. Start/stop commands
# Stop all cluster services with one command
sh ./bin/stop-all.sh
# Start all cluster services with one command
sh ./bin/start-all.sh
# Start/stop Master
sh ./bin/dolphinscheduler-daemon.sh stop master-server
sh ./bin/dolphinscheduler-daemon.sh start master-server
# Start/stop Worker
sh ./bin/dolphinscheduler-daemon.sh start worker-server
sh ./bin/dolphinscheduler-daemon.sh stop worker-server
# Start/stop API server
sh ./bin/dolphinscheduler-daemon.sh start api-server
sh ./bin/dolphinscheduler-daemon.sh stop api-server
# Start/stop Logger
sh ./bin/dolphinscheduler-daemon.sh start logger-server
sh ./bin/dolphinscheduler-daemon.sh stop logger-server
# Start/stop Alert
sh ./bin/dolphinscheduler-daemon.sh start alert-server
sh ./bin/dolphinscheduler-daemon.sh stop alert-server
4. Verification
4.1. Web UI
Open the front-end URL; the login page should appear. The hostname is the machine where ApiApplicationServer is deployed.
http://node03:12345/dolphinscheduler
Default username/password: admin / dolphinscheduler123
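A headless check that the UI is being served (a 200 or a 3xx redirect to the login page both indicate the API server is up):
curl -s -o /dev/null -w '%{http_code}\n' http://node03:12345/dolphinscheduler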
4.2. Datasource Center
4.3. Resource Center
4.4. Function Management - UDF functions
4.5. Hive UDF function test
4.6. View run logs