<workflow-app name="[WF-DEF-NAME]" xmlns="uri:oozie:workflow:0.1">
...
<decision name="[NODE-NAME]">
<switch>
<case to="[NODE_NAME]">[PREDICATE]</case>
...
<case to="[NODE_NAME]">[PREDICATE]</case>
<default to="[NODE_NAME]"/>
</switch>
</decision>
...
</workflow-app>
demo
<workflow-app name="foo-wf" xmlns="uri:oozie:workflow:0.1">
...
<decision name="mydecision">
<switch>
<case to="reconsolidatejob">
${fs:fileSize(secondjobOutputDir) gt 10 * GB}
</case> <case to="rexpandjob">
${fs:fileSize(secondjobOutputDir) lt 100 * MB}
</case>
<case to="recomputejob">
${ hadoop:counters('secondjob')[RECORDS][REDUCE_OUT] lt 1000000 }
</case>
<default to="end"/>
</switch>
</decision>
...
</workflow-app>
Decision node
<workflow-app name="[WF-DEF-NAME]" xmlns="uri:oozie:workflow:0.1">
...
<fork name="[FORK-NODE-NAME]">
<path start="[NODE-NAME]" />
...
<path start="[NODE-NAME]" />
</fork>
...
<join name="[JOIN-NODE-NAME]" to="[NODE-NAME]" />
...
</workflow-app>
demo
<workflow-app name="sample-wf" xmlns="uri:oozie:workflow:0.1">
...
<fork name="forking">
<path start="firstparalleljob"/>
<path start="secondparalleljob"/>
</fork>
<action name="firstparallejob">
<map-reduce>
<job-tracker>foo:8021</job-tracker>
<name-node>bar:8020</name-node>
<job-xml>job1.xml</job-xml>
</map-reduce>
<ok to="joining"/>
<error to="kill"/>
</action>
<action name="secondparalleljob">
<map-reduce>
<job-tracker>foo:8021</job-tracker>
<name-node>bar:8020</name-node>
<job-xml>job2.xml</job-xml>
</map-reduce>
<ok to="joining"/>
<error to="kill"/>
</action>
<join name="joining" to="nextaction"/>
...
</workflow-app>
Fork/join nodes
<workflow-app name="[WF-DEF-NAME]" xmlns="uri:oozie:workflow:0.5">
...
<action name="[NODE-NAME]">
<fs>
<delete path='[PATH]'/>
...
<mkdir path='[PATH]'/>
...
<move source='[SOURCE-PATH]' target='[TARGET-PATH]'/>
...
<chmod path='[PATH]' permissions='[PERMISSIONS]' dir-files='false' />
...
<touchz path='[PATH]' />
...
<chgrp path='[PATH]' group='[GROUP]' dir-files='false' />
</fs>
<ok to="[NODE-NAME]"/>
<error to="[NODE-NAME]"/>
</action>
...
</workflow-app>
demo
<workflow-app name="sample-wf" xmlns="uri:oozie:workflow:0.5">
...
<action name="hdfscommands">
<fs>
<delete path='hdfs://foo:8020/usr/tucu/temp-data'/>
<mkdir path='archives/${wf:id()}'/>
<move source='${jobInput}' target='archives/${wf:id()}/processed-input'/>
<chmod path='${jobOutput}' permissions='-rwxrw-rw-' dir-files='true'><recursive/></chmod>
<chgrp path='${jobOutput}' group='testgroup' dir-files='true'><recursive/></chgrp>
</fs>
<ok to="myotherjob"/>
<error to="errorcleanup"/>
</action>
...
</workflow-app>
Fs (HDFS) action
<workflow-app name="[WF-DEF-NAME]" xmlns="uri:oozie:workflow:0.1">
...
<action name="[NODE-NAME]">
<ssh>
<host>[USER]@[HOST]</host>
<command>[SHELL]</command>
<args>[ARGUMENTS]</args>
...
<capture-output/>
</ssh>
<ok to="[NODE-NAME]"/>
<error to="[NODE-NAME]"/>
</action>
...
</workflow-app>
demo
<workflow-app name="sample-wf" xmlns="uri:oozie:workflow:0.1">
...
<action name="myssjob">
<ssh>
<host>foo@bar.com</host>
<command>uploaddata</command>
<args>jdbc:derby://bar.com:1527/myDB</args>
<args>hdfs://foobar.com:8020/usr/tucu/myData</args>
</ssh>
<ok to="myotherjob"/>
<error to="errorcleanup"/>
</action>
...
</workflow-app>
Ssh Action
<workflow-app name="[WF-DEF-NAME]" xmlns="uri:oozie:workflow:0.1">
...
<action name="[NODE-NAME]">
<sub-workflow>
<app-path>[WF-APPLICATION-PATH]</app-path>
<propagate-configuration/>
<configuration>
<property>
<name>[PROPERTY-NAME]</name>
<value>[PROPERTY-VALUE]</value>
</property>
...
</configuration>
</sub-workflow>
<ok to="[NODE-NAME]"/>
<error to="[NODE-NAME]"/>
</action>
...
</workflow-app>
demo
<workflow-app name="sample-wf" xmlns="uri:oozie:workflow:0.1">
...
<action name="a">
<sub-workflow>
<app-path>child-wf</app-path>
<configuration>
<property>
<name>input.dir</name>
<value>${wf:id()}/second-mr-output</value>
</property>
</configuration>
</sub-workflow>
<ok to="end"/>
<error to="kill"/>
</action>
...
</workflow-app>
Sub-workflow Action
<workflow-app name="[WF-DEF-NAME]" xmlns="uri:oozie:workflow:0.1">
...
<action name="[NODE-NAME]">
<java>
<job-tracker>[JOB-TRACKER]</job-tracker>
<name-node>[NAME-NODE]</name-node>
<prepare>
<delete path="[PATH]"/>
...
<mkdir path="[PATH]"/>
...
</prepare>
<job-xml>[JOB-XML]</job-xml>
<configuration>
<property>
<name>[PROPERTY-NAME]</name>
<value>[PROPERTY-VALUE]</value>
</property>
...
</configuration>
<main-class>[MAIN-CLASS]</main-class>
<java-opts>[JAVA-STARTUP-OPTS]</java-opts>
<arg>ARGUMENT</arg>
...
<file>[FILE-PATH]</file>
...
<archive>[FILE-PATH]</archive>
...
<capture-output />
</java>
<ok to="[NODE-NAME]"/>
<error to="[NODE-NAME]"/>
</action>
...
</workflow-app>
demo
<workflow-app name="sample-wf" xmlns="uri:oozie:workflow:0.1">
...
<action name="myfirstjavajob">
<java>
<job-tracker>foo:8021</job-tracker>
<name-node>bar:8020</name-node>
<prepare>
<delete path="${jobOutput}"/>
</prepare>
<configuration>
<property>
<name>mapred.queue.name</name>
<value>default</value>
</property>
</configuration>
<main-class>org.apache.oozie.MyFirstMainClass</main-class>
<java-opts>-Dblah</java-opts>
<arg>argument1</arg>
<arg>argument2</arg>
</java>
<ok to="myotherjob"/>
<error to="errorcleanup"/>
</action>
...
</workflow-app>
java action
<workflow-app name="[WF-DEF-NAME]" xmlns="uri:oozie:workflow:0.5">
...
<action name="[NODE-NAME]">
<map-reduce>
<job-tracker>[JOB-TRACKER]</job-tracker>
<name-node>[NAME-NODE]</name-node>
<prepare>
<delete path="[PATH]"/>
...
<mkdir path="[PATH]"/>
...
</prepare>
<streaming>
<mapper>[MAPPER-PROCESS]</mapper>
<reducer>[REDUCER-PROCESS]</reducer>
<record-reader>[RECORD-READER-CLASS]</record-reader>
<record-reader-mapping>[NAME=VALUE]</record-reader-mapping>
...
<env>[NAME=VALUE]</env>
...
</streaming>
<!-- Either streaming or pipes can be specified for an action, not both -->
<pipes>
<map>[MAPPER]</map>
<reduce>[REDUCER]</reduce>
<inputformat>[INPUTFORMAT]</inputformat>
<partitioner>[PARTITIONER]</partitioner>
<writer>[OUTPUTFORMAT]</writer>
<program>[EXECUTABLE]</program>
</pipes>
<job-xml>[JOB-XML-FILE]</job-xml>
<configuration>
<property>
<name>[PROPERTY-NAME]</name>
<value>[PROPERTY-VALUE]</value>
</property>
...
</configuration>
<config-class>com.example.MyConfigClass</config-class>
<file>[FILE-PATH]</file>
...
<archive>[FILE-PATH]</archive>
...
</map-reduce>
<ok to="[NODE-NAME]"/>
<error to="[NODE-NAME]"/>
</action>
...
</workflow-app>
demo
<workflow-app name="foo-wf" xmlns="uri:oozie:workflow:0.1">
...
<action name="myfirstHadoopJob">
<map-reduce>
<job-tracker>foo:8021</job-tracker>
<name-node>bar:8020</name-node>
<prepare>
<delete path="hdfs://foo:8020/usr/tucu/output-data"/>
</prepare>
<job-xml>/myfirstjob.xml</job-xml>
<configuration>
<property>
<name>mapred.input.dir</name>
<value>/usr/tucu/input-data</value>
</property>
<property>
<name>mapred.output.dir</name>
<value>/usr/tucu/output-data</value>
</property>
<property>
<name>mapred.reduce.tasks</name>
<value>${firstJobReducers}</value>
</property>
<property>
<name>oozie.action.external.stats.write</name>
<value>true</value>
</property>
</configuration>
</map-reduce>
<ok to="myNextAction"/>
<error to="errorCleanup"/>
</action>
...
</workflow-app>
<workflow-app name="sample-wf" xmlns="uri:oozie:workflow:0.1">
...
<action name="firstjob">
<map-reduce>
<job-tracker>foo:8021</job-tracker>
<name-node>bar:8020</name-node>
<prepare>
<delete path="${output}"/>
</prepare>
<streaming>
<mapper>/bin/bash testarchive/bin/mapper.sh testfile</mapper>
<reducer>/bin/bash testarchive/bin/reducer.sh</reducer>
</streaming>
<configuration>
<property>
<name>mapred.input.dir</name>
<value>${input}</value>
</property>
<property>
<name>mapred.output.dir</name>
<value>${output}</value>
</property>
<property>
<name>stream.num.map.output.key.fields</name>
<value>3</value>
</property>
</configuration>
<file>/users/blabla/testfile.sh#testfile</file>
<archive>/users/blabla/testarchive.jar#testarchive</archive>
</map-reduce>
<ok to="end"/>
<error to="kill"/>
</action>
...
</workflow-app>
<workflow-app name="sample-wf" xmlns="uri:oozie:workflow:0.1">
...
<action name="firstjob">
<map-reduce>
<job-tracker>foo:8021</job-tracker>
<name-node>bar:8020</name-node>
<prepare>
<delete path="${output}"/>
</prepare>
<pipes>
<program>bin/wordcount-simple#wordcount-simple</program>
</pipes>
<configuration>
<property>
<name>mapred.input.dir</name>
<value>${input}</value>
</property>
<property>
<name>mapred.output.dir</name>
<value>${output}</value>
</property>
</configuration>
<archive>/users/blabla/testarchive.jar#testarchive</archive>
</map-reduce>
<ok to="end"/>
<error to="kill"/>
</action>
...
</workflow-app>
mr action
1. Defining workflows in Oozie
An Oozie workflow consists of control-flow nodes and action nodes. It is defined in workflow.xml and constrained by a schema.
The control-flow nodes are: start, decision, fork, join, kill, and end.
An action is a task that executes or computes something, such as a MapReduce job, a Pig job, or a shell command. One MapReduce job run is one MapReduce action. Every action node has two transitions: ok and error.
The workflow action nodes are: MapReduce, Pig, Fs (HDFS), Ssh, Sub-workflow, and Java actions. Inside a workflow, Oozie runs MapReduce, Hive, Sqoop, or shell scripts.
The action extensions include: Email Action, Shell Action, Hive Action, Hive 2 Action, Sqoop Action, Ssh Action, DistCp Action, and Writing a Custom Action Executor.
The workflow definition language is XML-based and is called hPDL (Hadoop Process Definition Language). Node names must match [a-zA-Z][\-_a-zA-Z0-9]* and be at most 20 characters long.
job.properties: points to the HDFS location of the workflow.xml file and holds the other job parameters.
workflow.xml: contains the start, action, kill, and end nodes.
lib directory: holds the dependent jar files.
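Laid out on HDFS, a workflow application therefore typically looks like this (an illustrative sketch; the application name my-wf and jar name are not from the examples below):
oozie-apps/my-wf/workflow.xml
oozie-apps/my-wf/lib/my-job.jar
job.properties itself usually stays on the machine running the oozie CLI, although it may also be uploaded alongside workflow.xml.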
2. MapReduce Action
Goal: schedule a MapReduce program with Oozie.
Approach: move what used to be the Driver part of the Java MapReduce program into the configuration block of workflow.xml.
Example: run the wordcount program using the examples shipped with the Oozie package.
(1) Create the wordcount input directory on HDFS and upload the file to be counted.
hadoop-2.5.0-cdh5.3.6]$ bin/hdfs dfs -mkdir -p mapreduce/wordcount/input
hadoop-2.5.0-cdh5.3.6]$ bin/hdfs dfs -put /opt/datas/wc.input mapreduce/wordcount/input
(2) Run the wordcount jar, specifying the input and output paths.
hadoop-2.5.0-cdh5.3.6]$ bin/yarn jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.5.0-cdh5.3.6.jar wordcount mapreduce/wordcount/input mapreduce/wordcount/output
(3) Copy the examples shipped with Oozie into a new directory, to serve as the MapReduce action example.
oozie-4.0.0-cdh5.3.6]$ mkdir oozie-apps
oozie-4.0.0-cdh5.3.6]$ cd oozie-apps/
oozie-apps]$ cp -r ../examples/apps/map-reduce/ .
oozie-apps]$ mv map-reduce/ mr-wordcount-wf
(4) Edit /opt/cdh-5.3.6/oozie-4.0.0-cdh5.3.6/oozie-apps/mr-wordcount-wf/workflow.xml.
<workflow-app xmlns="uri:oozie:workflow:0.5" name="mr-wordcount-wf">
<start to="mr-node-wordcount"/>
<action name="mr-node-wordcount">
<map-reduce>
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<prepare>
<delete path="${nameNode}/${oozieDataRoot}/${outputDir}"/>
</prepare>
<configuration>
<property>
<name>mapred.mapper.new-api</name>
<value>true</value>
</property>
<property>
<name>mapred.reducer.new-api</name>
<value>true</value>
</property>
<property>
<name>mapreduce.job.queuename</name>
<value>${queueName}</value>
</property>
<property>
<name>mapreduce.job.map.class</name>
<value>com.ibeifeng.hadoop.senior.mapreduce.WordCount$WordCountMapper</value>
</property>
<property>
<name>mapreduce.job.reduce.class</name>
<value>com.ibeifeng.hadoop.senior.mapreduce.WordCount$WordCountReducer</value>
</property>
<property>
<name>mapreduce.map.output.key.class</name>
<value>org.apache.hadoop.io.Text</value>
</property>
<property>
<name>mapreduce.map.output.value.class</name>
<value>org.apache.hadoop.io.IntWritable</value>
</property>
<property>
<name>mapreduce.job.output.key.class</name>
<value>org.apache.hadoop.io.Text</value>
</property>
<property>
<name>mapreduce.job.output.value.class</name>
<value>org.apache.hadoop.io.IntWritable</value>
</property>
<property>
<name>mapreduce.input.fileinputformat.inputdir</name>
<value>${nameNode}/${oozieDataRoot}/${inputDir}</value>
</property>
<property>
<name>mapreduce.output.fileoutputformat.outputdir</name>
<value>${nameNode}/${oozieDataRoot}/${outputDir}</value>
</property>
</configuration>
</map-reduce>
<ok to="end"/>
<error to="fail"/>
</action>
<kill name="fail">
<message>Map/Reduce failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<end name="end"/>
</workflow-app>
(5) Edit /opt/cdh-5.3.6/oozie-4.0.0-cdh5.3.6/oozie-apps/mr-wordcount-wf/job.properties.
nameNode=hdfs://hadoop-senior.ibeifeng.com:8020
jobTracker=hadoop-senior.ibeifeng.com:8032
queueName=default
oozieAppsRoot=user/beifeng/oozie-apps
oozieDataRoot=user/beifeng/oozie/datas
oozie.wf.application.path=${nameNode}/${oozieAppsRoot}/mr-wordcount-wf/workflow.xml
inputDir=mr-wordcount-wf/input
outputDir=mr-wordcount-wf/output
(6) Put the previously built mr-wc.jar into the lib directory of the MapReduce action example.
$ cd /opt/modules/hadoop-2.5.0/jars/
jars]$ cp mr-wc.jar /opt/cdh-5.3.6/oozie-4.0.0-cdh5.3.6/oozie-apps/mr-wordcount-wf/lib
mr-wordcount-wf]$ cd lib
lib]$ ls mr-wc.jar
(7) Upload the MapReduce action directory to HDFS.
oozie-4.0.0-cdh5.3.6]$ /opt/cdh-5.3.6/hadoop-2.5.0-cdh5.3.6/bin/hdfs dfs -put oozie-apps/ oozie-apps
(8) Create the wordcount input directory on HDFS.
hadoop-2.5.0-cdh5.3.6]$ bin/hdfs dfs -mkdir -p oozie/datas/mr-wordcount-wf/input
(9) Upload the file to be counted into the HDFS input directory.
hadoop-2.5.0-cdh5.3.6]$ bin/hdfs dfs -put /opt/datas/wc.input oozie/datas/mr-wordcount-wf/input
(10) If the wrong file was uploaded, delete it from HDFS.
hadoop-2.5.0-cdh5.3.6]$ bin/hdfs dfs -rm -r oozie/datas/input
(11) Temporarily set the OOZIE_URL environment variable to the Oozie server URL for the oozie CLI (the same URL opens the Oozie web console in a browser).
oozie-4.0.0-cdh5.3.6]$ export OOZIE_URL=http://hadoop-senior.ibeifeng.com:11000/oozie/
(12) Run the Oozie workflow scheduler to execute the MapReduce action.
oozie-4.0.0-cdh5.3.6]$ bin/oozie job -config oozie-apps/mr-wordcount-wf/job.properties -run
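The job id printed by -run can then be used to follow progress (a usage sketch; the id shown is illustrative, in the format this article's kill command uses later):
oozie-4.0.0-cdh5.3.6]$ bin/oozie job -info 0000001-180717120019494-oozie-beif-W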
3. Hive Action
Docs: http://archive.cloudera.com/cdh5/cdh/5/oozie-4.0.0-cdh5.3.6/DG_HiveActionExtension.html
(1) Edit /opt/cdh-5.3.6/oozie-4.0.0-cdh5.3.6/oozie-apps/hive-select/job.properties.
nameNode=hdfs://hadoop-senior.ibeifeng.com:8020
jobTracker=hadoop-senior.ibeifeng.com:8032
queueName=default
oozieAppsRoot=user/beifeng/oozie-apps
oozieDataRoot=user/beifeng/oozie/datas
oozie.use.system.libpath=true
oozie.wf.application.path=${nameNode}/${oozieAppsRoot}/hive-select
outputDir=hive-select/output
(2) Edit the SQL script /opt/cdh-5.3.6/oozie-4.0.0-cdh5.3.6/oozie-apps/hive-select/select-student.sql. Running it is similar to the shell command bin/hive -f select-student.sql.
insert overwrite directory '${OUTPUT}'
select count(1) cnt from default.student;
(3) Edit /opt/cdh-5.3.6/oozie-4.0.0-cdh5.3.6/oozie-apps/hive-select/workflow.xml.
<workflow-app xmlns="uri:oozie:workflow:0.5" name="wf-hive-select">
<start to="hive-node"/>
<action name="hive-node">
<hive xmlns="uri:oozie:hive-action:0.2">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<prepare>
<delete path="${nameNode}/${oozieDataRoot}/${outputDir}"/>
</prepare>
<job-xml>${nameNode}/${oozieAppsRoot}/hive-select/hive-site.xml</job-xml>
<configuration>
<property>
<name>mapred.job.queue.name</name>
<value>${queueName}</value>
</property>
</configuration>
<script>select-student.sql</script>
<param>OUTPUT=${nameNode}/${oozieDataRoot}/${outputDir}</param>
</hive>
<ok to="end"/>
<error to="fail"/>
</action>
<kill name="fail">
<message>Hive failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<end name="end"/>
</workflow-app>
(4) Upload the hive action example to HDFS.
oozie-apps]$ /opt/cdh-5.3.6/hadoop-2.5.0-cdh5.3.6/bin/hdfs dfs -put hive-select/ /user/beifeng/oozie-apps
(5) Set the environment variable.
export OOZIE_URL=http://hadoop-senior.ibeifeng.com:11000/oozie/
(6) Run the Oozie workflow to schedule the hive action.
oozie-4.0.0-cdh5.3.6]$ bin/oozie job -config oozie-apps/hive-select/job.properties -run
(7) Put the /opt/cdh-5.3.6/hive-0.13.1-cdh5.3.6/conf/hive-site.xml configuration file into the hive action directory.
conf]$ cp hive-site.xml /opt/cdh-5.3.6/oozie-4.0.0-cdh5.3.6/oozie-apps/hive-select/
(8) The earlier upload was wrong; delete the stale file from HDFS.
hive-select]$ /opt/cdh-5.3.6/hadoop-2.5.0-cdh5.3.6/bin/hdfs dfs -rm /user/beifeng/oozie-apps/hive-select/workflow.xml
(9) Upload workflow.xml and hive-site.xml to the target directory on HDFS.
hive-select]$ /opt/cdh-5.3.6/hadoop-2.5.0-cdh5.3.6/bin/hdfs dfs -put workflow.xml hive-site.xml /user/beifeng/oozie-apps/hive-select/
(10) Put the MySQL jar into the hive action's lib directory.
hive-select]$ mkdir lib
hive-select]$ cp /opt/cdh-5.3.6/hive-0.13.1-cdh5.3.6/lib/mysql-connector-java-5.1.27-bin.jar ./lib/
(11) Upload the lib directory with the MySQL jar to HDFS.
hive-select]$ /opt/cdh-5.3.6/hadoop-2.5.0-cdh5.3.6/bin/hdfs dfs -put lib/ /user/beifeng/oozie-apps/hive-select/
(12) Run the Oozie workflow to schedule the hive action program.
oozie-4.0.0-cdh5.3.6]$ bin/oozie job -config oozie-apps/hive-select/job.properties -run
(13) Kill the Oozie job that is currently running.
oozie-4.0.0-cdh5.3.6]$ bin/oozie job -kill 0000001-180717120019494-oozie-beif-W
4. Sqoop Action
Docs: http://archive.cloudera.com/cdh5/cdh/5/oozie-4.0.0-cdh5.3.6/DG_SqoopActionExtension.html
(1) Go to /opt/cdh-5.3.6/oozie-4.0.0-cdh5.3.6/examples/apps and copy the sqoop example from examples into your own directory.
apps]$ cp -r sqoop ../../oozie-apps/
(2) Check whether the MySQL service is running.
$ su
Password:
[root@hadoop-senior apps]# service mysql status
MySQL running (1717) [ OK ]
[root@hadoop-senior apps]# exit
$ mysql -uroot -p123456
(3) Write the first sqoop action: edit /opt/cdh-5.3.6/oozie-4.0.0-cdh5.3.6/oozie-apps/sqoop-import-user/workflow.xml.
<workflow-app xmlns="uri:oozie:workflow:0.5" name="sqoop-wf">
<start to="sqoop-node"/>
<action name="sqoop-node">
<sqoop xmlns="uri:oozie:sqoop-action:0.3">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<prepare>
<delete path="${nameNode}/${oozieDataRoot}/${outputDir}"/>
</prepare>
<configuration>
<property>
<name>mapred.job.queue.name</name>
<value>${queueName}</value>
</property>
</configuration>
<command>import --connect jdbc:mysql://hadoop-senior.ibeifeng.com:3306/test --username root --password 123456 --table my_user --target-dir /user/beifeng/oozie/datas/sqoop-import-user/output --fields-terminated-by "$$$" --num-mappers 1</command>
</sqoop>
<ok to="end"/>
<error to="fail"/>
</action>
<kill name="fail">
<message>Sqoop failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<end name="end"/>
</workflow-app>
(4) Still for the first sqoop action: edit /opt/cdh-5.3.6/oozie-4.0.0-cdh5.3.6/oozie-apps/sqoop-import-user/job.properties.
nameNode=hdfs://hadoop-senior.ibeifeng.com:8020
jobTracker=hadoop-senior.ibeifeng.com:8032
queueName=default
oozieAppsRoot=user/beifeng/oozie-apps
oozieDataRoot=user/beifeng/oozie/datas
oozie.use.system.libpath=true
oozie.wf.application.path=${nameNode}/${oozieAppsRoot}/sqoop-import-user
outputDir=sqoop-import-user/output
(5) Copy the MySQL jar into the sqoop action's lib directory.
(6) Upload the sqoop action program to HDFS.
oozie-apps]$ /opt/cdh-5.3.6/hadoop-2.5.0-cdh5.3.6/bin/hdfs dfs -put sqoop-import-user/ /user/beifeng/oozie-apps
(7) Run the Oozie workflow to schedule the sqoop action program.
oozie-4.0.0-cdh5.3.6]$ bin/oozie job -config oozie-apps/sqoop-import-user/job.properties -run
(8) Write the second sqoop action program: edit /opt/cdh-5.3.6/oozie-4.0.0-cdh5.3.6/oozie-apps/sqoop-import-user2/workflow.xml.
<workflow-app xmlns="uri:oozie:workflow:0.5" name="sqoop-wf">
<start to="sqoop-node"/>
<action name="sqoop-node">
<sqoop xmlns="uri:oozie:sqoop-action:0.3">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<prepare>
<delete path="${nameNode}/${oozieDataRoot}/${outputDir}"/>
</prepare>
<configuration>
<property>
<name>mapred.job.queue.name</name>
<value>${queueName}</value>
</property>
</configuration>
<command>import --options-file ${impUser}</command>
</sqoop>
<ok to="end"/>
<error to="fail"/>
</action>
<kill name="fail">
<message>Sqoop failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<end name="end"/>
</workflow-app>
(9) Edit /opt/cdh-5.3.6/oozie-4.0.0-cdh5.3.6/oozie-apps/sqoop-import-user2/job.properties.
nameNode=hdfs://hadoop-senior.ibeifeng.com:8020
jobTracker=hadoop-senior.ibeifeng.com:8032
queueName=default
oozieAppsRoot=user/beifeng/oozie-apps
oozieDataRoot=user/beifeng/oozie/datas
oozie.use.system.libpath=true
oozie.wf.application.path=${nameNode}/${oozieAppsRoot}/sqoop-import-user2
outputDir=sqoop-import-user/output
impUser=${nameNode}/${oozieAppsRoot}/sqoop-import-user2/imp-user.sql
(10) Edit the options file /opt/cdh-5.3.6/oozie-4.0.0-cdh5.3.6/oozie-apps/sqoop-import-user2/imp-user.sql. Running it is similar to the shell command bin/sqoop --options-file imp-user.sql.
--connect
jdbc:mysql://hadoop-senior.ibeifeng.com:3306/test
--username
root
--password
123456
--table
my_user
--target-dir
/user/beifeng/oozie/datas/sqoop-import-user/output
--fields-terminated-by
"\t"
--num-mappers
1
(11) Copy the MySQL jar into the second sqoop action's lib directory.
(12) Upload the second sqoop action example program.
oozie-apps]$ /opt/cdh-5.3.6/hadoop-2.5.0-cdh5.3.6/bin/hdfs dfs -put sqoop-import-user2 oozie-apps/
(13) Run the Oozie workflow to schedule the second sqoop action program.
oozie-4.0.0-cdh5.3.6]$ bin/oozie job -config oozie-apps/sqoop-import-user2/job.properties -run
5. Shell Action
Docs: http://archive.cloudera.com/cdh5/cdh/5/oozie-4.0.0-cdh5.3.6/DG_ShellActionExtension.html
(1) Go to /opt/cdh-5.3.6/oozie-4.0.0-cdh5.3.6/examples/apps, copy the shell example from examples into your own directory, and rename it as needed.
apps]$ cp -r shell/ ../../oozie-apps/
oozie-apps]$ mv shell/ shell-hive-select
(2) Edit /opt/cdh-5.3.6/oozie-4.0.0-cdh5.3.6/oozie-apps/shell-hive-select/job.properties.
nameNode=hdfs://hadoop-senior.ibeifeng.com:8020
jobTracker=hadoop-senior.ibeifeng.com:8032
queueName=default
oozieAppsRoot=user/beifeng/oozie-apps
oozieDataRoot=user/beifeng/oozie/datas
oozie.wf.application.path=${nameNode}/${oozieAppsRoot}/shell-hive-select
exec=student-select.sh
script=student-select.sql
(3) Edit /opt/cdh-5.3.6/oozie-4.0.0-cdh5.3.6/oozie-apps/shell-hive-select/workflow.xml.
<workflow-app xmlns="uri:oozie:workflow:0.5" name="shell-wf">
<start to="shell-node"/>
<action name="shell-node">
<shell xmlns="uri:oozie:shell-action:0.2">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<configuration>
<property>
<name>mapred.job.queue.name</name>
<value>${queueName}</value>
</property>
</configuration>
<exec>${exec}</exec>
<file>${nameNode}/${oozieAppsRoot}/shell-hive-select/${exec}#${exec}</file>
<file>${nameNode}/${oozieAppsRoot}/shell-hive-select/${script}#${script}</file>
<capture-output/>
</shell>
<ok to="end"/>
<error to="fail"/>
</action>
<kill name="fail">
<message>Shell action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<end name="end"/>
</workflow-app>
(4) Edit the shell script /opt/cdh-5.3.6/oozie-4.0.0-cdh5.3.6/oozie-apps/shell-hive-select/student-select.sh.
#!/usr/bin/env bash
## student select
/opt/cdh-5.3.6/hive-0.13.1-cdh5.3.6/bin/hive -f student-select.sql
(5) Edit the SQL script /opt/cdh-5.3.6/oozie-4.0.0-cdh5.3.6/oozie-apps/shell-hive-select/student-select.sql.
insert overwrite directory '/user/beifeng/oozie/datas/shell-hive-select/output'
select id, name from default.student;
(6) Upload the shell action program to HDFS.
oozie-apps]$ /opt/cdh-5.3.6/hadoop-2.5.0-cdh5.3.6/bin/hdfs dfs -put shell-hive-select/ oozie-apps/
(7) Run the Oozie workflow to schedule the shell action program.
oozie-4.0.0-cdh5.3.6]$ bin/oozie job -config oozie-apps/shell-hive-select/job.properties -run
6. Coordinating multiple actions
A workflow.xml that chains several actions is configured along these lines (see the skeleton after this list):
- start node
- a hive action analyzes the input data and stores the result on HDFS
- a sqoop action exports the HDFS data into MySQL, where it backs the front-end display
- kill node
- end node
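A hedged skeleton of such a chain; the node names are illustrative and the action bodies are elided, following the hive and sqoop examples in sections 3 and 4:
<workflow-app xmlns="uri:oozie:workflow:0.5" name="hive-to-mysql-wf">
<start to="hive-node"/>
<action name="hive-node">
<hive xmlns="uri:oozie:hive-action:0.2"> ... </hive>
<ok to="sqoop-node"/>
<error to="fail"/>
</action>
<action name="sqoop-node">
<sqoop xmlns="uri:oozie:sqoop-action:0.3"> ... </sqoop>
<ok to="end"/>
<error to="fail"/>
</action>
<kill name="fail">
<message>Workflow failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<end name="end"/>
</workflow-app>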
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Oozie also provides a web UI that can be operated directly, which is convenient and practical later on, but there is a pitfall: the web console opened right after adding the service shows an error page (screenshot not reproduced here).
The fix is to install the ExtJS library: the ext-2.2.zip file must be downloaded, but the official site charges for it, and the CSDN mirrors in China charge as well, so it takes some searching to find.
Once downloaded, unzip the archive into /var/lib/oozie/ (unzip ext-2.2.zip -d /var/lib/oozie/), then enable the web console in the Oozie configuration on the CM web UI and restart.
After the restart the console comes up normally.
The address is OozieserverIp:11000/oozie/ (OozieserverIp is the IP of the server where Oozie runs).
Note: when configuring jobs, mind the time zone; it must match the time zone of the platform where the job's executing body runs (Settings -> Timezone).
An Oozie job consists of three parts: job.properties, workflow.xml, and the lib folder. Each is introduced below.
job.properties
As the name suggests, this file configures the parameters used by a job, summarized as follows:
nameNode: HDFS address
jobTracker: JobTracker (ResourceManager) address
queueName: Oozie queue (default: default)
examplesRoot: global directory (default: examples)
oozie.use.system.libpath: whether to load the user lib directory (true/false)
oozie.libpath: location of the user lib directory
oozie.wf.application.path: HDFS path of the Oozie flow (where workflow.xml lives)
user.name: current user
For a Coordinator: oozie.coord.application.path, the path of coordinator.xml (omit if unused)
For a Bundle: oozie.bundle.application.path, the path of bundle.xml (omit if unused)
Notes: 1. When the job is submitted from the local command line, this file can stay local; it may also be placed on HDFS at the same level as workflow.xml and lib.
2. nameNode, jobTracker, and the location of workflow.xml on HDFS must always be set.
e.g. starting a job:
oozie job -oozie http://oozieServerIp:11000/oozie/ -config job.properties -run
A simple job.properties file looks like this:
job.properties for a Shell node:
nameNode=hdfs://cm1:8020
jobTracker=cm1:8032
queueName=default
examplesRoot=examples
oozie.wf.application.path=${nameNode}/user/workflow/oozie/shell
workflow.xml:
This file defines the overall flow of the job. Three things deserve attention: the version information, the EL functions, and the nodes.
An example first:
<workflow-app xmlns="uri:oozie:workflow:0.4" name="${sys_name}-MonitorPlus-Task-${task_id}">
<start to="check-xxx-succ-flag"/>
<decision name="check-xxx-succ-flag">
<switch>
<case to="check-mr-succ-flag">${fs:exists(concat(concat("/xxx/output/xxxList/", task_id),"/_SUCCESS"))}</case>
<default to="do-xxx"/>
</switch>
</decision>
<decision name="check-mr-succ-flag">
<switch>
<case to="end">${fs:exists(concat(concat("/xxx/output/", task_id),"/_SUCCESS"))}</case>
<default to="do-mr"/>
</switch>
</decision>
<action name="do-xxx" retry-max="3" retry-interval="10">
<java>
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<configuration>
<property>
<name>mapreduce.job.queuename</name>
<value>${queueName}</value>
</property>
</configuration>
<main-class>com.xxx.Main</main-class>
</java>
<ok to="end"/>
<error to="fail"/>
</action>
<kill name="fail">
<message>Map/Reduce failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<end name="end"/>
</workflow-app>
Version information:
This goes on the first line, like so:
<workflow-app xmlns="uri:oozie:workflow:0.4" name="${sys_name}-MonitorPlus-Task-${task_id}">
The xmlns attribute declares workflow schema version 0.4. The version must not be higher than the highest version the Oozie server supports; it may be lower.
EL functions
Almost anything wrapped in "${ }" is an EL expression; the braces contain the function or variable being evaluated. Constants set in job.properties can be retrieved the same way, e.g. ${nameNode}. The common EL functions fall into basic EL functions, workflow EL functions, and HDFS EL functions; a few representative examples follow.
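All of the following are standard Oozie EL functions; the surrounding tags are illustrative fragments rather than a complete workflow:
<value>${concat('/data/', 'input')}</value> <!-- basic EL: string helper -->
<value>${wf:id()}</value> <!-- workflow EL: id of the current workflow -->
<message>${wf:errorMessage(wf:lastErrorNode())}</message> <!-- workflow EL: message of the last error -->
<case to="skip">${fs:exists('/data/_SUCCESS')}</case> <!-- HDFS EL: existence check -->
<case to="compact">${fs:fileSize('/data/output') lt 100 * MB}</case> <!-- HDFS EL: size check; MB/GB are built-in constants -->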
Nodes:
Oozie nodes come in two kinds, control-flow nodes and action nodes; a node is in essence a group of tags. The two kinds are:
Control-flow nodes:
- <start/>: marks the start of the workflow
- <end/>: marks the end of the workflow
- <decision/>: implements switch logic, used with the <switch><case/><default/></switch> tags
- <sub-workflow>: invokes a child workflow
- <kill/>: jumped to after an error to perform the related handling
- <fork/>: runs parts of the workflow concurrently
- <join/>: ends the concurrent execution (used together with fork)
Action nodes:
- <shell/>: runs a shell command
- <java/>: runs a Java program
- <fs/>: operates on HDFS
- <map-reduce/>: runs a MapReduce job
- <hive/>: runs a Hive operation
- <sqoop/>: runs a Sqoop operation
workflow.xml overview
The file must be on HDFS before Oozie can schedule it, and if the flow launches MR tasks the jar files must be on HDFS as well. The final directory layout looks like:
- /xxx/oozie/workflow.xml
- /xxx/oozie/lib
- /xxx/oozie/lib/mr-1.7-SNAPSHOT-package.jar
- /xxx/oozie/lib/xxx.mr-1.7-SNAPSHOT-package.jar
lib folder:
A lib directory must exist at the same level as the workflow definition; it holds the jars used by java nodes and MapReduce. Note that Oozie does not launch a task by the name of a given jar but by the specified main class, so the lib directory must never contain different versions of the same jar, and no main class may appear more than once.
Oozie CLI commands:
These are the oozie job commands used from the Oozie client, summarized as follows:
- Run a job
oozie job -oozie oozie_url -config job.properties_address -run
- Kill a job
oozie job -oozie oozie_url -kill jobId
- Submit a job
oozie job -oozie oozie_url -config job.properties_address -submit
- Start a submitted job
oozie job -oozie oozie_url -start jobId
- Check a job's status
oozie job -oozie oozie_url -info jobId
Note: every command starts with oozie job -oozie oozie_url; -config specifies the location of the job.properties file; -run returns a unique jobId (of the form 0000001-...-oozie-oozi-W) for later use.
The difference between run, submit, and start:
Submit hands the job to Oozie without running it; it also returns a unique jobId (-submit).
Start executes a job that was previously submitted (-start).
Run = submit + start.
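A typical submit-then-start sequence (a usage sketch; the URL and the returned id are illustrative):
oozie job -oozie http://oozieServerIp:11000/oozie -config job.properties -submit
job: 0000002-180717120019494-oozie-oozi-W
oozie job -oozie http://oozieServerIp:11000/oozie -start 0000002-180717120019494-oozie-oozi-W
oozie job -oozie http://oozieServerIp:11000/oozie -info 0000002-180717120019494-oozie-oozi-W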
Implementing a custom wordcount workflow
- Test the wordcount program on yarn
$ bin/yarn jar /opt/datas/wd.jar mapreduce.WordCount /input /output
- Create an application directory and put the wordcount jar into its lib directory
$ cd /opt/modules/cdh/oozie-4.0.0-cdh5.3.6/
$ mkdir my-apps
$ cp -r examples/apps/map-reduce/ my-apps/
$ cp /opt/datas/wd.jar my-apps/map-reduce/lib/
- Edit the job.properties file
# define the variables
nameNode=hdfs://bigdata-00:8020
jobTracker=bigdata-00:8032
queueName=default
oozieAppsRoot=user/wulei/my-apps
oozieDataRoot=user/wulei/oozie/datas
inputDir=map-reduce/input
outputDir=map-reduce/output
# path of workflow.xml
oozie.wf.application.path=${nameNode}/${oozieAppsRoot}/map-reduce/workflow.xml
- Edit the workflow.xml file
<workflow-app xmlns="uri:oozie:workflow:0.5" name="map-reduce-wf">
<start to="mr-node"/>
<action name="mr-node">
<map-reduce>
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<!-- prepare step: delete the output path first if it already exists -->
<prepare>
<delete path="${nameNode}/${oozieDataRoot}/${outputDir}"/>
</prepare>
<configuration>
<property>
<name>mapred.job.queue.name</name>
<value>${queueName}</value>
</property>
<!-- set map class and reduce class-->
<property>
<name>mapreduce.job.map.class</name>
<value>mapreduce.WordCount$WordCountMapper</value>
</property>
<property>
<name>mapreduce.job.reduce.class</name>
<value>mapreduce.WordCount$WordCountReducer</value>
</property>
<!-- set inputdir and outputdir-->
<property>
<name>mapreduce.input.fileinputformat.inputdir</name>
<value>${nameNode}/${oozieDataRoot}/${inputDir}</value>
</property>
<property>
<name>mapreduce.output.fileoutputformat.outputdir</name>
<value>${nameNode}/${oozieDataRoot}/${outputDir}</value>
</property>
<!-- Oozie defaults to the mapreduce1 API; switch to the new MapReduce API here -->
<property>
<name>mapred.mapper.new-api</name>
<value>true</value>
</property>
<property>
<name>mapred.reducer.new-api</name>
<value>true</value>
</property>
<!-- set map output key and value class-->
<property>
<name>mapreduce.map.output.key.class</name>
<value>org.apache.hadoop.io.Text</value>
</property>
<property>
<name>mapreduce.map.output.value.class</name>
<value>org.apache.hadoop.io.LongWritable</value>
</property>
<!-- set reduceoutput key and value class-->
<property>
<name>mapreduce.job.output.key.class</name>
<value>org.apache.hadoop.io.Text</value>
</property>
<property>
<name>mapreduce.job.output.value.class</name>
<value>org.apache.hadoop.io.IntWritable</value>
</property>
</configuration>
</map-reduce>
<ok to="end"/>
<error to="fail"/>
</action>
<kill name="fail">
<message>Map/Reduce failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<end name="end"/>
</workflow-app>
- Create the directory on HDFS and upload the input data
$ bin/hdfs dfs -mkdir -p /user/wulei/oozie/datas/map-reduce/input
$ bin/hdfs dfs -put /opt/datas/a.txt oozie/datas/map-reduce/input
- Upload the my-apps application directory
$ bin/hdfs dfs -put /opt/modules/cdh/oozie-4.0.0-cdh5.3.6/my-apps/ my-apps
- Run the test
$ bin/oozie job -oozie http://bigdata-00:11000/oozie -config my-apps/map-reduce/job.properties -run
- Check the test results in the YARN job UI and the Oozie web UI (screenshots not reproduced).
Implementing a shell action in a custom workflow
- Copy the default shell example directory and modify it
$ cp -r ./examples/apps/shell/ my-apps/
- Edit job.properties
nameNode=hdfs://bigdata-00:8020
jobTracker=bigdata-00:8032
queueName=default
examplesRoot=user/wulei/my-apps/shell
# path of workflow.xml
oozie.wf.application.path=${nameNode}/${examplesRoot}/workflow.xml
# a variable holding the shell script file name
EXEC=oozie.sh
- Edit workflow.xml
<workflow-app xmlns="uri:oozie:workflow:0.4" name="shell-wf">
<start to="shell-node"/>
<action name="shell-node">
<shell xmlns="uri:oozie:shell-action:0.2">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<configuration>
<property>
<name>mapred.job.queue.name</name>
<value>${queueName}</value>
</property>
</configuration>
<exec>${EXEC}</exec>
<!-- path of the shell script -->
<file>${nameNode}/${examplesRoot}/${EXEC}#${EXEC}</file> <!--Copy the executable to compute node's current working directory -->
</shell>
<ok to="end"/>
<error to="fail"/>
</action>
<kill name="fail">
<message>Shell action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<end name="end"/>
</workflow-app>
- Write the script to be run.
- Upload everything to HDFS and run the job.
- Check the run results.
These three steps are sketched below.
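A minimal end-to-end sketch of those steps, modeled on the MapReduce steps above; the contents of oozie.sh and the /tmp path are assumptions, not from the original:
$ cat > my-apps/shell/oozie.sh <<'EOF'
#!/bin/bash
# trivial payload, just to prove the action ran
date > /tmp/oozie-shell.log
EOF
$ bin/hdfs dfs -put my-apps/shell/ my-apps/shell
$ bin/oozie job -oozie http://bigdata-00:11000/oozie -config my-apps/shell/job.properties -run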
Oozie coordinator scheduling
When a complex workflow job needs to run on a daily schedule, driving it with crontab means writing piles of scripts and lots of conditional logic to control the execution order of each workflow job. An Oozie Coordinator instead treats each workflow as a single action, so multiple jobs can be organized together with a trigger time and frequency. It schedules in two ways: by time, or by data availability (checking whether the data exists and meets a standard).
- For time-based scheduling, configure the time zone first: Oozie defaults to UTC and the server defaults to CST, so change both to GMT+0800 (any other zone works too).
Check the system's current time zone (e.g. with date -R); if the offset shown is not +0800, change it:
$ rm -rf /etc/localtime
$ ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
Change the Oozie time zone by adding this parameter to oozie-site.xml:
<property>
<name>oozie.processing.timezone</name>
<value>GMT+0800</value>
</property>
Also adjust the time-zone setting in oozie-server/webapps/oozie/oozie-console.js, otherwise the times shown in the web UI will be off; restart for the change to take effect:
function getTimeZone() {
Ext.state.Manager.setProvider(new Ext.state.CookieProvider());
return Ext.state.Manager.get("TimezoneId","GMT+0800");
}
- Copy the coordinator example from the templates
$ cp -r examples/apps/cron-schedule/ my-apps/
- Edit job.properties
nameNode=hdfs://bigdata-00:8020
jobTracker=bigdata-00:8032
queueName=default
examplesRoot=user/wulei/my-apps/cron-schedule
# coordinator application path
oozie.coord.application.path=${nameNode}/${examplesRoot}
start=2016-11-13T12:30+0800
end=2016-11-13T13:30+0800
# workflow path
workflowAppUri=${nameNode}/${examplesRoot}
- Edit coordinator.xml so the job runs once a minute between the start time and the end time
<coordinator-app name="cron-coord" frequency="${coord:minutes(1)}" start="${start}" end="${end}" timezone="GMT+0800"
xmlns="uri:oozie:coordinator:0.2">
<action>
<workflow>
<app-path>${workflowAppUri}</app-path>
<configuration>
<property>
<name>jobTracker</name>
<value>${jobTracker}</value>
</property>
<property>
<name>nameNode</name>
<value>${nameNode}</value>
</property>
<property>
<name>queueName</name>
<value>${queueName}</value>
</property>
</configuration>
</workflow>
</action>
</coordinator-app>
- In the default Oozie configuration a coordinator may run at most once every 5 minutes, so the default must be changed (see the snippet below).
- Upload everything to HDFS and execute.
- Check the execution results.
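One way to lift the 5-minute floor is the oozie.service.coord.check.maximum.frequency switch in oozie-site.xml; set to false, Oozie stops enforcing the minimum (a sketch to adapt, not taken from the original setup):
<property>
<name>oozie.service.coord.check.maximum.frequency</name>
<value>false</value>
</property>
Uploading and running then mirror the earlier examples (paths and the returned id are illustrative; coordinator job ids end in -C):
$ bin/hdfs dfs -put my-apps/cron-schedule/ my-apps/cron-schedule
$ bin/oozie job -oozie http://bigdata-00:11000/oozie -config my-apps/cron-schedule/job.properties -run
$ bin/oozie job -oozie http://bigdata-00:11000/oozie -info 0000003-161113000000000-oozie-wule-C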
-------------------------------------------------------------------------------------------
Choose one server in the cluster to serve as the additional node for OozieServer high availability.
1. Install the MySQL driver
[root@ip-172-31-30-69 ~]# mkdir /usr/share/java
[root@ip-172-31-30-69 ~]# scp mysql-connector-java-5.1.34.jar /usr/share/java
2. Create a symlink for the MySQL driver
[root@ip-172-31-30-69 ~]# cd /usr/share/java/
[root@ip-172-31-30-69 java]# ln -s mysql-connector-java-5.1.34.jar mysql-connector-java.jar
3. Install Haproxy and configure it for the Oozie servers
The Haproxy configuration is as follows:
#---------------------------------------------------------------------
# Example configuration for a possible web application. See the
# full configuration options online.
#
# http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
#
#---------------------------------------------------------------------
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
log 127.0.0.1 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
# turn on stats unix socket
stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
mode http
log global
option httplog
option dontlognull
#option http-server-close
#option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 3000
listen stats
bind 0.0.0.0:1080
mode http
option httplog
maxconn 5000
stats refresh 30s
stats uri /stats
listen ooziebalancer
bind 0.0.0.0:11002
mode tcp
option tcplog
balance leastconn
server ip-172-31-30-69.ap-southeast-1.compute.internal ip-172-31-30-69.ap-southeast-1.compute.internal:11000 check
server ip-172-31-16-68.ap-southeast-1.compute.internal ip-172-31-16-68.ap-southeast-1.compute.internal:11000 check
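With the balancer in front, clients and the oozie CLI reach Oozie through it rather than through a single server (a usage sketch based on the bind address above):
export OOZIE_URL=http://ip-172-31-16-68.ap-southeast-1.compute.internal:11002/oozie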
The installation and configuration of Haproxy itself is not described in detail here; see Fayson's earlier posts:
"How to Use HAProxy to Load-Balance Impala"
"How to Use HAProxy to Load-Balance HiveServer2"
"How to Use HAProxy to Load-Balance Impala in a Kerberos Environment"
"How to Use Haproxy to Load-Balance HiveServer2 in a Kerberos Environment"
3. Enable Oozie service HA
1. Log into the Cloudera Manager web UI as an administrator and go to the Oozie service.
2. Click "Actions" -> "Enable High Availability".
3. The wizard for enabling Oozie HA opens.
4. Select the node where the MySQL driver is already installed as the additional Oozie Server.
5. Click "Continue" and fill in the Load Balancer address used by Oozie HA (the address of the Haproxy service):
ip-172-31-16-68.ap-southeast-1.compute.internal:11002
6. Click "Continue" to enable Oozie High Availability.
7. Enabled successfully.
Click "Finish"; the Oozie Server HA configuration is now complete.
4. Testing Oozie service HA
1. Create a Spark workflow test job in Hue.
2. Submit the job.
3. While the workflow is running, stop one of the OozieServer services.
4. The job still completes successfully.
The application also shows up in the Yarn applications view.
5. Summary
- The cluster metadata is stored in the external MySQL database, so the MySQL JDBC driver must be installed on every node where the Oozie service is deployed.
- Enabling Oozie high availability requires a load balancer such as Haproxy or Keepalived; Haproxy is used here.
- The Load Balancer is configured as the host running the Haproxy service and its listening port 11002.