This lesson is a bit harder than the previous one.
The job.properties file and coordinator.xml were already covered in the previous lesson, so they will not be rehashed in detail here.
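What is new this time is running actions in parallel with fork/join, and launching one workflow from another through a sub-workflow action.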
job.properties
# Cluster parameters
nameNode=hdfs://master:8020
jobTracker=master:8032
queueName=default
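# Coordinator settings: uncomment to schedule the run through coordinator.xml instead of starting the workflow directly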
# oozie.coord.application.path=${nameNode}/work/coordinator.xml
# start=2016-12-06T16:00Z
# end=2019-07-30T16:00Z
workflowAppUri=${nameNode}/work/workflow1.xml
# HDFS path of the workflow definition
oozie.wf.application.path=${workflowAppUri}
# Directory holding the jar files
jarsDir=/home/work/jars_test
hiveSQLDir=/home/work/HiveSql
# Host of the machine where the jars live
sshHost=master
sshUser=root
# Name of the workflow
workflowName=workflow
# subworkflow
workflow2=workflow2.xml
workflow3=workflow3.xml
# impala
impalaIP=192.168.247.3
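As configured (the coordinator lines commented out, oozie.wf.application.path active), submitting this properties file runs workflow1.xml directly. With the Oozie CLI that looks something like the following, assuming the Oozie server listens on master at its default port 11000:

oozie job -oozie http://master:11000/oozie -config job.properties -run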
coordinator.xml
<coordinator-app name="bdmp_parallel_10_coordinator" frequency="${coord:hours(24)}" start="${start}" end="${end}" timezone="UTC" xmlns="uri:oozie:coordinator:0.2">
    <action>
        <workflow>
            <app-path>${workflowAppUri}</app-path>
            <configuration>
                <property>
                    <name>jobTracker</name>
                    <value>${jobTracker}</value>
                </property>
                <property>
                    <name>nameNode</name>
                    <value>${nameNode}</value>
                </property>
                <property>
                    <name>queueName</name>
                    <value>${queueName}</value>
                </property>
            </configuration>
        </workflow>
    </action>
</coordinator-app>
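The coordinator does nothing more than trigger ${workflowAppUri} every 24 hours between ${start} and ${end}, forwarding jobTracker, nameNode, and queueName to the workflow. To actually use it, uncomment oozie.coord.application.path, start, and end in job.properties above.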
workflow1.xml
<workflow-app xmlns="uri:oozie:workflow:0.2" name="${workflowName}">
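    <!-- Overall flow: work1 -> work2 -> fork (work3 | work4) -> join -> work5 -> sub-workflow workflow2 -> end -->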
    <start to="work1"/>
    <action name="work1"> <!-- the first action to run -->
        <ssh xmlns="uri:oozie:ssh-action:0.1">
            <host>${sshUser}@${sshHost}</host>
            <command>hadoop jar</command>
            <args>${jarsDir}/work1.jar</args>
        </ssh>
        <ok to="work2"/> <!-- on success, move on to work2 -->
        <error to="fail"/>
    </action>
<action name="work2">
<ssh xmlns="uri:oozie:ssh-action:0.1">
<host>${sshUser}@${sshHost}</host>
<command>hadoop jar</command>
<args>${jarsDir}/work2.jar</args>
<args>:oozie:ssh:W=${workflowName}:A=work2:ID=${wf:id()}</args>
</ssh>
<ok to="fork_00"/>work2执行成功以后执行fork_00
<error to="fail"/>
</action>
<fork name="fork_00"> --fork标签的意思是并行的执行里面的任务
<path start="work3"/>
<path start="work4"/>
</fork>
<action name="work3">
<ssh xmlns="uri:oozie:ssh-action:0.1">
<host>${sshUser}@${sshHost}</host>
<command>hadoop jar</command>
<args>${jarsDir}/work3.jar</args>
<args>:oozie:ssh:W=${workflowName}:A=work3:ID=${wf:id()}</args>
</ssh>
<ok to="join_00"/>
<error to="fail"/>
</action>
<action name="work4">
<ssh xmlns="uri:oozie:ssh-action:0.1">
<host>${sshUser}@${sshHost}</host>
<command>hadoop jar</command>
<args>${jarsDir}/work4.jar</args>
<args>:oozie:ssh:W=${workflowName}:A=work4:ID=${wf:id()}</args>
</ssh>
<ok to="join_00"/>
<error to="fail"/>
</action>
    <join name="join_00" to="work5"/> <!-- the join fires only after every forked path (work3 and work4) has succeeded -->
    <action name="work5">
        <ssh xmlns="uri:oozie:ssh-action:0.1">
            <host>${sshUser}@${sshHost}</host>
            <command>hadoop jar</command>
            <args>${jarsDir}/work5.jar</args>
            <args>:oozie:ssh:W=${workflowName}:A=work5:ID=${wf:id()}</args>
        </ssh>
        <ok to="workflow2"/>
        <error to="fail"/>
    </action>
<action name="workflow2">---可以执行另外一个workflow
<sub-workflow>
<app-path>${nameNode}/user/${workflow2}</app-path>
<configuration>
<property>
<name>nameNode</name>
<value>${nameNode}</value>
</property>
<property>
<name>jobTracker</name>
<value>${jobTracker}</value>
</property>
<property>
<name>queueName</name>
<value>${queueName}</value>
</property>
<property>
<name>jarsDir</name>
<value>${jarsDir}</value>
</property>
<property>
<name>hiveSQLDir</name>
<value>${hiveSQLDir}</value>
</property>
<property>
<name>sshHost</name>
<value>${sshHost}</value>
</property>
<property>
<name>sshUser</name>
<value>${sshUser}</value>
</property>
<property>
<name>workflowName</name>
<value>${workflowName}_subworkflow_part2</value>
</property>
<property>
<name>subworkflow_part2_xml</name>
<value>${subworkflow_part2_xml}</value>
</property>
<property>
<name>subworkflow_part3_xml</name>
<value>${subworkflow_part3_xml}</value>
</property>
<property>
<name>impalaIP</name>
<value>${impalaIP}</value>
</property>
</configuration>
</sub-workflow>
<ok to="end"/>
<error to="fail"/>
</action>
    <kill name="fail">
        <message>Java failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>
    <end name="end"/>
</workflow-app>
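For reference, the sub-workflow file is just an ordinary workflow definition of its own. Below is a minimal sketch of what workflow2.xml could look like given the properties passed in above; the action name and jar file here are hypothetical placeholders, not part of the original lesson.

workflow2.xml (sketch)
<workflow-app xmlns="uri:oozie:workflow:0.2" name="${workflowName}">
    <start to="sub_work1"/>
    <!-- hypothetical first step of part 2; the real file would hold the part-2 jobs -->
    <action name="sub_work1">
        <ssh xmlns="uri:oozie:ssh-action:0.1">
            <host>${sshUser}@${sshHost}</host>
            <command>hadoop jar</command>
            <args>${jarsDir}/sub_work1.jar</args>
        </ssh>
        <ok to="end"/>
        <error to="fail"/>
    </action>
    <kill name="fail">
        <message>Java failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>
    <end name="end"/>
</workflow-app>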