本文主要讲解ambari是如何安装一个服务的,这会涉及到ambari的资源请求,
请参看《ambari-server- 资源provider》,《ambari-server-资源请求分析》。
测试安装oozie&falcon,ambari会发送如下请求。
1.新建服务
Request URL:http://172.16.31.181:8080/api/v1/clusters/pezy_cluster01/services
Request Method:POST
data: [{"ServiceInfo":{"service_name":"OOZIE"}},{"ServiceInfo":{"service_name":"FALCON"}}]
2.因为新组件安装导致旧组件配置变更。
Request URL:http://172.16.31.181:8080/api/v1/clusters/pezy_cluster01
Request Method:PUT
data:[{"Clusters":{"desired_config":[{"type":"core-site","tag":"version1502156872303","properties":{"hadoop.proxyuser.falcon.groups":"*","hadoop.proxyuser.falcon.hosts":"*","hadoop.proxyuser.oozie.hosts":"hadoop182","hadoop.proxyuser.oozie.groups":"*","fs.defaultFS":"hdfs://hadoop181:8020","fs.trash.interval":"360","ha.failover-controller.active-standby-elector.zk.op.retries":"120","hadoop.http.authentication.simple.anonymous.allowed":"true","hadoop.security.auth_to_local":"DEFAULT","hadoop.security.authentication":"simple","hadoop.security.authorization":"false","io.compression.codecs":"org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec","io.file.buffer.size":"131072","io.serializations":"org.apache.hadoop.io.serializer.WritableSerialization","ipc.client.connect.max.retries":"50","ipc.client.connection.maxidletime":"30000","ipc.client.idlethreshold":"8000","ipc.server.tcpnodelay":"true","mapreduce.jobtracker.webinterface.trusted":"false","net.topology.script.file.name":"/etc/hadoop/conf/topology_script.py","hadoop.proxyuser.ambari.groups":"*","hadoop.proxyuser.ambari.hosts":"hadoop181","hadoop.proxyuser.hcat.groups":"*","hadoop.proxyuser.hcat.hosts":"hadoop182","hadoop.proxyuser.hdfs.groups":"*","hadoop.proxyuser.hdfs.hosts":"*","hadoop.proxyuser.hive.groups":"*","hadoop.proxyuser.hive.hosts":"hadoop182"},"service_config_version_note":"","properties_attributes":{"final":{"fs.defaultFS":"true"}}}]}},{"Clusters":{"desired_config":[{"type":"storm-site","tag":"version1502156872303","properties":{"_storm.min.ruid":"null","_storm.thrift.nonsecure.transport":"org.apache.storm.security.auth.SimpleTransportPlugin","_storm.thrift.secure.transport":"org.apache.storm.security.auth.kerberos.KerberosSaslTransportPlugin","client.jartransformer.class":"org.apache.storm.hack.StormShadeTransformer","dev.zookeeper.path":"/tmp/dev-storm-zookeeper","drpc.childopts":"-Xmx768m 
_JAAS_PLACEHOLDER","drpc.invocations.port":"3773","drpc.port":"3772","drpc.queue.size":"128","drpc.request.timeout.secs":"600","drpc.worker.threads":"64","java.library.path":"/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm-client/lib","logviewer.appender.name":"A1","logviewer.childopts":"-Xmx128m _JAAS_PLACEHOLDER","logviewer.port":"8000","nimbus.childopts":"-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM","nimbus.cleanup.inbox.freq.secs":"600","nimbus.file.copy.expiration.secs":"600","nimbus.inbox.jar.expiration.secs":"3600","nimbus.monitor.freq.secs":"10","nimbus.reassign":"true","nimbus.seeds":"['hadoop182']","nimbus.supervisor.timeout.secs":"60","nimbus.task.launch.secs":"120","nimbus.task.timeout.secs":"30","nimbus.thrift.max_buffer_size":"1048576","nimbus.thrift.port":"6627","nimbus.thrift.threads":"196","nimbus.topology.validator":"org.apache.storm.nimbus.DefaultTopologyValidator","storm.cluster.mode":"distributed","storm.local.dir":"/hadoop/storm","storm.local.mode.zmq":"false","storm.log.dir":"{{log_dir}}","storm.messaging.netty.buffer_size":"5242880","storm.messaging.netty.client_worker_threads":"1","storm.messaging.netty.max_retries":"30","storm.messaging.netty.max_wait_ms":"1000","storm.messaging.netty.min_wait_ms":"100","storm.messaging.netty.server_worker_threads":"1","storm.messaging.transport":"org.apache.storm.messaging.netty.Context","storm.thrift.transport":"{{storm_thrift_transport}}","storm.zookeeper.connection.timeout":"30000","storm.zookeeper.port":"2181","storm.zookeeper.retry.interval":"1000","storm.zookeeper.retry.intervalceiling.millis":"30000","storm.zookeeper.retry.times":"5","storm.zookeeper.root":"/storm","storm.zookeeper.servers":"['hadoop182','hadoop183']","storm.zookeeper.session.timeout":"30000
","supervisor.childopts":"-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port={{jmxremote_port}} -javaagent:/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM","supervisor.heartbeat.frequency.secs":"5","supervisor.monitor.frequency.secs":"3","supervisor.slots.ports":"[6700, 6701]","supervisor.worker.start.timeout.secs":"120","supervisor.worker.timeout.secs":"30","task.heartbeat.frequency.secs":"3","task.refresh.poll.secs":"10","topology.acker.executors":"null","topology.builtin.metrics.bucket.size.secs":"60","topology.debug":"false","topology.disruptor.wait.strategy":"com.lmax.disruptor.BlockingWaitStrategy","topology.enable.message.timeouts":"true","topology.error.throttle.interval.secs":"10","topology.executor.receive.buffer.size":"1024","topology.executor.send.buffer.size":"1024","topology.fall.back.on.java.serialization":"true","topology.kryo.factory":"org.apache.storm.serialization.DefaultKryoFactory","topology.max.error.report.per.interval":"5","topology.max.replication.wait.time.sec":"{{actual_topology_max_replication_wait_time_sec}}","topology.max.replication.wait.time.sec.default":"60","topology.max.spout.pending":"1000","topology.max.task.parallelism":"null","topology.message.timeout.secs":"30","topology.min.replication.count":"{{actual_topology_min_replication_count}}","topology.min.replication.count.default":"1","topology.optimize":"true","topology.receiver.buffer.size":"8","topology.skip.missing.kryo.registrations":"false","topology.sleep.spout.wait.strategy.time.ms":"1","topology.spout.wait.strategy":"org.apache.storm.spout.SleepSpoutWaitStrategy","topology.state.synchronization.timeout.secs":"60","topology.stats.sample.rate":"
0.05","topology.tick.tuple.freq.secs":"null","topology.transfer.buffer.size":"1024","topology.trident.batch.emit.interval.millis":"500","topology.tuple.serializer":"org.apache.storm.serialization.types.ListDelegateSerializer","topology.worker.childopts":"null","topology.worker.shared.thread.pool.size":"4","topology.workers":"1","transactional.zookeeper.port":"null","transactional.zookeeper.root":"/transactional","transactional.zookeeper.servers":"null","ui.childopts":"-Xmx768m _JAAS_PLACEHOLDER","ui.filter":"null","ui.port":"8744","worker.childopts":"-Xmx768m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm-client/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-client/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM","worker.heartbeat.frequency.secs":"1","zmq.hwm":"0","zmq.linger.millis":"5000","zmq.threads":"1","metrics.reporter.register":"org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter","storm.cluster.metrics.consumer.register":"[{\"class\": \"org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter\"}]","topology.metrics.aggregate.metric.evict.secs":"5","topology.metrics.aggregate.per.worker":"true","topology.metrics.consumer.register":"[{\"class\": \"org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsSink\", \"parallelism.hint\": 1, \"whitelist\": [\"kafkaOffset\\\\..+/\", \"__complete-latency\", \"__process-latency\", \"__receive\\\\.population$\", \"__sendqueue\\\\.population$\", \"__execute-count\", \"__emit-count\", \"__ack-count\", \"__fail-count\", \"memory/heap\\\\.usedBytes$\", \"memory/nonHeap\\\\.usedBytes$\", \"GC/.+\\\\.count$\", \"GC/.+\\\\.timeMs$\"]}]","topology.metrics.expand.map.type":"true","topology.metrics.metric.name.separator":"."},"service_config_version_note":""}]}}]
3.创建OOZIE服务对应组件(此步的代码推导出现问题)
Request URL:http://172.16.31.181:8080/api/v1/clusters/pezy_cluster01/services?ServiceInfo/service_name=OOZIE
Request Method:POST
data:{"components":[{"ServiceComponentInfo":{"component_name":"OOZIE_CLIENT"}},{"ServiceComponentInfo":{"component_name":"OOZIE_SERVER"}}]}
创建FALCON服务对应组件
Request URL:http://172.16.31.181:8080/api/v1/clusters/pezy_cluster01/services?ServiceInfo/service_name=FALCON
Request Method:POST
data:{"components":[{"ServiceComponentInfo":{"component_name":"FALCON_CLIENT"}},{"ServiceComponentInfo":{"component_name":"FALCON_SERVER"}}]}
4.创建主机对应组件
Request URL:http://172.16.31.181:8080/api/v1/clusters/pezy_cluster01/hosts
Request Method:POST
data: {"RequestInfo":{"query":"Hosts/host_name=hadoop182"},"Body":{"host_components":[{"HostRoles":{"component_name":"OOZIE_SERVER"}}]}}
Request URL:http://172.16.31.181:8080/api/v1/clusters/pezy_cluster01/hosts
Request Method:POST
Data:{"RequestInfo":{"query":"Hosts/host_name=hadoop182"},"Body":{"host_components":[{"HostRoles":{"component_name":"FALCON_SERVER"}}]}}
Request URL:http://172.16.31.181:8080/api/v1/clusters/pezy_cluster01/hosts
Request Method:POST
Data:{"RequestInfo":{"query":"Hosts/host_name=hadoop181|Hosts/host_name=hadoop182|Hosts/host_name=hadoop183"},"Body":{"host_components":[{"HostRoles":{"component_name":"OOZIE_CLIENT"}}]}}
Request URL:http://172.16.31.181:8080/api/v1/clusters/pezy_cluster01/hosts
Request Method:POST
Data: {"RequestInfo":{"query":"Hosts/host_name=hadoop181|Hosts/host_name=hadoop182|Hosts/host_name=hadoop183"},"Body":{"host_components":[{"HostRoles":{"component_name":"FALCON_CLIENT"}}]}}
5.任务执行进度
Request URL:http://172.16.31.181:8080/api/v1/clusters/pezy_cluster01/requests/61?fields=tasks/Tasks/command,tasks/Tasks/command_detail,tasks/Tasks/exit_code,tasks/Tasks/start_time,tasks/Tasks/end_time,tasks/Tasks/host_name,tasks/Tasks/id,tasks/Tasks/role,tasks/Tasks/status&minimal_response=true&_=1502156877213
Request Method:GET
response:{
"Requests" : {
"cluster_name" : "pezy_cluster01",
"id" : 61
},
"tasks" : [
{
"Tasks" : {
"command" : "INSTALL",
"command_detail" : "FALCON_CLIENT INSTALL",
"end_time" : -1,
"exit_code" : 777,
"host_name" : "hadoop181",
"id" : 470,
"role" : "FALCON_CLIENT",
"start_time" : 1502156845173,
"status" : "IN_PROGRESS"
}
},
{
"Tasks" : {
"command" : "INSTALL",
"command_detail" : "OOZIE_CLIENT INSTALL",
"end_time" : -1,
"exit_code" : 999,
"host_name" : "hadoop181",
"id" : 471,
"role" : "OOZIE_CLIENT",
"start_time" : 1502156845174,
"status" : "QUEUED"
}
},
{
"Tasks" : {
"command" : "INSTALL",
"command_detail" : "FALCON_CLIENT INSTALL",
"end_time" : -1,
"exit_code" : 777,
"host_name" : "hadoop182",
"id" : 472,
"role" : "FALCON_CLIENT",
"start_time" : 1502156845174,
"status" : "IN_PROGRESS"
}
},
{
"Tasks" : {
"command" : "INSTALL",
"command_detail" : "FALCON_SERVER INSTALL",
"end_time" : -1,
"exit_code" : 999,
"host_name" : "hadoop182",
"id" : 473,
"role" : "FALCON_SERVER",
"start_time" : 1502156845174,
"status" : "QUEUED"
}
},
{
"Tasks" : {
"command" : "INSTALL",
"command_detail" : "OOZIE_CLIENT INSTALL",
"end_time" : -1,
"exit_code" : 999,
"host_name" : "hadoop182",
"id" : 474,
"role" : "OOZIE_CLIENT",
"start_time" : 1502156845174,
"status" : "QUEUED"
}
},
{
"Tasks" : {
"command" : "INSTALL",
"command_detail" : "OOZIE_SERVER INSTALL",
"end_time" : -1,
"exit_code" : 999,
"host_name" : "hadoop182",
"id" : 475,
"role" : "OOZIE_SERVER",
"start_time" : 1502156845174,
"status" : "QUEUED"
}
},
{
"Tasks" : {
"command" : "INSTALL",
"command_detail" : "FALCON_CLIENT INSTALL",
"end_time" : -1,
"exit_code" : 777,
"host_name" : "hadoop183",
"id" : 476,
"role" : "FALCON_CLIENT",
"start_time" : 1502156845174,
"status" : "IN_PROGRESS"
}
},
{
"Tasks" : {
"command" : "INSTALL",
"command_detail" : "OOZIE_CLIENT INSTALL",
"end_time" : -1,
"exit_code" : 999,
"host_name" : "hadoop183",
"id" : 477,
"role" : "OOZIE_CLIENT",
"start_time" : 1502156845174,
"status" : "QUEUED"
}
},
{
"Tasks" : {
"command" : "STOP",
"command_detail" : "NODEMANAGER STOP",
"end_time" : -1,
"exit_code" : 999,
"host_name" : "hadoop181",
"id" : 478,
"role" : "NODEMANAGER",
"start_time" : -1,
"status" : "PENDING"
}
},
{
"Tasks" : {
"command" : "STOP",
"command_detail" : "APP_TIMELINE_SERVER STOP",
"end_time" : -1,
"exit_code" : 999,
"host_name" : "hadoop182",
"id" : 479,
"role" : "APP_TIMELINE_SERVER",
"start_time" : -1,
"status" : "PENDING"
}
},
{
"Tasks" : {
"command" : "STOP",
"command_detail" : "HISTORYSERVER STOP",
"end_time" : -1,
"exit_code" : 999,
"host_name" : "hadoop182",
"id" : 480,
"role" : "HISTORYSERVER",
"start_time" : -1,
"status" : "PENDING"
}
},
{
"Tasks" : {
"command" : "STOP",
"command_detail" : "NODEMANAGER STOP",
"end_time" : -1,
"exit_code" : 999,
"host_name" : "hadoop182",
"id" : 481,
"role" : "NODEMANAGER",
"start_time" : -1,
"status" : "PENDING"
}
},
{
"Tasks" : {
"command" : "STOP",
"command_detail" : "RESOURCEMANAGER STOP",
"end_time" : -1,
"exit_code" : 999,
"host_name" : "hadoop182",
"id" : 482,
"role" : "RESOURCEMANAGER",
"start_time" : -1,
"status" : "PENDING"
}
},
{
"Tasks" : {
"command" : "STOP",
"command_detail" : "SECONDARY_NAMENODE STOP",
"end_time" : -1,
"exit_code" : 999,
"host_name" : "hadoop182",
"id" : 483,
"role" : "SECONDARY_NAMENODE",
"start_time" : -1,
"status" : "PENDING"
}
},
{
"Tasks" : {
"command" : "STOP",
"command_detail" : "NODEMANAGER STOP",
"end_time" : -1,
"exit_code" : 999,
"host_name" : "hadoop183",
"id" : 484,
"role" : "NODEMANAGER",
"start_time" : -1,
"status" : "PENDING"
}
},
{
"Tasks" : {
"command" : "STOP",
"command_detail" : "DATANODE STOP",
"end_time" : -1,
"exit_code" : 999,
"host_name" : "hadoop181",
"id" : 485,
"role" : "DATANODE",
"start_time" : -1,
"status" : "PENDING"
}
},
{
"Tasks" : {
"command" : "STOP",
"command_detail" : "NAMENODE STOP",
"end_time" : -1,
"exit_code" : 999,
"host_name" : "hadoop181",
"id" : 486,
"role" : "NAMENODE",
"start_time" : -1,
"status" : "PENDING"
}
},
{
"Tasks" : {
"command" : "STOP",
"command_detail" : "DATANODE STOP",
"end_time" : -1,
"exit_code" : 999,
"host_name" : "hadoop182",
"id" : 487,
"role" : "DATANODE",
"start_time" : -1,
"status" : "PENDING"
}
},
{
"Tasks" : {
"command" : "STOP",
"command_detail" : "DATANODE STOP",
"end_time" : -1,
"exit_code" : 999,
"host_name" : "hadoop183",
"id" : 488,
"role" : "DATANODE",
"start_time" : -1,
"status" : "PENDING"
}
}
]
}
6.更新服务状态
Request URL:http://172.16.31.181:8080/api/v1/clusters/pezy_cluster01/services?ServiceInfo/service_name.in(OOZIE,FALCON)
Request Method:PUT
{"RequestInfo":{"context":"Install Services","operation_level":{"level":"CLUSTER","cluster_name":"pezy_cluster01"}},"Body":{"ServiceInfo":{"state":"INSTALLED"}}}