0. Monitoring workflow
The HDFS NameNode and DataNode JVMs load jmx_exporter as a -javaagent, which exposes their JMX metrics over HTTP; Prometheus scrapes these exporter endpoints, and Grafana visualizes the collected metrics.
1. Ambari + HDP cluster setup
1.1 Big data platform setup
https://blog.csdn.net/happy_sunshine_boy/article/details/86595945
1.2 JMX_exporter
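A minimal sketch of staging the agent on each HDFS node, assuming the jar is pulled from Maven Central and using the directory layout referenced throughout this post:
# Run on every HDFS node (10.180.210.232 / 235 / 243).
mkdir -p /usr/local/prom/exporter/jmx
cd /usr/local/prom/exporter/jmx
# Assumption: downloading jmx_prometheus_javaagent 0.13.0 from Maven Central; any mirror works.
curl -LO https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.13.0/jmx_prometheus_javaagent-0.13.0.jar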
2. Configure the namenode and datanode jmx_exporter config files
2.1 namenode.yaml
# node 10.180.210.232: namenode
startDelaySeconds: 0
hostPort: 127.0.0.1:8004 # local JMX address (localhost is usually fine); 8004 is the NameNode JMX remote port set in hadoop-env.sh (any unused port works)
#jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:1234/jmxrmi
ssl: false
lowercaseOutputName: false
lowercaseOutputLabelNames: false
2.2 datanode.yaml
# node 10.180.210.232: datanode
startDelaySeconds: 0
hostPort: 127.0.0.1:8005
#jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:1234/jmxrmi
ssl: false
lowercaseOutputName: false
lowercaseOutputLabelNames: false
# node 10.180.210.235: datanode
startDelaySeconds: 0
hostPort: 127.0.0.1:8005
#jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:1234/jmxrmi
ssl: false
lowercaseOutputName: false
lowercaseOutputLabelNames: false
# node 10.180.210.243: datanode
startDelaySeconds: 0
hostPort: 127.0.0.1:8005
#jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:1234/jmxrmi
ssl: false
lowercaseOutputName: false
lowercaseOutputLabelNames: false
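The three datanode.yaml files are identical, so one copy can simply be pushed from 10.180.210.232 to the other datanodes; a sketch, assuming SSH access between the nodes is already set up:
# Hypothetical convenience loop; adjust the user and paths to your environment.
for h in 10.180.210.235 10.180.210.243; do
  scp /usr/local/prom/exporter/jmx/datanode.yaml root@$h:/usr/local/prom/exporter/jmx/
done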
2.3 Change ownership of the exporter directory
chown -R hdfs:hadoop /usr/local/prom/exporter/jmx
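A quick check that the files are in place and owned by the account running the HDFS daemons:
# Should list jmx_prometheus_javaagent-0.13.0.jar, namenode.yaml and datanode.yaml, all owned by hdfs:hadoop.
ls -l /usr/local/prom/exporter/jmx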
3. Modify the Hadoop launch parameters
3.1 Node 10.180.210.232 (namenode + datanode)
Append the jmx_exporter -javaagent options to both HDP hdfs wrapper scripts so the NameNode exposes metrics on port 9200 and the DataNode on port 9300:
cat /usr/hdp/3.1.0.0-78/hadoop-hdfs/bin/hdfs
#!/bin/bash
export HADOOP_HOME=${HADOOP_HOME:-/usr/hdp/3.1.0.0-78/hadoop}
export HADOOP_MAPRED_HOME=${HADOOP_MAPRED_HOME:-/usr/hdp/3.1.0.0-78/hadoop-mapreduce}
export HADOOP_YARN_HOME=${HADOOP_YARN_HOME:-/usr/hdp/3.1.0.0-78/hadoop-yarn}
export HADOOP_LIBEXEC_DIR=${HADOOP_HOME}/libexec
export HDP_VERSION=${HDP_VERSION:-3.1.0.0-78}
export HADOOP_OPTS="${HADOOP_OPTS} -Dhdp.version=${HDP_VERSION}"
#jmx_exporter#
export HDFS_NAMENODE_OPTS="$HDFS_NAMENODE_JMX_OPTS -javaagent:/usr/local/prom/exporter/jmx/jmx_prometheus_javaagent-0.13.0.jar=9200:/usr/local/prom/exporter/jmx/namenode.yaml"
export HDFS_DATANODE_OPTS="$HDFS_DATANODE_JMX_OPTS -javaagent:/usr/local/prom/exporter/jmx/jmx_prometheus_javaagent-0.13.0.jar=9300:/usr/local/prom/exporter/jmx/datanode.yaml"
exec /usr/hdp/3.1.0.0-78//hadoop-hdfs/bin/hdfs.distro "$@"
cat /usr/hdp/3.1.0.0-78/hadoop/bin/hdfs
#!/bin/bash
export HADOOP_HOME=${HADOOP_HOME:-/usr/hdp/3.1.0.0-78/hadoop}
export HADOOP_MAPRED_HOME=${HADOOP_MAPRED_HOME:-/usr/hdp/3.1.0.0-78/hadoop-mapreduce}
export HADOOP_YARN_HOME=${HADOOP_YARN_HOME:-/usr/hdp/3.1.0.0-78/hadoop-yarn}
export HADOOP_LIBEXEC_DIR=${HADOOP_HOME}/libexec
export HDP_VERSION=${HDP_VERSION:-3.1.0.0-78}
export HADOOP_OPTS="${HADOOP_OPTS} -Dhdp.version=${HDP_VERSION}"
#jmx_exporter#
export HDFS_NAMENODE_OPTS="$HDFS_NAMENODE_JMX_OPTS -javaagent:/usr/local/prom/exporter/jmx/jmx_prometheus_javaagent-0.13.0.jar=9200:/usr/local/prom/exporter/jmx/namenode.yaml"
export HDFS_DATANODE_OPTS="$HDFS_DATANODE_JMX_OPTS -javaagent:/usr/local/prom/exporter/jmx/jmx_prometheus_javaagent-0.13.0.jar=9300:/usr/local/prom/exporter/jmx/datanode.yaml"
exec /usr/hdp/3.1.0.0-78//hadoop-hdfs/bin/hdfs.distro "$@"
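To confirm the edits landed in both wrapper scripts on this node:
# Each file should show the -javaagent export line(s) added above.
grep -n javaagent /usr/hdp/3.1.0.0-78/hadoop-hdfs/bin/hdfs /usr/hdp/3.1.0.0-78/hadoop/bin/hdfs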
3.2 Nodes 10.180.210.235 and 10.180.210.243 (datanode only)
These nodes run only DataNodes, so just the HDFS_DATANODE_OPTS line is added to the same two wrapper scripts:
cat /usr/hdp/3.1.0.0-78/hadoop-hdfs/bin/hdfs
#!/bin/bash
export HADOOP_HOME=${HADOOP_HOME:-/usr/hdp/3.1.0.0-78/hadoop}
export HADOOP_MAPRED_HOME=${HADOOP_MAPRED_HOME:-/usr/hdp/3.1.0.0-78/hadoop-mapreduce}
export HADOOP_YARN_HOME=${HADOOP_YARN_HOME:-/usr/hdp/3.1.0.0-78/hadoop-yarn}
export HADOOP_LIBEXEC_DIR=${HADOOP_HOME}/libexec
export HDP_VERSION=${HDP_VERSION:-3.1.0.0-78}
export HADOOP_OPTS="${HADOOP_OPTS} -Dhdp.version=${HDP_VERSION}"
#jmx_exporter#
export HDFS_DATANODE_OPTS="$HDFS_DATANODE_JMX_OPTS -javaagent:/usr/local/prom/exporter/jmx/jmx_prometheus_javaagent-0.13.0.jar=9300:/usr/local/prom/exporter/jmx/datanode.yaml"
exec /usr/hdp/3.1.0.0-78//hadoop-hdfs/bin/hdfs.distro "$@"
cat /usr/hdp/3.1.0.0-78/hadoop/bin/hdfs
#!/bin/bash
export HADOOP_HOME=${HADOOP_HOME:-/usr/hdp/3.1.0.0-78/hadoop}
export HADOOP_MAPRED_HOME=${HADOOP_MAPRED_HOME:-/usr/hdp/3.1.0.0-78/hadoop-mapreduce}
export HADOOP_YARN_HOME=${HADOOP_YARN_HOME:-/usr/hdp/3.1.0.0-78/hadoop-yarn}
export HADOOP_LIBEXEC_DIR=${HADOOP_HOME}/libexec
export HDP_VERSION=${HDP_VERSION:-3.1.0.0-78}
export HADOOP_OPTS="${HADOOP_OPTS} -Dhdp.version=${HDP_VERSION}"
#jmx_exporter#
export HDFS_DATANODE_OPTS="$HDFS_DATANODE_JMX_OPTS -javaagent:/usr/local/prom/exporter/jmx/jmx_prometheus_javaagent-0.13.0.jar=9300:/usr/local/prom/exporter/jmx/datanode.yaml"
exec /usr/hdp/3.1.0.0-78//hadoop-hdfs/bin/hdfs.distro "$@"
3.3 /etc/hadoop/3.1.0.0-78/0/hadoop-env.sh
In Ambari, go to HDFS → CONFIGS → Advanced hadoop-env → hadoop-env template and add the following, which opens the local JMX ports (8004 for the NameNode, 8005 for the DataNode) that namenode.yaml and datanode.yaml point at:
#jmx_exporter#
export HDFS_NAMENODE_OPTS="-Dcom.sun.management.jmxremote
-Dcom.sun.management.jmxremote.authenticate=false
-Dcom.sun.management.jmxremote.ssl=false
-Dcom.sun.management.jmxremote.local.only=false
-Dcom.sun.management.jmxremote.port=8004
$HDFS_NAMENODE_OPTS"
export HDFS_DATANODE_OPTS="-Dcom.sun.management.jmxremote
-Dcom.sun.management.jmxremote.authenticate=false
-Dcom.sun.management.jmxremote.ssl=false
-Dcom.sun.management.jmxremote.local.only=false
-Dcom.sun.management.jmxremote.port=8005
$HDFS_DATANODE_OPTS"
3.4 Restart HDFS
Restart HDFS from Ambari (HDFS → Service Actions → Restart All) so the new options take effect.
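After the restart, a quick sanity check on 10.180.210.232 (assuming ss is available; netstat works too):
# 8004/8005 are the JMX remote ports, 9200/9300 the jmx_exporter HTTP ports.
ss -lntp | grep -E '8004|8005|9200|9300'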
4. Access the exporter endpoints
4.1 namenode (10.180.210.232:9200)
4.2 datanode
4.2.1 10.180.210.232:9300
4.2.2 10.180.210.235:9300
4.2.3 10.180.210.243:9300
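Each endpoint should return metrics in Prometheus text format; a quick check from any host that can reach the nodes:
# NameNode exporter on 9200, DataNode exporters on 9300.
curl -s http://10.180.210.232:9200/metrics | head
curl -s http://10.180.210.232:9300/metrics | head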
5. Prometheus configuration
vim /opt/prometheus/cfg/prometheus.yml
Add the following jobs under scrape_configs:
  - job_name: 'HDP-HDFS-namenode'
    static_configs:
      - targets: ['10.180.210.232:9200']
  - job_name: 'HDP-HDFS-datanode'
    static_configs:
      - targets: ['10.180.210.232:9300', '10.180.210.235:9300', '10.180.210.243:9300']
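Optionally validate the file before restarting, assuming promtool is available alongside this Prometheus install:
# promtool ships with the Prometheus release tarball.
promtool check config /opt/prometheus/cfg/prometheus.yml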
Restart Prometheus: systemctl restart prometheus.service
Check that the new targets show UP at http://10.180.249.176:9090/targets
6. Grafana configuration
No ready-made Grafana dashboard template was found for this setup, so the dashboard has to be built by hand:
http://10.180.249.176:3000/
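Add the Prometheus server as a data source, then build panels from the metrics the exporters expose (browse the /metrics output from step 4 for the exact metric names). As an illustration only, the scrape status of the new jobs can be queried directly from Prometheus with the same expression a Grafana panel would use:
# Hypothetical starter query; 'up' is a built-in Prometheus metric per scrape target.
curl -sG 'http://10.180.249.176:9090/api/v1/query' --data-urlencode 'query=up{job="HDP-HDFS-datanode"}'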