DBus Cluster Deployment

Deploying InfluxDB

[root@hadoop003 software]# wget  https://dl.influxdata.com/influxdb/releases/influxdb-1.1.0.x86_64.rpm

[root@hadoop003 software]# yum -y localinstall influxdb-1.1.0.x86_64.rpm

[root@hadoop003 software]# systemctl start influxdb
[root@hadoop003 software]# systemctl status influxdb
● influxdb.service - InfluxDB is an open-source, distributed, time series database
   Loaded: loaded (/usr/lib/systemd/system/influxdb.service; enabled; vendor preset: disabled)
   Active: active (running) since 二 2020-01-21 22:20:12 CST; 4s ago
     Docs: https://docs.influxdata.com/influxdb/
 Main PID: 9510 (influxd)
   CGroup: /system.slice/influxdb.service
           └─9510 /usr/bin/influxd -config /etc/influxdb/influxdb.conf

1月 21 22:20:12 hadoop003 influxd[9510]: [shard-precreation] 2020/01/21 2...s
1月 21 22:20:12 hadoop003 influxd[9510]: [snapshot] 2020/01/21 22:20:12 S...e
1月 21 22:20:12 hadoop003 influxd[9510]: [continuous_querier] 2020/01/21 ...e
1月 21 22:20:12 hadoop003 influxd[9510]: [httpd] 2020/01/21 22:20:12 Star...e
1月 21 22:20:12 hadoop003 influxd[9510]: [httpd] 2020/01/21 22:20:12 Auth...e
1月 21 22:20:12 hadoop003 influxd[9510]: [httpd] 2020/01/21 22:20:12 List...6
1月 21 22:20:12 hadoop003 influxd[9510]: [retention] 2020/01/21 22:20:12 ...s
1月 21 22:20:12 hadoop003 influxd[9510]: [monitor] 2020/01/21 22:20:12 St...s
1月 21 22:20:12 hadoop003 influxd[9510]: 2020/01/21 22:20:12 Sending usag...m
1月 21 22:20:12 hadoop003 influxd[9510]: [run] 2020/01/21 22:20:12 Listen...s
Hint: Some lines were ellipsized, use -l to show in full.
[root@hadoop003 software]# systemctl enable influxdb

[root@hadoop003 software]# influx
Visit https://enterprise.influxdata.com to register for updates, InfluxDB server management, and monitoring.
Connected to http://localhost:8086 version 1.1.0
InfluxDB shell version: 1.1.0
> create database dbus_stat_db
> use dbus_stat_db
Using database dbus_stat_db
> CREATE USER "dbus" WITH PASSWORD '123456'
> ALTER RETENTION POLICY autogen ON dbus_stat_db DURATION 15d
> exit
[root@hadoop003 software]#
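
To confirm that the database, the dbus user, and the 15-day retention policy were created, a quick check can be run with the influx CLI (a minimal sketch; this verification step is not part of the original walkthrough):

# Verify the database, retention policy, and user created above
influx -execute 'SHOW DATABASES'
influx -execute 'SHOW RETENTION POLICIES ON dbus_stat_db'
influx -execute 'SHOW USERS'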

Deploying Grafana

[root@hadoop002 software]# wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.2.0-1.x86_64.rpm

[root@hadoop002 software]# yum -y localinstall grafana-4.2.0-1.x86_64.rpm

[root@hadoop002 software]# systemctl start grafana-server
[root@hadoop002 software]# systemctl status grafana-server
● grafana-server.service - Grafana instance
   Loaded: loaded (/usr/lib/systemd/system/grafana-server.service; disabled; vendor preset: disabled)
   Active: active (running) since 二 2020-01-21 22:27:47 CST; 6s ago
     Docs: http://docs.grafana.org
 Main PID: 10182 (grafana-server)
   CGroup: /system.slice/grafana-server.service
           └─10182 /usr/sbin/grafana-server --config=/etc/grafana/grafana.ini --pidfile= cfg:default.path...

1月 21 22:27:48 hadoop002 grafana-server[10182]: t=2020-01-21T22:27:48+0800 lvl=info msg="Executing m...v3"
1月 21 22:27:48 hadoop002 grafana-server[10182]: t=2020-01-21T22:27:48+0800 lvl=info msg="Executing m...v3"
1月 21 22:27:48 hadoop002 grafana-server[10182]: t=2020-01-21T22:27:48+0800 lvl=info msg="Created def...n]"
1月 21 22:27:48 hadoop002 grafana-server[10182]: t=2020-01-21T22:27:48+0800 lvl=info msg="Starting pl...ins
1月 21 22:27:48 hadoop002 grafana-server[10182]: t=2020-01-21T22:27:48+0800 lvl=warn msg="Plugin dir ...ins
1月 21 22:27:48 hadoop002 grafana-server[10182]: t=2020-01-21T22:27:48+0800 lvl=info msg="Plugin dir ...ins
1月 21 22:27:48 hadoop002 grafana-server[10182]: t=2020-01-21T22:27:48+0800 lvl=info msg="Initializin...er"
1月 21 22:27:48 hadoop002 grafana-server[10182]: t=2020-01-21T22:27:48+0800 lvl=info msg="Initializin...rl=
1月 21 22:27:48 hadoop002 grafana-server[10182]: t=2020-01-21T22:27:48+0800 lvl=info msg="Initializin...ine
1月 21 22:27:48 hadoop002 grafana-server[10182]: t=2020-01-21T22:27:48+0800 lvl=info msg="Initializin...nup
Hint: Some lines were ellipsized, use -l to show in full.
[root@hadoop002 software]# systemctl enable grafana-server.service
Created symlink from /etc/systemd/system/multi-user.target.wants/grafana-server.service to /usr/lib/systemd/system/grafana-server.service.
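
Before logging in to the UI, it can be worth confirming that Grafana is answering HTTP requests (a sketch; 3000 is Grafana's default HTTP port and is an assumption here):

# Hypothetical check that Grafana is listening on its default port
curl -I http://hadoop002:3000/login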

Grafana's default username/password is admin/admin; log in with those credentials.

The generated Grafana API key:

eyJrIjoiRHRuQkwzaVJlNTlEd1NHN3FtVjJvV0dDN1NzUlE1UWIiLCJuIjoiZ3JhZm5hX2FkbWluIiwiaWQiOjF9
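
This key is a Grafana API token, which is sent as a Bearer header when calling the Grafana HTTP API. A minimal sketch of how such a token is used (the host/port hadoop002:3000 is assumed from Grafana's default listen address):

# Hypothetical call to the Grafana API using the generated token
curl -H "Authorization: Bearer eyJrIjoiRHRuQkwzaVJlNTlEd1NHN3FtVjJvV0dDN1NzUlE1UWIiLCJuIjoiZ3JhZm5hX2FkbWluIiwiaWQiOjF9" \
     http://hadoop002:3000/api/org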

[root@hadoop001 ~]# mysql -uroot -p

mysql> set global validate_password_policy=0;
Query OK, 0 rows affected (0.00 sec)

mysql> set global validate_password_mixed_case_count=0;
Query OK, 0 rows affected (0.00 sec)

mysql> set global validate_password_number_count=3;
Query OK, 0 rows affected (0.00 sec)

mysql> set global validate_password_special_char_count=0;
Query OK, 0 rows affected (0.00 sec)

mysql> set global validate_password_length=3;
Query OK, 0 rows affected (0.00 sec)

mysql> create database dbusmgr DEFAULT CHARSET utf8 COLLATE utf8_general_ci;
Query OK, 1 row affected (0.00 sec)

mysql> create user 'dbusmgr'@'%' identified by '123456';
Query OK, 0 rows affected (0.01 sec)

mysql> flush privileges;
Query OK, 0 rows affected (0.00 sec)

mysql> grant all privileges on dbusmgr.* to 'dbusmgr'@'%' identified by '123456';
Query OK, 0 rows affected, 1 warning (0.00 sec)

mysql> flush privileges;
Query OK, 0 rows affected (0.00 sec)
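
The validate_password settings above relax MySQL's password policy so that the short password '123456' is accepted. Before moving on, it can be worth confirming that the dbusmgr account can reach the new database from another node (a sketch; the client host is arbitrary):

# Hypothetical connectivity check against the MySQL server on hadoop001
mysql -h hadoop001 -udbusmgr -p123456 -e 'SHOW DATABASES;'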

Deploying Nginx

[hadoop@hadoop003 ~]$ sudo yum -y localinstall http://nginx.org/packages/centos/7/noarch/RPMS/nginx-release-centos-7-0.el7.ngx.noarch.rpm

[hadoop@hadoop003 ~]$ sudo yum install -y nginx

[hadoop@hadoop003 ~]$ sudo systemctl start nginx
[hadoop@hadoop003 ~]$ sudo systemctl status nginx
● nginx.service - nginx - high performance web server
   Loaded: loaded (/usr/lib/systemd/system/nginx.service; disabled; vendor preset: disabled)
   Active: active (running) since Tue 2020-01-21 22:42:36 CST; 31s ago
     Docs: http://nginx.org/en/docs/
  Process: 13756 ExecStart=/usr/sbin/nginx -c /etc/nginx/nginx.conf (code=exited, status=0/SUCCESS)
 Main PID: 13757 (nginx)
   CGroup: /system.slice/nginx.service
           ├─13757 nginx: master process /usr/sbin/nginx -c /etc/nginx/ngin...
           └─13758 nginx: worker process

Jan 21 22:42:36 hadoop003 systemd[1]: Starting nginx - high performance we....
Jan 21 22:42:36 hadoop003 systemd[1]: Started nginx - high performance web....
Hint: Some lines were ellipsized, use -l to show in full.
[hadoop@hadoop003 ~]$ sudo systemctl enable nginx
Created symlink from /etc/systemd/system/multi-user.target.wants/nginx.service to /usr/lib/systemd/system/nginx.service.

[hadoop@hadoop003 software]$ unzip dbuskeeper_web.zip

[hadoop@hadoop003 app]$ sudo mv /home/hadoop/app/dbus/dbuskeeper_web/nginx.conf /etc/nginx/

[hadoop@hadoop003 app]$ sudo mv /home/hadoop/app/dbus/dbuskeeper_web/build.zip /usr/share/nginx/html/

[hadoop@hadoop003 app]$ cd /usr/share/nginx/html/
[hadoop@hadoop003 html]$ sudo unzip build.zip

[hadoop@hadoop003 html]$ sudo systemctl restart nginx
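
After the restart, Nginx should serve the front-end unpacked from build.zip. A quick sanity check (a sketch; the listen port comes from the dbuskeeper_web nginx.conf, so adjust it if that file changes the package default of 80):

# Hypothetical check that Nginx answers and serves the unpacked build
curl -I http://hadoop003/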

[hadoop@hadoop003 html]$ vim /home/hadoop/app/dbus/dbuskeeper_web/config.properties

[hadoop@hadoop003 html]$ cd /home/hadoop/app/dbus/dbuskeeper_web

[hadoop@hadoop003 dbuskeeper_web]$ chmod u+x init.sh start.sh stop.sh

Error encountered

The first run failed because passwordless (key-based) SSH login had not been set up for the hadoop user; see the sketch below.
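
init.sh runs commands on the cluster nodes over SSH with ~/.ssh/id_rsa (see the SSHUtils entries in the logs later), so the hadoop user needs key-based login to every host, including hadoop003 itself. A minimal sketch of setting that up (host names follow this cluster; adjust as needed):

# Generate a key pair for the hadoop user if one does not exist yet
ssh-keygen -t rsa -f ~/.ssh/id_rsa -N ''
# Copy the public key to every node init.sh will reach over SSH
ssh-copy-id hadoop@hadoop001
ssh-copy-id hadoop@hadoop002
ssh-copy-id hadoop@hadoop003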

[hadoop@hadoop003 dbuskeeper_web]$ ./init.sh
加载config文件...
检查配置文件中...
nginx地址检测通过
测试数据库连通性...
数据库连通性测试完毕
验证密钥是否存在...
密钥文件存在
kafka检测通过
influxdb地址检测通过
测试ZK连通性,请等待5秒
[main] INFO org.apache.curator.framework.imps.CuratorFrameworkImpl - Starting
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:zookeeper.version=3.4.8--1, built on 02/06/2016 03:18 GMT
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:host.name=hadoop003
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.version=1.8.0_231
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.vendor=Oracle Corporation
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.home=/usr/local/jdk1.8.0_231/jre
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.class.path=lib/dbus-keeper-auto-deploy-0.5.0-jar-with-dependencies.jar
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.io.tmpdir=/tmp
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.compiler=<NA>
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.name=Linux
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.arch=amd64
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.version=3.10.0-957.21.3.el7.x86_64
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.name=hadoop
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.home=/home/hadoop
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.dir=/home/hadoop/app/dbus/dbuskeeper_web
[main] INFO org.apache.zookeeper.ZooKeeper - Initiating client connection, connectString=hadoop001:2181 sessionTimeout=60000 watcher=org.apache.curator.ConnectionState@204f30ec
[main-SendThread(hadoop001:2181)] INFO org.apache.zookeeper.ClientCnxn - Opening socket connection to server hadoop001/172.31.36.137:2181. Will not attempt to authenticate using SASL (unknown error)
[main-SendThread(hadoop001:2181)] INFO org.apache.zookeeper.ClientCnxn - Socket connection established to hadoop001/172.31.36.137:2181, initiating session
[main-SendThread(hadoop001:2181)] INFO org.apache.zookeeper.ClientCnxn - Session establishment complete on server hadoop001/172.31.36.137:2181, sessionid = 0x16fc861d20f0029, negotiated timeout = 60000
[main-EventThread] INFO org.apache.curator.framework.state.ConnectionStateManager - State change: CONNECTED
[main] INFO org.apache.zookeeper.ZooKeeper - Session: 0x16fc861d20f0029 closed
ZK连通性测试完毕
新建logs目录...
[main-EventThread] INFO org.apache.zookeeper.ClientCnxn - EventThread shut down for session: 0x16fc861d20f0029
解压gateway...
配置gateway...
压缩gateway...
解压keeper-mgr...
配置keeper-mgr...
压缩keeper-mgr...
解压keeper-service...
配置keeper-service...
压缩keeper-service...
解压dbus-heartbeat-0.5.0...
配置dbus-heartbeat-0.5.0...
压缩dbus-heartbeat-0.5.0...
初始化完成

After starting the services, wait a few minutes for the system to come up fully, then log in to the web UI at http://hadoop003:8080. The first login automatically redirects to the initialization page; if it does not, manually enter http://hadoop003:8080/login/init in the address bar and fill in the required information following the prompts on the page.

Clicking OK never redirected; the page kept reporting that the information above was invalid.

Check the logs:

[hadoop@hadoop003 logs]$ tailf service.log

2020-01-22 00:53:54.743  INFO 22061 --- [nio-8901-exec-6] com.creditease.dbus.utils.SSHUtils       : user:hadoop,host:hadoop003,port:22,keyPath:~/.ssh/id_rsa,command:cd /home/hadoop/app/dbus/heartbeat/dbus-heartbeat-0.5.0; nohup ./heartbeat.sh >/dev/null 2>&1 &
2020-01-22 00:53:55.864  INFO 22061 --- [nio-8901-exec-6] com.creditease.dbus.utils.SSHUtils       : inputMsg:
2020-01-22 00:53:55.864  INFO 22061 --- [nio-8901-exec-6] com.creditease.dbus.utils.SSHUtils       : errorMsg:
2020-01-22 00:53:55.864  INFO 22061 --- [nio-8901-exec-6] c.c.dbus.service.ConfigCenterService     : 2.heartbeat初始化完成。
2020-01-22 00:53:56.603  INFO 22061 --- [nio-8901-exec-6] c.c.dbus.service.ConfigCenterService     : 3.mgr数据库初始化完成。
2020-01-22 00:53:56.615  INFO 22061 --- [nio-8901-exec-6] c.c.dbus.service.ConfigCenterService     : 4.添加模板sink初始化完成。
2020-01-22 00:53:56.626  INFO 22061 --- [nio-8901-exec-6] c.c.dbus.service.ConfigCenterService     : 5.添加超级管理员初始化完成。
2020-01-22 00:53:56.626  INFO 22061 --- [nio-8901-exec-6] c.c.dbus.service.ConfigCenterService     : cmd: mkdir -pv /usr/hdp/current/storm-client/
2020-01-22 00:53:56.626  INFO 22061 --- [nio-8901-exec-6] com.creditease.dbus.utils.SSHUtils       : user:hadoop,host:hadoop002,port:22,keyPath:~/.ssh/id_rsa,command: mkdir -pv /usr/hdp/current/storm-client/
2020-01-22 00:53:57.725  INFO 22061 --- [nio-8901-exec-6] com.creditease.dbus.utils.SSHUtils       : inputMsg:
2020-01-22 00:53:57.725  INFO 22061 --- [nio-8901-exec-6] com.creditease.dbus.utils.SSHUtils       : errorMsg:
2020-01-22 00:53:57.860 ERROR 22061 --- [nio-8901-exec-6] com.creditease.dbus.utils.SSHUtils       : Permission denied

Wed Jan 22 00:02:48 CST 2020 WARN: Establishing SSL connection without server's identity verification is not recommended. According to MySQL 5.5.45+, 5.6.26+ and 5.7.6+ requirements SSL connection must be established by default if explicit option isn't set. For compliance with existing applications not using SSL the verifyServerCertificate property is set to 'false'. You need either to explicitly disable SSL by setting useSSL=false, or set useSSL=true and provide truststore for server certificate verification.
Error executing: CREATE TABLE `t_data_tables` (
  `id` int(11) unsigned NOT NULL AUTO_INCREMENT,
  `ds_id` int(11) unsigned NOT NULL COMMENT 't_dbus_datasource 表ID',
  `schema_id` int(11) unsigned NOT NULL COMMENT 't_tab_schema 表ID',
  `schema_name` varchar(64) DEFAULT NULL,
  `table_name` varchar(64) NOT NULL DEFAULT '' COMMENT '表名',
  `table_name_alias` varchar(64) NOT NULL DEFAULT '' COMMENT '别名',
  `physical_table_regex` varchar(96) DEFAULT NULL,
  `output_topic` varchar(96) DEFAULT '' COMMENT 'kafka_topic',
  `ver_id` int(11) unsigned DEFAULT NULL COMMENT '当前使用的meta版本ID',
  `status` varchar(32) NOT NULL DEFAULT 'abort' COMMENT 'ok,abort,inactive,waiting\r\nok:正常使用;abort:需要抛弃该表的数据;waiting:等待拉全量
.  Cause: com.mysql.jdbc.exceptions.jdbc4.MySQLSyntaxErrorException: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''ok,abort,inactive,waiting\r\nok:正常使用;abort:需要抛弃该表的数据;' at line 11
Error executing:   `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '创建时间',
  `meta_change_flg` int(1) DEFAULT '0' COMMENT 'meta变更标识,初始值为:0,表示代表没有发生变更,1:代表meta发生变更。该字段目前mysql appender模块使用。',
  `batch_id` int(11) DEFAULT '0' COMMENT '批次ID,用来标记拉全量的批次,每次拉全量会++,增量只使用该字段并不修改',
  `ver_change_history` varchar(128) DEFAULT NULL,
  `ver_change_notice_flg` int(1) NOT NULL DEFAULT '0',
  `output_before_update_flg` int(1) NOT NULL DEFAULT '0',
  `description` varchar(128) DEFAULT NULL,
  `fullpull_col` varchar(255) DEFAULT '' COMMENT '全量分片列:配置column名称',
  `fullpull_split_shard_size` varchar(255) DEFAULT '' COMMENT '全量分片大小配置:配置-1代表不分片',
  `fullpull_split_style` varchar(255) DEFAULT '' COMMENT '全量分片类型:MD5',
  `is_open` int(1) DEFAULT '0' COMMENT 'mongo是否展开节点,0不展开,1一级展开',
  `is_auto_complete` tinyint(4) DEFAULT '0' COMMENT 'mongoDB的表是否补全数据;如果开启,增量中更新操作会回查并补全数据',
  PRIMARY KEY (`id`),
  UNIQUE KEY `idx_sid_tabname` (`schema_id`,`table_name`) USING BTREE
) ENGINE=InnoDB  DEFAULT CHARSET=utf8
.  Cause: com.mysql.jdbc.exceptions.jdbc4.MySQLSyntaxErrorException: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '`create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTA' at line 1
Error executing: CREATE TABLE `t_name_alias_mapping` (
  `id` int(11) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID',
  `type` int(2) NOT NULL COMMENT '别名类型1,router拓扑别名
.  Cause: com.mysql.jdbc.exceptions.jdbc4.MySQLSyntaxErrorException: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''别名类型1,router拓扑别名' at line 3
Error executing:   `name` varchar(64) NOT NULL COMMENT '名称',
  `name_id` int(11) NOT NULL COMMENT '名称对应ID',
  `alias` varchar(64) NOT NULL COMMENT '别名',
  `update_time` timestamp NULL DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='别名表'
.  Cause: com.mysql.jdbc.exceptions.jdbc4.MySQLSyntaxErrorException: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '`name` varchar(64) NOT NULL COMMENT '名称',
  `name_id` int(11) NOT NULL COMME' at line 1
2020-01-22 00:06:44.712  INFO 22062 --- [trap-executor-0] c.n.d.s.r.aws.ConfigClusterResolver      : Resolving eureka endpoints via configuration
2020-01-22 00:11:44.714  INFO 22062 --- [trap-executor-0] c.n.d.s.r.aws.ConfigClusterResolver      : Resolving eureka endpoints via configuration

This problem took more than two hours to track down. The Permission denied error is fixed by giving the hadoop user ownership of the Storm directory on hadoop002:

[hadoop@hadoop002 3.1.4.0-315]$ sudo chown -R hadoop:hadoop storm
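
After changing ownership, the command that failed in the log can be re-run by hand on hadoop002 to confirm the hadoop user can now write there (a sketch of a verification step, not part of the original walkthrough):

# Re-run the command from the failing SSH step and inspect the result
mkdir -pv /usr/hdp/current/storm-client/
ls -ld /usr/hdp/current/storm-client/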

The initial administrator account/password is admin/12345678.

Because of the SQL syntax errors above, the init script failed to create t_data_tables; create the table manually:

CREATE TABLE `t_data_tables` (
  `id` int(11) unsigned NOT NULL AUTO_INCREMENT,
  `ds_id` int(11) unsigned NOT NULL COMMENT 't_dbus_datasource 表ID',
  `schema_id` int(11) unsigned NOT NULL COMMENT 't_tab_schema 表ID',
  `schema_name` varchar(64) DEFAULT NULL,
  `table_name` varchar(64) NOT NULL DEFAULT '' COMMENT '表名',
  `table_name_alias` varchar(64) NOT NULL DEFAULT '' COMMENT '别名',
  `physical_table_regex` varchar(96) DEFAULT NULL,
  `output_topic` varchar(96) DEFAULT '' COMMENT 'kafka_topic',
  `ver_id` int(11) unsigned DEFAULT NULL COMMENT '当前使用的meta版本ID',
  `status` varchar(32) NOT NULL DEFAULT 'abort' COMMENT 'ok,abort,inactive,waiting\r\nok:正常使用;abort:需要抛弃该表的数据;waiting:等待拉全量;inactive:不可用',
  `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '创建时间',
  `meta_change_flg` int(1) DEFAULT '0' COMMENT 'meta变更标识,初始值为:0,表示代表没有发生变更,1:代表meta发生变更。该字段目前mysql appender模块使用。',
  `batch_id` int(11) DEFAULT '0' COMMENT '批次ID,用来标记拉全量的批次,每次拉全量会++,增量只使用该字段并不修改',
  `ver_change_history` varchar(128) DEFAULT NULL,
  `ver_change_notice_flg` int(1) NOT NULL DEFAULT '0',
  `output_before_update_flg` int(1) NOT NULL DEFAULT '0',
  `description` varchar(128) DEFAULT NULL,
  `fullpull_col` varchar(255) DEFAULT '' COMMENT '全量分片列:配置column名称',
  `fullpull_split_shard_size` varchar(255) DEFAULT '' COMMENT '全量分片大小配置:配置-1代表不分片',
  `fullpull_split_style` varchar(255) DEFAULT '' COMMENT '全量分片类型:MD5',
  `is_open` int(1) DEFAULT '0' COMMENT 'mongo是否展开节点,0不展开,1一级展开',
  `is_auto_complete` tinyint(4) DEFAULT '0' COMMENT 'mongoDB的表是否补全数据;如果开启,增量中更新操作会回查并补全数据',
  PRIMARY KEY (`id`),
  UNIQUE KEY `idx_sid_tabname` (`schema_id`,`table_name`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
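
The error log also shows a failed CREATE TABLE for t_name_alias_mapping, so it may need to be created by hand as well if it is missing. A sketch of that statement, reconstructed from the fragments in the log above (the comment on the `type` column is truncated in the log and is kept as shown there):

CREATE TABLE `t_name_alias_mapping` (
  `id` int(11) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID',
  `type` int(2) NOT NULL COMMENT '别名类型1,router拓扑别名',
  `name` varchar(64) NOT NULL COMMENT '名称',
  `name_id` int(11) NOT NULL COMMENT '名称对应ID',
  `alias` varchar(64) NOT NULL COMMENT '别名',
  `update_time` timestamp NULL DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='别名表';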

With that, DBus is up and running!