本文主要实现logstash到datahub的功能
1、下载
下载 logstash-with-datahub 安装包(tar.gz):https://www.alipan.com/t/LmKbT2eJ9ELywpcXBLHg (阿里云盘分享链接,保存后下载)。
2、logstash文件到datahub
# Pipeline: tail a local CSV file and push each parsed row to a DataHub topic.
input {
  file {
    path => "/soft/data/data.csv"
    # Read the file from the top on first discovery instead of only tailing new lines.
    start_position => "beginning"
  }
}

filter {
  csv {
    # Column names must match the field names of the DataHub topic schema.
    columns => ['col1', 'col2', 'col3']
  }
}

output {
  datahub {
    access_id => "**********"
    access_key => "********************"
    endpoint => "https://datahub.cn-beijing-**********.com"
    project_name => "bigdata"
    topic_name => "txt_logstash"
    #shard_id => "0"
    #shard_keys => ["thread_id"]
    # Keep the pipeline alive when a record fails to parse or write;
    # failed records are appended to the dirty-data file below.
    dirty_data_continue => true
    dirty_data_file => "/soft/data/dirty.data"
    dirty_data_file_max_size => 1000
  }
}
3、logstash 全量 mysql到datahub
# Pipeline: full-table sync from MySQL to DataHub.
# NOTE: the schedule re-runs the same SELECT every minute, so the whole
# table is re-sent each run — this is a full (not incremental) sync example.
input {
  jdbc {
    # Path to the MySQL JDBC connector jar.
    jdbc_driver_library => "/soft/logstash-with-datahub-8.10.3/mysql-connector-java-8.0.13.jar"
    jdbc_driver_class => "com.mysql.cj.jdbc.Driver"
    jdbc_connection_string => "jdbc:mysql://192.168.140.1:3306/flinkcdc?serverTimezone=UTC"
    jdbc_user => "flink"
    jdbc_password => "flink"
    # Cron-style schedule: run once every minute.
    schedule => "* * * * *"
    statement => "SELECT id,name,age FROM user_info"
  }
}

output {
  datahub {
    access_id => "*********"
    access_key => "**************"
    endpoint => "https://datahub.cn-**************.com"
    project_name => "bigdata"
    topic_name => "txt_logstash"
    #shard_id => "0"
    #shard_keys => ["thread_id"]
    # Keep the pipeline alive on bad records; they go to the dirty-data file.
    dirty_data_continue => true
    dirty_data_file => "/soft/data/dirty.data"
    dirty_data_file_max_size => 1000
  }
}
4、logstash number增量到datahub
# Pipeline: incremental sync from MySQL to DataHub, tracked by a numeric id column.
# Each run only reads rows with id > :sql_last_value (the value persisted from
# the previous run), so already-synced rows are not re-sent.
input {
  jdbc {
    # Path to the MySQL JDBC connector jar.
    jdbc_driver_library => "/soft/logstash-with-datahub-8.10.3/mysql-connector-java-8.0.13.jar"
    jdbc_driver_class => "com.mysql.cj.jdbc.Driver"
    jdbc_connection_string => "jdbc:mysql://192.168.140.1:3306/flinkcdc?serverTimezone=UTC"
    jdbc_user => "flink"
    jdbc_password => "flink"
    # Cron-style schedule: run once every minute.
    schedule => "* * * * *"
    # Selected field names (via aliases) must match the DataHub topic schema.
    statement => "SELECT id as col1,name as col2,age as col3 FROM user_info where id>:sql_last_value order by id asc"
    use_column_value => true
    # Must reference a column present in the result set (the alias, here).
    tracking_column => "col1"
    # BUGFIX: was `true`, which resets :sql_last_value on every pipeline start
    # and re-ingests the whole table after a restart, defeating incremental sync.
    clean_run => false
    # File where the last-seen tracking value is persisted between runs.
    # NOTE(review): give each pipeline its own metadata file — sharing one file
    # across pipelines corrupts their tracking state.
    last_run_metadata_path => "/soft/data/last_run.txt"
    record_last_run => true
  }
}

output {
  datahub {
    access_id => "************"
    access_key => "**********"
    endpoint => "https://datahub.cn-******************.com"
    project_name => "bigdata"
    topic_name => "txt_logstash"
    #shard_id => "0"
    #shard_keys => ["thread_id"]
    # Keep the pipeline alive on bad records; they go to the dirty-data file.
    dirty_data_continue => true
    dirty_data_file => "/soft/data/dirty.data"
    dirty_data_file_max_size => 1000
  }
}
5、logstash timestamp 增量到datahub
# Pipeline: incremental sync from MySQL to DataHub, tracked by a timestamp column.
# Each run only reads rows created or updated after :sql_last_value.
input {
  jdbc {
    # Path to the MySQL JDBC connector jar.
    jdbc_driver_library => "/soft/logstash-with-datahub-8.10.3/mysql-connector-java-8.0.13.jar"
    jdbc_driver_class => "com.mysql.cj.jdbc.Driver"
    jdbc_connection_string => "jdbc:mysql://192.168.140.1:3306/flinkcdc?serverTimezone=UTC"
    jdbc_user => "flink"
    jdbc_password => "flink"
    # Cron-style schedule: run once every minute.
    schedule => "* * * * *"
    # BUGFIX: update_time must appear in the SELECT list — the jdbc input reads
    # the tracking value from the result set, so tracking silently fails without it.
    # If the DataHub topic schema has no update_time field, drop it with a
    # `mutate { remove_field => ["update_time"] }` filter before the output.
    statement => "SELECT id as col1,name as col2,age as col3,update_time FROM user_regist where create_time>:sql_last_value or update_time>:sql_last_value order by update_time asc"
    use_column_value => true
    tracking_column => "update_time"
    tracking_column_type => "timestamp"
    # BUGFIX: was `true`, which resets :sql_last_value on every pipeline start
    # and re-ingests the whole table after a restart, defeating incremental sync.
    clean_run => false
    # File where the last-seen tracking value is persisted between runs.
    # NOTE(review): this path collides with the pipeline in section 4 — use a
    # distinct file per pipeline if both run on the same host.
    last_run_metadata_path => "/soft/data/last_run.txt"
    record_last_run => true
  }
}

output {
  datahub {
    access_id => "********"
    access_key => "***********"
    endpoint => "https://datahub.cn-**************.com"
    project_name => "bigdata"
    topic_name => "txt_logstash"
    #shard_id => "0"
    #shard_keys => ["thread_id"]
    # Keep the pipeline alive on bad records; they go to the dirty-data file.
    dirty_data_continue => true
    dirty_data_file => "/soft/data/dirty.data"
    dirty_data_file_max_size => 1000
  }
}
6、启动
./bin/logstash -f mysql2datahub.conf