# 一、拼接 PUT 请求要发送的 JSON
# 用 dict(zip(update_config_key, update_config_value)) 完成 JSON 拼接
def concat_json(connector_name="mysql-unified_view_test-connector11"):
    """Fetch a running Debezium connector's task config and re-serialize it as JSON.

    Performs a GET against ``/connectors/<connector_name>/tasks`` on the Kafka
    Connect REST API, reads the connector name from the first task's
    ``id.connector`` field and every other setting from that task's ``config``
    map, prints each key/value pair (plus the key list, value list, dict, and
    final JSON — matching the original script's debug output), and returns the
    assembled JSON string, suitable for a later PUT to
    ``/connectors/<name>/config``.

    Relies on the module-level authenticated session ``s``.

    Args:
        connector_name: Connector to read. Defaults to the connector that was
            previously hard-coded, so existing callers are unaffected.

    Returns:
        str: JSON document of the collected configuration key/value pairs.

    Raises:
        IndexError: if the connector reports no tasks.
        KeyError: if an expected config key is absent from the task config.
    """
    config_url = (
        "http://p-awsbj-sas-hadoop-debezium-001.mynextev.net:8083"
        f"/connectors/{connector_name}/tasks"
    )
    config_info = s.get(config_url).json()
    task = config_info[0]  # only the first task is inspected, as in the original

    # Settings copied verbatim from the task's config map, in the original order.
    # NOTE(review): 'task.max' mirrors the key the original script read;
    # Debezium's canonical property is 'tasks.max' — confirm which one the
    # connector actually stores.
    config_keys = [
        "connector.class",
        "task.max",
        "database.hostname",
        "database.port",
        "database.dbname",
        "database.user",
        "database.password",
        "database.server.id",
        "database.server.name",
        "database.include.list",
        "table.include.list",
        "database.history.kafka.bootstrap.servers",
        "database.history.kafka.topic",
        "database.history.producer.sasl.mechanism",
        "database.history.producer.security.protocol",
        "database.history.producer.sasl.jaas.config",
        "database.history.consumer.sasl.mechanism",
        "database.history.consumer.security.protocol",
        "database.history.consumer.sasl.jaas.config",
        "include.schema.changes",
        "include.query",
        "snapshot.locking.mode",
        "snapshot.mode",
        "topic.creation.default.replication.factor",
        "topic.creation.default.partitions",
        "topic.creation.default.compression.type",
    ]

    # 'connector.name' is special: it comes from id.connector, not the config map.
    update_config_dic = {"connector.name": task["id"]["connector"]}
    for key in config_keys:
        update_config_dic[key] = task["config"][key]

    # Debug output, equivalent to the original per-key prints and list dumps.
    for key, value in update_config_dic.items():
        print(f"{key}: {value}")
    print(list(update_config_dic))
    print(list(update_config_dic.values()))
    print(update_config_dic)

    update_config_json = json.dumps(update_config_dic)
    print(update_config_json)
    return update_config_json
# 二、用 requests 发送 PUT 请求
import json

import requests

# Shared authenticated HTTP session for every Connect REST call in this script.
# requests.Session() is the supported spelling; requests.session() is a
# deprecated alias.
s = requests.Session()
# NOTE(review): credentials are hardcoded in source — move them to environment
# variables or a secrets store before this leaves a test environment.
s.auth = ('debezium', '4a3s4d02234h')
headers = {'Content-Type': 'application/json;charset=utf-8'}
def update_connector_config(connector_name, json_content):
    """PUT a new configuration for a Debezium connector via the Connect REST API.

    Args:
        connector_name: Name of the connector to reconfigure.
        json_content: JSON string of the full connector config (the body for
            ``PUT /connectors/<name>/config``).

    Raises:
        requests.HTTPError: if the Connect API responds with an error status.
    """
    # The original f-string had literal spaces around the placeholder,
    # producing ".../connectors/ <name> /config" — an invalid URL. Fixed here.
    update_config_url = (
        "http://p-awsbj-sas-hadoop-debezium-001.mynextev.net:8083"
        f"/connectors/{connector_name}/config"
    )
    print(update_config_url)
    print(json_content)
    response_put = s.put(update_config_url, data=json_content, headers=headers)
    # Fail loudly instead of printing a success message on an error response.
    response_put.raise_for_status()
    print(response_put.json())
    print("更新配置信息成功")
# 三、主程序
if __name__ == "__main__" :
data = concat_json( )
connector_name = "mysql-unified_view_test-connector11"
update_connector_config( connector_name, data)