Local generation - single table
Run the command python test/datax配置文件-单个表.py test1 stu in the PyCharm terminal, where test1 is the database name and stu is the table name.
Note: the file is written to the test directory on drive E (E:\test); create that directory yourself.
import json
import sys
import pymysql
def gen_json(dbname, tablename):
    s1 = {
        "job": {
            "content": [
                {
                    "reader": {
                        "name": "mysqlreader",
                        "parameter": {
                            "connection": [
                                {
                                    "jdbcUrl": ["jdbc:mysql://hadoop11:3306/" + dbname + "?useSSL=false"],
                                    "table": [tablename]
                                }
                            ],
                            "password": "123456",
                            "username": "root",
                            "column": getColumn(dbname, tablename)
                        }
                    },
                    "writer": {
                        "name": "hdfswriter",
                        "parameter": {
                            "column": getColumnAndType(dbname, tablename),
                            "defaultFS": "hdfs://hadoop11:8020",
                            "fileType": "text",
                            "path": "/user/hive/warehouse/ods.db/" + tablename + "/dt=$dt",
                            "fieldDelimiter": "\t",
                            "fileName": tablename,
                            "writeMode": "append"
                        }
                    }
                }
            ],
            "setting": {
                "speed": {
                    "channel": "1"
                }
            }
        }
    }
    with open('e:/test/' + tablename + '.json', 'w') as f:
        json.dump(s1, f, indent=4)
def queryDataBase(dbname, tablename):
    conn = pymysql.connect(user='root', password='123456', host='hadoop11')
    cursor = conn.cursor()
    cursor.execute(
        "select column_name, data_type from information_schema.`COLUMNS` "
        "where TABLE_SCHEMA = %s and table_name = %s order by ordinal_position",
        [dbname, tablename])
    fetchall = cursor.fetchall()
    cursor.close()
    conn.close()
    return fetchall
def getColumn(dbname, tablename):
    k1 = queryDataBase(dbname, tablename)
    k2 = list(map(lambda x: x[0], k1))
    return k2
def getColumnAndType(dbname, tablename):
    k1 = queryDataBase(dbname, tablename)
    # MySQL data type -> Hive column type used by hdfswriter
    mappings = {
        "bigint": "bigint",
        "int": "bigint",
        "smallint": "bigint",
        "tinyint": "bigint",
        "decimal": "string",
        "double": "double",
        "float": "float",
        "binary": "string",
        "char": "string",
        "varchar": "string",
        "datetime": "string",
        "time": "string",
        "timestamp": "string",
        "date": "string",
        "text": "string",
        "varbinary": "binary"
    }
    k2 = list(map(lambda x: {"name": x[0], "type": mappings[x[1].lower()]}, k1))
    return k2
if __name__ == '__main__':
    args = sys.argv[1:]
    if len(args) < 2:
        print('First argument: MySQL database name')
        print('Second argument: table name')
        sys.exit()
    dbname = args[0]
    tablename = args[1]
    gen_json(dbname, tablename)
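A minimal sanity check, assuming a table named stu was generated (stu is only a placeholder): load the file back and inspect the reader columns and writer path. The dt=$dt placeholder in the path is a DataX job variable, typically filled in when the job is submitted (for example with DataX's -p"-Ddt=2024-01-01" switch).

import json

with open('e:/test/stu.json') as f:
    cfg = json.load(f)

reader = cfg["job"]["content"][0]["reader"]["parameter"]
writer = cfg["job"]["content"][0]["writer"]["parameter"]
print(reader["column"])   # column names pulled from information_schema
print(writer["path"])     # /user/hive/warehouse/ods.db/stu/dt=$dt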
Local generation - all tables in a database
Run the command python test/datax配置文件-整个库.py test1 in the PyCharm terminal, where test1 is the database name.
Note: the files are written to the test directory on drive E (E:\test); create that directory yourself.
import json
import sys
import pymysql
def gen_json(databasename, tablename):
    s1 = {
        "job": {
            "content": [
                {
                    "reader": {
                        "name": "mysqlreader",
                        "parameter": {
                            "connection": [
                                {
                                    "jdbcUrl": ["jdbc:mysql://hadoop11:3306/" + databasename + "?useSSL=false"],
                                    "table": [tablename]
                                }
                            ],
                            "password": "123456",
                            "username": "root",
                            "column": getColumn(databasename, tablename)
                        }
                    },
                    "writer": {
                        "name": "hdfswriter",
                        "parameter": {
                            "column": getColumnAndType(databasename, tablename),
                            "defaultFS": "hdfs://hadoop11:8020",
                            "fileType": "text",
                            "path": "/user/hive/warehouse/ods.db/" + tablename + "/dt=$dt",
                            "fieldDelimiter": "\t",
                            "fileName": tablename,
                            "writeMode": "append"
                        }
                    }
                }
            ],
            "setting": {
                "speed": {
                    "channel": "1"
                }
            }
        }
    }
    with open('e:/test/' + tablename + '.json', 'w') as f:
        json.dump(s1, f, indent=4)
def queryDataBase(databasename, tablename):
    conn = pymysql.connect(user='root', password='123456', host='hadoop11')
    cursor = conn.cursor()
    cursor.execute(
        "select column_name, data_type from information_schema.`COLUMNS` "
        "where TABLE_SCHEMA = %s and table_name = %s order by ordinal_position",
        [databasename, tablename])
    fetchall = cursor.fetchall()
    cursor.close()
    conn.close()
    return fetchall
def getColumn(databasename, tablename):
    k1 = queryDataBase(databasename, tablename)
    k2 = list(map(lambda x: x[0], k1))
    return k2
def getColumnAndType(databasename, tablename):
    k1 = queryDataBase(databasename, tablename)
    mappings = {
        'bigint': 'bigint',
        'varchar': 'string',
        'int': 'int',
        'datetime': 'string',
        'text': 'string',
        'decimal': 'double',
        'double': 'double',
        'date': 'string',
        'time': 'string',
        'varbinary': 'string'
    }
    k2 = list(map(lambda x: {"name": x[0], "type": mappings[x[1].lower()]}, k1))
    return k2
def getAllTableName(databasename):
    conn = pymysql.connect(user='root', password='123456', host='hadoop11')
    cursor = conn.cursor()
    cursor.execute(
        "select table_name from information_schema.`TABLES` where TABLE_SCHEMA = %s",
        [databasename])
    fetchall = cursor.fetchall()
    cursor.close()
    conn.close()
    return [x[0] for x in fetchall]
if __name__ == '__main__':
    args = sys.argv[1:]
    if len(args) == 0:
        print("Please pass one argument: the database name")
    else:
        databasename = args[0]
        print(f"Generating DataX config files for all tables in database {databasename}")
        list1 = getAllTableName(databasename)
        for tablename in list1:
            gen_json(databasename, tablename)
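A quick way to confirm what the loop produced is to list the JSON files in the output directory (the path is the script's default; the file names depend on your own tables):

import os

# One <table>.json file is expected per table in the database
for name in sorted(os.listdir('e:/test')):
    if name.endswith('.json'):
        print(name)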
Linux generation (single table & multiple tables)
On Linux, run the command below:
python get_datax_json.py -d db1
i.e. python <script name> -d <database name>
In the code, change source_database = "db1" to the name of your own MySQL database.
# coding=utf-8
import os
import sys
import getopt
import json
import pymysql
pymysql.install_as_MySQLdb()
# MySQL connection settings; adjust to your environment
mysql_host = "hadoop11"
mysql_port = "3306"
mysql_user = "root"
mysql_passwd = "123456"
# HDFS NameNode settings; adjust to your environment
hdfs_nn_host = "hadoop11"
hdfs_nn_port = "8020"
# Target directory for the generated config files; adjust as needed
output_path = "/opt/python/job"

# Open a MySQL connection
def get_connection():
    return pymysql.connect(host=mysql_host, port=int(mysql_port), user=mysql_user, passwd=mysql_passwd)
# Fetch column names and data types for a table from information_schema
def get_mysql_meta(database, table):
    connection = get_connection()
    cursor = connection.cursor()
    sql = "SELECT COLUMN_NAME, DATA_TYPE FROM information_schema.COLUMNS WHERE TABLE_SCHEMA=%s AND TABLE_NAME=%s ORDER BY ORDINAL_POSITION"
    cursor.execute(sql, [database, table])
    fetchall = cursor.fetchall()
    cursor.close()
    connection.close()
    return fetchall
def get_mysql_columns(database, table):
    return list(map(lambda x: x[0], get_mysql_meta(database, table)))
def get_hive_columns(database, table):
    def type_mapping(mysql_type):
        mappings = {
            "bigint": "bigint",
            "int": "bigint",
            "smallint": "bigint",
            "tinyint": "bigint",
            "decimal": "string",
            "double": "double",
            "float": "float",
            "binary": "string",
            "char": "string",
            "varchar": "string",
            "datetime": "string",
            "time": "string",
            "timestamp": "string",
            "date": "string",
            "text": "string",
            "varbinary": "binary"
        }
        return mappings[mysql_type]

    meta = get_mysql_meta(database, table)
    return list(map(lambda x: {"name": x[0], "type": type_mapping(x[1].lower())}, meta))
def generate_json(source_database, source_table):
    job = {
        "job": {
            "setting": {
                "speed": {
                    "channel": 2
                },
                "errorLimit": {
                    "record": 0,
                    "percentage": 0.02
                }
            },
            "content": [{
                "reader": {
                    "name": "mysqlreader",
                    "parameter": {
                        "username": mysql_user,
                        "password": mysql_passwd,
                        "column": get_mysql_columns(source_database, source_table),
                        "splitPk": "",
                        "connection": [{
                            "table": [source_table],
                            "jdbcUrl": [
                                "jdbc:mysql://" + mysql_host + ":" + mysql_port + "/" + source_database + "?useSSL=false"]
                        }]
                    }
                },
                "writer": {
                    "name": "hdfswriter",
                    "parameter": {
                        "defaultFS": "hdfs://" + hdfs_nn_host + ":" + hdfs_nn_port,
                        "fileType": "text",
                        "path": f"/user/hive/warehouse/ods.db/{source_table}",
                        "fileName": source_table,
                        "column": get_hive_columns(source_database, source_table),
                        "writeMode": "append",
                        "fieldDelimiter": ","
                    }
                }
            }]
        }
    }

    if not os.path.exists(output_path):
        os.makedirs(output_path)
    with open(os.path.join(output_path, ".".join([source_database, source_table, "json"])), "w") as f:
        json.dump(job, f)
def get_tablename(source_database):
    connection = get_connection()
    cursor = connection.cursor()
    sql = "SELECT DISTINCT TABLE_NAME FROM information_schema.COLUMNS WHERE TABLE_SCHEMA=%s"
    cursor.execute(sql, [source_database])
    fetchall = cursor.fetchall()
    cursor.close()
    connection.close()
    return fetchall
def main(args):
    # Default database; every table in it gets a datax json config file
    source_database = "db1"
    # To generate only a single table per run, uncomment `source_table = ""` and the
    # `generate_json(source_database, source_table)` line below, and comment out the
    # `res = get_tablename(source_database)` loop at the end of this function.
    # source_table = ""
    options, arguments = getopt.getopt(args, 'd:t:', ['sourcedb=', 'sourcetbl='])
    for opt_name, opt_value in options:
        if opt_name in ('-d', '--sourcedb'):
            source_database = opt_value
        if opt_name in ('-t', '--sourcetbl'):
            source_table = opt_value
    # generate_json(source_database, source_table)
    # Generate the datax json config files for every table in the chosen database in one go
    res = get_tablename(source_database)
    for res1 in res:
        generate_json(source_database, res1[0])


if __name__ == '__main__':
    main(sys.argv[1:])
If you only want to generate a single table, enable the corresponding commented-out lines in main() as described there.
Explanation: the -d option passes the database name and the -t option passes the name of a table in that database.
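For a one-off single table you can also skip the option handling entirely and call generate_json directly; db1 and stu below are placeholder names:

# Hypothetical single-table run: writes /opt/python/job/db1.stu.json
generate_json("db1", "stu")

The generated file can then be submitted to DataX in the usual way, e.g. python $DATAX_HOME/bin/datax.py /opt/python/job/db1.stu.json.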