#coding=utf-8
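"""Generate DataX job configuration files (mysqlreader -> hdfswriter, ORC)
for full-table imports from MySQL into Hive-managed HDFS paths."""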
import os
import sys
import getopt
import json
import pymysql
pymysql.install_as_MySQLdb()
# MySQL connection settings; adjust to your environment
mysql_host = "192.168.1.xx"
mysql_port = "3306"
mysql_user = "xx"
mysql_passwd = "xxxx"
# HDFS NameNode settings; adjust to your environment
hdfs_nn_host = "node2"
hdfs_nn_port = "9000"
# Target directory for the generated config files; adjust as needed
output_path = "D:\\idea_workspace\\bigdata\\测试环境\\hive\\datax\\"

def get_connection():
    return pymysql.connect(host=mysql_host, port=int(mysql_port), user=mysql_user, passwd=mysql_passwd)


def get_mysql_meta(database, table):
    # Fetch (column_name, data_type) pairs for the table, in column order.
    connection = get_connection()
    cursor = connection.cursor()
    sql = "SELECT COLUMN_NAME,DATA_TYPE from information_schema.COLUMNS WHERE TABLE_SCHEMA=%s AND TABLE_NAME=%s ORDER BY ORDINAL_POSITION"
    cursor.execute(sql, [database, table])
    fetchall = cursor.fetchall()
    cursor.close()
    connection.close()
    return fetchall


def get_mysql_columns(database, table):
    # Column names only, for the mysqlreader "column" list.
    return list(map(lambda x: x[0], get_mysql_meta(database, table)))

def get_hive_columns(database, table):
    def type_mapping(mysql_type):
        # MySQL -> Hive type mapping; an unmapped type raises KeyError.
        mappings = {
            "bigint": "bigint",
            "int": "int",
            "smallint": "smallint",
            "tinyint": "tinyint",
            "decimal": "double",
            "double": "double",
            "float": "float",
            "binary": "string",
            "char": "string",
            "varchar": "string",
            "datetime": "timestamp",
            "time": "string",
            "timestamp": "timestamp",
            "date": "date",
            "text": "string"
        }
        return mappings[mysql_type]

    meta = get_mysql_meta(database, table)
    return list(map(lambda x: {"name": x[0], "type": type_mapping(x[1].lower())}, meta))

def generate_json(source_database, source_table, hive_database_prefix):
    job = {
        "job": {
            "setting": {
                "speed": {
                    "channel": 3
                },
                "errorLimit": {
                    "record": 0,
                    "percentage": 0.02
                }
            },
            "content": [{
                "reader": {
                    "name": "mysqlreader",
                    "parameter": {
                        "username": mysql_user,
                        "password": mysql_passwd,
                        "column": get_mysql_columns(source_database, source_table),
                        "splitPk": "",
                        "connection": [{
                            "table": [source_table],
                            "jdbcUrl": ["jdbc:mysql://" + mysql_host + ":" + mysql_port + "/" + source_database + "?useSSL=false"]
                        }]
                    }
                },
                "writer": {
                    "name": "hdfswriter",
                    "parameter": {
                        "defaultFS": "hdfs://" + hdfs_nn_host + ":" + hdfs_nn_port,
                        "fileType": "orc",
                        # NOTE: the dt partition is hardcoded; parameterize it if you need daily imports.
                        "path": "/user/hive/warehouse/" + hive_database_prefix + "cwhouse" + "/" + hive_database_prefix + source_database + "_" + source_table + "/dt=2024-04-03",
                        "fileName": source_table,
                        "column": get_hive_columns(source_database, source_table),
                        "writeMode": "append",
                        "fieldDelimiter": "\u0001",
                        "compress": "SNAPPY"
                    }
                }
            }]
        }
    }
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    with open(os.path.join(output_path, source_database + "_" + source_table + ".json"), "w") as f:
        json.dump(job, f)

def main(args):
    # Parse -d <source db> -t <source table> [-p <hive prefix>] (-d/-t required);
    # the flag names are an assumption, as the original left parsing commented out.
    hive_database_prefix = "ods_"
    opts, _ = getopt.getopt(args, "d:t:p:")
    for opt, val in opts:
        if opt == "-d": source_database = val
        elif opt == "-t": source_table = val
        elif opt == "-p": hive_database_prefix = val
    generate_json(source_database, source_table, hive_database_prefix)


if __name__ == '__main__':
    main(sys.argv[1:])
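# Example invocation (the script name and argument values are placeholders):
#   python gen_import_config.py -d source_db -t user_info -p ods_
# This writes <output_path>/source_db_user_info.json, which DataX can then run:
#   python $DATAX_HOME/bin/datax.py source_db_user_info.json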