离线数仓(4) 使用DataX全量同步业务表(脚本实现mysql_to_hdfs)

DataX的基本使用可以参考:离线数仓(2)DataX的使用详解

DataX配置文件批量生成脚本

# coding=utf-8
import json
import getopt
import os
import sys
import MySQLdb

#MySQL相关配置,需根据实际情况作出修改
mysql_host = "server15"
mysql_port = "3306"
mysql_user = "root"
mysql_passwd = "root"

#HDFS NameNode相关配置,需根据实际情况作出修改
hdfs_nn_host = "server16"
hdfs_nn_port = "8020"

#生成配置文件的目标路径,可根据实际情况作出修改
output_path = "/opt/yyds/apps/datax/job/import"

#获取mysql连接
def get_connection():
    return MySQLdb.connect(host=mysql_host, port=int(mysql_port), user=mysql_user, passwd=mysql_passwd)

#获取表格的元数据  包含列名和数据类型
def get_mysql_meta(database, table):
    connection = get_connection()
    cursor = connection.cursor()
    sql = "SELECT COLUMN_NAME,DATA_TYPE from information_schema.COLUMNS WHERE TABLE_SCHEMA=%s AND TABLE_NAME=%s ORDER BY ORDINAL_POSITION"
    cursor.execute(sql, [database, table])
    fetchall = cursor.fetchall()
    cursor.close()
    connection.close()
    return fetchall

#获取mysql表的列名
def get_mysql_columns(database, table):
    return map(lambda x: x[0], get_mysql_meta(database, table))

#将获取的元数据中mysql的数据类型转换为hive的数据类型  写入到hdfswriter中
def get_hive_columns(database, table):
    def type_mapping(mysql_type):
        mappings = {
            "bigint": "bigint",
            "int": "bigint",
            "smallint": "bigint",
            "tinyint": "bigint",
            "decimal": "string",
            "double": "double",
            "float": "float",
            "binary": "string",
            "char": "string",
            "varchar": "string",
            "datetime": "string",
            "time": "string",
            "timestamp": "string",
            "date": "string",
            "text": "string"
        }
        return mappings[mysql_type]

    meta = get_mysql_meta(database, table)
    return map(lambda x: {"name": x[0], "type": type_mapping(x[1].lower())}, meta)

#生成json文件
def generate_json(source_database, source_table):
    job = {
        "job": {
            "setting": {
                "speed": {
                    "channel": 3
                },
                "errorLimit": {
                    "record": 0,
                    "percentage": 0.02
                }
            },
            "content": [{
                "reader": {
                    "name": "mysqlreader",
                    "parameter": {
                        "username": mysql_user,
                        "password": mysql_passwd,
                        "column": get_mysql_columns(source_database, source_table),
                        "splitPk": "",
                        "connection": [{
                            "table": [source_table],
                            "jdbcUrl": ["jdbc:mysql://" + mysql_host + ":" + mysql_port + "/" + source_database]
                        }]
                    }
                },
                "writer": {
                    "name": "hdfswriter",
                    "parameter": {
                        "defaultFS": "hdfs://" + hdfs_nn_host + ":" + hdfs_nn_port,
                        "fileType": "text",
                        "path": "${targetdir}",
                        "fileName": source_table,
                        "column": get_hive_columns(source_database, source_table),
                        "writeMode": "append",
                        "fieldDelimiter": "\t",
                        "compress": "gzip"
                    }
                }
            }]
        }
    }
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    with open(os.path.join(output_path, ".".join([source_database, source_table, "json"])), "w") as f:
        json.dump(job, f)


def main(args):
    source_database = ""
    source_table = ""

    options, arguments = getopt.getopt(args, '-d:-t:', ['sourcedb=', 'sourcetbl='])
    for opt_name, opt_value in options:
        if opt_name in ('-d', '--sourcedb'):
            source_database = opt_value
        if opt_name in ('-t', '--sourcetbl'):
            source_table = opt_value

    generate_json(source_database, source_table)


if __name__ == '__main__':
    main(sys.argv[1:])

使用该脚本

-- 使用脚本文件,生成datax的配置文件
[root@server16 run_sh]# python gen_import_config.py -d gmall2022 -t activity_info
-- 结果
[root@server16 import]# pwd
/opt/yyds/apps/datax/job/import
[root@server16 import]# ll
total 4
-rw-r--r-- 1 root root 961 Apr 26 10:54 gmall2022.activity_info.json


-- hdfs上创建文件夹
[root@server16 ~]# hdfs dfs -mkdir -p /origin_data/gmall2022/db/activity_info_full/2020-06-14

-- 执行命令
python /opt/yyds/apps/datax/bin/datax.py -p"-Dtargetdir=/origin_data/gmall2022/db/activity_info_full/2020-06-14" /opt/yyds/apps/datax/job/import/gmall2022.activity_info.json

使用脚本批量同步全量数据(gen_import_config.sh)

#!/bin/bash

python gen_import_config.py -d gmall2022 -t activity_info
python gen_import_config.py -d gmall2022 -t activity_rule
python gen_import_config.py -d gmall2022 -t base_category1
python gen_import_config.py -d gmall2022 -t base_category2
python gen_import_config.py -d gmall2022 -t base_category3
python gen_import_config.py -d gmall2022 -t base_dic
python gen_import_config.py -d gmall2022 -t base_province
python gen_import_config.py -d gmall2022 -t base_region
python gen_import_config.py -d gmall2022 -t base_trademark
python gen_import_config.py -d gmall2022 -t cart_info
python gen_import_config.py -d gmall2022 -t coupon_info
python gen_import_config.py -d gmall2022 -t sku_attr_value
python gen_import_config.py -d gmall2022 -t sku_info
python gen_import_config.py -d gmall2022 -t sku_sale_attr_value
python gen_import_config.py -d gmall2022 -t spu_info

创建mysql_to_hdfs_full.sh

#!/bin/bash

DATAX_HOME=/opt/yyds/apps/datax

# 如果传入日期则do_date等于传入的日期,否则等于前一天日期
if [ -n "$2" ] ;then
    do_date=$2
else
    do_date=`date -d "-1 day" +%F`
fi

#处理目标路径,此处的处理逻辑是,如果目标路径不存在,则创建;若存在,则清空,目的是保证同步任务可重复执行
handle_targetdir() {
  hadoop fs -test -e $1
  if [[ $? -eq 1 ]]; then
    echo "路径$1不存在,正在创建......"
    hadoop fs -mkdir -p $1
  else
    echo "路径$1已经存在"
    fs_count=$(hadoop fs -count $1)
    content_size=$(echo $fs_count | awk '{print $3}')
    if [[ $content_size -eq 0 ]]; then
      echo "路径$1为空"
    else
      echo "路径$1不为空,正在清空......"
      hadoop fs -rm -r -f $1/*
    fi
  fi
}

#数据同步
import_data() {
  datax_config=$1
  target_dir=$2

  handle_targetdir $target_dir
  python $DATAX_HOME/bin/datax.py -p"-Dtargetdir=$target_dir" $datax_config
}

case $1 in
"activity_info")
  import_data /opt/yyds/apps/datax/job/import/gmall2022.activity_info.json /origin_data/gmall2022/db/activity_info_full/$do_date
  ;;
"activity_rule")
  import_data /opt/yyds/apps/datax/job/import/gmall2022.activity_rule.json /origin_data/gmall2022/db/activity_rule_full/$do_date
  ;;
"base_category1")
  import_data /opt/yyds/apps/datax/job/import/gmall2022.base_category1.json /origin_data/gmall2022/db/base_category1_full/$do_date
  ;;
"base_category2")
  import_data /opt/yyds/apps/datax/job/import/gmall2022.base_category2.json /origin_data/gmall2022/db/base_category2_full/$do_date
  ;;
"base_category3")
  import_data /opt/yyds/apps/datax/job/import/gmall2022.base_category3.json /origin_data/gmall2022/db/base_category3_full/$do_date
  ;;
"base_dic")
  import_data /opt/yyds/apps/datax/job/import/gmall2022.base_dic.json /origin_data/gmall2022/db/base_dic_full/$do_date
  ;;
"base_province")
  import_data /opt/yyds/apps/datax/job/import/gmall2022.base_province.json /origin_data/gmall2022/db/base_province_full/$do_date
  ;;
"base_region")
  import_data /opt/yyds/apps/datax/job/import/gmall2022.base_region.json /origin_data/gmall2022/db/base_region_full/$do_date
  ;;
"base_trademark")
  import_data /opt/yyds/apps/datax/job/import/gmall2022.base_trademark.json /origin_data/gmall2022/db/base_trademark_full/$do_date
  ;;
"cart_info")
  import_data /opt/module/datax/job/import/gmall2022.cart_info.json /origin_data/gmall2022/db/cart_info_full/$do_date
  ;;
"coupon_info")
  import_data /opt/yyds/apps/datax/job/import/gmall2022.coupon_info.json /origin_data/gmall2022/db/coupon_info_full/$do_date
  ;;
"sku_attr_value")
  import_data /opt/yyds/apps/datax/job/import/gmall2022.sku_attr_value.json /origin_data/gmall2022/db/sku_attr_value_full/$do_date
  ;;
"sku_info")
  import_data /opt/yyds/apps/datax/job/import/gmall2022.sku_info.json /origin_data/gmall2022/db/sku_info_full/$do_date
  ;;
"sku_sale_attr_value")
  import_data /opt/yyds/apps/datax/job/import/gmall2022.sku_sale_attr_value.json /origin_data/gmall2022/db/sku_sale_attr_value_full/$do_date
  ;;
"spu_info")
  import_data /opt/yyds/apps/datax/job/import/gmall2022.spu_info.json /origin_data/gmall2022/db/spu_info_full/$do_date
  ;;
"all")
  import_data /opt/yyds/apps/datax/job/import/gmall2022.activity_info.json /origin_data/gmall2022/db/activity_info_full/$do_date
  import_data /opt/yyds/apps/datax/job/import/gmall2022.activity_rule.json /origin_data/gmall2022/db/activity_rule_full/$do_date
  import_data /opt/yyds/apps/datax/job/import/gmall2022.base_category1.json /origin_data/gmall2022/db/base_category1_full/$do_date
  import_data /opt/yyds/apps/datax/job/import/gmall2022.base_category2.json /origin_data/gmall2022/db/base_category2_full/$do_date
  import_data /opt/yyds/apps/datax/job/import/gmall2022.base_category3.json /origin_data/gmall2022/db/base_category3_full/$do_date
  import_data /opt/yyds/apps/datax/job/import/gmall2022.base_dic.json /origin_data/gmall2022/db/base_dic_full/$do_date
  import_data /opt/yyds/apps/datax/job/import/gmall2022.base_province.json /origin_data/gmall2022/db/base_province_full/$do_date
  import_data /opt/yyds/apps/datax/job/import/gmall2022.base_region.json /origin_data/gmall2022/db/base_region_full/$do_date
  import_data /opt/yyds/apps/datax/job/import/gmall2022.base_trademark.json /origin_data/gmall2022/db/base_trademark_full/$do_date
  import_data /opt/yyds/apps/datax/job/import/gmall2022.cart_info.json /origin_data/gmall2022/db/cart_info_full/$do_date
  import_data /opt/yyds/apps/datax/job/import/gmall2022.coupon_info.json /origin_data/gmall2022/db/coupon_info_full/$do_date
  import_data /opt/yyds/apps/datax/job/import/gmall2022.sku_attr_value.json /origin_data/gmall2022/db/sku_attr_value_full/$do_date
  import_data /opt/yyds/apps/datax/job/import/gmall2022.sku_info.json /origin_data/gmall2022/db/sku_info_full/$do_date
  import_data /opt/yyds/apps/datax/job/import/gmall2022.sku_sale_attr_value.json /origin_data/gmall2022/db/sku_sale_attr_value_full/$do_date
  import_data /opt/yyds/apps/datax/job/import/gmall2022.spu_info.json /origin_data/gmall2022/db/spu_info_full/$do_date
  ;;
esac
  • 0
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 3
    评论
好的,我可以为您提供一些关于使用DataX进行MySQL到Hive数据迁移的指导。 首先,您需要安装DataX并配置好环境。然后,您需要编写一个DataX任务配置文件,以指定源和目标数据源的详细信息。 以下是一个简单的MySQL到Hive的DataX任务配置文件示例: ``` { "job": { "content": [ { "reader": { "name": "mysqlreader", "parameter": { "username": "your_mysql_username", "password": "your_mysql_password", "column": [ "column1", "column2", "column3" ], "connection": [ { "jdbcUrl": "jdbc:mysql://your_mysql_server:3306/your_mysql_database", "table": [ "your_mysql_table" ] } ] } }, "writer": { "name": "hdfswriter", "parameter": { "defaultFS": "hdfs://your_hdfs_server:9000", "fileType": "text", "path": "/your_hdfs_path", "fileName": "your_hdfs_file_name", "fieldDelimiter": "\t", "writeMode": "append", "column": [ "column1", "column2", "column3" ] } } } ], "setting": { "speed": { "channel": "3" } } } } ``` 在上面的配置文件中,您需要将以下信息进行替换: - `your_mysql_username`:MySQL数据库的用户名。 - `your_mysql_password`:MySQL数据库的密码。 - `your_mysql_server`:MySQL数据库的服务器地址。 - `your_mysql_database`:MySQL数据库的名称。 - `your_mysql_table`:要迁移的MySQL的名称。 - `your_hdfs_server`:HDFS服务器的地址。 - `your_hdfs_path`:HDFS中要写入数据的路径。 - `your_hdfs_file_name`:在HDFS中要写入的文件名。 - `column1`,`column2`和`column3`:要迁移的列名称。 完成配置文件后,您可以使用以下命令来执行DataX任务: ``` python datax.py your_job_config.json ``` 这将启动DataX并开始将MySQL中的数据传输到Hive中。 希望这可以帮助您进行MySQL到Hive的数据迁移。如果您有任何其他问题,请随时问我。
评论 3
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值