Python 实现每天定时任务:把多个 SQL Server 数据库中多个表的前一天数据,存储到自己数据库的同名表中

main.py文件

import schedule
import time
import pyodbc
import pandas as pd
from datetime import datetime, timedelta
import warnings
import logging  # 导入日志库
import json
import os


# Load the database configuration
def load_db_config():
    """Read db_config.json located next to this script and return it as a dict."""
    # Resolve the config path relative to this file, not the working directory,
    # so the script works no matter where it is launched from.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    config_path = os.path.join(script_dir, 'db_config.json')

    # utf-8-sig tolerates the BOM that Windows editors often prepend.
    with open(config_path, 'r', encoding='utf-8-sig') as fh:
        return json.load(fh)


# Open a database connection
def connect_to_db(config):
    """Open a SQL Server connection described by a config dict.

    `config` must provide 'server', 'database', 'username' and 'password'.
    """
    parts = [
        "DRIVER={ODBC Driver 17 for SQL Server}",
        f"SERVER={config['server']}",
        f"DATABASE={config['database']}",
        f"UID={config['username']}",
        f"PWD={config['password']}",
    ]
    return pyodbc.connect(";".join(parts))


# Tag rows with their source branch
def process_data(df, branch_id, fendian_field):
    """Stamp every row of `df` with `branch_id` in the `fendian_field` column.

    Mutates `df` in place and returns it so callers can chain the result.
    """
    df[fendian_field] = branch_id
    return df


# Fetch yesterday's not-yet-uploaded rows from a source table
def get_yesterday_data(connection, table, date_field):
    """Return a DataFrame of rows in `table` stamped yesterday that still need upload.

    A row qualifies when its `date_field` falls anywhere within yesterday and
    its `upload` flag is not 5 (5 marks rows already copied; NULL or any other
    value means pending).
    """
    # Half-open range [yesterday 00:00:00, today 00:00:00). Unlike the old
    # `BETWEEN start AND 23:59:59`, this also captures timestamps with
    # fractional seconds inside the final second of the day.
    yesterday_start = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) - timedelta(days=1)
    day_end = yesterday_start + timedelta(days=1)

    # upload = 5 means the row was already copied; skip it to avoid duplicates.
    query = (
        f"SELECT * FROM {table} "
        f"WHERE {date_field} >= ? AND {date_field} < ? "
        f"AND (upload != 5 OR upload IS NULL)"
    )

    # pandas warns when handed a raw DBAPI connection instead of a SQLAlchemy
    # engine; suppress that specific warning only.
    warnings.filterwarnings('ignore', category=UserWarning,
                            message="pandas only supports SQLAlchemy connectable")

    return pd.read_sql(query, connection, params=[yesterday_start, day_end])


# List the column names of a table in the target database
def get_target_columns(connection, table):
    """Return the column names of `table` as reported by INFORMATION_SCHEMA.

    The table name is passed as a bound parameter rather than interpolated
    into the SQL text, so names containing quotes cannot break (or inject
    into) the query.
    """
    cursor = connection.cursor()
    try:
        cursor.execute(
            "SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = ?",
            (table,),
        )
        # Index 0 is the single selected column; positional access works
        # across DBAPI drivers (pyodbc Row supports it too).
        return [row[0] for row in cursor.fetchall()]
    finally:
        # Always release the cursor, even if the query raises.
        cursor.close()


# Convert pandas cell values into plain Python objects for pyodbc
def convert_data_types(row):
    """Return `row` as a tuple with NaN/NaT mapped to None and pandas
    Timestamps converted to datetime, so a DBAPI driver can bind them."""
    def coerce(value):
        if pd.isnull(value):
            return None
        if isinstance(value, pd.Timestamp):
            return value.to_pydatetime()
        return value

    return tuple(coerce(v) for v in row)


# Batch-insert rows into the target database, then flag them as uploaded in both DBs
def batch_insert_data_to_target(source_connection, target_connection, table, unique_field, df, db_config,
                                batch_size=1000):
    """Copy `df` into the same-named `table` on the target connection in batches.

    After each successfully inserted batch, the `upload` column of those rows
    is set to 5 in BOTH the source and the target table, so they are never
    copied again (the extraction query filters on upload != 5).

    Args:
        source_connection: open DBAPI connection to the source database.
        target_connection: open DBAPI connection to the target database.
        table: table name (identical on both sides).
        unique_field: column uniquely identifying a row; drives the
            upload-flag UPDATE statements.
        df: DataFrame of rows to copy (already stamped with the branch id).
        db_config: source-database config dict; only 'database_name' is used
            here, for log messages.
        batch_size: number of rows per INSERT/UPDATE round trip.
    """
    print(f"{time.ctime()} —— {db_config['database_name']}:表-{table}: 数据批量插入中...")
    source_cursor = source_connection.cursor()
    target_cursor = target_connection.cursor()

    target_columns = get_target_columns(target_connection, table)
    df_columns = df.columns.tolist()

    # Only insert columns that actually exist in the target table; extra
    # source-side columns are silently dropped.
    common_columns = [col for col in df_columns if col in target_columns]

    # Build the INSERT with one '?' placeholder per retained column.
    columns = ', '.join(common_columns)
    placeholders = ', '.join(['?' for _ in common_columns])

    insert_query = f"INSERT INTO {table} ({columns}) VALUES ({placeholders})"

    success_count = 0
    failure_count = 0
    error = ''  # last error message; reported only when a batch failed

    # Process the DataFrame in slices of batch_size rows.
    for start in range(0, len(df), batch_size):
        batch_df = df.iloc[start:start + batch_size]
        data_to_insert = [convert_data_types(row[common_columns]) for _, row in batch_df.iterrows()]

        try:
            # Insert the current batch into the target table.
            target_cursor.executemany(insert_query, data_to_insert)
            target_connection.commit()

            # Unique keys of the rows just inserted.
            unique_values = batch_df[unique_field].tolist()

            # Mark the rows as uploaded (upload = 5) in the SOURCE table so
            # the next run's extraction query skips them.
            update_source_query = f"UPDATE {table} SET upload = 5 WHERE {unique_field} IN ({', '.join(['?' for _ in unique_values])})"
            source_cursor.execute(update_source_query, unique_values)
            source_connection.commit()

            # Mark the freshly inserted copies in the TARGET table the same way.
            update_target_query = f"UPDATE {table} SET upload = 5 WHERE {unique_field} IN ({', '.join(['?' for _ in unique_values])})"
            target_cursor.execute(update_target_query, unique_values)
            target_connection.commit()

            success_count += len(data_to_insert)
        except Exception as e:
            # NOTE(review): rollback only undoes the uncommitted work of the
            # failing batch; earlier batches are already committed.
            failure_count += len(data_to_insert)
            error = str(e)
            target_connection.rollback()
            source_connection.rollback()

    source_cursor.close()
    target_cursor.close()

    message = f"{time.ctime()} —— {db_config['database_name']}:表-{table}: 插入成功 {success_count} 条, 插入失败 {failure_count} 条"

    if failure_count > 0:
        message += f", 失败原因: {error}"

    print(message)


# Main job: copy yesterday's rows from every source DB into the target DB
def main_task():
    """Pull yesterday's pending rows from each configured source database and
    mirror them into the same-named tables of the target database.

    Connection failures are logged; per-table failures are printed and do not
    stop the remaining tables or databases from being processed.
    """
    cfg = load_db_config()
    source_databases = cfg['source_databases']  # list of source DB configs
    target_database = cfg['target_database']    # single target DB config
    tables = cfg['tables']                      # table name -> field mapping

    target_conn = None
    try:
        # Connect to the target database first: without it nothing can be copied.
        target_conn = connect_to_db(target_database)
        print(f"{time.ctime()} —— {target_database['database_name']}-数据库连接成功!")

        for db_config in source_databases:
            source_conn = None
            try:
                source_conn = connect_to_db(db_config)
                print(f"{time.ctime()} —— {db_config['database_name']}-数据库连接成功!")

                for table, fields in tables.items():
                    unique_field = fields['unique_field']
                    date_field = fields['date_field']
                    fendian_field = fields['fendian_field']
                    try:
                        df = get_yesterday_data(source_conn, table, date_field)
                        if df.empty:
                            print(f"{time.ctime()} —— {db_config['database_name']}:表-{table}: 暂无待处理的昨日数据!")
                        else:
                            # Stamp rows with the source branch id, then copy them over.
                            processed_df = process_data(df, db_config['branch_id'], fendian_field)
                            batch_insert_data_to_target(source_conn, target_conn, table, unique_field, processed_df, db_config)
                    except Exception as e:
                        print(f"{time.ctime()} —— {db_config['database_name']}:表-{table}:处理数据失败!error: {e}")
            except Exception as e:
                logging.error(f"{time.ctime()} —— {db_config['database_name']}-数据库连接失败! error: {e}")
            finally:
                try:
                    if source_conn is not None:
                        source_conn.close()
                        print(f"{time.ctime()} —— 关闭数据库连接 {db_config['database_name']}")
                except Exception as e:
                    logging.error(f"{time.ctime()} —— 关闭 {db_config['database_name']}-数据库连接时出错: {e}")

    except Exception as e:
        logging.error(f"{time.ctime()} —— {target_database['database_name']}-数据库连接失败! error: {e}")
    finally:
        try:
            if target_conn is not None:
                target_conn.close()
                print(f"{time.ctime()} —— 关闭数据库连接 {target_database['database_name']}")
        except Exception as e:
            logging.error(f"{time.ctime()} —— 关闭 {target_database['database_name']}-数据库连接时出错: {e}")


# --- Scheduler entry point ------------------------------------------------
# Guarded by __main__ so importing this module (e.g. for testing) does not
# start the infinite polling loop as a side effect.
if __name__ == '__main__':
    # Read the daily run time from the config; default to 03:00 when absent.
    _config = load_db_config()
    schedule_time = _config.get('schedule_time', '03:00')
    schedule.every().day.at(schedule_time).do(main_task)

    print(f"程序已运行,将在 {schedule_time} 点执行数据抓取!")

    # Poll the scheduler; a 40 s sleep is well under the 1-minute resolution
    # of schedule's HH:MM trigger, so a due run is never skipped.
    while True:
        schedule.run_pending()
        time.sleep(40)

db_config.json配置文件

{
  "schedule_time": "03:00",
  "source_databases": [
    {
      "database_name": "名称(自定义)",
      "server": "主机名",
      "database": "数据库名称",
      "username": "用户名",
      "password": "密码",
      "branch_id": "数据库标识(自定义)"
    }
  ],
  "target_database": {
    "database_name": "名称(自定义)",
    "server": "主机名",
    "database": "数据库名称",
    "username": "用户名",
    "password": "密码"
  },
  "tables": {
    "表名": {
      "unique_field": "唯一标识",
      "date_field": "日期字段",
      "fendian_field": "数据库标识(从哪个数据库插入来的数据)"
    }
  }
}

说明:schedule_time 为每日执行时间;source_databases 为源数据库列表;target_database 为目标数据库;tables 为要同步的表。注意:JSON 不支持 # 注释,配置文件中不能包含注释,否则 json.load 解析会报错。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

A_ugust__

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值