软件2_算法功能23

目录

功能2

import xgboost as xgb
import lightgbm as lgbm
import pandas as pd
import numpy as np
import pymysql
import xgboost as xgb
import lightgbm as lgbm
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
from datetime import date, timedelta
import gc
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
from datetime import datetime
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import pymysql
from sklearn.decomposition import PCA
from flask import Flask, request, session
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from flask_cors import CORS
import psycopg2
from dataclasses import dataclass
import pymysql
import json
from flask import Flask, request, url_for, redirect, render_template, jsonify
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False  # 解决中文乱码问题
import warnings
warnings.filterwarnings("ignore")
import joblib
import datetime
# import mysql.connector
import time
import random
import json
import collections
import random
import matplotlib.pyplot as plt
import os
import copy
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
import pandas as pd
import xgboost as xgb
import psycopg2
import json
import psycopg2
from psycopg2 import Binary
import joblib
import pickle
import lightgbm as lgb
from joblib import dump, load
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sqlalchemy import create_engine
import json
from gevent import pywsgi
import knn
import creat_table
import random_forest
app = Flask(__name__)
CORS(app)
#################################################################################### 1 获取数据
# host = "localhost"
# port = "5432"
# database = "copd"
# user = "postgres"
# password = 111111
#
# PostgreSQL connection settings used by connect_pg().
# NOTE(review): credentials are hard-coded in source and the password is an
# int literal — move these to environment variables / a config file and use
# a string password.
host = "10.16.48.219"
port = "5432"
database = "software1"
user = "pg"
password = 111111
def connect_pg():
    """Open a new connection to the configured PostgreSQL database.

    Uses the module-level host/port/database/user/password settings.
    The caller owns the connection and is responsible for closing it.
    """
    return psycopg2.connect(
        database=database,
        user=user,
        password=password,
        host=host,
        port=port,
    )
def connect_mysql():
    """Open a pymysql connection to the 'medical' MySQL database.

    Rows are returned as dicts (DictCursor). The caller owns the
    connection and is responsible for closing it.
    """
    conn = pymysql.connect(
        host='10.16.48.219',
        user='root',
        password='111111',
        database='medical',
        cursorclass=pymysql.cursors.DictCursor,
    )
    return conn
## connection, table_name
def get_data(connection, table_name):
    """Read the entire contents of *table_name* into a DataFrame.

    The table name is interpolated as a quoted identifier; embedded double
    quotes are escaped so a crafted name cannot break out of the quoting
    (basic SQL-injection hardening — identifiers cannot be parameterized).

    NOTE: the connection is closed before returning; callers must not
    reuse it.
    """
    safe_name = table_name.replace('"', '""')
    query = f'select * from "{safe_name}"'
    data = pd.read_sql(query, connection)
    connection.close()
    return data
#################################################################################### 2 数据处理:处理 字段、特征
def per_data(data, disease_code, split_at=150):
    """Select feature columns, fill missing values and split train/test.

    Parameters:
        data: source DataFrame.
        disease_code: kept for interface compatibility; not used here.
        split_at: row index at which to cut train from test (default 150,
            matching the original hard-coded split).

    Returns:
        (train, test): first *split_at* rows / remaining rows of the
        filtered, NaN-filled feature frame.
    """
    # Columns excluded from modelling. (The original listed 'TH2' twice.)
    excluded = {'Case_ID', 'BUN', 'M1_M2', 'TH2', 'IBILI', 'GLO'}
    params = [col for col in data.columns if col not in excluded]
    features = data[params].fillna(0)
    train = features.iloc[:split_at]
    test = features.iloc[split_at:]
    return train, test
#################################################################################### 3 模型训练
# xgb
def train_xgb(train, test, model_name):
    """Train an XGBoost binary classifier and store the pickled booster.

    train/test: DataFrames with a 'label' column plus numeric features.
    model_name: name under which the model row is stored in train_model3
    (type code 1 == xgboost). Returns 0 on completion.
    """
    params = {
        "booster": 'gbtree',
        'objective': 'binary:logistic',
        'eval_metric': 'auc',
        'silent': 0,  # legacy verbosity flag (0 = not silent)
        'eta': 0.01,  # learning rate (typical 0.01–0.2)
        'max_depth': 5,  # tree depth (typical 3–10, default 6)
        'min_child_weight': 1,
        'gamma': 0,
        'lambda': 1,
        'colsample_bylevel': 0.7,  # per-level column subsampling (similar effect to subsample)
        'colsample_bytree': 0.7,  # per-tree column subsampling (0.5–1)
        'subsample': 0.9,  # row subsampling (0.5–1)
        'scale_pos_weight': 1,  # positive-class weight (helps convergence on imbalance)
    }
    dtrain = xgb.DMatrix(train.drop(['label'], axis=1), label=train['label'])
    dtest = xgb.DMatrix(test.drop(['label'], axis=1))
    watchlist = [(dtrain, 'train')]
    # 200 boosting rounds, evaluated on the training set only.
    model = xgb.train(params, dtrain, 200, watchlist)
    predict = model.predict(dtest)
    # Test-set probabilities; computed but neither returned nor persisted.
    predict = pd.DataFrame(predict, columns=['target'])
    ################################ store the model in PostgreSQL
    connection = connect_pg()
    cursor = connection.cursor()
    # current_time = datetime.datetime.now()
    # time = current_time
    type = 1  # NOTE: shadows the builtin; type code 1 == xgboost
    model_name = model_name
    model_description = 'xgboost_model'
    trainer = 'Gpb'
    model_str = pickle.dumps(model)
    # Parameterized SQL INSERT
    query = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    cursor.execute(query, (type, model_name, model_description, trainer, model_str))
    connection.commit()
    # Close the database connection
    cursor.close()
    connection.close()
    return 0
# gbdt
def train_gbdt(train, test, model_name):
    """Train a gradient-boosting classifier and store the pickled model.

    train/test: DataFrames with a 'label' column plus numeric features.
    The fitted model is inserted into train_model3 (type code 2 == gbdt).
    Returns 0 on completion.
    """
    print("%%%%训练_model_gbdt%%%%")
    gbdt_model = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1, max_depth=3, random_state=42)
    X_train = train.drop(['label'], axis=1)
    x_test = test.drop(['label'], axis=1)
    y_train = train['label']
    gbdt_model.fit(X_train, y_train)
    # Test-set predictions; computed but neither returned nor persisted.
    y_pred = gbdt_model.predict(x_test)
    y_pred = pd.DataFrame(y_pred, columns=['prob'])
    ################################ store the model in PostgreSQL
    connection = connect_pg()
    cursor = connection.cursor()
    # current_time = datetime.datetime.now()
    # time = current_time
    type = 2  # NOTE: shadows the builtin; type code 2 == gbdt
    model_name = model_name
    model_description = 'gbdt_model'
    trainer = 'Gpb'
    model_str = pickle.dumps(gbdt_model)
    # Parameterized SQL INSERT
    query = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    cursor.execute(query, (type, model_name, model_description, trainer, model_str))
    connection.commit()
    # Close the database connection
    cursor.close()
    connection.close()
    return 0
# lr
def train_lr(train, test, model_name):
    """Train a logistic-regression classifier and store the pickled model.

    train/test: DataFrames with a 'label' column plus numeric features.
    The fitted model is inserted into train_model3 (type code 3 == lr).
    Returns 0 on completion.
    """
    print("%%%%训练_model_lr%%%%")
    lr_model = LogisticRegression(random_state=42)
    X_train = train.drop(['label'], axis=1)
    x_test = test.drop(['label'], axis=1)
    y_train = train['label']
    lr_model.fit(X_train, y_train)
    # Test-set predictions; computed but neither returned nor persisted.
    y_pred = lr_model.predict(x_test)
    y_pred = pd.DataFrame(y_pred, columns=['prob'])
    ################################ store the model in PostgreSQL
    connection = connect_pg()
    cursor = connection.cursor()
    # current_time = datetime.datetime.now()
    # time = current_time
    type = 3  # NOTE: shadows the builtin; type code 3 == logistic regression
    model_name = model_name
    model_description = 'lr_model'
    trainer = 'Gpb'
    model_str = pickle.dumps(lr_model)
    # Parameterized SQL INSERT
    query = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    cursor.execute(query, (type, model_name, model_description, trainer, model_str))
    connection.commit()
    # Close the database connection
    cursor.close()
    connection.close()
    return 0
# svm
def train_svm(train, test, model_name):
    """Train a linear SVM classifier and store the pickled model.

    train/test: DataFrames with a 'label' column plus numeric features.
    The fitted model is inserted into train_model3 (type code 4 == svm).
    NOTE(review): LinearSVC has no predict_proba; downstream code that
    thresholds predictions at 0.5 still works because predict() yields
    0/1 labels. Returns 0 on completion.
    """
    print("%%%%训练_model_svm%%%%")
    svm_model = LinearSVC(C=1.0, random_state=42)
    X_train = train.drop(['label'], axis=1)
    x_test = test.drop(['label'], axis=1)
    y_train = train['label']
    svm_model.fit(X_train, y_train)
    # Test-set predictions; computed but neither returned nor persisted.
    y_pred = svm_model.predict(x_test)
    y_pred = pd.DataFrame(y_pred, columns=['prob'])
    ################################ store the model in PostgreSQL
    connection = connect_pg()
    cursor = connection.cursor()
    # current_time = datetime.datetime.now()
    # time = current_time
    type = 4  # NOTE: shadows the builtin; type code 4 == svm
    model_name = model_name
    model_description = 'svm_model'
    trainer = 'Gpb'
    model_str = pickle.dumps(svm_model)
    # Parameterized SQL INSERT
    query = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    cursor.execute(query, (type, model_name, model_description, trainer, model_str))
    connection.commit()
    # Close the database connection
    cursor.close()
    connection.close()
    return 0
# rf
def train_rf(train, test, model_name):
    """Train a random-forest classifier and store the pickled model.

    train/test: DataFrames with a 'label' column plus numeric features.
    The fitted model is inserted into train_model3 (type code 5 == rf).
    Returns 0 on completion.
    """
    print("%%%%训练_model_rf%%%%")
    rf_model = RandomForestClassifier(n_estimators=100, random_state=42)
    X_train = train.drop(['label'], axis=1)
    x_test = test.drop(['label'], axis=1)
    y_train = train['label']
    rf_model.fit(X_train, y_train)
    # Test-set predictions; computed but neither returned nor persisted.
    y_pred = rf_model.predict(x_test)
    y_pred = pd.DataFrame(y_pred, columns=['prob'])
    ################################ store the model in PostgreSQL
    connection = connect_pg()
    cursor = connection.cursor()
    # current_time = datetime.datetime.now()
    # time = current_time
    type = 5  # NOTE: shadows the builtin; type code 5 == random forest
    model_name = model_name
    model_description = 'rf_model'
    trainer = 'Gpb'
    model_str = pickle.dumps(rf_model)
    # Parameterized SQL INSERT
    query = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    cursor.execute(query, (type, model_name, model_description, trainer, model_str))
    connection.commit()
    # Close the database connection
    cursor.close()
    connection.close()
    return 0
# cart
def train_cart(train, test, model_name):
    """Train a CART decision tree and store the pickled model.

    train/test: DataFrames with a 'label' column plus numeric features.
    The fitted model is inserted into train_model3 (type code 6 == cart).

    Returns 0 on completion — the original returned 1 here, inconsistent
    with every sibling train_* function, which all return 0.
    """
    print("%%%%训练_model_cart%%%%")
    cart_model = DecisionTreeClassifier(criterion='gini', max_depth=5, random_state=42)
    X_train = train.drop(['label'], axis=1)
    x_test = test.drop(['label'], axis=1)
    y_train = train['label']
    cart_model.fit(X_train, y_train)
    # Test-set predictions; computed but (as in the sibling trainers)
    # neither returned nor persisted.
    y_pred = cart_model.predict(x_test)
    y_pred = pd.DataFrame(y_pred, columns=['prob'])
    ################################ store the model in PostgreSQL
    connection = connect_pg()
    cursor = connection.cursor()
    model_type = 6  # type code 6 == cart (renamed local; `type` shadowed the builtin)
    model_description = 'cart_model'
    trainer = 'Gpb'
    model_str = pickle.dumps(cart_model)
    # Parameterized SQL INSERT
    query = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    cursor.execute(query, (model_type, model_name, model_description, trainer, model_str))
    connection.commit()
    cursor.close()
    connection.close()
    return 0
#################################################################################### 接口2:模型训练
# 前端给算法的请求  四个参数(model_name, algorithm_code, table_name, disease_code):
# {
#             "model_name": "慢性非阻塞_cart",
#             "algorithm_code": 6,
#             "table_name": "Diabetes",
#             "disease_code": "慢性阻塞性肺病"
# }
# 算法给前端响应
# {
#     "success": true,
#     "message": "请求成功",
#     "data": [
#         {
#             "id": 1 //训练好的模型,存入数据库,返回数据库中的id
#         }
#     ]
# }
def per_data_pulic():
    """Load the fixed training table and build train/test frames.

    Reads table "merge_copy2" from the public PostgreSQL database, keeps
    the numeric columns plus 'diagname', derives a binary 'label'
    (1 == COPD diagnosis) and splits the frame.

    Returns:
        (train, test): first 200 rows / last 200 rows.
        NOTE(review): the two slices overlap whenever the table has fewer
        than 400 rows — confirm whether that is intended.
    """
    table_name = "merge_copy2"
    disease_code = "慢性阻塞性肺病"
    connection_pulic = psycopg2.connect(database="medical",
                                        user="pg",
                                        password=111111,
                                        host="10.16.48.219",
                                        port="5432")
    data = get_data(connection_pulic, table_name)
    data.fillna(0, inplace=True)

    def try_convert_to_float(value):
        # Dates pass through unchanged; numeric-looking strings become floats.
        if isinstance(value, datetime.date):
            return value
        try:
            return float(value)
        except ValueError:
            return value

    data = data.applymap(try_convert_to_float)
    # Numeric columns plus 'diagname' for labelling. (The original computed
    # this list twice; the first result was immediately overwritten.)
    numeric_cols = data.select_dtypes(include=['int', 'float']).assign(diagname="diagname").columns.tolist()
    numeric_cols_data = data.loc[:, numeric_cols]
    numeric_cols_data['label'] = numeric_cols_data['diagname'].apply(
        lambda x: 1 if x == disease_code else 0).astype(int)
    numeric_cols_data.drop(columns=['diagname'], inplace=True)
    train = numeric_cols_data.iloc[:200]
    test = numeric_cols_data.iloc[-200:]
    return train, test
def train_model_cs(model_name, algorithm_code, table_name, disease_code):
    """Train the model selected by *algorithm_code* (script-side helper).

    algorithm_code: 1=xgb, 2=gbdt, 3=lr, 4=svm, 5=rf, 6=cart.
    table_name / disease_code: kept for interface compatibility; the
    training data actually comes from per_data_pulic(), which reads its
    own fixed table.

    Returns the trainer's result, or 0 for an unknown algorithm_code.
    """
    connection = connect_pg()
    # Fetched for parity with the original flow; the result is unused and
    # get_data() closes the connection.
    data = get_data(connection, table_name)
    train, test = per_data_pulic()
    # algorithm_code -> trainer (each trainer persists its own model).
    trainers = {
        1: train_xgb,
        2: train_gbdt,
        3: train_lr,
        4: train_svm,
        5: train_rf,
        6: train_cart,
    }
    trainer = trainers.get(algorithm_code)
    if trainer is None:
        return 0
    predict = trainer(train, test, model_name)
    print(predict)
    return predict
# 训练 (model_name, algorithm_code, table_name, disease_code):
# model_name = "慢性非阻塞_6"
# algorithm_code =  6
# table_name = "Diabetes"
# disease_code = "慢性阻塞性肺病"
# predict = train_model_cs(model_name, algorithm_code, table_name, disease_code)

#   http://127.0.0.1:5000/interface2
# 模型训练
@app.route("/interface2", methods=["POST"])
def train_model():
    """Model-training endpoint (interface 2).

    Expects a JSON body with model_name, algorithm_code (1-6), table_name
    and disease_code. Trains and persists the selected model; returns "1"
    on success, "0" for an unknown algorithm_code.

    Fixes vs. original:
    * the body was parsed with eval(request.data), which executes
      arbitrary client-supplied code (remote code execution); it is now
      parsed with json.loads.
    * the unknown-algorithm branch returned the int 0, which is not a
      valid Flask response type.
    """
    payload = json.loads(request.data)
    model_name = payload['model_name']
    algorithm_code = payload['algorithm_code']
    table_name = payload['table_name']
    disease_code = payload['disease_code']
    print(model_name, algorithm_code, table_name, disease_code)
    connection = connect_pg()
    # Fetched for parity with the original flow; per_data_pulic() reads
    # its own data, so this result is unused.
    data = get_data(connection, table_name)
    train, test = per_data_pulic()
    # algorithm_code -> trainer (each trainer persists its own model).
    trainers = {
        1: train_xgb,
        2: train_gbdt,
        3: train_lr,
        4: train_svm,
        5: train_rf,
        6: train_cart,
    }
    trainer = trainers.get(algorithm_code)
    if trainer is None:
        return "0"
    predict = trainer(train, test, model_name)
    print(predict)
    return "1"

#################################################################################### 接口5:预测id选择
# 前端给算法的请求   点击即可
# {
#             "id": "121",
#             "id": "122",
#             "id": "123",
#             "id": "124"
# }
# 算法给前端响应
# {
#     "success": true,
#     "message": "请求成功",
#     "data": [
#         {
#             "id": "121",
#             "id": "122",
#             "id": "123",
#             "id": "124"   //重新返回id , 这里会有预测结果
#         }
#     ]
# }

# pulic_test 数据读取
# Read the prediction data for the selected patients.
def per_data_pulic_test(test_id):
    """Build the prediction feature frame for the given patient ids.

    test_id: list of patient_id strings to keep.
    Returns (numeric_cols_data, patient_id_copy):
        numeric_cols_data — numeric features + 'label', ready for predict();
        patient_id_copy — the same rows with 'patient_id' retained so the
        caller can map predictions back to patients.
    """
    table_name = "merge_copy2"
    disease_code = "慢性阻塞性肺病"
    connection_pulic = psycopg2.connect(database="medical",
                                        user="pg",
                                        password=111111,
                                        host="10.16.48.219",
                                        port="5432")
    data = get_data(connection_pulic, table_name)
    data.fillna(0, inplace=True)
    # Automatically keep int/float columns so the frame is model-ready.
    def try_convert_to_float(value):
        # Dates pass through unchanged; numeric-looking strings become floats.
        if isinstance(value, datetime.date):
            return value
        try:
            return float(value)
        except ValueError:
            return value
    data = data.applymap(try_convert_to_float)
    numeric_cols = data.select_dtypes(include=['int', 'float']).assign(diagname="diagname").columns.tolist()
    numeric_cols.append('patient_id')               # keep patient_id so rows can be filtered by test_id
    numeric_cols_data = data.loc[:, numeric_cols]
    numeric_cols_data['label'] = 0
    numeric_cols_data['label'] = numeric_cols_data['diagname'].apply(lambda x: 1 if x == disease_code else 0)
    numeric_cols_data['label'] = numeric_cols_data['label'].astype(int)
    # Filter by test_id, then drop the patient_id column afterwards.
    # Normalize float ids to int first so '801080447' matches 801080447.0.
    numeric_cols_data['patient_id'] = numeric_cols_data['patient_id'].apply(lambda x: int(x) if isinstance(x, float) else x)
    numeric_cols_data['patient_id'] = numeric_cols_data['patient_id'].astype(str)
    numeric_cols_data = numeric_cols_data.loc[numeric_cols_data['patient_id'].isin(test_id)]
    patient_id_copy = numeric_cols_data.copy()
    numeric_cols_data.drop(columns=['diagname'], inplace=True)
    numeric_cols_data.drop(columns=['patient_id'], inplace=True)
    return numeric_cols_data , patient_id_copy
def test_model_cs(model_name, test_id):
    """Load a stored model by name and predict for the given patients.

    model_name: key into the train_model3 table.
    test_id: list of patient_id strings.
    Returns a dict {patient_id: 0/1} — predictions thresholded at 0.5.

    NOTE(review): the model blob is deserialized with pickle.loads; this
    is only safe while the train_model3 table is fully trusted.
    """
    test , patient_id_copy = per_data_pulic_test(test_id)
    connection = connect_pg()
    cursor = connection.cursor()
    x_test = test.drop(['label'], axis=1)
    # Fetch the pickled model blob for this model name.
    query = "SELECT training_parameters FROM train_model3 WHERE model_name = %s"  # 假设模型 ID1
    cursor.execute(query, (model_name,))
    model_str = cursor.fetchone()[0]
    # Fetch the algorithm type code (1 == xgboost booster).
    query = "SELECT type FROM train_model3 WHERE model_name = %s"  # 假设模型 ID1
    cursor.execute(query, (model_name,))
    type = cursor.fetchone()[0]
    loaded_model1111111 = pickle.loads(model_str)
    if type == 1:
        # xgboost boosters need a DMatrix; sklearn models take the frame.
        dtest = xgb.DMatrix(x_test)
        predictsloaded_model1111111 = loaded_model1111111.predict(dtest)
    else:
        predictsloaded_model1111111 = loaded_model1111111.predict(x_test)

    print('------------------------------test----patient_id_copy--------------------------')
    patient_id_copy = patient_id_copy.reset_index(drop=True)
    predict_df = pd.DataFrame({'predict': predictsloaded_model1111111})
    # Align predictions with the retained patient ids, then binarize.
    result_df = pd.concat([patient_id_copy, predict_df], axis=1)
    result_df['predict'] = result_df['predict'].apply(lambda x: 1 if x > 0.5 else 0)
    print(result_df)
    result_df  = result_df[['patient_id', 'predict']]
    result_dict = result_df.set_index('patient_id')['predict'].to_dict()
    print(result_dict)
    print('------------------------------test----patient_id_copy--------------------------')
    return result_dict
# 预测 test_model(disease_code, id, test_id):
# model_name = "慢性非阻塞_xgb"
# test_id = ['801080447', '801030382', 'J00356713']
# predict = test_model_cs(model_name, test_id)
# print(predict)
#
# model_name = "慢性非阻塞_xgb"
# test_id = ['801080447', '801030382', 'J00356713']
# 模型预测
@app.route("/interface5", methods=["POST"])
def interface5_predict():
    """Model-prediction endpoint (interface 5).

    Expects a JSON body with model_name and test_id (list of patient_id
    strings from the merge table). Returns {patient_id: 0/1}.

    Fixes vs. original:
    * the body was parsed with eval(request.data) — arbitrary code
      execution on client input; now parsed with json.loads.
    * the original was named test_model_cs, silently shadowing the helper
      of the same name defined above; renamed (URL is unchanged) and the
      duplicated body replaced by a call to that helper.
    """
    payload = json.loads(request.data)
    model_name = payload['model_name']
    test_id = payload['test_id']
    result_dict = test_model_cs(model_name, test_id)
    # Cast to plain str/int so the response is JSON-serializable even when
    # the model yields numpy scalar types.
    return {str(k): int(v) for k, v in result_dict.items()}


# Script entry point: run the Flask development server locally.
if __name__ == '__main__':
    app.config['JSON_AS_ASCII'] = False  # keep non-ASCII (Chinese) JSON readable
    CORS(app)
    app.run(host='127.0.0.1', port=5000)

功能3

import xgboost as xgb
import lightgbm as lgbm
import pandas as pd
import numpy as np
import pymysql
import xgboost as xgb
import lightgbm as lgbm
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
from datetime import date, timedelta
import gc
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
from datetime import datetime
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import pymysql
from sklearn.decomposition import PCA
from flask import Flask, request,session
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from flask_cors import CORS
import psycopg2
from dataclasses import dataclass
import pymysql
from flask import Flask, request, url_for, redirect, render_template, jsonify
app = Flask(__name__)
import warnings
warnings.filterwarnings("ignore")
import joblib
import datetime
#import mysql.connector
import time
import random
import json
import collections
import random
import matplotlib.pyplot as plt
import os
import copy
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
import pandas as pd
import xgboost as xgb
import json
import psycopg2
from psycopg2 import Binary
import joblib
import pickle
import lightgbm as lgb
from joblib import dump, load
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sqlalchemy import create_engine
import json
from gevent import pywsgi
import knn
import creat_table
import random_forest
app = Flask(__name__)
CORS(app)
#################################################################################### 1 获取数据
# host = "localhost"
# port = "5432"
# database = "copd"
# user = "postgres"
# password = 111111
#
# PostgreSQL connection settings shared by the connect_pg_* helpers below.
# NOTE(review): credentials are hard-coded in source and the password is an
# int literal — move these to environment variables / a config file and use
# a string password.
host = "10.16.48.219"
port = "5432"
database = "medical"
user = "pg"
password = 111111
def connect_pg_3():
    """Open a connection to the 'medical' PostgreSQL database.

    Host/port/user/password come from the module-level settings; the
    caller owns the returned connection.
    """
    return psycopg2.connect(
        database="medical",
        user=user,
        password=password,
        host=host,
        port=port,
    )
def connect_pg_software1():
    """Open a connection to the 'software1' PostgreSQL database.

    Host/port/user/password come from the module-level settings; the
    caller owns the returned connection.
    """
    return psycopg2.connect(
        database="software1",
        user=user,
        password=password,
        host=host,
        port=port,
    )
def connect_mysql():
    """Open a pymysql connection to the 'public' MySQL database.

    Rows are returned as dicts (DictCursor); the caller owns the
    returned connection.
    """
    conn = pymysql.connect(
        host='10.16.48.219',
        user='root',
        password='111111',
        database='public',
        cursorclass=pymysql.cursors.DictCursor,
    )
    return conn
## connection, table_name
def get_data(connection, table_name):
    """Read the entire contents of *table_name* into a DataFrame.

    The table name is interpolated as a quoted identifier; embedded double
    quotes are escaped so a crafted name cannot break out of the quoting
    (basic SQL-injection hardening — identifiers cannot be parameterized).

    NOTE: the connection is closed before returning; callers must not
    reuse it.
    """
    safe_name = table_name.replace('"', '""')
    query = f'select * from "{safe_name}"'
    data = pd.read_sql(query, connection)
    connection.close()
    return data
# 接口 3_5
# 计算 0 1 比例(数据,疾病名称,条件列,范围)
# Interface 3_5 (script-side helper)
# Computes the disease prevalence (share of positive diagnoses) for two
# ranges of one indicator column — an A/B comparison.
def Interface3_5_hbl(test_name, test_describe, test_conditions, disease, table_name, age_left, age_right, sexname, city, zb, zba_left, zba_right, zbb_left, zbb_right):
    """Run the A/B prevalence experiment and persist the result row.

    The cohort is filtered by age (exclusive bounds), sex and city; the
    disease indicator is 1 where diagname == disease. Groups A and B are
    the rows whose *zb* value falls in (zba_left, zba_right) and
    (zbb_left, zbb_right) respectively.

    Returns (zba_reat, zbb_reat): prevalence in each group. A group with
    no rows yields 0 — the original raised ZeroDivisionError there (the
    HTTP version of this routine already guarded against it).
    """
    connection = connect_pg_3()
    data = get_data(connection, table_name)
    # 1 age filter (exclusive bounds)
    data['age'] = data['age'].astype(int)
    data = data.loc[(data['age'] > age_left) & (data['age'] < age_right)]
    # 2 sex filter
    data = data.loc[data['sexname'] == sexname]
    # 3 city filter
    data = data.loc[data['city'] == city]
    # Binary disease indicator
    data['diagname_code'] = 0
    data.loc[data['diagname'] == disease, 'diagname_code'] = 1

    def _prevalence(lo, hi):
        # Restrict to the indicator range (lo, hi) and compute the share
        # of positive diagnoses; 0 when the bucket is empty.
        grp = data.copy()
        grp[zb] = grp[zb].astype(float)
        grp = grp.loc[(grp[zb] > lo) & (grp[zb] < hi)]
        pos = int((grp['diagname_code'] == 1).sum())
        total = len(grp)
        return pos / total if total else 0

    zba_reat = _prevalence(zba_left, zba_right)
    zbb_reat = _prevalence(zbb_left, zbb_right)
    # Persist the experiment row in PostgreSQL.
    connection = connect_pg_software1()
    cursor = connection.cursor()
    query = "INSERT INTO testab_model1 (test_name,test_describe,test_conditions,disease,age_left,age_right,sexname,city,zb,zba_left,zba_right,zbb_left,zbb_right,table_name,zba_reat,zbb_reat) VALUES (%s, %s, %s,%s, %s, %s, %s, %s,%s, %s, %s, %s, %s,%s, %s, %s)"
    cursor.execute(query, (test_name, test_describe, test_conditions, disease, age_left, age_right, sexname, city, zb, zba_left, zba_right, zbb_left, zbb_right, table_name, zba_reat, zbb_reat))
    connection.commit()
    cursor.close()
    connection.close()
    return zba_reat, zbb_reat

@app.route("/interface3_5", methods=["POST"])
def Interface3_5():
    """HTTP wrapper around the A/B prevalence experiment.

    Parses the JSON body, filters the cohort, computes the disease
    prevalence in the A and B indicator ranges, stores the experiment in
    testab_model1 and returns the summary as JSON.

    Fixes vs. original:
    * body parsed with json.loads instead of eval — eval(request.data)
      executes arbitrary client-supplied code (remote code execution);
    * the B-group zero-division guard tested the A-group counts, and
      zbb_reat was then unconditionally overwritten with 0, so the
      reported B prevalence was always 0.
    """
    payload = json.loads(request.data)
    a_ms = payload['a_ms']
    b_ms = payload['b_ms']
    test_name = payload['test_name']
    test_describe = payload['test_describe']
    test_conditions = payload['test_conditions']
    disease = payload['disease']
    table_name = payload['table_name']
    age_left = float(payload['age_left'])
    age_right = float(payload['age_right'])
    sexname = payload['sexname']
    city = payload['city']
    zb = payload['zb']
    zba_left = float(payload['zba_left'])
    zba_right = float(payload['zba_right'])
    zbb_left = float(payload['zbb_left'])
    zbb_right = float(payload['zbb_right'])

    # Cohort selection: age (exclusive bounds), sex, city.
    connection = connect_pg_3()
    data = get_data(connection, table_name)
    data['age'] = data['age'].astype(int)
    data = data.loc[(data['age'] > age_left) & (data['age'] < age_right)]
    data = data.loc[data['sexname'] == sexname]
    data = data.loc[data['city'] == city]
    # Binary disease indicator.
    data['diagname_code'] = 0
    data.loc[data['diagname'] == disease, 'diagname_code'] = 1

    def _group(lo, hi):
        # Rows whose indicator falls in (lo, hi); returns (prevalence, size),
        # with prevalence 0 for an empty group.
        grp = data.copy()
        grp[zb] = grp[zb].astype(float)
        grp = grp.loc[(grp[zb] > lo) & (grp[zb] < hi)]
        pos = int((grp['diagname_code'] == 1).sum())
        total = len(grp)
        rate = pos / total if total else 0
        return rate, total

    zba_reat, _ = _group(zba_left, zba_right)
    # strlen mirrors the original: sample count of the filtered B group.
    zbb_reat, strlen = _group(zbb_left, zbb_right)
    print('zba_reat', zba_reat)
    print('zbb_reat', zbb_reat)

    # Persist the experiment row in PostgreSQL.
    connection = connect_pg_software1()
    cursor = connection.cursor()
    query = "INSERT INTO testab_model1 (test_name,test_describe,test_conditions,disease,age_left,age_right,sexname,city,zb,zba_left,zba_right,zbb_left,zbb_right,table_name,zba_reat,zbb_reat) VALUES (%s, %s, %s, %s,%s, %s, %s, %s, %s,%s, %s, %s, %s, %s,%s, %s)"
    cursor.execute(query, (test_name, test_describe, test_conditions, disease, age_left, age_right, sexname, city, zb, zba_left, zba_right, zbb_left, zbb_right, table_name, zba_reat, zbb_reat))
    connection.commit()
    cursor.close()
    connection.close()

    data = {
        "strlen": strlen,
        "test_name": test_name,
        "test_describe": test_describe,
        "disease": disease,
        "test_conditions": test_conditions,
        "table_name": table_name,
        "a_ms": a_ms,
        "b_ms": b_ms,
        "zba_reat": zba_reat,
        "zbb_reat": zbb_reat
    }
    return jsonify(data)

# A/B validation for a special label (data, disease, condition column, range).
# NOTE(review): this block runs at module import time — it queries the
# database and inserts an experiment row before the Flask app ever starts.
# Confirm it is meant to execute on every startup.
connection = connect_pg_3()
data = get_data(connection, "merge")
###### Inputs for Interface3_5_hbl (test_name, test_describe, test_conditions, disease, table_name, age_left, age_right, sexname, city, zb, zba_left, zba_right, zbb_left, zbb_right):
# experiment title
test_name = '高血压是否影响慢性阻塞性肺病3'
# experiment description
test_describe = '判断高血压是否影响慢性阻塞性肺病3'
# experiment scenario
test_conditions = '临床试验'
# disease under study (from the interface)
disease = '慢性阻塞性肺病'
# source table (from the interface)
table_name = 'merge'
# 1 age range (exclusive bounds)
age_left = 60
age_right = 80
# 2 sex
sexname = '男性'
# 3 city
city = '绵阳市'
# 4 shared A/B indicator column and each group's value range
zb = 'LYMPH_per'
zba_left = 6
zba_right = 80
zbb_left = 6
zbb_right = 80
#
zba_reat,zbb_reat = Interface3_5_hbl(test_name,test_describe,test_conditions,disease,table_name,age_left,age_right,sexname,city,zb,zba_left,zba_right,zbb_left,zbb_right)
print("zba_reat患病率为:",zba_reat*100,"%")
print("zbb_reat患病率为:",zbb_reat*100,"%")

###### 输出
# 展示样本量

# Script entry point: run the Flask development server locally.
if __name__ == '__main__':
    app.config['JSON_AS_ASCII'] = False  # keep non-ASCII (Chinese) JSON readable
    CORS(app)
    app.run(host='127.0.0.1', port=5000)

功能23

import xgboost as xgb
import lightgbm as lgbm
import pandas as pd
import numpy as np
import pymysql
import xgboost as xgb
import lightgbm as lgbm
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
from datetime import date, timedelta
import gc
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
from datetime import datetime
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import pymysql
from sklearn.decomposition import PCA
from flask import Flask, request, session
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from flask_cors import CORS
import psycopg2
from dataclasses import dataclass
import pymysql
import json
from flask import Flask, request, url_for, redirect, render_template, jsonify
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False  # 解决中文乱码问题
import warnings
warnings.filterwarnings("ignore")
import joblib
import datetime
# import mysql.connector
import time
import random
import json
import collections
import random
import matplotlib.pyplot as plt
import os
import copy
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
import pandas as pd
import xgboost as xgb
import psycopg2
import json
import psycopg2
from psycopg2 import Binary
import joblib
import pickle
import lightgbm as lgb
from joblib import dump, load
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sqlalchemy import create_engine
import json
from gevent import pywsgi
import knn
import creat_table
import random_forest
# NOTE(review): rebinds the `app` created earlier in the import block;
# this later Flask instance is the one the route decorators attach to.
app = Flask(__name__)
CORS(app)
#################################################################################### 1 Fetch data
# host = "localhost"
# port = "5432"
# database = "copd"
# user = "postgres"
# password = 111111
#
# NOTE(review): database credentials hard-coded in source — move to
# environment variables or a config file.
host = "10.16.48.219"
port = "5432"
database = "software1"
user = "pg"
# NOTE(review): int, not str — psycopg2 stringifies it, but a string is conventional.
password = 111111
def connect_pg():
    """Return a new psycopg2 connection built from the module-level settings."""
    return psycopg2.connect(
        database=database,
        user=user,
        password=password,
        host=host,
        port=port,
    )
def connect_mysql():
    """Return a new PyMySQL connection to the 'medical' database (dict cursors)."""
    conn_params = dict(
        host='10.16.48.219',
        user='root',
        password='111111',
        database='medical',
        cursorclass=pymysql.cursors.DictCursor,
    )
    return pymysql.connect(**conn_params)
## connection, table_name
def get_data(connection, table_name):
    """Read the whole `table_name` table into a DataFrame and CLOSE the connection.

    NOTE(review): `table_name` is interpolated directly into the SQL string —
    safe only for trusted internal table names (identifier-injection risk).
    The passed connection is consumed: callers must not reuse it afterwards.
    """
    query = f"select * from \"{table_name}\""
    data = pd.read_sql(query, connection)
    connection.close()
    return data
#################################################################################### 2 数据处理:处理 字段、特征
def per_data(data, disease_code):
    """Split `data` into train/test feature frames.

    Drops identifier/unwanted measurement columns, fills NaNs with 0, then
    slices positionally: first 150 rows become train, the rest test.

    Parameters:
        data: source DataFrame.
        disease_code: unused; kept for interface compatibility.
    Returns:
        (train, test) DataFrames sharing the same columns.
    """
    # Columns to exclude ('TH2' was listed twice in the original).
    excluded = ['Case_ID', 'BUN', 'M1_M2', 'TH2', 'IBILI', 'GLO']
    params = [col for col in data.columns if col not in excluded]
    features = data[params].fillna(0)
    # Positional split: first 150 rows train, remainder test.
    train = features.iloc[:150]
    test = features.iloc[150:]
    return train, test
#################################################################################### 3 模型训练
# xgb
def train_xgb(train, test, model_name):
    """Train an XGBoost binary classifier and persist the pickled Booster.

    `train`/`test` are DataFrames with feature columns plus 'label';
    `model_name` names the row inserted into train_model3. Returns 0.
    Test predictions are computed but not returned (parity with the other
    train_* helpers).
    """
    params = {
        "booster": 'gbtree',
        'objective': 'binary:logistic',
        'eval_metric': 'auc',
        'silent': 0,  # deprecated in recent xgboost; kept for compatibility
        'eta': 0.01,  # learning rate (0.01~0.2)
        'max_depth': 5,  # tree depth (3~10)
        'min_child_weight': 1,
        'gamma': 0,
        'lambda': 1,
        'colsample_bylevel': 0.7,  # similar role to subsample
        'colsample_bytree': 0.7,  # (0.5~1)
        'subsample': 0.9,  # (0.5~1)
        'scale_pos_weight': 1,  # speeds up convergence on imbalanced data
    }
    dtrain = xgb.DMatrix(train.drop(['label'], axis=1), label=train['label'])
    dtest = xgb.DMatrix(test.drop(['label'], axis=1))
    watchlist = [(dtrain, 'train')]
    model = xgb.train(params, dtrain, 200, watchlist)
    predict = pd.DataFrame(model.predict(dtest), columns=['target'])
    # Persist the pickled model in PostgreSQL; type code 1 identifies XGBoost rows.
    connection = connect_pg()
    cursor = connection.cursor()
    model_type = 1  # renamed from `type` to avoid shadowing the builtin
    model_description = 'xgboost_model'
    trainer = 'Gpb'
    model_str = pickle.dumps(model)
    query = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    try:
        cursor.execute(query, (model_type, model_name, model_description, trainer, model_str))
        connection.commit()
    finally:
        # Close resources even if the INSERT fails.
        cursor.close()
        connection.close()
    return 0
# gbdt
def train_gbdt(train, test, model_name):
    """Fit a GradientBoosting classifier and persist the pickled model.

    `train`/`test` are DataFrames with feature columns plus 'label';
    `model_name` names the row inserted into train_model3. Returns 0.
    Test predictions are computed but not returned.
    """
    print("%%%%训练_model_gbdt%%%%")
    gbdt_model = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1, max_depth=3, random_state=42)
    X_train = train.drop(['label'], axis=1)
    x_test = test.drop(['label'], axis=1)
    y_train = train['label']
    gbdt_model.fit(X_train, y_train)
    y_pred = pd.DataFrame(gbdt_model.predict(x_test), columns=['prob'])
    # Persist the pickled model; type code 2 identifies GBDT rows.
    connection = connect_pg()
    cursor = connection.cursor()
    model_type = 2  # renamed from `type` to avoid shadowing the builtin
    model_description = 'gbdt_model'
    trainer = 'Gpb'
    model_str = pickle.dumps(gbdt_model)
    query = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    try:
        cursor.execute(query, (model_type, model_name, model_description, trainer, model_str))
        connection.commit()
    finally:
        # Close resources even if the INSERT fails.
        cursor.close()
        connection.close()
    return 0
# lr
def train_lr(train, test, model_name):
    """Fit a LogisticRegression classifier and persist the pickled model.

    `train`/`test` are DataFrames with feature columns plus 'label';
    `model_name` names the row inserted into train_model3. Returns 0.
    Test predictions are computed but not returned.
    """
    print("%%%%训练_model_lr%%%%")
    lr_model = LogisticRegression(random_state=42)
    X_train = train.drop(['label'], axis=1)
    x_test = test.drop(['label'], axis=1)
    y_train = train['label']
    lr_model.fit(X_train, y_train)
    y_pred = pd.DataFrame(lr_model.predict(x_test), columns=['prob'])
    # Persist the pickled model; type code 3 identifies LR rows.
    connection = connect_pg()
    cursor = connection.cursor()
    model_type = 3  # renamed from `type` to avoid shadowing the builtin
    model_description = 'lr_model'
    trainer = 'Gpb'
    model_str = pickle.dumps(lr_model)
    query = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    try:
        cursor.execute(query, (model_type, model_name, model_description, trainer, model_str))
        connection.commit()
    finally:
        # Close resources even if the INSERT fails.
        cursor.close()
        connection.close()
    return 0
# svm
def train_svm(train, test, model_name):
    """Fit a LinearSVC classifier and persist the pickled model.

    `train`/`test` are DataFrames with feature columns plus 'label';
    `model_name` names the row inserted into train_model3. Returns 0.
    Test predictions are computed but not returned.
    """
    print("%%%%训练_model_svm%%%%")
    svm_model = LinearSVC(C=1.0, random_state=42)
    X_train = train.drop(['label'], axis=1)
    x_test = test.drop(['label'], axis=1)
    y_train = train['label']
    svm_model.fit(X_train, y_train)
    y_pred = pd.DataFrame(svm_model.predict(x_test), columns=['prob'])
    # Persist the pickled model; type code 4 identifies SVM rows.
    connection = connect_pg()
    cursor = connection.cursor()
    model_type = 4  # renamed from `type` to avoid shadowing the builtin
    model_description = 'svm_model'
    trainer = 'Gpb'
    model_str = pickle.dumps(svm_model)
    query = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    try:
        cursor.execute(query, (model_type, model_name, model_description, trainer, model_str))
        connection.commit()
    finally:
        # Close resources even if the INSERT fails.
        cursor.close()
        connection.close()
    return 0
# rf
def train_rf(train, test, model_name):
    """Fit a RandomForest classifier and persist the pickled model.

    `train`/`test` are DataFrames with feature columns plus 'label';
    `model_name` names the row inserted into train_model3. Returns 0.
    Test predictions are computed but not returned.
    """
    print("%%%%训练_model_rf%%%%")
    rf_model = RandomForestClassifier(n_estimators=100, random_state=42)
    X_train = train.drop(['label'], axis=1)
    x_test = test.drop(['label'], axis=1)
    y_train = train['label']
    rf_model.fit(X_train, y_train)
    y_pred = pd.DataFrame(rf_model.predict(x_test), columns=['prob'])
    # Persist the pickled model; type code 5 identifies RF rows.
    connection = connect_pg()
    cursor = connection.cursor()
    model_type = 5  # renamed from `type` to avoid shadowing the builtin
    model_description = 'rf_model'
    trainer = 'Gpb'
    model_str = pickle.dumps(rf_model)
    query = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    try:
        cursor.execute(query, (model_type, model_name, model_description, trainer, model_str))
        connection.commit()
    finally:
        # Close resources even if the INSERT fails.
        cursor.close()
        connection.close()
    return 0
# cart
def train_cart(train, test, model_name):
    """Fit a CART (DecisionTree) classifier and persist the pickled model.

    `train`/`test` are DataFrames with feature columns plus 'label';
    `model_name` names the row inserted into train_model3.
    Test predictions are computed but not returned.

    NOTE(review): returns 1 while every other train_* helper returns 0 —
    preserved because train_model_cs surfaces this value to callers.
    """
    print("%%%%训练_model_cart%%%%")
    cart_model = DecisionTreeClassifier(criterion='gini', max_depth=5, random_state=42)
    X_train = train.drop(['label'], axis=1)
    x_test = test.drop(['label'], axis=1)
    y_train = train['label']
    cart_model.fit(X_train, y_train)
    y_pred = pd.DataFrame(cart_model.predict(x_test), columns=['prob'])
    # Persist the pickled model; type code 6 identifies CART rows.
    connection = connect_pg()
    cursor = connection.cursor()
    model_type = 6  # renamed from `type` to avoid shadowing the builtin
    model_description = 'cart_model'
    trainer = 'Gpb'
    model_str = pickle.dumps(cart_model)
    query = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    try:
        cursor.execute(query, (model_type, model_name, model_description, trainer, model_str))
        connection.commit()
    finally:
        # Close resources even if the INSERT fails.
        cursor.close()
        connection.close()
    return 1
#################################################################################### 接口2:模型训练
# 前端给算法的请求  四个参数(model_name, algorithm_code, table_name, disease_code):
# {
#             "model_name": "慢性非阻塞_cart",
#             "algorithm_code": 6,
#             "table_name": "Diabetes",
#             "disease_code": "慢性阻塞性肺病"
# }
# 算法给前端响应
# {
#     "success": true,
#     "message": "请求成功",
#     "data": [
#         {
#             "id": 1 //训练好的模型,存入数据库,返回数据库中的id
#         }
#     ]
# }
def per_data_pulic():
    """Build train/test frames from the hard-coded public 'merge_copy2' table.

    Reads the table from PostgreSQL, keeps numeric columns plus 'diagname',
    derives a binary 'label' from the hard-coded disease name, and returns
    (first 200 rows, last 200 rows).

    NOTE(review): train and test overlap whenever the table has fewer than
    400 rows — TODO confirm this is intended.
    """
    table_name = "merge_copy2"
    disease_code = "慢性阻塞性肺病"
    # NOTE(review): credentials hard-coded in source — move to configuration.
    connection_pulic = psycopg2.connect(database="medical",
                                        user="pg",
                                        password=111111,
                                        host="10.16.48.219",
                                        port="5432")
    data = get_data(connection_pulic, table_name)
    data.fillna(0, inplace=True)

    def try_convert_to_float(value):
        # Dates pass through; everything else is coerced to float when possible.
        if isinstance(value, datetime.date):
            return value
        try:
            return float(value)
        except ValueError:
            return value

    data = data.applymap(try_convert_to_float)
    # Numeric feature columns plus the diagnosis column (assign() merely
    # appends 'diagname' to the selected column list). The original computed
    # this list twice; the first result was dead code.
    numeric_cols = data.select_dtypes(include=['int', 'float']).assign(diagname="diagname").columns.tolist()
    numeric_cols_data = data.loc[:, numeric_cols]
    # Binary target: 1 when the diagnosis matches the disease of interest.
    numeric_cols_data['label'] = (numeric_cols_data['diagname'] == disease_code).astype(int)
    numeric_cols_data.drop(columns=['diagname'], inplace=True)
    train = numeric_cols_data.iloc[:200]
    test = numeric_cols_data.iloc[-200:]
    return train, test
def train_model_cs(model_name, algorithm_code, table_name, disease_code):
    """Dispatch model training by algorithm_code.

    Codes: 1=xgb, 2=gbdt, 3=lr, 4=svm, 5=rf, 6=cart.
    Returns the chosen trainer's result, or 0 for an unknown code.

    NOTE(review): the original also fetched `table_name` here and discarded
    the result — per_data_pulic() reads its own (hard-coded) table, so that
    redundant round-trip is dropped. `disease_code` is likewise unused.
    """
    train, test = per_data_pulic()
    # Dispatch table replaces the original if/elif chain.
    trainers = {
        1: (train_xgb, 'train_test_xgb'),
        2: (train_gbdt, 'train_test_gbdt'),
        3: (train_lr, 'train_test_lr'),
        4: (train_svm, 'train_test_svm'),
        5: (train_rf, 'train_test_rf'),
        6: (train_cart, 'train_test_cart'),
    }
    entry = trainers.get(algorithm_code)
    if entry is None:
        return 0
    trainer_fn, tag = entry
    predict = trainer_fn(train, test, model_name)
    print(predict)
    print(tag)
    return predict
# 训练 (model_name, algorithm_code, table_name, disease_code):
# model_name = "慢性非阻塞_6"
# algorithm_code =  6
# table_name = "Diabetes"
# disease_code = "慢性阻塞性肺病"
# predict = train_model_cs(model_name, algorithm_code, table_name, disease_code)

#   http://127.0.0.1:5000/interface2
# 模型训练
@app.route("/interface2", methods=["POST"])
def train_model():
    """POST /interface2 — train a model.

    Body fields: model_name, algorithm_code (1..6), table_name, disease_code.
    Returns "1" on success, "0" for an unknown algorithm code.
    """
    # SECURITY: eval() on a request body executes arbitrary client-supplied
    # code. Parsed once (original called eval 4 times).
    payload = eval(request.data)  # FIXME(security): use json.loads / request.get_json()
    model_name = payload['model_name']
    algorithm_code = payload['algorithm_code']
    table_name = payload['table_name']
    disease_code = payload['disease_code']
    print(model_name, algorithm_code, table_name, disease_code)
    # NOTE(review): the original fetched `table_name` here and discarded the
    # result; per_data_pulic() reads its own (hard-coded) table.
    train, test = per_data_pulic()
    trainers = {
        1: (train_xgb, 'train_test_xgb'),
        2: (train_gbdt, 'train_test_gbdt'),
        3: (train_lr, 'train_test_lr'),
        4: (train_svm, 'train_test_svm'),
        5: (train_rf, 'train_test_rf'),
        6: (train_cart, 'train_test_cart'),
    }
    entry = trainers.get(algorithm_code)
    if entry is None:
        # BUG FIX: original returned int 0, which is not a valid Flask response.
        return "0"
    trainer_fn, tag = entry
    predict = trainer_fn(train, test, model_name)
    print(predict)
    print(tag)
    return "1"

#################################################################################### 接口5:预测id选择
# 前端给算法的请求   点击即可
# {
#             "id": "121",
#             "id": "122",
#             "id": "123",
#             "id": "124"
# }
# 算法给前端响应
# {
#     "success": true,
#     "message": "请求成功",
#     "data": [
#         {
#             "id": "121",
#             "id": "122",
#             "id": "123",
#             "id": "124"   //重新返回id , 这里会有预测结果
#         }
#     ]
# }

# public-test data loading
def per_data_pulic_test(test_id):
    """Build the prediction feature frame for the given patient ids.

    Reads the hard-coded 'merge_copy2' table, keeps numeric columns plus
    'diagname' and 'patient_id', derives a binary 'label' from the
    hard-coded disease name, and filters rows whose patient_id is in
    `test_id` (a list of id strings).

    Returns:
        (features, patient_id_copy) — `features` has 'diagname' and
        'patient_id' dropped; `patient_id_copy` retains them for joining
        predictions back to patients.
    """
    table_name = "merge_copy2"
    disease_code = "慢性阻塞性肺病"
    # NOTE(review): credentials hard-coded in source — move to configuration.
    connection_pulic = psycopg2.connect(database="medical",
                                        user="pg",
                                        password=111111,
                                        host="10.16.48.219",
                                        port="5432")
    data = get_data(connection_pulic, table_name)
    data.fillna(0, inplace=True)
    # Automatically select int/float columns so the ML model can train on them.
    def try_convert_to_float(value):
        if isinstance(value, datetime.date):
            return value
        try:
            return float(value)
        except ValueError:
            return value
    data = data.applymap(try_convert_to_float)
    numeric_cols = data.select_dtypes(include=['int', 'float']).assign(diagname="diagname").columns.tolist()
    numeric_cols.append('patient_id')               # keep patient_id for filtering by test_id
    numeric_cols_data = data.loc[:, numeric_cols]
    numeric_cols_data['label'] = 0
    numeric_cols_data['label'] = numeric_cols_data['diagname'].apply(lambda x: 1 if x == disease_code else 0)
    numeric_cols_data['label'] = numeric_cols_data['label'].astype(int)
    # Filter by test_id, then drop the patient_id column from the features.
    # Convert float patient_ids to int first (so str() gives '123', not '123.0').
    numeric_cols_data['patient_id'] = numeric_cols_data['patient_id'].apply(lambda x: int(x) if isinstance(x, float) else x)
    numeric_cols_data['patient_id'] = numeric_cols_data['patient_id'].astype(str)
    numeric_cols_data = numeric_cols_data.loc[numeric_cols_data['patient_id'].isin(test_id)]
    patient_id_copy = numeric_cols_data.copy()
    numeric_cols_data.drop(columns=['diagname'], inplace=True)
    numeric_cols_data.drop(columns=['patient_id'], inplace=True)
    return numeric_cols_data , patient_id_copy
def test_model_cs(model_name, test_id):
    """Load the model stored under `model_name` and predict for `test_id` patients.

    Returns a dict mapping patient_id -> binarized prediction (0/1).

    NOTE(review): this name is later rebound by the Flask route of the same
    name; callers after that point get the route function instead.
    """
    test, patient_id_copy = per_data_pulic_test(test_id)
    connection = connect_pg()
    cursor = connection.cursor()
    x_test = test.drop(['label'], axis=1)
    # Fetch both columns in a single round-trip (original issued two queries).
    query = "SELECT training_parameters, type FROM train_model3 WHERE model_name = %s"
    cursor.execute(query, (model_name,))
    model_str, model_type = cursor.fetchone()
    # Close DB resources as soon as the blob is fetched (original leaked both).
    cursor.close()
    connection.close()
    # SECURITY: pickle.loads executes arbitrary code if the stored blob can be
    # written by untrusted parties.
    loaded_model = pickle.loads(model_str)
    if model_type == 1:
        # type 1 == xgboost Booster: needs a DMatrix, returns probabilities.
        predictions = loaded_model.predict(xgb.DMatrix(x_test))
    else:
        predictions = loaded_model.predict(x_test)
    patient_id_copy = patient_id_copy.reset_index(drop=True)
    result_df = pd.concat([patient_id_copy, pd.DataFrame({'predict': predictions})], axis=1)
    # Binarize: probabilities (xgb) thresholded at 0.5; 0/1 class labels pass through.
    result_df['predict'] = result_df['predict'].apply(lambda x: 1 if x > 0.5 else 0)
    result_df = result_df[['patient_id', 'predict']]
    result_dict = result_df.set_index('patient_id')['predict'].to_dict()
    print(result_dict)
    return result_dict
# 预测 test_model(disease_code, id, test_id):
# model_name = "慢性非阻塞_xgb"
# test_id = ['801080447', '801030382', 'J00356713']
# predict = test_model_cs(model_name, test_id)
# print(predict)
#
# model_name = "慢性非阻塞_xgb"
# test_id = ['801080447', '801030382', 'J00356713']
# 模型预测
@app.route("/interface5", methods=["POST"])
def test_model_cs():
    """POST /interface5 — predict with a stored model for given patient ids.

    Body: {"model_name": ..., "test_id": [patient_id, ...]} where the ids
    are patient_id values from the merge table.
    Returns a dict mapping patient_id -> binarized prediction (0/1).

    NOTE(review): this definition rebinds the helper of the same name defined
    above; rename one of them to avoid confusion.
    """
    # SECURITY: eval() on the request body executes arbitrary client code.
    payload = eval(request.data)  # FIXME(security): use json.loads / request.get_json()
    model_name = payload['model_name']
    test_id = payload['test_id']
    test, patient_id_copy = per_data_pulic_test(test_id)
    connection = connect_pg()
    cursor = connection.cursor()
    x_test = test.drop(['label'], axis=1)
    # Fetch both columns in a single round-trip (original issued two queries).
    query = "SELECT training_parameters, type FROM train_model3 WHERE model_name = %s"
    cursor.execute(query, (model_name,))
    model_str, model_type = cursor.fetchone()
    # Close DB resources as soon as the blob is fetched (original leaked both).
    cursor.close()
    connection.close()
    # SECURITY: pickle.loads executes arbitrary code if the stored blob can be
    # written by untrusted parties.
    loaded_model = pickle.loads(model_str)
    if model_type == 1:
        # type 1 == xgboost Booster: needs a DMatrix, returns probabilities.
        predictions = loaded_model.predict(xgb.DMatrix(x_test))
    else:
        predictions = loaded_model.predict(x_test)
    patient_id_copy = patient_id_copy.reset_index(drop=True)
    result_df = pd.concat([patient_id_copy, pd.DataFrame({'predict': predictions})], axis=1)
    # Binarize: probabilities (xgb) thresholded at 0.5; 0/1 class labels pass through.
    result_df['predict'] = result_df['predict'].apply(lambda x: 1 if x > 0.5 else 0)
    result_df = result_df[['patient_id', 'predict']]
    result_dict = result_df.set_index('patient_id')['predict'].to_dict()
    print(result_dict)
    return result_dict


def connect_pg_3():
    """Return a psycopg2 connection to the 'medical' database, using the
    module-level host/port/user/password settings."""
    return psycopg2.connect(
        database="medical",
        user=user,
        password=password,
        host=host,
        port=port,
    )
def connect_pg_software1():
    """Return a psycopg2 connection to the 'software1' database, using the
    module-level host/port/user/password settings."""
    return psycopg2.connect(
        database="software1",
        user=user,
        password=password,
        host=host,
        port=port,
    )
def connect_mysql():
    """Open a PyMySQL connection to the 'public' database with dict cursors.

    NOTE(review): redefines the earlier connect_mysql (which used
    database='medical'); this later definition wins at import time.
    Credentials are hard-coded — move to configuration.
    """
    connection = pymysql.connect(
        host='10.16.48.219',
        user='root',
        password='111111',
        database='public',
        cursorclass=pymysql.cursors.DictCursor)
    return connection
## connection, table_name
def get_data(connection, table_name):
    """Read the whole `table_name` table into a DataFrame and CLOSE the connection.

    NOTE(review): redefines the identical get_data declared earlier in the
    file. `table_name` is interpolated directly into the SQL string — safe
    only for trusted internal table names (identifier-injection risk).
    """
    query = f"select * from \"{table_name}\""
    data = pd.read_sql(query, connection)
    connection.close()
    return data
# Interface 3_5
# Compute the 0/1 disease ratio for A/B cohorts (data, disease, condition column, ranges)
def Interface3_5_hbl(test_name,test_describe,test_conditions,disease,table_name,age_left,age_right,sexname,city,zb,zba_left,zba_right,zbb_left,zbb_right):
    """Run an A/B prevalence experiment and persist it.

    Filters the `table_name` cohort by age/sex/city (exclusive bounds),
    splits it into A/B groups by the indicator `zb`'s ranges, computes each
    group's disease prevalence, stores the experiment row in testab_model1,
    and returns (zba_reat, zbb_reat).
    """
    connection = connect_pg_3()
    data = get_data(connection, table_name)
    # 1) age filter (exclusive bounds)
    data['age'] = data['age'].astype(int)
    data = data.loc[(data['age'] > age_left) & (data['age'] < age_right)]
    # 2) sex filter
    data = data.loc[data['sexname'] == sexname]
    # 3) city filter
    data = data.loc[data['city'] == city]
    # Binary disease flag.
    data['diagname_code'] = 0
    data.loc[data['diagname'] == disease, 'diagname_code'] = 1
    zba = data.copy()
    zbb = data.copy()
    # 4) A/B split on the shared indicator (exclusive bounds)
    zba[zb] = zba[zb].astype(float)
    zba = zba.loc[(zba[zb] > zba_left) & (zba[zb] < zba_right)]
    zbb[zb] = zbb[zb].astype(float)
    zbb = zbb.loc[(zbb[zb] > zbb_left) & (zbb[zb] < zbb_right)]
    # Group A prevalence.
    print('zba样本数:', len(zba))
    zba_1 = len(zba[zba['diagname_code'] == 1])
    zba_0 = len(zba[zba['diagname_code'] == 0])
    # BUG FIX: guard empty cohorts against ZeroDivisionError (mirrors the
    # guard used by the /interface3_5 route).
    zba_reat = zba_1 / (zba_1 + zba_0) if (zba_1 + zba_0) else 0
    # Group B prevalence.
    print('zbb样本数:',len(zbb))
    zbb_1 = len(zbb[zbb['diagname_code'] == 1])
    zbb_0 = len(zbb[zbb['diagname_code'] == 0])
    zbb_reat = zbb_1 / (zbb_1 + zbb_0) if (zbb_1 + zbb_0) else 0
    # Persist the experiment record in PostgreSQL.
    connection = connect_pg_software1()
    cursor = connection.cursor()
    query = "INSERT INTO testab_model1 (test_name,test_describe,test_conditions,disease,age_left,age_right,sexname,city,zb,zba_left,zba_right,zbb_left,zbb_right,table_name,zba_reat,zbb_reat) VALUES (%s, %s, %s,%s, %s, %s, %s, %s,%s, %s, %s, %s, %s,%s, %s, %s)"
    try:
        cursor.execute(query, (test_name,test_describe,test_conditions,disease,age_left,age_right,sexname,city,zb,zba_left,zba_right,zbb_left,zbb_right,table_name,zba_reat,zbb_reat))
        connection.commit()
    finally:
        # Close resources even if the INSERT fails.
        cursor.close()
        connection.close()
    return zba_reat,zbb_reat

@app.route("/interface3_5", methods=["POST"])
def Interface3_5():
    """POST /interface3_5 — A/B prevalence experiment.

    Filters the cohort by age/sex/city, splits it into A/B groups by the
    indicator `zb`'s ranges, computes each group's disease prevalence,
    stores the experiment in testab_model1 and returns the rates as JSON.
    """
    # SECURITY: eval() on the request body executes arbitrary client code.
    # Parsed once (original called eval 16 times).
    payload = eval(request.data)  # FIXME(security): use json.loads / request.get_json()
    a_ms = payload['a_ms']
    b_ms = payload['b_ms']
    test_name = payload['test_name']
    test_describe = payload['test_describe']
    test_conditions = payload['test_conditions']
    disease = payload['disease']
    table_name = payload['table_name']
    age_left = float(payload['age_left'])
    age_right = float(payload['age_right'])
    sexname = payload['sexname']
    city = payload['city']
    zb = payload['zb']
    zba_left = float(payload['zba_left'])
    zba_right = float(payload['zba_right'])
    zbb_left = float(payload['zbb_left'])
    zbb_right = float(payload['zbb_right'])

    # Load the cohort and apply the shared filters.
    connection = connect_pg_3()
    data = get_data(connection, table_name)
    # 1) age filter (exclusive bounds)
    data['age'] = data['age'].astype(int)
    data = data.loc[(data['age'] > age_left) & (data['age'] < age_right)]
    # 2) sex filter
    data = data.loc[data['sexname'] == sexname]
    # 3) city filter
    data = data.loc[data['city'] == city]
    # Binary disease flag.
    data['diagname_code'] = 0
    data.loc[data['diagname'] == disease, 'diagname_code'] = 1
    zba = data.copy()
    zbb = data.copy()
    # 4) A/B split on the shared indicator (exclusive bounds)
    zba[zb] = zba[zb].astype(float)
    zba = zba.loc[(zba[zb] > zba_left) & (zba[zb] < zba_right)]
    zbb[zb] = zbb[zb].astype(float)
    zbb = zbb.loc[(zbb[zb] > zbb_left) & (zbb[zb] < zbb_right)]
    # Group A prevalence (guard empty cohort against ZeroDivisionError).
    print('zba样本数:', len(zba))
    zba_1 = len(zba[zba['diagname_code'] == 1])
    zba_0 = len(zba[zba['diagname_code'] == 0])
    zba_reat = zba_1 / (zba_1 + zba_0) if (zba_1 + zba_0) else 0
    # Group B prevalence.
    strlen = len(zbb)
    print('zbb样本数:', strlen)
    zbb_1 = len(zbb[zbb['diagname_code'] == 1])
    zbb_0 = len(zbb[zbb['diagname_code'] == 0])
    # BUG FIX: the original guarded on the A-group counts here and then
    # unconditionally overwrote zbb_reat with 0, discarding the computed
    # B-group rate. Guard on the B-group counts and keep the result.
    zbb_reat = zbb_1 / (zbb_1 + zbb_0) if (zbb_1 + zbb_0) else 0
    # Persist the experiment record in PostgreSQL.
    connection = connect_pg_software1()
    cursor = connection.cursor()
    # Log the stored values (original printed 'zba_left' for zbb_left — fixed).
    for label, value in (('test_name', test_name), ('test_describe', test_describe),
                         ('test_conditions', test_conditions), ('disease', disease),
                         ('age_left', age_left), ('age_right', age_right),
                         ('sexname', sexname), ('city', city), ('zb', zb),
                         ('zba_left', zba_left), ('zba_right', zba_right),
                         ('zbb_left', zbb_left), ('zbb_right', zbb_right),
                         ('table_name', table_name), ('zba_reat', zba_reat),
                         ('zbb_reat', zbb_reat)):
        print(label, value)
    query = "INSERT INTO testab_model1 (test_name,test_describe,test_conditions,disease,age_left,age_right,sexname,city,zb,zba_left,zba_right,zbb_left,zbb_right,table_name,zba_reat,zbb_reat) VALUES (%s, %s, %s, %s,%s, %s, %s, %s, %s,%s, %s, %s, %s, %s,%s, %s)"
    try:
        cursor.execute(query, (test_name, test_describe, test_conditions, disease, age_left, age_right, sexname, city, zb, zba_left, zba_right, zbb_left, zbb_right, table_name, zba_reat, zbb_reat))
        connection.commit()
    finally:
        # Close resources even if the INSERT fails.
        cursor.close()
        connection.close()

    data = {
        "strlen": strlen,
        "test_name": test_name,
        "test_describe": test_describe,
        "disease": disease,
        "test_conditions": test_conditions,
        "table_name": table_name,
        "a_ms": a_ms,
        "b_ms": b_ms,
        "zba_reat": zba_reat,
        "zbb_reat": zbb_reat,
    }
    return jsonify(data)

# (data, disease name, condition column, ranges)
# Special-label A/B validation
# NOTE(review): this block runs at import time — it executes a full A/B test
# against the database before the Flask server is even started.
connection = connect_pg_3()
data = get_data(connection, "merge")
###### Inputs (test_name,test_describe,test_conditions,disease,table_name,age,age_left,age_right,sexname,city,zb,zba_left,zba_right,zbb_left,zbb_right):
# Experiment title
test_name = '高血压是否影响慢性阻塞性肺病3'
# Experiment description
test_describe = '判断高血压是否影响慢性阻塞性肺病3'
# Experiment scenario
test_conditions = '临床试验'
# Disease (supplied via the interface)
disease = '慢性阻塞性肺病'
# Data table to use (supplied via the interface)
table_name = 'merge'
# 1 age: lower/upper bounds (exclusive)
age_left = 60
age_right = 80
# 2 sex
sexname = '男性'
# 3 city
city = '绵阳市'
# 4 A/B test: shared indicator with per-group bounds
zb = 'LYMPH_per'
zba_left = 6
zba_right = 80
zbb_left = 6
zbb_right = 80
#
zba_reat,zbb_reat = Interface3_5_hbl(test_name,test_describe,test_conditions,disease,table_name,age_left,age_right,sexname,city,zb,zba_left,zba_right,zbb_left,zbb_right)
print("zba_reat患病率为:",zba_reat*100,"%")
print("zbb_reat患病率为:",zbb_reat*100,"%")

###### Output
# Display the sample size


if __name__ == '__main__':
    app.config['JSON_AS_ASCII'] = False
    CORS(app)
    app.run(host='127.0.0.1', port=5000)

  • 2
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值