app.py backup

import datetime
import json
import pickle
import warnings

import numpy as np
import pandas as pd
import psycopg2
import pymysql
import xgboost as xgb
from flask import Flask, request, jsonify
from flask_cors import CORS
from gevent import pywsgi
from sklearn.decomposition import PCA
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier

import creat_table
import knn
import random_forest

warnings.filterwarnings("ignore")

app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False  # keep non-ASCII characters intact in JSON responses
CORS(app)
host = "10.16.48.219"
port = "5432"
database = "software1"
user = "pg"
password = 111111
def connect_pg():
    pg_connection = psycopg2.connect(database=database,
                     user=user,
                     password=password,
                     host=host,
                     port=port)
    return pg_connection
def connect_mysql():
    connection = pymysql.connect(
        host='10.16.48.219',
        user='root',
        password='111111',
        database='medical',
        cursorclass=pymysql.cursors.DictCursor
    )
    return connection
## connection, table_name
def get_data(connection, table_name):
    query = f"select * from \"{table_name}\""
    data = pd.read_sql(query, connection)
    connection.close()
    return data
# Run principal component analysis (PCA) and return the reduced data along with the
# contribution ratio (explained variance ratio) of each principal component.
# Accepts: a POST request whose JSON body is the list of feature names to use in the PCA.
# Returns: a JSON array holding the reduced data and the contribution ratio list.
# Route: /pca
@app.route('/pca', methods=['POST'])
def pca():
    # Connect to the database and load the table
    connection = connect_pg()
    data = get_data(connection, "Diabetes")
    # Pick the features to reduce
    # features = data.drop(['Case_ID'], axis=1)
    params = request.get_json()
    features = data[params]
    # Preprocessing: fill missing values
    features = features.fillna(0)
    print(features)
    # Standardize the features
    scaler = StandardScaler()
    scaled_features = scaler.fit_transform(features)
    # Choose the number of components: keep one fewer than the number of input features
    n_components = len(params) - 1
    # Reduce dimensionality (PCA handles the covariance eigendecomposition internally)
    pca = PCA(n_components=n_components)
    reduced_features = pca.fit_transform(scaled_features)
    result = pd.DataFrame(reduced_features)
    explained_variance_ratio = pca.explained_variance_ratio_
    contribution_list = explained_variance_ratio.tolist()
    return jsonify([result.to_dict(), contribution_list])
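# A minimal client sketch for the /pca endpoint above (assumptions: the server runs
# on localhost:5000, the "requests" package is installed, and "AGE" and "BMI" are
# columns of the "Diabetes" table). Defined but not called, so importing this module
# has no side effects.
def _example_pca_client():
    import requests
    resp = requests.post("http://localhost:5000/pca", json=["AGE", "BMI"])
    reduced, contributions = resp.json()
    print(contributions)  # explained-variance ratio of each principal component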
# Run the k-nearest-neighbours (KNN) algorithm.
# Accepts: a POST request with a JSON body, which is forwarded to knn.knn(). Returns: the KNN prediction result.
@app.route("/knn", methods=['POST'])
def get_knn():
    param = request.get_json()
    print(param)
    return knn.knn(param)
# Create a feature table and fetch its first 15 rows.
# Accepts: a POST request with a JSON body describing the table to create. Returns: the first 15 rows of the new table, as a string.
@app.route("/featureCreate", methods=['POST'])
def feature_create():
    param = request.get_json()
    new_table = creat_table.create_table(param)
    # 连接到数据库
    connection = connect_mysql()
    # 创建游标对象
    cursor = connection.cursor()
    # 从数据库中读取数据
    query = f"SELECT * FROM {new_table} limit 15"
    cursor.execute(query)
    # 获取查询结果
    rows = cursor.fetchall()
    return str(rows)
# Run the random forest algorithm.
# Accepts: a POST request with a JSON body, forwarded to random_forest.random_forest(). Returns: its result.
@app.route("/randomForest", methods=['POST'])
def fe():
    param = request.get_json()
    # {'tableName1': 'cardio_train', 'tableName2': 'stroke', 'aiName': 'randomForest', 'runParams': ['Case_ID', 'AGE', 'SEX']}
    return random_forest.random_forest(param)
# Run the second random forest variant.
# Accepts: a POST request with a JSON body, forwarded to random_forest.random_forest1(). Returns: its result.
@app.route("/randomForest1", methods=['POST'])
def fe1():
    param = request.get_json()
    # {'tableName1': 'cardio_train', 'tableName2': 'stroke', 'aiName': 'randomForest', 'runParams': ['Case_ID', 'AGE', 'SEX']}
    return random_forest.random_forest1(param)
# Describe a non-discrete (continuous) column.
# Accepts: a POST request with parameters. Returns: the column's median, mean, mode,
# maximum and minimum, wrapped in a JSON object.
#   Parameter dict: tableName: xxx, aiName: xxx, runParams: []
@app.route("/notDiscreteFeatureDesc", methods=['POST'])
def notDiscreteFeatureDesc():
    # Connect to the database
    param = request.get_json()
    colName = param['runParams'][0]
    pg_connection = connect_pg()
    tableName = param['tableName']
    sql = f"select \"{colName}\" from {tableName}"
    databaseData = pd.read_sql(sql, con=pg_connection)
    databaseData[colName] = pd.to_numeric(databaseData[colName], errors='coerce')
    total_count = databaseData.shape[0]
    print("total_count:", total_count)
    # Drop NaN values
    databaseData.dropna(subset=[colName], inplace=True)
    # Median
    median = databaseData[colName].median()
    print("median", median)
    # Mean
    mean = databaseData[colName].mean()
    print("mean", mean)
    # Mode
    mode = databaseData[colName].mode().iloc[0]
    print("mode", mode)
    # Maximum
    max_value = databaseData[colName].max()
    print("max_value", max_value)
    # Minimum
    min_value = databaseData[colName].min()
    print("min_value", min_value)
    res = {
        "total": total_count,
        "average": mean,
        "middle": median,
        "min": min_value,
        "max": max_value,
        "mode": mode
    }
    print(res)
    return json.dumps(res)
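# A minimal client sketch for /notDiscreteFeatureDesc (assumptions: server on
# localhost:5000, "requests" installed, and "AGE" is a column of the "Diabetes" table).
def _example_feature_desc_client():
    import requests
    payload = {"tableName": "Diabetes", "runParams": ["AGE"]}
    resp = requests.post("http://localhost:5000/notDiscreteFeatureDesc", json=payload)
    print(resp.json())  # {"total": ..., "average": ..., "middle": ..., "min": ..., "max": ..., "mode": ...}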
# Accepts: a POST request with parameters. Returns: the column with missing values
# filled by the mode, wrapped in a JSON object.
#   Parameter dict: tableName: xxx, aiName: xxx, runParams: []
@app.route("/modePadding", methods=['POST'])
def modePadding():
    # Connect to the database
    param = request.get_json()
    colName = param['runParams'][0]
    pg_connection = connect_pg()
    tableName = param['tableName']
    sql = f"select \"{colName}\" from {tableName}"
    databaseData = pd.read_sql(sql, con=pg_connection)
    old_data = databaseData.values.tolist()
    # Compute the column mode
    mode_value = databaseData[colName].mode()[0]
    # Fill missing values with the mode
    databaseData[colName] = databaseData[colName].fillna(mode_value)
    res = {
        "old_data": old_data,
        "new_data": databaseData.values.tolist()
    }
    print(res)
    return json.dumps(res)
# Accepts: a POST request with parameters. Returns: the column with missing values replaced by the mean.
@app.route("/meanReplacement", methods=['POST'])
def meanReplacement():  # mean replacement
    # Connect to the database
    param = request.get_json()
    colName = param['runParams'][0]
    pg_connection = connect_pg()
    tableName = param['tableName']
    sql = f"select \"{colName}\" from {tableName}"
    databaseData = pd.read_sql(sql, con=pg_connection)
    old_data = databaseData.values.tolist()
    # Compute the mean over the non-missing values only
    numeric_values = pd.to_numeric(databaseData[colName], errors='coerce')
    mean_value = round(numeric_values.mean(), 2)
    print("mean:", mean_value)
    # Replace missing values with the mean (stored as a string to match the column type)
    databaseData[colName].fillna(str(mean_value), inplace=True)
    res = {
        "old_data": old_data,
        "new_data": databaseData.values.tolist()
    }
    print("final data:")
    print(res)
    return json.dumps(res)
# Accepts: a POST request with parameters. Returns: the column with missing values
# filled by nearest-neighbour interpolation.
@app.route("/nearestNeighborInterpolation", methods=['POST'])
def nearestNeighborInterpolation():
    # Connect to the database
    param = request.get_json()
    colName = param['runParams'][0]
    pg_connection = connect_pg()
    tableName = param['tableName']
    sql = f"select \"{colName}\" from {tableName}"
    databaseData = pd.read_sql(sql, con=pg_connection)
    old_data = databaseData[colName].values.tolist()
    # Convert to numeric and interpolate (method='nearest' requires SciPy)
    databaseData[colName] = pd.to_numeric(databaseData[colName], errors='coerce')
    databaseData[colName] = databaseData[colName].interpolate(method='nearest')
    # Store the interpolated data as strings in new_data
    new_data = databaseData[colName].astype(str).tolist()
    res = {
        "old_data": old_data,
        "new_data": new_data
    }
    print(res)
    return json.dumps(res)
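# A minimal sketch of what nearest-neighbour interpolation does on a toy series
# (assumption: SciPy is installed, which pandas requires for method='nearest'):
def _example_nearest_interpolation():
    s = pd.Series([1.0, np.nan, np.nan, 4.0])
    # Each NaN takes the value of the closest non-missing neighbour by index:
    # position 1 is closer to 1.0, position 2 is closer to 4.0.
    print(s.interpolate(method='nearest').tolist())  # [1.0, 1.0, 4.0, 4.0]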
# Accepts: a POST request with parameters. Returns: the column with missing values forward-filled.
@app.route("/forwardFilling", methods=['POST'])
def forwardFilling():
    # Connect to the database
    param = request.get_json()
    colName = param['runParams'][0]
    pg_connection = connect_pg()
    tableName = param['tableName']
    sql = f"select \"{colName}\" from {tableName}"
    databaseData = pd.read_sql(sql, con=pg_connection)
    old_data = databaseData[colName].values.tolist()
    # Forward-fill missing values
    databaseData[colName].fillna(method='ffill', inplace=True)
    res = {
        "old_data": old_data,
        "new_data": databaseData[colName].values.tolist()
    }
    print(res)
    return json.dumps(res)
# Accepts: a POST request with parameters. Returns: the column with missing values replaced by the median.
@app.route("/medianReplacement", methods=['POST'])
def medianReplacement():
    # Connect to the database
    param = request.get_json()
    colName = param['runParams'][0]
    pg_connection = connect_pg()
    tableName = param['tableName']
    sql = f"select \"{colName}\" from {tableName}"
    databaseData = pd.read_sql(sql, con=pg_connection)
    old_data = databaseData[colName].values.tolist()
    # Median of the column
    median_value = databaseData[colName].median()
    # Fill missing values with the median
    databaseData[colName].fillna(median_value, inplace=True)
    # Convert the filled data to strings (if needed)
    databaseData[colName] = databaseData[colName].astype(str)
    res = {
        "old_data": old_data,
        "new_data": databaseData[colName].values.tolist()
    }
    print(res)
    return json.dumps(res)
# Accepts: a POST request with parameters. Returns: missing values predicted and
# replaced using a linear regression model fitted on the rest of the data.
@app.route("/regressionAnalysisReplacement", methods=['POST'])
def regressionAnalysisReplacement():
    # Connect to the database
    param = request.get_json()
    colName = param['runParams'][0]
    pg_connection = connect_pg()
    tableName = param['tableName']
    sql = f"select \"{colName}\" from {tableName}"
    databaseData = pd.read_sql(sql, con=pg_connection)
    old_data = databaseData[colName].values.tolist()
    # Keep only the rows where the column is not null
    non_null_data = databaseData[databaseData[colName].notnull()]
    # Features (row index) and labels (observed values)
    X = non_null_data.index.values.reshape(-1, 1)
    y = non_null_data[colName].values.reshape(-1, 1)
    # Fit a linear regression model
    regression_model = LinearRegression()
    regression_model.fit(X, y)
    # Predict over every row index
    X_predict = databaseData.index.values.reshape(-1, 1)
    predicted_values = regression_model.predict(X_predict)
    predicted_values_rounded = np.round(predicted_values, decimals=2)
    # Fill the missing entries with the predictions
    databaseData[colName].fillna(pd.Series(predicted_values_rounded.flatten(), index=databaseData.index), inplace=True)
    # Convert the filled data to strings
    databaseData[colName] = databaseData[colName].astype(str)
    # Build the response from the filled column
    new_data = databaseData[colName].tolist()
    res = {
        "old_data": old_data,
        "new_data": new_data
    }
    print(res)
    return json.dumps(res)
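# A minimal sketch of the imputation above on a toy series: fit y = a * index + b
# on the observed rows, then predict the missing positions (values assumed numeric).
def _example_regression_imputation():
    s = pd.Series([2.0, 4.0, np.nan, 8.0])
    observed = s.dropna()
    model = LinearRegression()
    model.fit(observed.index.values.reshape(-1, 1), observed.values)
    predicted = model.predict(s.index.values.reshape(-1, 1))
    print(s.fillna(pd.Series(predicted, index=s.index)).tolist())  # [2.0, 4.0, 6.0, 8.0]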
# Accepts: a POST request with parameters. Returns: the column's missing values
# filled across the whole column rather than only specific rows.
@app.route("/eucarFilling", methods=['POST'])
def eucarFilling():
    # Connect to the database
    param = request.get_json()
    colName = param['runParams'][0]
    pg_connection = connect_pg()
    tableName = param['tableName']
    sql = f"select * from {tableName}"
    databaseData = pd.read_sql(sql, con=pg_connection)
    old_data = databaseData[colName].values.tolist()
    # Median of the column
    median_value = databaseData[colName].median()
    # Fill missing values with the median
    databaseData[colName].fillna(median_value, inplace=True)
    # Convert the filled data to strings (if needed)
    databaseData[colName] = databaseData[colName].astype(str)
    res = {
        "old_data": old_data,
        "new_data": databaseData[colName].values.tolist()
    }
    print(res)
    return json.dumps(res)


#################################################################################### 2 Data processing: fields and features
def per_data(data, disease_code):
    # 1 Column names (taken directly from the frame)
    params = data.columns
    print(params)
    # 2 Filter columns (drop the fields we do not need)
    params = [col for col in params if col not in ['Case_ID', 'BUN', 'M1_M2', 'TH2', 'IBILI', 'GLO']]
    features = data[params]
    features = features.fillna(0)
    print(data)
    print(features)
    # 3 Select samples (first 150 rows as train, the rest as test)
    train = features.iloc[:150]
    test = features.iloc[150:]
    return train, test


def per_data_pulic():
    model_name = "慢性非阻塞_xgb"
    algorithm_code = 1
    table_name = "merge_copy2"
    disease_code = "慢性阻塞性肺病"
    connection_pulic = psycopg2.connect(database="medical",
                                        user="pg",
                                        password="111111",
                                        host="10.16.48.219",
                                        port="5432")
    data = get_data(connection_pulic, table_name)
    data.fillna(0, inplace=True)

    def try_convert_to_float(value):
        if isinstance(value, datetime.date):
            return value
        try:
            return float(value)
        except ValueError:
            return value

    data = data.applymap(try_convert_to_float)
    # Keep the numeric columns plus the diagnosis column needed for the label
    numeric_cols = data.select_dtypes(include=['int', 'float']).assign(diagname="diagname").columns.tolist()
    print(numeric_cols)
    numeric_cols_data = data.loc[:, numeric_cols]
    # Binary label: 1 if the diagnosis matches the disease code, else 0
    numeric_cols_data['label'] = numeric_cols_data['diagname'].apply(lambda x: 1 if x == disease_code else 0)
    numeric_cols_data['label'] = numeric_cols_data['label'].astype(int)
    numeric_cols_data.drop(columns=['diagname'], inplace=True)
    print(numeric_cols_data)
    train = numeric_cols_data.iloc[:200]
    test = numeric_cols_data.iloc[-200:]
    return train, test
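

# A minimal sketch of the labelling step above (assumption: "糖尿病" is just a
# placeholder for any non-matching diagnosis). Defined but not called.
def _example_label_construction():
    df = pd.DataFrame({"AGE": [60.0, 72.0], "diagname": ["慢性阻塞性肺病", "糖尿病"]})
    disease_code = "慢性阻塞性肺病"
    df["label"] = df["diagname"].apply(lambda x: 1 if x == disease_code else 0)
    print(df["label"].tolist())  # [1, 0]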


#################################################################################### 3 Model training
# xgb
def train_xgb(train, test, model_name):
    params = {
        "booster": 'gbtree',
        'objective': 'binary:logistic',
        'eval_metric': 'auc',
        'silent': 0,               # silent mode off
        'eta': 0.01,               # learning rate (typical range 0.01-0.2)
        'max_depth': 5,            # tree depth (typical range 3-10, default 6)
        'min_child_weight': 1,
        'gamma': 0,
        'lambda': 1,
        'colsample_bylevel': 0.7,  # works like subsample, per tree level
        'colsample_bytree': 0.7,   # (0.5-1)
        'subsample': 0.9,          # (0.5-1)
        'scale_pos_weight': 1,     # helps the algorithm converge faster
    }
    dtrain = xgb.DMatrix(train.drop(['label'], axis=1), label=train['label'])
    dtest = xgb.DMatrix(test.drop(['label'], axis=1))
    watchlist = [(dtrain, 'train')]
    model = xgb.train(params, dtrain, 200, watchlist)
    predict = model.predict(dtest)
    predict = pd.DataFrame(predict, columns=['target'])
    # Connect to the PostgreSQL database and persist the pickled model
    connection = connect_pg()
    cursor = connection.cursor()
    model_type = 1
    model_description = 'xgboost_model'
    trainer = 'Gpb'
    model_str = pickle.dumps(model)
    # Build the SQL INSERT statement
    query = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    cursor.execute(query, (model_type, model_name, model_description, trainer, model_str))
    connection.commit()
    # Close the database connection
    cursor.close()
    connection.close()
    return 0
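

# A minimal sketch of the persistence pattern shared by the train_* functions:
# pickle.dumps produces bytes, which psycopg2 adapts to the bytea column
# "training_parameters"; /interface5 below restores the model with pickle.loads.
# Defined but not called.
def _example_model_roundtrip():
    model_bytes = pickle.dumps({"demo": "model"})  # stand-in for a fitted model object
    restored = pickle.loads(model_bytes)           # mirrors what test_model() does after its SELECT
    print(restored)  # {'demo': 'model'}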


# gbdt
def train_gbdt(train, test, model_name):
    print("%%%% training model_gbdt %%%%")
    gbdt_model = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1, max_depth=3, random_state=42)
    X_train = train.drop(['label'], axis=1)
    x_test = test.drop(['label'], axis=1)
    y_train = train['label']
    gbdt_model.fit(X_train, y_train)
    y_pred = gbdt_model.predict(x_test)
    y_pred = pd.DataFrame(y_pred, columns=['prob'])
    # Connect to the PostgreSQL database and persist the pickled model
    connection = connect_pg()
    cursor = connection.cursor()
    model_type = 2
    model_description = 'gbdt_model'
    trainer = 'Gpb'
    model_str = pickle.dumps(gbdt_model)
    # Build the SQL INSERT statement
    query = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    cursor.execute(query, (model_type, model_name, model_description, trainer, model_str))
    connection.commit()
    # Close the database connection
    cursor.close()
    connection.close()
    return 0


# lr
def train_lr(train, test, model_name):
    print("%%%% training model_lr %%%%")
    lr_model = LogisticRegression(random_state=42)
    X_train = train.drop(['label'], axis=1)
    x_test = test.drop(['label'], axis=1)
    y_train = train['label']
    lr_model.fit(X_train, y_train)
    y_pred = lr_model.predict(x_test)
    y_pred = pd.DataFrame(y_pred, columns=['prob'])
    # Connect to the PostgreSQL database and persist the pickled model
    connection = connect_pg()
    cursor = connection.cursor()
    model_type = 3
    model_description = 'lr_model'
    trainer = 'Gpb'
    model_str = pickle.dumps(lr_model)
    # Build the SQL INSERT statement
    query = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    cursor.execute(query, (model_type, model_name, model_description, trainer, model_str))
    connection.commit()
    # Close the database connection
    cursor.close()
    connection.close()
    return 0


# svm
def train_svm(train, test, model_name):
    print("%%%% training model_svm %%%%")
    svm_model = LinearSVC(C=1.0, random_state=42)
    X_train = train.drop(['label'], axis=1)
    x_test = test.drop(['label'], axis=1)
    y_train = train['label']
    svm_model.fit(X_train, y_train)
    y_pred = svm_model.predict(x_test)
    y_pred = pd.DataFrame(y_pred, columns=['prob'])
    # Connect to the PostgreSQL database and persist the pickled model
    connection = connect_pg()
    cursor = connection.cursor()
    model_type = 4
    model_description = 'svm_model'
    trainer = 'Gpb'
    model_str = pickle.dumps(svm_model)
    # Build the SQL INSERT statement
    query = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    cursor.execute(query, (model_type, model_name, model_description, trainer, model_str))
    connection.commit()
    # Close the database connection
    cursor.close()
    connection.close()
    return 0


# rf
def train_rf(train, test, model_name):
    print("%%%% training model_rf %%%%")
    rf_model = RandomForestClassifier(n_estimators=100, random_state=42)
    X_train = train.drop(['label'], axis=1)
    x_test = test.drop(['label'], axis=1)
    y_train = train['label']
    rf_model.fit(X_train, y_train)
    y_pred = rf_model.predict(x_test)
    y_pred = pd.DataFrame(y_pred, columns=['prob'])
    # Connect to the PostgreSQL database and persist the pickled model
    connection = connect_pg()
    cursor = connection.cursor()
    model_type = 5
    model_description = 'rf_model'
    trainer = 'Gpb'
    model_str = pickle.dumps(rf_model)
    # Build the SQL INSERT statement
    query = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    cursor.execute(query, (model_type, model_name, model_description, trainer, model_str))
    connection.commit()
    # Close the database connection
    cursor.close()
    connection.close()
    return 0


# cart
def train_cart(train, test, model_name):
    print("%%%% training model_cart %%%%")
    cart_model = DecisionTreeClassifier(criterion='gini', max_depth=5, random_state=42)
    X_train = train.drop(['label'], axis=1)
    x_test = test.drop(['label'], axis=1)
    y_train = train['label']
    cart_model.fit(X_train, y_train)
    y_pred = cart_model.predict(x_test)
    y_pred = pd.DataFrame(y_pred, columns=['prob'])
    # Connect to the PostgreSQL database and persist the pickled model
    connection = connect_pg()
    cursor = connection.cursor()
    model_type = 6
    model_description = 'cart_model'
    trainer = 'Gpb'
    model_str = pickle.dumps(cart_model)
    # Build the SQL INSERT statement
    query = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    cursor.execute(query, (model_type, model_name, model_description, trainer, model_str))
    connection.commit()
    # Close the database connection
    cursor.close()
    connection.close()
    return 1


#################################################################################### Interface 2: model training
# Request from the front end, with four parameters (model_name, algorithm_code, table_name, disease_code):
# {
#             "model_name": "慢性非阻塞_cart",
#             "algorithm_code": 6,
#             "table_name": "Diabetes",
#             "disease_code": "慢性阻塞性肺病"
# }
# Response from the algorithm to the front end:
# {
#     "success": true,
#     "message": "请求成功",
#     "data": [
#         {
#             "id": 1 // the trained model is stored in the database; its id is returned
#         }
#     ]
# }

#   http://127.0.0.1:5000/interface2
# Model training
@app.route("/interface2", methods=["POST"])
def train_model():
    model_name = request.form.get("model_name")
    algorithm_code = request.form.get("algorithm_code")
    table_name = request.form.get("table_name")
    disease_code = request.form.get("disease_code")

    connection = connect_pg()
    data = get_data(connection, table_name)
    train, test = per_data_pulic()
    if algorithm_code == 1:
        predict = train_xgb(train, test, model_name)
        print(predict)
        print('train_test_xgb')
    elif algorithm_code == 2:
        predict = train_gbdt(train, test, model_name)
        print(predict)
        print('train_test_gbdt')
    elif algorithm_code == 3:
        predict = train_lr(train, test, model_name)
        print(predict)
        print('train_test_lr')
    elif algorithm_code == 4:
        predict = train_svm(train, test, model_name)
        print(predict)
        print('train_test_svm')
    elif algorithm_code == 5:
        predict = train_rf(train, test, model_name)
        print(predict)
        print('train_test_rf')
    elif algorithm_code == 6:
        predict = train_cart(train, test, model_name)
        print(predict)
        print('train_test_cart')
    else:
        return 0
    return predict
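

# A minimal client sketch for /interface2 (assumptions: server on localhost:5000
# and the "requests" package installed; the payload mirrors the sample above).
def _example_interface2_client():
    import requests
    resp = requests.post("http://localhost:5000/interface2", data={
        "model_name": "慢性非阻塞_cart",
        "algorithm_code": 6,  # sent as a form field; the route casts it back to int
        "table_name": "Diabetes",
        "disease_code": "慢性阻塞性肺病",
    })
    print(resp.text)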


def train_model_cs(model_name, algorithm_code, table_name, disease_code):
    connection = connect_pg()
    data = get_data(connection, table_name)
    train, test = per_data_pulic()
    if algorithm_code == 1:
        predict = train_xgb(train, test, model_name)
        print(predict)
        print('train_test_xgb')
    elif algorithm_code == 2:
        predict = train_gbdt(train, test, model_name)
        print(predict)
        print('train_test_gbdt')
    elif algorithm_code == 3:
        predict = train_lr(train, test, model_name)
        print(predict)
        print('train_test_lr')
    elif algorithm_code == 4:
        predict = train_svm(train, test, model_name)
        print(predict)
        print('train_test_svm')
    elif algorithm_code == 5:
        predict = train_rf(train, test, model_name)
        print(predict)
        print('train_test_rf')
    elif algorithm_code == 6:
        predict = train_cart(train, test, model_name)
        print(predict)
        print('train_test_cart')
    else:
        return 0
    return predict


# Training example (model_name, algorithm_code, table_name, disease_code):
# model_name = "慢性非阻塞_6"
# algorithm_code = 6
# table_name = "Diabetes"
# disease_code = "慢性阻塞性肺病"
# predict = train_model_cs(model_name, algorithm_code, table_name, disease_code)


#################################################################################### Interface 5: choose a prediction id
# Request from the front end (triggered by a click):
# {
#             "id": "121",
#             "id": "122",
#             "id": "123",
#             "id": "124"
# }
# Response from the algorithm to the front end:
# {
#     "success": true,
#     "message": "请求成功",
#     "data": [
#         {
#             "id": "121",
#             "id": "122",
#             "id": "123",
#             "id": "124"   // the ids are returned again, now carrying prediction results
#         }
#     ]
# }
#

# Model prediction
@app.route("/interface5", methods=["POST"])
def test_model():
    disease_code = request.form.get("disease_code")
    id = request.form.get("id")
    test_id = request.form.get("test_id")

    print("predicting with stored model:")
    connection = connect_pg()
    data = get_data(connection, "Diabetes")
    train, test = per_data_pulic()
    connection = connect_pg()
    cursor = connection.cursor()
    X_train = train.drop(['label'], axis=1)
    x_test = test.drop(['label'], axis=1)
    y_train = train['label']
    # Retrieve the pickled model from the database
    query = "SELECT training_parameters FROM train_model3 WHERE id = %s"
    cursor.execute(query, (id,))
    model_str = cursor.fetchone()[0]
    # Retrieve the model type (1 = xgboost, which needs a DMatrix)
    query = "SELECT type FROM train_model3 WHERE id = %s"
    cursor.execute(query, (id,))
    model_type = cursor.fetchone()[0]
    loaded_model = pickle.loads(model_str)
    if model_type == 1:
        dtest = xgb.DMatrix(x_test)
        predictions = loaded_model.predict(dtest)
    else:
        predictions = loaded_model.predict(x_test)
    # A NumPy array is not a valid Flask response, so serialize it first
    return json.dumps(predictions.tolist())
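

# A minimal client sketch for /interface5 (assumptions: server on localhost:5000,
# "requests" installed, and a model with id 1 exists in train_model3).
def _example_interface5_client():
    import requests
    resp = requests.post("http://localhost:5000/interface5",
                         data={"disease_code": "慢性阻塞性肺病", "id": "1", "test_id": "1"})
    print(resp.text)  # JSON list of per-row predictions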


# Model prediction (callable directly, without going through Flask)
def test_model_cs(disease_code, id, test_id):
    print("predicting with stored model:")
    print(id)
    connection = connect_pg()
    data = get_data(connection, "Diabetes")
    train, test = per_data_pulic()
    connection = connect_pg()
    cursor = connection.cursor()
    X_train = train.drop(['label'], axis=1)
    x_test = test.drop(['label'], axis=1)
    y_train = train['label']
    # Retrieve the pickled model from the database
    query = "SELECT training_parameters FROM train_model3 WHERE id = %s"
    cursor.execute(query, (id,))
    model_str = cursor.fetchone()[0]
    # Retrieve the model type (1 = xgboost, which needs a DMatrix)
    query = "SELECT type FROM train_model3 WHERE id = %s"
    cursor.execute(query, (id,))
    model_type = cursor.fetchone()[0]
    loaded_model = pickle.loads(model_str)
    if model_type == 1:
        dtest = xgb.DMatrix(x_test)
        predictions = loaded_model.predict(dtest)
    else:
        predictions = loaded_model.predict(x_test)
    return predictions


if __name__ == '__main__':
    server = pywsgi.WSGIServer(('0.0.0.0', 5000), app)
    server.serve_forever()
