# Mathematical-modelling example: linear programming with scipy.optimize.linprog
'''
max: z = 4x1 + 3x2
st: 2x1 + 3x2<=10
x1 + x2 <=8
x2 <= 7
x1,x2 > 0
'''
from scipy.optimize import linprog

# linprog minimizes; negate the objective coefficients to solve the
# maximization problem (the optimum of z is then -res.fun).
c = [-4, -3]

# Inequality constraints in the form A_ub @ x <= b_ub.
# The x2 <= 7 limit is expressed through the variable bounds instead.
A = [[2, 3],
     [1, 1]]
b = [10, 8]

# Per-variable (lower, upper) bounds; None means unbounded above.
x1_bounds = (0, None)
x2_bounds = (0, 7)

# A_eq/b_eq would hold equality constraints; this model has none.
res = linprog(c, A_ub=A, b_ub=b, bounds=(x1_bounds, x2_bounds))
print(res)
# --- 1. Ordinary least-squares linear regression ---
# Placeholder dataset (replace with real features/targets).
# The original had x = [] / y = [], which cannot be split or fitted.
x = [[1], [2], [3], [4], [5], [6], [7], [8]]
y = [2, 4, 6, 8, 10, 12, 14, 16]
from sklearn.model_selection import train_test_split
# BUG FIX: train_test_split must be given the arrays to split; calling it
# with only test_size raises a TypeError. random_state makes the split
# reproducible between runs.
X_train, X_test, y_train, y_test = train_test_split(
    x, y, test_size=0.25, random_state=0)
from sklearn.linear_model import LinearRegression
IR = LinearRegression()
IR.fit(X_train, y_train)  # fit the OLS model on the training split
# --- 2. Linear regression trained by stochastic gradient descent ---
from sklearn.linear_model import SGDRegressor
# Default hyperparameters: alpha=0.0001, penalty='l2', eta0=0.01,
# learning_rate='invscaling', squared-error loss.
model = SGDRegressor()
model.fit(X_train, y_train)  # fit the model
# NOTE: the original restated the estimator's repr as a bare, discarded
# expression (dead code). It also used parameters removed from modern
# scikit-learn (n_iter -> max_iter, loss='squared_loss' ->
# 'squared_error'), so it would raise TypeError there; removed.
sgdr_y_predict = model.predict(X_test)  # predict on the held-out split
# --- 3. Ridge regression (L2-regularised linear regression) ---
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.linear_model import Ridge, RidgeCV  # RidgeCV: ridge with built-in (generalised) cross-validation
model = Ridge(alpha=0.5)
# model = RidgeCV(alphas=[0.1, 1.0, 10.0])  # RidgeCV selects the best alpha via cross-validation
model.fit(X_train, y_train)  # fit the linear model
# print('best alpha from CV', model.alpha_)  # alpha_ exists only when using RidgeCV
# Use the fitted model for prediction.
predicted = model.predict(X_test)
# NOTE: the original ended with a bare, discarded RidgeCV(...) repr
# expression (dead code); it also used normalize=, removed from RidgeCV
# in scikit-learn 1.2, so it would raise TypeError there. Removed.