# Python notes -- week 7

# --- Load the Boston housing dataset and wrap it in a DataFrame ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from sklearn import datasets

# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed in
# 1.2, so this line requires scikit-learn < 1.2 to run.
boston = datasets.load_boston()

boston.data  # the raw feature matrix -- bare expression, notebook-style echo

# Feature matrix as a labeled DataFrame.
# (The duplicate `import pandas as pd` from the original notes was removed;
#  pandas is already imported above.)
boston_df = pd.DataFrame(boston.data, columns=boston.feature_names)
boston_df.head(20)  # notebook echo of the first 20 rows

# --- Digits dataset: small grayscale images of handwritten digits ---
# (Duplicate `import matplotlib.pyplot as plt` removed; it is already
#  imported at the top of the file.)
digits = datasets.load_digits()
digits.images[100]               # one image as a 2-D numpy array (notebook echo)
plt.matshow(digits.images[1208]) # render image #1208 as a matrix plot

# --- Standardization (z-score scaling) via the estimator API ---
from sklearn import preprocessing
std = preprocessing.StandardScaler()
std  # notebook echo of the (still unfitted) scaler

from sklearn import linear_model  # used later for LinearRegression

std.get_params()        # current hyper-parameters of the scaler
std.fit(boston.data)    # learn per-column mean_ and scale_ from the data

# Apply the learned transform: (x - mean_) / scale_, column by column.
ZX = std.transform(boston.data)
ZX[:2]  # first two standardized rows (notebook echo)

# ========= Second class session =========

# --- Summary statistics, then the functional (one-shot) scaling API ---
des = boston_df.describe()   # count/mean/std/min/quartiles/max per column
boston_df.describe()         # same call echoed again (notebook transcript)

from sklearn import preprocessing
# Column-wise z-score scaling; the result is a plain numpy array.
boston_scaled = preprocessing.scale(boston_df)

boston_scaled
boston_scaled.mean(axis = 0)  # ~0 for every column after scaling
boston_scaled.std(axis = 0)   # ~1 for every column after scaling

# --- Scaling along rows instead of columns ---
# (The duplicate `from sklearn import preprocessing` and the unused
#  `boston_scale` intermediate from the original notes were removed.)

# axis=1 standardizes each ROW (sample) rather than each column (feature);
# rarely what you want for a feature matrix -- shown here for contrast.
boston_scaled_all = preprocessing.scale(boston_df, axis=1)

boston_scaled_all.mean(axis = 0)  # column means are NOT ~0 here
boston_scaled_all.std(axis = 0)   # column stds are NOT ~1 here
boston_scaled_all, boston_scaled_all.std()

# z-score the 1-D target vector (house prices) as well
preprocessing.scale(boston.target)

# Estimator version: fit() learns the statistics, transform() applies them,
# so the same scaling can later be reused on new data.
std = preprocessing.StandardScaler()
std.fit(boston_df)
std.mean_,std.scale_  # learned per-column mean and scale (notebook echo)

std.transform(boston_df)

# A fitted scaler can transform any subset, e.g. just the first 3 rows.
std.transform(boston_df[:3])
#===========
# Signatures copied from the scikit-learn docs, kept as comments. The
# original notes pasted them as bare `class ...:` statements, which is a
# SyntaxError in a plain .py file:
#
#   class sklearn.preprocessing.MinMaxScaler(feature_range=(0, 1), copy=True)
#   class sklearn.preprocessing.MaxAbsScaler(copy=True)
#===========

# MinMaxScaler: rescale each feature linearly into the given range.
scaler = preprocessing.MinMaxScaler((1,10))
scaler.fit_transform(boston_df)

# MaxAbsScaler divides each feature by its maximum absolute value; it takes
# NO feature_range argument. The original notes passed (1,10) positionally,
# which was silently swallowed as `copy` by old scikit-learn and raises a
# TypeError in newer versions -- removed.
scaler_1 = preprocessing.MaxAbsScaler()
scaler_1.fit_transform(boston_df)

help(preprocessing.MaxAbsScaler)

x = [[-1,-1,2]]
# norm='l2' divides each row by its Euclidean norm; return_norm=True also
# returns those norms. The original used curly typographic quotes around
# l2 (a SyntaxError) -- replaced with plain ASCII quotes.
x_normalized = preprocessing.normalize(x, norm='l2', return_norm=True)
x_normalized

# Hand check of the first entry: -1 / sqrt(1 + 1 + 4) = -1 / 2.449...
-1/2.44948974

# --- RobustScaler: center on the median, scale by the IQR (outlier-robust) ---
preprocessing.robust_scale(boston_df)  # functional one-shot version

rscaler = preprocessing.RobustScaler()
rs = rscaler.fit_transform(boston_df)
rs

# Equivalent two-step form: fit() returns the scaler itself, so it chains.
rscaler_1 = preprocessing.RobustScaler()
rs_1 = rscaler_1.fit(boston_df)
rs_1_trans = rs_1.transform(boston_df)
rs_1_trans

np.median(rs,axis = 0)  # ~0 per column: the medians were subtracted
rs.mean(axis = 0)       # means are NOT exactly 0 (robust scaling ignores the mean)
rs.std(axis = 1)        # NOTE(review): axis=1 is per-row; axis=0 was likely meant

# Mean-imputation of missing values.
# NOTE: preprocessing.Imputer was deprecated in scikit-learn 0.20 and removed
# in 0.22; impute.SimpleImputer (same default strategy='mean') replaces it.
from sklearn import impute
imp = impute.SimpleImputer()
imp.fit([[1,2],[np.nan,3],[7,6]])
imp.statistics_  # per-column means of the observed values: [4.0, 3.666...]

# Each NaN is replaced by the corresponding column mean.
imp.transform([[np.nan,2],[6,np.nan]])

# Same imputation via the current sklearn.impute module.
from sklearn import impute
imp_1 = impute.SimpleImputer()
imp_1.fit([[1,2],[np.nan,3],[7,6]])
imp_1.statistics_  # per-column means of the observed (non-NaN) values

imp_1.transform([[np.nan,2],[6,np.nan]])  # NaN -> column mean

# --- Polynomial / interaction features ---
# interaction_only=True: products of DISTINCT columns only, no squares.
poly = preprocessing.PolynomialFeatures(interaction_only = True)
poly_res = poly.fit_transform(boston_df.iloc[:,[0,1,2,3]])
poly_res[:1]  # bias term, the 4 original columns, and their pairwise products

# Same expansion on all columns (far more interaction terms).
poly = preprocessing.PolynomialFeatures(interaction_only = True)
poly_res = poly.fit_transform(boston_df)
poly_res[:1]

# FunctionTransformer wraps an arbitrary function as a stateless transformer.
trans = preprocessing.FunctionTransformer(np.sqrt)
x = np.array([[1,2],[3,4]])
trans.transform(x)  # element-wise sqrt; nothing is learned, so no fit needed

# --- Feature selection: drop zero-variance (constant) columns ---
from sklearn import feature_selection
x = [[0,2,0,3],
[0,1,4,3],
[0,1,1,3]]
selec = feature_selection.VarianceThreshold()  # default threshold = 0.0
selec.fit(x)
selec.variances_  # per-column variance; columns 0 and 3 are constant (0.0)

selec.transform(x)  # keeps only the columns whose variance exceeds the threshold

# --- Univariate selection: keep the k best features by F-test score ---
# (The duplicate `from sklearn import feature_selection` from the original
#  notes was removed; the module is imported earlier in the file.)
sele = feature_selection.SelectKBest(feature_selection.f_regression, k=2)
sele.fit_transform(boston.data, boston.target)  # reduced to 2 columns

sele.pvalues_  # F-test p-value per original feature
sele.scores_   # F statistic per original feature

# SelectFpr keeps features whose F-test p-value is below alpha
# (false-positive-rate control). Note: 10e-10 == 1e-9 as written.
sel_fpr = feature_selection.SelectFpr(feature_selection.f_regression, alpha=10e-10).fit(boston.data, boston.target)
sel_fpr.pvalues_

# GenericUnivariateSelect is one entry point for the k_best / percentile /
# fpr / fdr / fwe strategies. The original used curly typographic quotes
# around fpr (a SyntaxError) -- replaced with plain ASCII quotes.
general = feature_selection.GenericUnivariateSelect(feature_selection.f_regression, mode='fpr', param=10e-10).fit(boston.data, boston.target)
general.pvalues_

general.transform(boston.data)[:1]  # first row, selected features only

# --- Model-based selection: keep features with large linear-model weights ---
# (Duplicate imports of linear_model and feature_selection removed; both
#  modules are already imported earlier in the file.)
reg = linear_model.LinearRegression()
reg.fit(boston.data, boston.target)
reg.coef_  # one fitted weight per feature (notebook echo)

# SelectFromModel keeps features whose |coef_| exceeds the threshold.
sfm = feature_selection.SelectFromModel(reg, threshold=0.1)
sfm.fit(boston.data, boston.target)

sfm.transform(boston.data)[:1]  # first row reduced to the selected features

# (Removed: scraped CSDN page footer -- like/favorite counters and a
#  payment-widget dialog that were captured along with the notes; they are
#  not part of the course material.)