本文整理汇总了Python中sklearn.preprocessing.scale方法的典型用法代码示例。如果您正苦于以下问题:Python preprocessing.scale方法的具体用法?Python preprocessing.scale怎么用?Python preprocessing.scale使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块sklearn.preprocessing的用法示例。
在下文中一共展示了preprocessing.scale方法的27个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: violin_jitter
点赞 6
# 需要导入模块: from sklearn import preprocessing [as 别名]
# 或者: from sklearn.preprocessing import scale [as 别名]
def violin_jitter(X, genes, gene, labels, focus, background=None,
                  xlabels=None):
    """Save a violin-plus-jitter plot of one gene's expression.

    Compares the expression values of ``gene`` between cells whose label
    equals ``focus`` and a background group (everything else by default,
    or the cells labelled ``background`` when given).  The figure is
    written to ``'<NAMESPACE>_violin_<gene>.png'``.
    """
    col = list(genes).index(gene)

    focus_mask = focus == labels
    if background is None:
        background_mask = focus != labels
    else:
        background_mask = background == labels

    if xlabels is None:
        xlabels = [ 'Background', 'Focus' ]

    # assumes X is a sparse matrix (has .toarray()) — TODO confirm
    values = X[:, col].toarray().flatten()
    groups = [ values[focus_mask], values[background_mask] ]

    plt.figure()
    sns.violinplot(data=groups, scale='width', cut=0)
    sns.stripplot(data=groups, jitter=True, color='black', size=1)
    # NOTE(review): data order is [focus, background] while the default
    # xlabels are ['Background', 'Focus'] — confirm the intended mapping.
    plt.xticks([0, 1], xlabels)
    plt.savefig('{}_violin_{}.png'.format(NAMESPACE, gene))
开发者ID:brianhie,项目名称:geosketch,代码行数:24,
示例2: train_FFM_model_demo
点赞 6
# 需要导入模块: from sklearn import preprocessing [as 别名]
# 或者: from sklearn.preprocessing import scale [as 别名]
def train_FFM_model_demo():
    """Train and evaluate a field-aware factorization machine (FFM).

    Loads the dataset, standardizes the features, builds an ``FFM_layer``
    sized to the number of classes present in the data, then runs
    ``EPOCHS`` rounds of training/evaluation with Adam.
    """
    # Step 1: load the data.
    x_train, y_train, x_test, y_test, feature2field = load_dataset()

    # Fit the scaler on the training split only, then apply it to the test
    # split.  The original code called `preprocessing.scale` on each split
    # independently, which standardizes the test set with its own
    # statistics — that leaks test-set information and makes train/test
    # features inconsistent.
    scaler = preprocessing.StandardScaler(with_mean=True, with_std=True)
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # Number of distinct labels across both splits.
    class_num = len(set(y_train) | set(y_test))

    # Build the FFM model.
    ffm = FFM_layer(field_map_dict=feature2field, fea_num=x_train.shape[1],
                    reg_l1=0.01, reg_l2=0.01,
                    class_num=class_num, latent_factor_dim=10).to(DEVICE)

    # Optimizer (the loss is handled inside train/test helpers).
    optm = torch.optim.Adam(ffm.parameters())

    train_loader = get_batch_loader(x_train, y_train, BATCH_SIZE, shuffle=True)
    test_loader = get_batch_loader(x_test, y_test, BATCH_SIZE, shuffle=False)

    for epoch in range(1, EPOCHS + 1):
        train(ffm, DEVICE, train_loader, optm, epoch)
        test(ffm, DEVICE, test_loader)
开发者ID:JianzhouZhan,项目名称:Awesome-RecSystem-Models,代码行数:23,
示例3: train_FM_model_demo
点赞 6
# 需要导入模块: from sklearn import preprocessing [as 别名]
# 或者: from sklearn.preprocessing import scale [as 别名]
def train_FM_model_demo():
    """Train and evaluate a factorization machine (FM).

    Loads the dataset, standardizes the features, builds an ``FM_layer``
    sized to the number of classes present in the data, then runs
    ``EPOCHS`` rounds of training/evaluation with Adam.
    """
    # Step 1: load the data.
    x_train, y_train, x_test, y_test = load_dataset()

    # Fit the scaler on the training split only, then apply it to the test
    # split.  The original code called `preprocessing.scale` on each split
    # independently, which standardizes the test set with its own
    # statistics — that leaks test-set information and makes train/test
    # features inconsistent.
    scaler = preprocessing.StandardScaler(with_mean=True, with_std=True)
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # Number of distinct labels across both splits.
    class_num = len(set(y_train) | set(y_test))

    # Build the FM model.
    fm = FM_layer(class_num=class_num, feature_num=x_train.shape[1],
                  latent_factor_dim=40).to(DEVICE)

    # Optimizer (the loss is handled inside train/test helpers).
    optm = torch.optim.Adam(fm.parameters())

    train_loader = get_batch_loader(x_train, y_train, BATCH_SIZE, shuffle=True)
    test_loader = get_batch_loader(x_test, y_test, BATCH_SIZE, shuffle=False)

    for epoch in range(1, EPOCHS + 1):
        train(fm, DEVICE, train_loader, optm, epoch)
        test(fm, DEVICE, test_loader)
开发者ID:JianzhouZhan,项目名称:Awesome-RecSystem-Models,代码行数:22,
示例4: test_elastic_net_versus_sgd
点赞 6
# 需要导入模块: from sklearn import preprocessing [as 别名]
# 或者: from sklearn.preprocessing import scale [as 别名]
def test_elastic_net_versus_sgd(C, l1_ratio):
    """Check LogisticRegression(saga) and SGD(loss='log') agree under
    the elastic-net penalty: both are fit on the same scaled synthetic
    data and their coefficients are compared to one decimal place."""
    n_samples = 500
    X, y = make_classification(n_samples=n_samples, n_classes=2, n_features=5,
                               n_informative=5, n_redundant=0, n_repeated=0,
                               random_state=1)
    X = scale(X)

    # alpha = 1 / (C * n_samples) makes the two regularization
    # parameterizations comparable; tol=-np.inf forces SGD to use all
    # max_iter epochs.
    sgd = SGDClassifier(penalty='elasticnet', random_state=1,
                        fit_intercept=False, tol=-np.inf, max_iter=2000,
                        l1_ratio=l1_ratio, alpha=1. / C / n_samples,
                        loss='log')
    log = LogisticRegression(penalty='elasticnet', random_state=1,
                             fit_intercept=False, tol=1e-5, max_iter=1000,
                             l1_ratio=l1_ratio, C=C, solver='saga')

    for clf in (sgd, log):
        clf.fit(X, y)

    assert_array_almost_equal(sgd.coef_, log.coef_, decimal=1)
开发者ID:PacktPublishing,项目名称:Mastering-Elasticsearch-7.0,代码行数:20,
示例5: run_pca
点赞 6
# 需要导入模块: from sklearn import preprocessing [as 别名]
# 或者: from sklearn.preprocessing import scale [as 别名]
def run_pca(self, whiten=True):
# Normalize
for_pca_df = self.features_df.T
for_pca_df_scaled = pd.DataFrame(preprocessing.scale(for_pca_df), columns=for_pca_df.columns)
# Run PCA
self.num_components = min(len(for_pca_df.T.columns), len(for_pca_df.T.index))
pca = PCA(n_components=self.num_components, whiten=whiten)
pca_fit = pca.fit_transform(for_pca_df_scaled)
self.pc_names_list = ['PC{} ({:.0%})'.format(x + 1, pca.explained_variance_ratio_[x]) for x in
range(self.num_components)]
self.pc_names_dict = {k.split(' ')[0]: k for k in self.pc_names_list}
principal_df = pd.DataFrame(data=pca_fit, columns=self.pc_names_list, index=for_pca_df.index)
principal_df.index.name = 'strain'
self.principal_df = principal_df
self.pca = pca
# self.principal_observations_d