# 正式决定当程序员的第十七天~二十五天

正式决定当程序员的第十七天~二十五天

1、理论学习
这几天反复温习了一下 MySQL 的语句,同时在校内实习中复习了深度学习的基础算法以及 numpy 库等,
也学习了一点基础的冒泡排序、选择排序和双指针算法,leetcode真是对我来说太难了emm。

import numpy as np

# Exam-score table: 8 students (rows) x 5 subjects (columns).
score = np.array(
    [
        [80, 89, 86, 67, 79],
        [78, 97, 87, 67, 81],
        [90, 94, 78, 67, 74],
        [91, 91, 90, 67, 69],
        [76, 87, 75, 67, 86],
        [70, 79, 84, 67, 84],
        [94, 92, 93, 67, 64],
        [86, 85, 83, 67, 80],
    ]
)
score  # REPL-style echo; a no-op when run as a script

import random
import time
import numpy as np

# Build 1000 uniform [0, 1) floats and time the built-in sum().
# BUG FIX: the original used the IPython magic "%time", which is a
# SyntaxError in plain Python; measure with time.perf_counter() instead
# (the `time` module was already imported but unused).
a = [random.random() for _ in range(1000)]
_t0 = time.perf_counter()
sum1 = sum(a)
print("sum(a) took %.6f s" % (time.perf_counter() - _t0))
b = np.array(a)

# Inspect .shape on arrays of increasing rank.
a = np.array([[1, 2, 3], [4, 5, 6]])          # 2-D -> (2, 3)
b = np.array([1, 2, 3, 4])                    # 1-D -> (4,)
c = np.array([[[1, 2, 3], [4, 5, 6]],
              [[1, 2, 3], [4, 5, 6]]])        # 3-D -> (2, 2, 3)
a.shape  # evaluated and discarded (REPL-style demo)
b.shape
c.shape

# A 3x4 array of zeros (float64 by default).
zero = np.zeros((3, 4))
zero  # REPL-style echo

# np.array always copies its input, while np.asarray returns the input
# object itself when it is already a compatible ndarray (no copy).
a = np.array([[1, 2, 3], [4, 5, 6]])
a1 = np.array(a)    # independent copy of a
a2 = np.asarray(a)  # same object as a

# Draw 1000 samples uniformly from the half-open interval [-1, 1).
x1 = np.random.uniform(low=-1, high=1, size=1000)
x1  # REPL-style echo

# Simulated daily price changes: 8 stocks x 10 days, drawn from N(0, 1).
stock_change = np.random.normal(loc=0, scale=1, size=(8, 10))
stock_change  # REPL-style echo

# np.unique flattens the array and returns its sorted distinct values.
temp = np.array([[1, 2, 3, 4],
                 [3, 4, 5, 6]])
np.unique(temp)  # -> [1 2 3 4 5 6]; result discarded (REPL-style demo)

# ndarray arithmetic is element-wise (the scalar broadcasts); each result
# below is computed and discarded, REPL-style.
arr = np.array([[1, 2, 3, 2, 1, 4],
                [5, 6, 1, 2, 3, 1]])
arr + 1   # every element incremented
arr / 2   # every element halved (float result)
# A plain Python list is different: `*` repeats the list, it does not scale it.
a = [1, 2, 3, 4, 5]
a * 3

# Weighted-average demo: 7 students' two scores and a weight column so that
# a @ b gives one weighted total per student.
a = np.array([[80, 86],
              [82, 80],
              [85, 78],
              [90, 90],
              [82, 90],
              [78, 80],
              [92, 94]])
# BUG FIX: the original was np.array([[0,7],[0.3]]) — a ragged nesting
# (rows of length 2 and 1), which raises ValueError in modern NumPy.
# The intended weights are 0.7 and 0.3 (they sum to 1), shaped (2, 1).
b = np.array([[0.7], [0.3]])

import pandas as pd

# Wrap simulated returns (10 stocks x 5 days) in a DataFrame, then attach
# readable row labels and business-day column labels.
stock_change = np.random.normal(0, 1, (10, 5))
stock_day_rise = pd.DataFrame(stock_change)

n_rows, n_cols = stock_day_rise.shape
stock_code = ['股票' + str(idx) for idx in range(n_rows)]
data = pd.DataFrame(stock_change, index=stock_code)
data  # REPL-style echo of the row-labelled frame

# Business-day ("B") dates starting 2017-01-01, one per column.
date = pd.date_range('2017-01-01', periods=n_cols, freq='B')
data = pd.DataFrame(stock_change, index=stock_code, columns=date)

import torch
from torch.autograd import Variable  # kept for compatibility; unused below

# -- Scalar backward ---------------------------------------------------------
# MODERNIZED: torch.autograd.Variable is deprecated since PyTorch 0.4;
# a tensor created with requires_grad=True records operations directly.
x = torch.ones(2, 2, requires_grad=True)
print(x)

y = x + 2
y
z = y * y * 3      # z = 3 * (x + 2)^2, all elements 27 for x = ones
out = z.mean()     # scalar: 27.0
out
out.backward(retain_graph=True)  # d(out)/dx = 6*(x+2)/4 = 4.5 everywhere
print(x.grad)

# -- Vector-Jacobian product -------------------------------------------------
# y is non-scalar here, so backward() needs an explicit gradient vector.
x = torch.randn(3, requires_grad=True)
y = x * 2
while y.data.norm() < 1000:  # keep doubling until the L2 norm exceeds 1000
    y = y * 2
print(y)
gradients = torch.FloatTensor([0.1, 1.0, 0.0001])
y.backward(gradients)
print(x.grad)

import torch.optim as optim
import torch.nn as nn  # BUG FIX: `nn` was referenced below but never imported

# Standard classification training setup: cross-entropy loss + SGD with momentum.
# NOTE(review): `net` is not defined anywhere in this file — this snippet
# assumes a model instance named `net` (an nn.Module) exists; confirm against
# the notebook this was copied from.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

# 练习: KNN classification on the Facebook check-in dataset,
# grid-searching n_neighbors.
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier

# 1. Load and narrow to a small spatial tile to keep the problem tractable.
data = pd.read_csv("/Users/a1171892504/Desktop/train.csv")
# BUG FIX: .query() returns a slice of `data`; take an explicit copy so the
# column assignments below don't raise SettingWithCopyWarning or silently
# fail to write.
facebook_data = data.query("x>2.0 & x<2.5 & y>2.0 & y<2.5").copy()
facebook_data["time"].head()  # quick peek; result discarded

# 2. Feature engineering: expand the epoch-seconds timestamp into
#    hour / day / weekday columns.
time = pd.to_datetime(facebook_data["time"], unit="s")
time = pd.DatetimeIndex(time)
facebook_data["hour"] = time.hour
facebook_data["day"] = time.day
facebook_data["weekday"] = time.weekday

# 3. Drop rare places (<= 3 check-ins) — too few samples to learn from.
place_count = facebook_data.groupby("place_id").count()
place_count = place_count[place_count["row_id"] > 3]
facebook_data = facebook_data[facebook_data["place_id"].isin(place_count.index)]

# 4. Split features / target, then train / test sets.
x = facebook_data[["x", "y", "accuracy", "day", "hour", "weekday"]]
y = facebook_data["place_id"]
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=2, test_size=0.2)

# 5. Standardize — fit on the training split only to avoid leakage.
transfer = StandardScaler()
x_train = transfer.fit_transform(x_train)
x_test = transfer.transform(x_test)

# 6. KNN wrapped in a grid search over n_neighbors.
estimator = KNeighborsClassifier()
param_grid = {"n_neighbors": [3, 5, 7]}
estimator = GridSearchCV(estimator=estimator, param_grid=param_grid, cv=9, n_jobs=14)
estimator.fit(x_train, y_train)

# 7. Evaluate on the held-out split and report the search results.
y_pre = estimator.predict(x_test)
print("预测值为:\n", y_pre)
score = estimator.score(x_test, y_test)
print("准确率为:\n", score)
print("最好的模型:\n", estimator.best_estimator_)
print("最好的结果:\n", estimator.best_score_)
print("整体模型结果:\n", estimator.cv_results_)
# NOTE(review): this repeats the best_estimator_ line above — likely a
# copy-paste leftover, kept to preserve output.
print("最好的模型:\n", estimator.best_estimator_)
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值