Deep Learning - 2


import sys, os
sys.path.append(os.pardir)
import numpy as np
from dataset.mnist import load_mnist
(x_train,t_train),(x_test,t_test) = load_mnist(normalize=True,one_hot_label=True)
print(x_train.shape)
(60000, 784)
t_test.shape
(10000, 10)
print(t_train.shape)
(60000, 10)
train_size = x_train.shape[0]
batch_size = 10000
batch_mask = np.random.choice(train_size,batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
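For reference (an illustration added here; the drawn indices are random): np.random.choice(train_size, batch_size) samples batch_size indices uniformly from [0, train_size), which is what makes this a random mini-batch.

np.random.choice(60000, 5)  # e.g. array([48735,  9312, 32101,  5059, 21044])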
grad = network.gradient(x_batch, t_batch)
grad['b2'].shape
(10,)
x_batch.shape
(100, 784)
t_batch.shape
(100, 10)
# cross-entropy error when t is one-hot encoded
def cross_entropy_error(y, t):
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)

    batch_size = y.shape[0]
    return -np.sum(t * np.log(y + 1e-7)) / batch_size

# cross-entropy error when t holds integer class labels
def cross_entropy_error(y, t):
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)

    batch_size = y.shape[0]
    # pick out y[i, t[i]], the predicted probability of the correct class, for each sample
    return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size
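A quick check of the two variants (a sketch added here; y_demo and the targets are made-up values): the one-hot version masks the log-probabilities with t, while the label version indexes the probability of the correct class directly, and both give about -log(0.6) ≈ 0.51 for this input.

y_demo = np.array([[0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0]])
t_onehot = np.array([[0, 0, 1, 0, 0, 0, 0, 0, 0, 0]])
t_label = np.array([2])
cross_entropy_error(y_demo, t_label)  # label-index version, ≈ 0.51; the one-hot version agrees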

#x_batch[np.arange(10000),x_test]
x_test.shape
(10000, 784)
cross_entropy_error(x_batch,x_test)

IndexError                                Traceback (most recent call last)
----> 1 cross_entropy_error(x_batch, x_test)

in cross_entropy_error(y, t)
      6     batch_size = y.shape[0]
----> 7     return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size

IndexError: arrays used as indices must be of integer (or boolean) type
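The call fails because x_test holds normalized float pixel values rather than integer class labels, so NumPy refuses to use it as an index array (x_batch is also input data, not softmax output, so the call is conceptually wrong anyway). A minimal sketch of a valid target for the label-index version, assuming MNIST is reloaded with one_hot_label=False:

(_, t_train_labels), (_, t_test_labels) = load_mnist(normalize=True, one_hot_label=False)
print(t_train_labels[:5])  # integer labels such as [5 0 4 1 9], usable as indices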

np.arange(x_batch.shape[0])
array([ 0, 1, 2, …, 9997, 9998, 9999])
delta = 1e-7

np.log(delta+0.6)

np.log(0.6)
-0.5108256237659907
np.log(0.6)+np.log(0.6)
-1.0216512475319814
def numerical_diff(f, x):
    h = 10e-50  # too small: rounds to 0.0 in float64, a bad choice
    return (f(x+h) - f(x)) / h

def numerical_diff(f, x):
    h = 1e-4  # 0.0001
    return (f(x+h) - f(x-h)) / (2*h)  # central difference
def function_1(x):
    return 0.01*x**2 + 0.1*x
import matplotlib.pyplot as plt
x = np.arange(0.0,20.0,0.1)
y = function_1(x)
plt.plot(x,y)
[<matplotlib.lines.Line2D at 0x8df4048>]

numerical_diff(function_1,10)
0.2999999999986347
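As a sanity check (added here, not in the original notebook), the analytic derivative of function_1 is 0.02x + 0.1, which equals 0.3 at x = 10, so the central difference above is accurate to roughly 12 decimal places.

def analytic_diff_1(x):
    # exact derivative of function_1: d/dx (0.01*x**2 + 0.1*x) = 0.02*x + 0.1
    return 0.02*x + 0.1

analytic_diff_1(10)  # 0.3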
def function_2(x0, x1):
    return x0**2 + x1**2
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d

x, y = np.mgrid[-3 : 3 : 20j, -3 : 3 : 20j]
z = function_2(x, y)  # test data
ax = plt.subplot(111, projection='3d')  # 3D plot
ax.plot_surface(x, y, z, rstride=2, cstride=1, cmap=plt.cm.Blues_r)
ax.set_xlabel('x')  # set the axis labels
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.show()

def function_tmp1(x0):
    return x0*x0 + 4.0**2.0
numerical_diff(function_tmp1,3.0)
6.00000000000378
def function_tmp2(x1):
    return 3.0**2.0 + x1**2
numerical_diff(function_tmp2,4)
7.999999999999119
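Both results match the analytic partial derivatives of f(x0, x1) = x0**2 + x1**2 (check added here): ∂f/∂x0 = 2*x0 = 6 at x0 = 3, and ∂f/∂x1 = 2*x1 = 8 at x1 = 4.

2*3.0, 2*4.0  # (6.0, 8.0), matching the two numerical results above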
a = np.array([1,2,3])
np.zeros_like(a)
array([0, 0, 0])
def function_2(x):
    return x[0]**2 + x[1]**2
def numerical_gradient(f, x):
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)  # array of zeros with the same shape as x

    for idx in range(x.size):
        tmp_val = x[idx]
        # compute f(x+h)
        x[idx] = tmp_val + h
        fxh1 = f(x)
        # compute f(x-h)
        x[idx] = tmp_val - h
        fxh2 = f(x)

        grad[idx] = (fxh1 - fxh2) / (2*h)
        x[idx] = tmp_val  # restore the original value

    return grad

def function_2(x):
    if x.ndim == 1:
        return np.sum(x**2)
    else:
        return np.sum(x**2, axis=1)
def _numerical_gradient_no_batch(f, x):
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)

    for idx in range(x.size):
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x)  # f(x+h)

        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2*h)

        x[idx] = tmp_val  # restore the original value

    return grad

numerical_gradient(function_2,np.array([3.0,4.0]))
array([6., 8.])
numerical_gradient(function_2,np.array([0.0,2.0]))
array([0., 4.])
numerical_gradient(function_2,np.array([3.0,0.0]))
array([6., 0.])
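All three results agree with the analytic gradient ∇f = (2*x0, 2*x1) (comparison added here for clarity):

2 * np.array([3.0, 4.0])  # array([6., 8.]), same as numerical_gradient(function_2, np.array([3.0, 4.0]))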
def gradient_descent(f, init_x, lr=0.01, step_num=100):
    x = init_x

    for i in range(step_num):
        grad = numerical_gradient(f, x)
        x -= lr * grad

    return x

gradient_descent(function_2,np.array([-3.0,4.0]),lr=0.1,step_num=100)
array([-6.11110793e-10, 8.14814391e-10])
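The learning rate matters a great deal. A hedged sketch (the exact outputs depend on the run, but the qualitative behavior is standard): too large a step diverges, too small a step barely moves.

gradient_descent(function_2, np.array([-3.0, 4.0]), lr=10.0, step_num=100)   # diverges to huge values
gradient_descent(function_2, np.array([-3.0, 4.0]), lr=1e-10, step_num=100)  # stays almost at (-3.0, 4.0)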
import matplotlib.pylab as plt

def gradient_descent(f, init_x, lr=0.01, step_num=100):
    x = init_x
    x_history = []

    for i in range(step_num):
        x_history.append(x.copy())

        grad = numerical_gradient(f, x)
        x -= lr * grad

    return x, np.array(x_history)


init_x = np.array([-3.0, 4.0])

lr = 0.1
step_num = 20
x, x_history = gradient_descent(function_2, init_x, lr=lr, step_num=step_num)

# 1. circle radius
r = 1.0

# 2. center coordinates
a, b = (0., 0.)

# parametric equation of the circle
theta = np.arange(0, 2*np.pi, 0.01)
x = a + r * np.cos(theta)
y = b + r * np.sin(theta)

plt.plot(x, y, '--b')
plt.plot(2*x, 2*y, '--b')
plt.plot(3*x, 3*y, '--b')
plt.plot(4*x, 4*y, '--b')
plt.plot(5*x, 5*y, '--b')
plt.plot(x_history[:,0], x_history[:,1], 'o')
plt.axis('equal')
plt.xlim(-3.5, 3.5)
plt.ylim(-4.5, 4.5)
plt.xlabel("X0")
plt.ylabel("X1")

plt.show()


sys.path.append('D:/【源代码】深度学习入门:基于Python的理论与实现')
sys.path
['C:\\Users\\Administrator\\Desktop\\python',
 'C:\\Users\\Administrator\\Anaconda3\\python37.zip',
 'C:\\Users\\Administrator\\Anaconda3\\DLLs',
 'C:\\Users\\Administrator\\Anaconda3\\lib',
 'C:\\Users\\Administrator\\Anaconda3',
 '',
 'C:\\Users\\Administrator\\Anaconda3\\lib\\site-packages',
 'C:\\Users\\Administrator\\Anaconda3\\lib\\site-packages\\win32',
 'C:\\Users\\Administrator\\Anaconda3\\lib\\site-packages\\win32\\lib',
 'C:\\Users\\Administrator\\Anaconda3\\lib\\site-packages\\Pythonwin',
 'C:\\Users\\Administrator\\Anaconda3\\lib\\site-packages\\IPython\\extensions',
 'C:\\Users\\Administrator\\.ipython',
 'D:/【源代码】深度学习入门:基于Python的理论与实现']
from common.functions import softmax, cross_entropy_error
from common.gradient import numerical_gradient

class simpleNet:
    def __init__(self):
        self.W = np.random.randn(2, 3)  # initialize with a Gaussian distribution

    def predict(self, x):
        return np.dot(x, self.W)

    def loss(self, x, t):
        z = self.predict(x)
        y = softmax(z)
        loss = cross_entropy_error(y, t)

        return loss

x = np.array([0.6, 0.9])
t = np.array([0, 0, 1])
net = simpleNet()
print(net.W)
[[ 0.82188277 -0.65448563 0.44413638]
[ 0.36925563 -2.23472022 1.82941247]]
p = net.predict(x)
print(p)
[ 0.82545973 -2.40393958 1.91295305]
net.loss(x,t)
0.30040190467900846
def f(W):
    return net.loss(x, t)
dW = numerical_gradient(f, net.W)
dW
array([[ 0.14976002, 0.00592769, -0.15568771],
[ 0.22464003, 0.00889154, -0.23353157]])
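Since f ignores its argument (the loss reads net.W internally, and numerical_gradient perturbs net.W in place), the same thing is often written as a lambda; an equivalent sketch:

f = lambda w: net.loss(x, t)  # w is unused; numerical_gradient mutates net.W directly
dW = numerical_gradient(f, net.W)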

# coding: utf-8

import sys, os
sys.path.append(os.pardir)  # setup for importing files from the parent directory
import numpy as np
from common.functions import *
from common.gradient import numerical_gradient


class TwoLayerNet:

    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # initialize the weights
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

    def predict(self, x):
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']

        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)

        return y

    # x: input data, t: teacher data
    def loss(self, x, t):
        y = self.predict(x)

        return cross_entropy_error(y, t)

    def accuracy(self, x, t):
        y = self.predict(x)
        y = np.argmax(y, axis=1)
        t = np.argmax(t, axis=1)

        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy

    # x: input data, t: teacher data
    def numerical_gradient(self, x, t):
        loss_W = lambda W: self.loss(x, t)

        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])

        return grads

    def gradient(self, x, t):
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        grads = {}

        batch_num = x.shape[0]

        # forward
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)

        # backward
        dy = (y - t) / batch_num
        grads['W2'] = np.dot(z1.T, dy)
        grads['b2'] = np.sum(dy, axis=0)

        da1 = np.dot(dy, W2.T)
        dz1 = sigmoid_grad(a1) * da1
        grads['W1'] = np.dot(x.T, dz1)
        grads['b1'] = np.sum(dz1, axis=0)

        return grads
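A quick shape check of freshly initialized parameters (a sketch added here; hidden_size=100 is an arbitrary illustrative value):

net = TwoLayerNet(input_size=784, hidden_size=100, output_size=10)
print(net.params['W1'].shape)  # (784, 100)
print(net.params['b1'].shape)  # (100,)
print(net.params['W2'].shape)  # (100, 10)
print(net.params['b2'].shape)  # (10,)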
sys.path
['C:\\Users\\Administrator\\Desktop\\python',
 'C:\\Users\\Administrator\\Anaconda3\\python37.zip',
 'C:\\Users\\Administrator\\Anaconda3\\DLLs',
 'C:\\Users\\Administrator\\Anaconda3\\lib',
 'C:\\Users\\Administrator\\Anaconda3',
 '',
 'C:\\Users\\Administrator\\Anaconda3\\lib\\site-packages',
 'C:\\Users\\Administrator\\Anaconda3\\lib\\site-packages\\win32',
 'C:\\Users\\Administrator\\Anaconda3\\lib\\site-packages\\win32\\lib',
 'C:\\Users\\Administrator\\Anaconda3\\lib\\site-packages\\Pythonwin',
 'C:\\Users\\Administrator\\Anaconda3\\lib\\site-packages\\IPython\\extensions',
 'C:\\Users\\Administrator\\.ipython',
 'D:/【源代码】深度学习入门:基于Python的理论与实现']

# coding: utf-8

import sys, os
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from ch04.two_layer_net import TwoLayerNet

# load the data

(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)

network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

iters_num = 10000  # set the number of iterations appropriately
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1

train_loss_list = []
train_acc_list = []
test_acc_list = []

iter_per_epoch = max(train_size / batch_size, 1)

for i in range(iters_num):
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # compute the gradient
    #grad = network.numerical_gradient(x_batch, t_batch)
    grad = network.gradient(x_batch, t_batch)

    # update the parameters
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.params[key] -= learning_rate * grad[key]

    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)

    if i % iter_per_epoch == 0:
        train_acc = network.accuracy(x_train, t_train)
        test_acc = network.accuracy(x_test, t_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print("train acc, test acc | " + str(train_acc) + ", " + str(test_acc))

# plot the results

markers = {'train': 'o', 'test': 's'}
x = np.arange(len(train_acc_list))
plt.plot(x, train_acc_list, label='train acc')
plt.plot(x, test_acc_list, label='test acc', linestyle='--')
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
train acc, test acc | 0.10218333333333333, 0.101
train acc, test acc | 0.7984166666666667, 0.8042
train acc, test acc | 0.87805, 0.8823
train acc, test acc | 0.8979666666666667, 0.9007
train acc, test acc | 0.90755, 0.9091
train acc, test acc | 0.9137833333333333, 0.9168
train acc, test acc | 0.91965, 0.9214
train acc, test acc | 0.9233833333333333, 0.925
train acc, test acc | 0.9280333333333334, 0.9292
train acc, test acc | 0.9308666666666666, 0.9303
train acc, test acc | 0.93355, 0.9335
train acc, test acc | 0.9363, 0.9362
train acc, test acc | 0.9393833333333333, 0.9384
train acc, test acc | 0.94075, 0.9403
train acc, test acc | 0.9435166666666667, 0.9427
train acc, test acc | 0.9451333333333334, 0.9442
train acc, test acc | 0.94725, 0.9447
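Why 17 accuracy lines (arithmetic added here): with train_size = 60000 and batch_size = 100, iter_per_epoch is 600, so accuracy is printed at i = 0, 600, ..., 9600, i.e. 17 times over the 10000 iterations.

60000 / 100       # iter_per_epoch = 600.0
10000 // 600 + 1  # 17 printouts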

for key in ('W1', 'b1', 'W2', 'b2'):
    network.params[key] -= learning_rate * grad[key]
network.params
{'W1': array([[-2.12352143e-02,  5.77188009e-03,  1.30034209e-02, ...,
          4.90189610e-03, -5.79992797e-05,  1.15963550e-02],
        ...,
        [-5.47620188e-03,  6.91263222e-04, -1.82995933e-02, ...,
         -4.54715468e-03, -2.26430019e-02, -1.38000558e-02]]),
 'b1': array([-0.08049239,  0.02720145,  0.01659081,  0.06802775, ...,
         0.01187118,  0.04095818,  0.11625965, -0.04149997]),
 'W2': array([[ 7.88089781e-01,  1.63931140e-01,  5.52915289e-01, ...,
         -9.03604579e-01],
        ...,
        [-1.15450226e+00,  1.38605941e+00,  1.50626025e-01, ...,
         -2.64325836e-01]]),
 'b2': array([ 0.01867417,  0.0003279 , -0.00857341,  0.0204949 ,  0.02587447,
         0.02321397, -0.00246737,  0.03766014, -0.12843373,  0.01322896])}
loss = network.loss(x_batch, t_batch)
train_loss_list.append(loss)
train_loss_list
len(train_loss_list)
10001
x = np.arange(10001)
y = train_loss_list
plt.plot(x,y)
[<matplotlib.lines.Line2D at 0xd3bf9e8>]

# first 1000 losses
x = np.arange(1000)
y = train_loss_list[:1000]
plt.plot(x,y)
[<matplotlib.lines.Line2D at 0xd2f88d0>]
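The raw per-iteration loss is noisy because each value comes from a different random mini-batch. A moving average makes the trend easier to read (a sketch added here; the window of 100 is arbitrary):

window = 100
smoothed = np.convolve(train_loss_list, np.ones(window) / window, mode='valid')
plt.plot(np.arange(len(smoothed)), smoothed)
plt.xlabel("iteration")
plt.ylabel("smoothed loss")
plt.show()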
