tensorflow 线性回归二

用到的 tensorflow 指令(tensorflow module API 索引):

tf.constant([4, 3.])

dataset = tf.data.Dataset.from_tensor_slices(data_arrays)

dataset.shuffle(buffer_size = 3000) 

dataset.batch(batch_size)

initializer = tf.initializers.RandomNormal(stddev = 0.01)

net = tf.keras.Sequential()

net.add(tf.keras.layers.Dense(1,kernel_initializer = initializer))

net.trainable_variables

loss = tf.losses.MeanSquaredError()

lost = loss(net(feature,training = True),lable)

optimizer = tf.keras.optimizers.SGD()

optimizer.apply_gradients(zip(grad, net.trainable_variables))

输出:

tf.Tensor(0.00010774888, shape=(), dtype=float32)
tf.Tensor(0.0001031273, shape=(), dtype=float32)
tf.Tensor(9.670573e-05, shape=(), dtype=float32)
tf.Tensor(9.975378e-05, shape=(), dtype=float32)
tf.Tensor(0.000100455494, shape=(), dtype=float32)
tf.Tensor(9.750764e-05, shape=(), dtype=float32)
tf.Tensor(0.00010110715, shape=(), dtype=float32)
tf.Tensor(0.00010778586, shape=(), dtype=float32)
tf.Tensor(9.970649e-05, shape=(), dtype=float32)
tf.Tensor(0.0001071509, shape=(), dtype=float32)
[[4.000005]
 [2.999966]]
[2.0000412]

源代码:

# -*- coding: utf-8 -*-
"""
Created on Wed Mar 22 14:29:06 2023

@author: Shawn.Li
"""

import numpy as np
import tensorflow as tf
from d2l import tensorflow as d2l



# Ground-truth weights and bias for the synthetic linear-regression
# data set; d2l's helper generates 3000 (feature, label) samples from
# them (presumably with additive noise — see d2l.synthetic_data docs).
w_true = tf.constant([4, 3.0])
b_true = 2.0
features, lables = d2l.synthetic_data(w_true, b_true, 3000)

# 读取数据集
def load_array(data_arrays, batch_size, is_train=True, buffer_size=3000):  #@save
    """Build a ``tf.data`` mini-batch iterator over in-memory arrays.

    Args:
        data_arrays: tuple of tensors/arrays (e.g. ``(features, labels)``),
            sliced along their first axis.
        batch_size: number of examples per mini-batch.
        is_train: when True, reshuffle the examples each epoch.
        buffer_size: shuffle-buffer size. Defaults to 3000 — the value
            that was previously hard-coded (the full data-set size here) —
            so existing callers see identical behavior.

    Returns:
        A batched ``tf.data.Dataset`` yielding tuples of mini-batches.
    """
    dataset = tf.data.Dataset.from_tensor_slices(data_arrays)
    if is_train:  # idiomatic truth test instead of `== True`
        dataset = dataset.shuffle(buffer_size=buffer_size)
    return dataset.batch(batch_size)

# Wrap the synthetic data in a shuffling, batching tf.data iterator.
batch_size = 50
data_iter = load_array((features, lables), batch_size)

# Model: a single Dense(1) layer, i.e. linear regression y = Xw + b.
# The kernel starts from a small random normal; the bias uses the
# layer's default (zeros) initializer.
weight_init = tf.initializers.RandomNormal(stddev=0.01)
net = tf.keras.Sequential([
    tf.keras.layers.Dense(1, kernel_initializer=weight_init),
])

# Squared-error loss and plain SGD with the default learning rate.
loss = tf.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.SGD()



# Training loop: for each epoch, iterate the mini-batches, compute the
# MSE under a GradientTape, and apply an SGD step to the layer's kernel
# and bias.
epochs_num = 3

for epoch in range(epochs_num):
    for feature, lable in data_iter:
        with tf.GradientTape() as tape:
            # Keras losses take (y_true, y_pred). The original passed
            # (pred, true); MSE is symmetric so the value was the same,
            # but the conventional order is used here.
            lost = loss(lable, net(feature, training=True))  # batch loss
        grad = tape.gradient(lost, net.trainable_variables)  # gradients
        optimizer.apply_gradients(zip(grad, net.trainable_variables))

    # Report the epoch loss over the FULL data set. The original
    # evaluated only the final mini-batch of the epoch, which is a
    # noisy, misleading estimate of training progress.
    lost = loss(lables, net(features))
    print(lost)

# Learned parameters: kernel should approach w_true = [4, 3] and the
# bias should approach b_true = 2.
print(net.get_weights()[0])
print(net.get_weights()[1])



评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值