tensorflow2.0-1

# TensorFlow 1 + 1
import tensorflow as tf
import numpy as np

# random_float = tf.random.uniform(shape=())

# zero_vec = tf.zeros(shape=(2))
#
#
# A = tf.constant([[1., 2.], [3., 4.]])
# B = tf.constant([[5., 6.], [7., 8.]])
#
# print(A.shape)
# print(A.dtype)
# print(A.numpy())
# print(B.numpy())
#
#
# C = tf.add(A, B)
# D = tf.matmul(A, B)
# print("C = ", C.numpy())
# print("D = ", D.numpy())


# Automatic differentiation
# x = tf.Variable(initial_value=3.)
# with tf.GradientTape() as tape:
#     y = tf.square(x)
#
# y_grad = tape.gradient(y, x)
# print(y, y_grad)
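#
# Here y = x**2, so dy/dx = 2x; at x = 3 the tape should report y = 9.0
# and y_grad = 6.0. As a minimal sanity check (not in the original post),
# the same value falls out of a central finite difference:
# x0, eps = 3.0, 1e-4
# numeric_grad = ((x0 + eps) ** 2 - (x0 - eps) ** 2) / (2 * eps)
# print(numeric_grad)  # ~= 6.0, matching tape.gradient(y, x)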

# Automatic differentiation: partial derivatives of a multivariate function
# X = tf.constant([[1., 2.], [3., 4.]])
# y = tf.constant([[1.], [2.]])
#
# w = tf.Variable(initial_value=[[1.], [2.]])
# print(w.shape)
# b = tf.Variable(initial_value=1.)
# print(tf.matmul(X, w))
# print(tf.matmul(X, w) + b)
# print(tf.matmul(X, w) + b - y)
# print(tf.square(tf.matmul(X, w) + b - y))
#
# with tf.GradientTape() as tape:
#     L = tf.reduce_sum(tf.square(tf.matmul(X, w) + b - y))
#
# w_grad, b_grad = tape.gradient(L, [w, b])  # partial derivatives of L with respect to w and b
# print("L:", L)
# print("w_grad:", w_grad)
# print("b_grad:", b_grad)

# Linear regression example - NumPy
X_raw = np.array([2013, 2014, 2015, 2016, 2017], dtype=np.float32)
y_raw = np.array([12000, 14000, 15000, 16500, 17500], dtype=np.float32)

X = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())
y = (y_raw - y_raw.min()) / (y_raw.max() - y_raw.min())
print("X = ", X)
print("y = ", y)

# a, b = 0, 0
# num_epoch = 10000
# learning_rate = 5e-4
# for e in range(num_epoch):
#     y_pred = a * X + b
#     grad_a, grad_b = 2*(y_pred-y).dot(X), 2*(y_pred-y).sum()  # hand-derived partial derivatives
#     a, b = a-learning_rate*grad_a, b-learning_rate*grad_b
#     if e % 500 == 0:
#         print("e = ", e, "a = ", a, "b = ", b)

# TensorFlow linear regression
X = tf.constant(X)
y = tf.constant(y)
print(X)
print(y)

a = tf.Variable(initial_value=0.)
b = tf.Variable(initial_value=0.)
variables = [a, b]

num_epoch = 10000
optimizer = tf.keras.optimizers.SGD(learning_rate=5e-4)
for e in range(num_epoch):
    with tf.GradientTape() as tape:
        y_pred = a * X + b
        loss = tf.reduce_sum(tf.square(y_pred - y))
    grads = tape.gradient(loss, variables)
    optimizer.apply_gradients(grads_and_vars=zip(grads, variables))
    if e % 500 == 0:
        print("----"*25)
        print("e=", e, "\na=", a, "\nb=", b)



 
