.NET环境中使用TensorFlow

1、确保项目已通过 NuGet 安装下面三个库:TensorFlow.NET、TensorFlow.Keras,以及对应平台的原生运行时 SciSharp.TensorFlow.Redist(GPU 版本为 SciSharp.TensorFlow.Redist-Windows-GPU)

测试代码来自 TensorFlow.NET 官方文档(SciSharp/TensorFlow.NET 项目的线性回归入门示例)

using System;
using Tensorflow.NumPy;
using static Tensorflow.Binding;
namespace TensorFlow_Test
{
    /// <summary>
    /// Minimal TensorFlow.NET eager-mode demo: fits a linear model
    /// y = W * x + b to a small fixed data set using plain SGD.
    /// Progress (loss, W, b) is printed every <c>displayStep</c> steps.
    /// </summary>
    class TensorFlowTest
    {
        static void Main(string[] args)
        {
            // Hyperparameters.
            var trainingSteps = 1000;
            var learningRate = 0.01f;
            var displayStep = 100;

            // Toy training data: 17 scalar (x, y) pairs.
            var trainX = np.array(3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
                                  7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f);
            var trainY = np.array(1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f,
                                  2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f);
            var sampleCount = trainX.shape[0];

            // Fixed initial values so the demo run is reproducible.
            var weight = tf.Variable(-0.06f, name: "weight");
            var bias = tf.Variable(-0.73f, name: "bias");
            var optimizer = tf.keras.optimizers.SGD(learningRate);

            // Training loop: steps 1..trainingSteps inclusive.
            for (var step = 1; step <= trainingSteps; step++)
            {
                // Record the forward pass on a gradient tape so the
                // framework can differentiate the loss automatically.
                using var tape = tf.GradientTape();
                // Linear model: W * x + b.
                var prediction = weight * trainX + bias;
                // Halved mean squared error, summed over all samples.
                var loss = tf.reduce_sum(tf.pow(prediction - trainY, 2)) / (2 * sampleCount);
                // should stop recording
                // Gradients of the loss w.r.t. W and b.
                var gradients = tape.gradient(loss, (weight, bias));

                // One SGD update of W and b along the gradients.
                optimizer.apply_gradients(zip(gradients, (weight, bias)));

                if (step % displayStep == 0)
                {
                    // Re-evaluate prediction/loss with the updated parameters for logging.
                    prediction = weight * trainX + bias;
                    loss = tf.reduce_sum(tf.pow(prediction - trainY, 2)) / (2 * sampleCount);
                    print($"step: {step}, loss: {loss.numpy()}, W: {weight.numpy()}, b: {bias.numpy()}");
                }
            }

        }
    }
}

 训练结果:(原文此处为训练输出截图,转载时图片丢失;正常情况下程序每 100 步打印一次 step、loss、W、b 的当前值。)

 

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值