Running a simple regression in basic TensorFlow 2.0 (Python)

I'm learning TensorFlow 2.0 and thought it would be a good idea to implement the most basic simple linear regression in TensorFlow. Unfortunately, I ran into several problems, and I'm wondering whether anyone here can help.

Consider the following setup:

import tensorflow as tf  # 2.0.0-alpha0
import numpy as np

x_data = np.random.randn(2000, 1)
w_real = [0.7]  # coefficients
b_real = -0.2  # global bias
noise = np.random.randn(1, 2000) * 0.5  # level of noise
y_data = np.matmul(w_real, x_data.T) + b_real + noise

Now for the model definition:

# modelling this data with tensorflow (manually!)
class SimpleRegressionNN(tf.keras.Model):

    def __init__(self):
        super(SimpleRegressionNN, self).__init__()
        self.input_layer = tf.keras.layers.Input
        self.output_layer = tf.keras.layers.Dense(1)

    def call(self, data_input):
        model = self.input_layer(data_input)
        model = self.output_layer(model)
        # open question: how to account for the intercept/bias term?
        # Ideally, we'd want to generate preds as matmul(X, W) + b
        return model

nn_regressor = SimpleRegressionNN()

reg_loss = tf.keras.losses.MeanSquaredError()
reg_optimiser = tf.keras.optimizers.SGD(0.1)
metric_accuracy = tf.keras.metrics.mean_squared_error

# define forward step
@tf.function
def train_step(x_sample, y_sample):
    with tf.GradientTape() as tape:
        predictions = nn_regressor(x_sample)
        loss = reg_loss(y_sample, predictions)
        gradients = tape.gradient(loss, nn_regressor.trainable_variables)  # had to indent this!
    reg_optimiser.apply_gradients(zip(gradients, nn_regressor.trainable_variables))
    metric_accuracy(y_sample, predictions)

#%%
# run the model
for epoch in range(10):
    for x_point, y_point in zip(x_data.T[0], y_data[0]):  # batch of 1
        train_step(x_sample=x_point, y_sample=y_point)
    print("MSE: {}".format(metric_accuracy.result()))

Unfortunately, I'm getting the following error:

TypeError: You are attempting to use Python control flow in a layer that was not declared to be dynamic. Pass `dynamic=True` to the class constructor.
Encountered error:
"""
Tensor objects are only iterable when eager execution is enabled. To iterate over this tensor use tf.map_fn.
"""

The full error output is here:

---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
/anaconda3/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs)
    611                     inputs)) as auto_updater:
--> 612           outputs = self.call(inputs, *args, **kwargs)
    613           auto_updater.set_outputs(outputs)

in call(self, data_input)
      7     def call(self, data_input):
----> 8         model = self.input_layer(data_input)
      9         model = self.output_layer(model)

/anaconda3/lib/python3.6/site-packages/tensorflow/python/keras/engine/input_layer.py in Input(shape, batch_size, name, dtype, sparse, tensor, **kwargs)
    232         sparse=sparse,
--> 233         input_tensor=tensor)
    234   # Return tensor including `_keras_history`.

/anaconda3/lib/python3.6/site-packages/tensorflow/python/keras/engine/input_layer.py in __init__(self, input_shape, batch_size, dtype, input_tensor, sparse, name, **kwargs)
     93     if input_shape is not None:
---> 94       batch_input_shape = (batch_size,) + tuple(input_shape)
     95     else:

/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in __iter__(self)
    448       raise TypeError(
--> 449           "Tensor objects are only iterable when eager execution is "
    450           "enabled. To iterate over this tensor use tf.map_fn.")

TypeError: Tensor objects are only iterable when eager execution is enabled. To iterate over this tensor use tf.map_fn.

During handling of the above exception, another exception occurred:

TypeError                                 Traceback (most recent call last)
in ()
      3 #train_step(x_sample=x_data.T[0], y_sample=y_data[0])
      4 for x_point, y_point in zip(x_data.T[0], y_data[0]):
----> 5     train_step(x_sample=x_point, y_sample=y_point)
      6     print("MSE: {}".format(metric_accuracy.result()))
      7

/anaconda3/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
    416       # In this case we have not created variables on the first call. So we can
    417       # run the first trace but we should fail if variables are created.
--> 418       results = self._stateful_fn(*args, **kwds)
    419       if self._created_variables:
    420         raise ValueError("Creating variables on a non-first call to a function"

/anaconda3/lib/python3.6/site-packages/tensorflow/python/eager/function.py in __call__(self, *args, **kwargs)
   1285   def __call__(self, *args, **kwargs):
   1286     """Calls a graph function specialized to the inputs."""
-> 1287     graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
   1288     return graph_function._filtered_call(args, kwargs)  # pylint: disable=protected-access
   1289

/anaconda3/lib/python3.6/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
   1609             relaxed_arg_shapes)
   1610         graph_function = self._create_graph_function(
-> 1611             args, kwargs, override_flat_arg_shapes=relaxed_arg_shapes)
   1612         self._function_cache.arg_relaxed[rank_only_cache_key] = graph_function
   1613

/anaconda3/lib/python3.6/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
   1510             arg_names=arg_names,
   1511             override_flat_arg_shapes=override_flat_arg_shapes,
-> 1512             capture_by_value=self._capture_by_value),
   1513         self._function_attributes)
   1514

/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
    692             converted_func)
    693
--> 694       func_outputs = python_func(*func_args, **func_kwargs)
    695
    696       # invariant: `func_outputs` contains only Tensors, IndexedSlices,

/anaconda3/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
    315           # __wrapped__ allows AutoGraph to swap in a converted function. We give
    316           # the function a weak reference to itself to avoid a reference cycle.
--> 317           return weak_wrapped_fn().__wrapped__(*args, **kwds)
    318         weak_wrapped_fn = weakref.ref(wrapped_fn)
    319

/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
    684               optional_features=autograph_options,
    685               force_conversion=True,
--> 686           ), args, kwargs)
    687
    688       # Wrapping around a decorator allows checks like tf_inspect.getargspec

/anaconda3/lib/python3.6/site-packages/tensorflow/python/autograph/impl/api.py in converted_call(f, owner, options, args, kwargs)
    390     return _call_unconverted(f, args, kwargs)
    391
--> 392   result = converted_f(*effective_args, **kwargs)
    393
    394   # The converted function's closure is simply inserted into the function's

/var/folders/8_/pl9fgq297ld3b7kgy5tmvf700000gn/T/tmpluzodr7d.py in tf__train_step(x_sample, y_sample)
      2 def tf__train_step(x_sample, y_sample):
      3   with tf.GradientTape() as tape:
----> 4     predictions = ag__.converted_call(nn_regressor, None, ag__.ConversionOptions(recursive=True, verbose=0, strip_decorators=(tf.function, defun, ag__.convert, ag__.do_not_convert, ag__.converted_call), force_conversion=False, optional_features=(), internal_convert_user_code=True), (x_sample,), {})
      5     loss = ag__.converted_call(reg_loss, None, ag__.ConversionOptions(recursive=True, verbose=0, strip_decorators=(tf.function, defun_1, ag__.convert, ag__.do_not_convert, ag__.converted_call), force_conversion=False, optional_features=(), internal_convert_user_code=True), (y_sample, predictions), {})
      6     gradients = ag__.converted_call('gradient', tape, ag__.ConversionOptions(recursive=True, verbose=0, strip_decorators=(tf.function, defun_2, ag__.convert, ag__.do_not_convert, ag__.converted_call), force_conversion=False, optional_features=(), internal_convert_user_code=True), (loss, nn_regressor.trainable_variables), {})

/anaconda3/lib/python3.6/site-packages/tensorflow/python/autograph/impl/api.py in converted_call(f, owner, options, args, kwargs)
    265
    266   if not options.force_conversion and conversion.is_whitelisted_for_graph(f):
--> 267     return _call_unconverted(f, args, kwargs)
    268
    269   # internal_convert_user_code is for example turned off when issuing a dynamic

/anaconda3/lib/python3.6/site-packages/tensorflow/python/autograph/impl/api.py in _call_unconverted(f, args, kwargs)
    186     return f.__self__.call(args, kwargs)
    187
--> 188   return f(*args, **kwargs)
    189
    190

/anaconda3/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs)
    623                         'dynamic. Pass `dynamic=True` to the class '
    624                         'constructor.\nEncountered error:\n"""\n' +
--> 625                         exception_str + '\n"""')
    626           raise
    627         else:

TypeError: You are attempting to use Python control flow in a layer that was not declared to be dynamic. Pass `dynamic=True` to the class constructor.
Encountered error:
"""
Tensor objects are only iterable when eager execution is enabled. To iterate over this tensor use tf.map_fn.
"""

The confusing part is that TF 2.0 is set to run eagerly by default!
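(One detail worth noting: eager execution only holds at the top level; code wrapped in @tf.function is traced into a graph, and graph tensors are not iterable. A quick, purely illustrative check:)

print(tf.executing_eagerly())  # True: TF 2.0 is eager at the top level

@tf.function
def traced_fn(x):
    # During tracing, eager execution is disabled inside the function body
    print("eager inside tf.function?", tf.executing_eagerly())  # False
    return x * 2

traced_fn(tf.constant(1.0))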

Besides this problem, I have a couple of other questions:

> What is the best way to account for the intercept term here?
> Is the general approach sensible, or am I doing anything strange? (Ignore the batch size and the fact that I should validate the data; this is just a toy example.)

Thanks very much!

Solution:

I have the following remarks:

> You don't need an input layer in your SimpleRegression model (see the short sketch after this list). Also, don't name a layer's tensor output "model" (as you do in the call() method); that is really confusing.
> You are not passing the right shape to the train_step function. It expects to receive (n_samples, input_dim), while you are passing (input_dim,).
> Remember that in TensorFlow the first dimension of a tensor is always the batch size (i.e. the number of samples). Always use it that way, without transposing.
> Why do you call metric_accuracy = tf.keras.metrics.mean_squared_error an "accuracy"? You have a regression problem, and there is no accuracy in regression. Also, why do you define and compute the MSE twice?
> Execution will be faster if you convert your data with tf.convert_to_tensor().
> The function train_step() performs the forward and the backward pass, not just the forward pass.
> Use a smaller dataset for a toy example (2-10 samples, not 2000), especially when you don't know whether your code works!
> Your function train_step() doesn't return anything, so how do you expect to print the value of the MSE loss?
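A brief aside on the first point: tf.keras.layers.Input is a function that builds a symbolic placeholder tensor for the functional API, not a layer you apply to data inside call(); when called with a tensor, that tensor is interpreted as the shape argument, which is exactly what triggers the iteration error in the traceback above. A minimal sketch of its intended use (this functional-API model is illustrative only, not part of the original code):

inputs = tf.keras.Input(shape=(2,))         # symbolic tensor with shape (None, 2)
outputs = tf.keras.layers.Dense(1)(inputs)  # Dense(1) already includes a trainable bias
functional_model = tf.keras.Model(inputs=inputs, outputs=outputs)
functional_model.summary()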

Here is a corrected version of your code:

import tensorflow as tf  # 2.0.0-alpha0
import numpy as np

x_data = np.random.randn(5, 2)
w_real = 0.7  # coefficients
b_real = -0.2  # global bias
noise = np.random.randn(5, 2) * 0.01  # level of noise
y_data = w_real * x_data + b_real + noise

class SimpleRegressionNN(tf.keras.Model):

    def __init__(self):
        super(SimpleRegressionNN, self).__init__()
        self.output_layer = tf.keras.layers.Dense(1, input_shape=(2, ))

    def call(self, data_input):
        result = self.output_layer(data_input)
        return result

reg_loss = tf.keras.losses.MeanSquaredError()
reg_optimiser = tf.keras.optimizers.SGD(0.1)

nn_regressor = SimpleRegressionNN()

@tf.function
def train_step(x_sample, y_sample):
    with tf.GradientTape() as tape:
        predictions = nn_regressor(x_sample)
        loss = reg_loss(y_sample, predictions)
        gradients = tape.gradient(loss, nn_regressor.trainable_variables)
    reg_optimiser.apply_gradients(zip(gradients, nn_regressor.trainable_variables))
    return loss

for x_point, y_point in zip(x_data, y_data):  # batch of 1
    x_point, y_point = tf.convert_to_tensor([x_point]), tf.convert_to_tensor([y_point])
    mse = train_step(x_sample=x_point, y_sample=y_point)
    print("MSE: {}".format(mse.numpy()))

Tags: tensorflow, regression, machine-learning, deep-learning, python

Source: https://codeday.me/bug/20191108/2005713.html
