p17_tf_basic

import numpy as np
import tensorflow as tf



# NumPy warm-up: element-wise arithmetic, broadcasting, boolean masks,
# and batched matrix multiplication.

SEP_WIDE = '-' * 200   # wide visual separator between demo sections
SEP_NARROW = '-' * 20  # narrow separator within a section

a = np.array([1, 2, 3])
print(a)
print(a.shape)
# A scalar broadcasts over every element of the array.
b = np.add(a, 3)
print(b)

# Broadcasting two arrays: (2, 3, 4) + (1, 4) stretches the (1, 4) row
# across the two leading axes.
a = np.random.uniform(size=(2, 3, 4))
b = np.random.uniform(size=(1, 4))
print(SEP_WIDE)
print(a.shape, b.shape)
print(a)
print(SEP_NARROW)
print(b)
print(SEP_NARROW)
print(a + b)  #  a-b, a*b, a/b, a%b,
print(SEP_WIDE)
# Comparisons broadcast the same way and yield boolean arrays.
print(a > b)
print(SEP_WIDE)
# `a` is uniform in [0, 1), so `a < 3` is always True and the result
# equals `a > b` (the transcript below confirms the two prints match).
print((a > b) & (a < 3))
# Boolean mask converted to a 0.0/1.0 float mask (all zeros for this `a`).
print(np.asarray(a > 3, dtype=np.float32))
print('=' * 200)

# Batched matmul: (2, 3, 4) @ (2, 4, 9) -> (2, 3, 9), one product per batch.
c = np.random.normal(10, 3, (2, 3, 4))
d = np.random.normal(10, 3, (2, 4, 9))
e = c @ d
print(e.shape)




# TensorFlow 1.x version of the NumPy demo above.  In TF1 graph mode these
# lines only *build* graph ops; values are produced later by session.run().

a = tf.constant([1, 2, 3])
print(a.shape)  # the static shape is known at graph-construction time
b = a + 3  # element-wise add with scalar broadcasting, as in NumPy

# NOTE(review): tf.random_uniform is the TF1 name (tf.random.uniform in TF2).
a = tf.random_uniform([2, 3, 4])  # resampled each time the op is run
b = tf.random_uniform([1, 4])

c = a > b  # boolean tensor; (1, 4) broadcasts against (2, 3, 4)
d = tf.cast(c, tf.float32)  # bool -> 0.0 / 1.0 mask

e = tf.logical_and(c, a > 0.3)

# Quick catalogue of common ops (bare references only; nothing is executed):
tf.sin, tf.cos
tf.matmul
tf.nn.relu, tf.nn.sigmoid, tf.nn.tanh, tf.nn.relu6, tf.nn.leaky_relu, \
        tf.nn.elu, tf.nn.selu, tf.nn.crelu  # assorted activation functions
# relu(x) === maximum(x, 0)
tf.maximum, tf.minimum
tf.matmul

f = tf.random_uniform([2, 3])

# A variable is initialized once and then keeps its value between run() calls.
g = tf.get_variable('g', [2, 3], tf.float32)
with tf.Session() as session:
    # Variables must be initialized before they can be read.
    session.run(tf.global_variables_initializer())
    print(session.run(a))
    print(session.run(b))
    print(session.run(c))
    print(session.run(d))
    # Two runs of the same random op draw two different samples
    # (see the transcript below: the two `f` matrices differ).
    print(session.run(f))
    print(session.run(f))
    print('-' * 200)
    # Two runs of the variable return the same initialized value
    # (the two `g` matrices in the transcript are identical).
    print(session.run(g))
    print(session.run(g))



D:\Anaconda\python.exe D:/AI20/HJZ/05-深度学习项目/deeplearning_20/p17_tf_basic.py
[1 2 3]
(3,)
[4 5 6]
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
(2, 3, 4) (1, 4)
[[[0.88159504 0.2389287  0.35967951 0.64821762]
  [0.16066399 0.16373432 0.53843989 0.64935206]
  [0.2904436  0.86819394 0.64724457 0.38851738]]

 [[0.48409914 0.95392485 0.84978474 0.13165715]
  [0.74903869 0.09219363 0.65371558 0.85831939]
  [0.28045262 0.26292267 0.29201848 0.96626748]]]
--------------------
[[0.15566468 0.8953767  0.05152791 0.82352448]]
--------------------
[[[1.03725973 1.1343054  0.41120742 1.4717421 ]
  [0.31632867 1.05911102 0.58996779 1.47287654]
  [0.44610828 1.76357064 0.69877248 1.21204186]]

 [[0.63976382 1.84930155 0.90131265 0.95518163]
  [0.90470337 0.98757033 0.70524349 1.68184387]
  [0.4361173  1.15829937 0.34354639 1.78979196]]]
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
[[[ True False  True False]
  [ True False  True False]
  [ True False  True False]]

 [[ True  True  True False]
  [ True False  True  True]
  [ True False  True  True]]]
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
[[[ True False  True False]
  [ True False  True False]
  [ True False  True False]]

 [[ True  True  True False]
  [ True False  True  True]
  [ True False  True  True]]]
[[[0. 0. 0. 0.]
  [0. 0. 0. 0.]
  [0. 0. 0. 0.]]

 [[0. 0. 0. 0.]
  [0. 0. 0. 0.]
  [0. 0. 0. 0.]]]
========================================================================================================================================================================================================
(2, 3, 9)
(3,)
2020-02-26 20:47:18.641292: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX AVX2
[[[0.08960831 0.09944808 0.21347654 0.02046752]
  [0.60505784 0.624668   0.7794361  0.34513807]
  [0.05473447 0.91516817 0.5455359  0.62006927]]

 [[0.95154095 0.7755872  0.10627317 0.38030994]
  [0.8472835  0.99132836 0.8122591  0.56913304]
  [0.05938315 0.40653324 0.5540432  0.47170877]]]
[[0.03635395 0.5666157  0.88078785 0.7053106 ]]
[[[ True False False False]
  [ True False  True  True]
  [False False False  True]]

 [[False False False  True]
  [ True False  True  True]
  [ True False False False]]]
[[[1. 1. 1. 0.]
  [1. 0. 1. 1.]
  [0. 1. 0. 0.]]

 [[0. 1. 1. 1.]
  [1. 1. 0. 1.]
  [0. 1. 1. 1.]]]
[[0.96491146 0.36368692 0.15789664]
 [0.9438263  0.35830712 0.45065653]]
[[0.7112533  0.35499382 0.44211316]
 [0.52771103 0.46087265 0.06759739]]
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
[[ 1.0700586  -0.396928    0.28480113]
 [ 0.696686   -0.54275686  0.6507021 ]]
[[ 1.0700586  -0.396928    0.28480113]
 [ 0.696686   -0.54275686  0.6507021 ]]

Process finished with exit code 0

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值