# Old MNIST autoencoder experiment (TF 1.x), kept commented out for reference:
'''import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data/',one_hot=True)
learning_rate = 0.01
training_epochs = 10
batch_size = 256
display_step = 1
n_input = 784
X = tf.placeholder('float',[None,n_input])
n_hidden_1 = 128
n_hidden_2 = 64
n_hidden_3 = 10
n_hidden_4 = 2
weights = {
    'encoder_h1': tf.Variable(tf.truncated_normal([n_input, n_hidden_1])),
    'encoder_h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2])),
    'encoder_h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3])),
    'encoder_h4': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_4])),
    'decoder_h1': tf.Variable(tf.truncated_normal([n_hidden_4, n_hidden_3])),
    'decoder_h2': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_2])),
    'decoder_h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_1])),
    'decoder_h4': tf.Variable(tf.truncated_normal([n_hidden_1, n_input])),
}
biases = {
    'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'encoder_b3': tf.Variable(tf.random_normal([n_hidden_3])),
    'encoder_b4': tf.Variable(tf.random_normal([n_hidden_4])),
    'decoder_b1': tf.Variable(tf.random_normal([n_hidden_3])),
    'decoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'decoder_b3': tf.Variable(tf.random_normal([n_hidden_1])),
    'decoder_b4': tf.Variable(tf.random_normal([n_input])),
}
def encoder(x):
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']), biases['encoder_b2']))
    layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_h3']), biases['encoder_b3']))
    # bottleneck layer is left linear so the 2-D code is unbounded
    layer_4 = tf.add(tf.matmul(layer_3, weights['encoder_h4']), biases['encoder_b4'])
    return layer_4
def decoder(x):
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1']))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']), biases['decoder_b2']))
    layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['decoder_h3']), biases['decoder_b3']))
    layer_4 = tf.nn.sigmoid(tf.add(tf.matmul(layer_3, weights['decoder_h4']), biases['decoder_b4']))
    return layer_4
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
y_pred = decoder_op
y_true = X
cost = tf.reduce_mean(tf.pow(y_true-y_pred,2))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    total_batch = int(mnist.train.num_examples / batch_size)
    for epoch in range(training_epochs):
        for i in range(total_batch):
            batch_xs, _ = mnist.train.next_batch(batch_size)  # labels are unused for the autoencoder
            _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
        if epoch % display_step == 0:
            print('Epoch:', '%04d' % (epoch + 1), 'cost=', '{:.9f}'.format(c))
    print('Optimization Finished!')
    # visualize the 2-D bottleneck codes of the test images, colored by digit class
    encoder_result = sess.run(encoder_op, feed_dict={X: mnist.test.images})
    plt.scatter(encoder_result[:, 0], encoder_result[:, 1], c=mnist.test.labels.argmax(axis=1))
    plt.colorbar()
    plt.show()'''
from __future__ import division
import numpy as np
import tensorflow as tf
def encoding(x):
    with tf.variable_scope('encoding', reuse=tf.AUTO_REUSE):
        conv1 = tf.layers.conv1d(inputs=x, filters=256, kernel_size=5, padding='same')
        conv1 = tf.nn.relu(conv1)
        conv2 = tf.layers.conv1d(inputs=conv1, filters=128, kernel_size=3, padding='same')
        conv2 = tf.nn.relu(conv2)
        conv3 = tf.layers.conv1d(inputs=conv2, filters=64, kernel_size=3, padding='same')
        conv3 = tf.nn.relu(conv3)
        # two output filters: the real and imaginary parts of the transmitted symbols
        conv4 = tf.layers.conv1d(inputs=conv3, filters=2, kernel_size=3, padding='same')
        # normalize each block to satisfy the average-power constraint
        layer_4_normalized = tf.scalar_mul(tf.sqrt(tf.cast(block_length / 2, tf.float32)),
                                           tf.nn.l2_normalize(conv4, dim=1))
    return layer_4_normalized
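# A note on the scaling above (my reading, not stated in the original): l2_normalize
# along axis 1 gives each of the two filter channels unit norm per block, and scaling
# by sqrt(block_length / 2) makes the total energy per block equal block_length, i.e.
# average power 1 per complex symbol, so Noise_std below matches the Eb/N0 definition.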
def decoding(x, channel_info):
    # condition the decoder on the channel coefficients (h_r, h_i)
    x_combine = tf.concat([x, channel_info], -1)
    with tf.variable_scope('decoding', reuse=tf.AUTO_REUSE):
        conv1 = tf.layers.conv1d(inputs=x_combine, filters=256, kernel_size=5, padding='same')
        conv1 = tf.nn.relu(conv1)
        # first residual block (128 filters)
        conv2_ori = tf.layers.conv1d(inputs=conv1, filters=128, kernel_size=5, padding='same')
        conv2 = tf.nn.relu(conv2_ori)
        conv2 = tf.layers.conv1d(inputs=conv2, filters=128, kernel_size=5, padding='same')
        conv2 = tf.nn.relu(conv2)
        conv2 = tf.layers.conv1d(inputs=conv2, filters=128, kernel_size=5, padding='same')
        conv2 += conv2_ori  # skip connection
        conv2 = tf.nn.relu(conv2)
        # second residual block (64 filters)
        conv3_ori = tf.layers.conv1d(inputs=conv2, filters=64, kernel_size=5, padding='same')
        conv3 = tf.nn.relu(conv3_ori)
        conv3 = tf.layers.conv1d(inputs=conv3, filters=64, kernel_size=5, padding='same')
        conv3 = tf.nn.relu(conv3)
        conv3 = tf.layers.conv1d(inputs=conv3, filters=64, kernel_size=5, padding='same')
        conv3 += conv3_ori  # skip connection
        conv3 = tf.nn.relu(conv3)
        conv4 = tf.layers.conv1d(inputs=conv3, filters=32, kernel_size=3, padding='same')
        conv4 = tf.nn.relu(conv4)
        # one logit per transmitted bit
        Decoding_logit = tf.layers.conv1d(inputs=conv4, filters=1, kernel_size=3, padding='same')
        Decoding_prob = tf.nn.sigmoid(Decoding_logit)
    return Decoding_logit, Decoding_prob
def sample_z(sample_size):
    # noise source kept from the earlier GAN-based setup; Z is fed but unused below
    return np.random.normal(size=sample_size)
def Rayleigh_noise_layer(input_layer, h_r, h_i, std):
    # channel model: y = h * x + n, with complex fading coefficient h and AWGN n
    h_complex = tf.complex(real=h_r, imag=h_i)
    input_layer_real = input_layer[:, :, 0]
    input_layer_imag = input_layer[:, :, 1]
    input_layer_complex = tf.complex(real=input_layer_real, imag=input_layer_imag)
    noise = tf.complex(real=tf.random_normal(shape=tf.shape(input_layer_complex), mean=0.0, stddev=std, dtype=tf.float32),
                       imag=tf.random_normal(shape=tf.shape(input_layer_complex), mean=0.0, stddev=std, dtype=tf.float32))
    output_complex = tf.add(tf.multiply(h_complex, input_layer_complex), noise)
    output_complex_reshape = tf.reshape(output_complex, [-1, block_length, 1])
    print('shape of output complex:', output_complex, output_complex_reshape)
    # stack real and imaginary parts back into two feature channels
    return tf.concat([tf.real(output_complex_reshape), tf.imag(output_complex_reshape)], -1)
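# Worked example (illustrative values, not from the script): with h = 0.6 + 0.8j,
# so |h| = 1, and a symbol x = 1 + 0j, the noiseless output is y = h * x = 0.6 + 0.8j;
# each real dimension of the added noise has standard deviation `std`.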
def sample_h(sample_size):
    # h_r, h_i ~ N(0, 1/2), so |h| is Rayleigh-distributed with E[|h|^2] = 1
    return np.random.normal(size=sample_size) / np.sqrt(2.)
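# A quick sanity check of the fading statistics (my addition, safe to delete):
_h_check = sample_h([100000, 2])
print('empirical E[|h|^2]:', np.mean(np.sum(_h_check ** 2, axis=-1)))  # should be close to 1.0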
""" Start of the Main function """
''' Building the Graph'''
batch_size = 320
block_length = 64
z_dim_c = 16
learning_rate = 1e-4
X = tf.placeholder(tf.float32, shape=[None, block_length, 1])
E = encoding(X)
Z = tf.placeholder(tf.float32, shape=[None, block_length, z_dim_c])  # GAN-era leftover, unused in this graph
Noise_std = tf.placeholder(tf.float32, shape=[])
h_r = tf.placeholder(tf.float32, shape=[None, 1])
h_i = tf.placeholder(tf.float32, shape=[None, 1])
# broadcast the per-example channel coefficients along the block dimension
Channel_info = tf.tile(tf.concat([tf.reshape(h_r, [-1, 1, 1]), tf.reshape(h_i, [-1, 1, 1])], -1), [1, block_length, 1])
Conditions = tf.concat([E, Channel_info], axis=-1)  # also a GAN-era leftover, unused below
# pass the encoded blocks through the real Rayleigh channel
R_sample = Rayleigh_noise_layer(E, h_r, h_i, Noise_std)
R_decodings_logit, R_decodings_prob = decoding(R_sample, Channel_info)
Tx_vars = [v for v in tf.trainable_variables() if v.name.startswith('encoding')]
Rx_vars = [v for v in tf.trainable_variables() if v.name.startswith('decoding')]
# set up solvers
loss_receiver = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=R_decodings_logit, labels=X))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
solver = optimizer.minimize(loss_receiver, var_list=Rx_vars)
# despite its name, accuracy_R is the bit error rate: the fraction of bits whose
# decoded probability lands on the wrong side of 0.5
accuracy_R = tf.reduce_mean(tf.cast((tf.abs(R_decodings_prob - X) > 0.5), tf.float32))
# a word is in error if any of its block_length bits is wrong
WER_R = 1 - tf.reduce_mean(tf.cast(tf.reduce_all(tf.abs(R_decodings_prob - X) < 0.5, 1), tf.float32))
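# Rough consistency check (assumes independent bit errors, which block fading violates):
# at bit error rate p, WER ≈ 1 - (1 - p) ** block_length; e.g. p = 0.01 and
# block_length = 64 gives WER ≈ 0.47, so small BER gaps translate to large WER gaps.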
init = tf.global_variables_initializer()
number_iterations = 10
EbNo_train = 3.  # training SNR in dB
EbNo_train = 10. ** (EbNo_train / 10.)  # convert dB to linear scale
R = 0.5  # rate used in the Eb/N0-to-noise-std conversion
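# Worked example of the conversion used in the feed_dicts below:
# Noise_std = sqrt(1 / (2 * R * EbNo)); at the 3 dB training point,
# EbNo = 10 ** 0.3 ≈ 1.995, so Noise_std = sqrt(1 / (2 * 0.5 * 1.995)) ≈ 0.708.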
def generate_batch_data(batch_size):
    global start_idx, data
    # regenerate the random training set once it has been consumed
    if start_idx + batch_size >= N_training:
        start_idx = 0
        data = np.random.binomial(1, 0.5, [N_training, block_length, 1])
    batch_x = data[start_idx:start_idx + batch_size]
    start_idx += batch_size
    return batch_x
number_steps = 5000
N_training = int(1e6)
data = np.random.binomial(1,0.5,[N_training,block_length,1])
N_test = int(1e4)
test_data = np.random.binomial(1,0.5,[N_test,block_length,1])
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    start_idx = 0
    for epoch in range(number_iterations):
        print('epoch is', epoch)
        for step in range(number_steps):
            batch_x = generate_batch_data(batch_size)
            sess.run(solver, feed_dict={X: batch_x, Z: sample_z([batch_size, block_length, z_dim_c]),
                                        h_i: sample_h([batch_size, 1]),
                                        h_r: sample_h([batch_size, 1]),
                                        Noise_std: np.sqrt(1 / (2 * R * EbNo_train))
                                        })
        loss, acc = sess.run([loss_receiver, accuracy_R], feed_dict={X: batch_x,
                                                                     h_i: sample_h([batch_size, 1]),
                                                                     h_r: sample_h([batch_size, 1]),
                                                                     Noise_std: np.sqrt(1 / (2 * R * EbNo_train))})
        print('loss:', loss, 'train BER:', acc)
    # evaluate BER/WER over an SNR sweep on held-out data
    EbNodB_range = np.arange(0, 30)
    ber = np.ones(len(EbNodB_range))
    wer = np.ones(len(EbNodB_range))
    for n in range(0, len(EbNodB_range)):
        EbNo = 10.0 ** (EbNodB_range[n] / 10.0)
        ber[n], wer[n] = sess.run([accuracy_R, WER_R],
                                  feed_dict={X: test_data, Noise_std: np.sqrt(1 / (2 * R * EbNo)),
                                             h_i: sample_h([len(test_data), 1]),
                                             h_r: sample_h([len(test_data), 1]),
                                             })
        print('SNR:', EbNodB_range[n], 'BER:', ber[n], 'WER:', wer[n])
    print(ber)
    print(wer)
Changed the training procedure: epoch = 10 (was 1000); the transmitter and receiver are trained together, no longer using a GAN as the bridge for backpropagation but going through the real channel directly; total_batch (steps) = 5000 (previously number_steps accumulated at 5000 per epoch). The goal is to cut training time (the network size should also be reduced), though BER and WER may be less accurate.
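A minimal sketch of the joint update this note describes (assuming the same loss and optimizer; solver_joint is a hypothetical name, not defined in the script above):

solver_joint = optimizer.minimize(loss_receiver, var_list=Tx_vars + Rx_vars)

Note that the script above still passes only Rx_vars to minimize, so the encoder weights stay frozen unless the solver is switched to something like this.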
Check the runtime:
from time import time
startTime = time()
with tf.Session() as sess:
    for …
        sess.run(…)
duration = time() - startTime
print('Train Finished, takes:', duration)
Hmm, it does run faster now. The training quality is indeed quite poor, though: off by roughly 8 dB.