Project Training -- Subjective Question Grading Website (8)

Project progress:

Applied a 1D convolution to the candidates' answers and extracted a feature vector for each answer.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()
import matplotlib.pyplot as plt
import csv
import os
import random
import numpy as np
from tensorflow.python.framework import ops

# ---------------------------------------------------|
# -------------------1D-data-------------------------|
# ---------------------------------------------------|

# Create the graph session
ops.reset_default_graph()
sess = tf.Session()

# Parameters for the run
data_size = 50  # length of each candidate-answer vector
conv_size = 10  # width of the convolution kernel
maxpool_size = 10  # width of the max-pooling window
stride_size = 1  # stride along the width dimension
f = open("juesha5.txt", "a")  # file that collects the 5-D fully connected outputs

csv_file = open('bert_replace50.csv')  # open the CSV file
csv_reader_lines = csv.reader(csv_file)  # read the CSV file row by row
date = []  # list that will hold the rows of the CSV
for one_line in csv_reader_lines:
    date.append(one_line)  # store each CSV row in the list 'date'
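# Assumed layout of 'bert_replace50.csv' (implied by data_size = 50 and the placeholder
# shape below, not stated explicitly in the post): each row holds one candidate answer
# as 50 numeric values.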

# Ensure reproducibility
seed = 13
np.random.seed(seed)
tf.set_random_seed(seed)

res = []  # collects the 5-D feature vector of each answer

# --------Convolution--------
def conv_layer_1d(input_1d, my_filter, stride):
    # TensorFlow's 'conv2d()' function only works with 4D arrays:
    # [batch, height, width, channels]. Here we have 1 batch, height = 1,
    # width = the length of the input, and 1 channel, so we create the 4D
    # array by inserting size-1 dimensions.
    # Handling the dimensions is critical: conv2d only accepts 4-D tensors,
    # so the 1-D input is manually expanded to shape [1, 1, 50, 1].
    input_2d = tf.expand_dims(input_1d, 0)
    input_3d = tf.expand_dims(input_2d, 0)
    input_4d = tf.expand_dims(input_3d, 3)
    # Perform convolution with stride = 1, if we wanted to increase the stride,
    # to say '2', then strides=[1,1,2,1]
    convolution_output = tf.nn.conv2d(input_4d, filter=my_filter, strides=[1, 1, stride, 1], padding="VALID")
    # Get rid of the extra dimensions, keeping only the data
    conv_output_1d = tf.squeeze(convolution_output)
    return (conv_output_1d)
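
# With data_size=50, conv_size=10, stride_size=1, and 'VALID' padding, the shapes flow
# as: [50] -> expand -> [1, 1, 50, 1] -> conv2d -> [1, 1, 41, 1] -> squeeze -> [41],
# since (50 - 10) // 1 + 1 = 41.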



# --------Activation--------
def activation(input_1d):
    return (tf.nn.relu(input_1d))



# --------Max Pool--------
def max_pool(input_1d, width, stride):
    # Just like 'conv2d()' above, max_pool() works with 4D arrays:
    # [batch_size=1, height=1, width=num_input, channels=1].
    # Because the convolution result was squeezed back to 1-D, the input here
    # has to be expanded to 4-D again before pooling.
    input_2d = tf.expand_dims(input_1d, 0)
    input_3d = tf.expand_dims(input_2d, 0)
    input_4d = tf.expand_dims(input_3d, 3)
    # Perform the max pooling with strides = [1,1,1,1]
    # If we wanted to increase the stride on our data dimension, say by
    # a factor of '2', we put strides = [1, 1, 2, 1]
    # We will also need to specify the width of the max-window ('width')
    pool_output = tf.nn.max_pool(input_4d, ksize=[1, 1, width, 1],
                                 strides=[1, 1, stride, 1],
                                 padding='VALID')
    # Get rid of extra dimensions
    pool_output_1d = tf.squeeze(pool_output)
    return (pool_output_1d)
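
# With maxpool_size=10 and stride_size=1, the pooled length is (41 - 10) // 1 + 1 = 32,
# so the shapes flow as: [41] -> [1, 1, 41, 1] -> max_pool -> [1, 1, 32, 1] -> [32].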




# --------Fully Connected--------
def fully_connected(input_layer, num_outputs):
    # First we find the needed shape of the multiplication weight matrix:
    # The dimension will be (length of input) by (num_outputs)
    weight_shape = tf.squeeze(tf.stack([tf.shape(input_layer), [num_outputs]]))
    # tf.squeeze drops the size-1 dimensions of the stacked shape tensor.

    # Initialize the weight
    weight = tf.random_normal(weight_shape, stddev=0.1)
    # Initialize the bias
    bias = tf.random_normal(shape=[num_outputs])
    # Make the 1D input array into a 2D array for matrix multiplication
    input_layer_2d = tf.expand_dims(input_layer, 0)
    # Perform the matrix multiplication and add the bias
    full_output = tf.add(tf.matmul(input_layer_2d, weight), bias)
    # Get rid of the extra dimensions, keeping only the data
    full_output_1d = tf.squeeze(full_output)
    return (full_output_1d)
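
# For this script the pooled input has length 32, so weight_shape evaluates to [32, 5]
# and the layer maps the 32-D pooled features to a 5-D output. Note that 'weight' and
# 'bias' are tf.random_normal ops rather than tf.Variables, so every sess.run() call
# re-samples them; the loop below therefore evaluates my_full_output only once per
# answer and reuses that value.
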
# Process the first 10 candidate answers. Note that the graph (placeholder, filter,
# and layers) is rebuilt from scratch for every answer.
for i in range(10):
    data_1d = np.array(date[i], dtype=np.float32)  # CSV rows are strings; convert to floats

    # Placeholder
    x_input_1d = tf.placeholder(dtype=tf.float32, shape=[data_size])

    # Create filter for convolution.
    my_filter = tf.Variable(tf.random_normal(shape=[1, conv_size, 1, 1]))
    # Create convolution layer
    my_convolution_output = conv_layer_1d(x_input_1d, my_filter, stride=stride_size)

    # Create activation layer
    my_activation_output = activation(my_convolution_output)

    # Create max-pool layer
    my_maxpool_output = max_pool(my_activation_output, width=maxpool_size, stride=stride_size)

    # Create fully connected layer that reduces the features to 5 dimensions
    my_full_output = fully_connected(my_maxpool_output, 5)

    # Run graph
    # Initialize Variables
    init = tf.global_variables_initializer()
    sess.run(init)

    feed_dict = {x_input_1d: data_1d}

    # Convolution output (length 41)
    print(sess.run(my_convolution_output, feed_dict=feed_dict))

    # Activation output (ReLU is element-wise, so the length stays 41)
    print(sess.run(my_activation_output, feed_dict=feed_dict))

    # Max-pool output (length 32)
    print(sess.run(my_maxpool_output, feed_dict=feed_dict))

    # Fully connected output: the 5-D feature vector of this answer. Evaluate it once
    # and reuse the value, because the layer's weight and bias are re-sampled on every
    # sess.run call and repeated runs would give different numbers.
    full_output = sess.run(my_full_output, feed_dict=feed_dict)
    print(full_output)
    res.append(full_output)
    print(i)
    f.write(str(full_output) + '\n')
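
# Release the output file, the CSV file, and the session once all answers are processed.
f.close()
csv_file.close()
sess.close()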

[Figure: console output of the script above]

The output in the figure shows, in order, the convolution result, the max-pooling result, and the fully connected result; the candidate-answer feature vector is successfully reduced to five dimensions.
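
The 'res' list above collects one 5-dimensional vector per answer but is not used further in this script. A minimal sketch of stacking and saving it for the later steps of the project (the output filename here is an assumption, not from the post):

features = np.vstack(res)  # shape (10, 5): one row per candidate answer
np.savetxt("answer_features5.csv", features, delimiter=",")  # assumed filename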
