Python: How a Programmer Confesses to a Programmer Girl

As the title says, the confession needs a bit of camouflage first. We are in the same trade, after all; without the disguise she would see through it right away.

Here's the code:

#!D:/workplace/python
# -*- coding: utf-8 -*-
# @File  : heart.py
# @Author: WangYe
# @Date  : 2019/4/23
# @Software: PyCharm
import numpy
from keras.models import Sequential
from keras.layers import Dense, LSTM, Activation
from keras.activations import relu
from keras.utils import np_utils
# fix random seed for reproducibility
numpy.random.seed(7)
# define the raw dataset
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# create mapping of characters to integers (0-25) and the reverse
char_to_int = dict((c, i) for i, c in enumerate(alphabet))
int_to_char = dict((i, c) for i, c in enumerate(alphabet))
# prepare the dataset of input to output pairs encoded as integers
seq_length = 3
dataX = []
dataY = []
for i in range(0, len(alphabet) - seq_length, 1):
    seq_in = alphabet[i:i + seq_length]
    seq_out = alphabet[i + seq_length]
    dataX.append([char_to_int[char] for char in seq_in])
    dataY.append(char_to_int[seq_out])
    print(seq_in, '->', seq_out)
# reshape X to be [samples, time steps, features]
#print(alphabet)
X = numpy.reshape(dataX, (len(dataX), 1, seq_length))
# reshape X into 23 samples (letters already mapped to integers), each with
# a single time step holding 3 features, i.e. a matrix like [[[22 23 24]]]
#print(dataY)
# normalize
X = X / float(len(alphabet))  # scale X into the 0-1 range
print(X)
# one hot encode the output variable
y = np_utils.to_categorical(dataY)  # one-hot encode the labels into a matrix
print(y)
# reference: https://blog.csdn.net/zlrai5895/article/details/79560353
#print(X)
# print(X.shape[0])#23
# print(X.shape[1])#1
# print(X.shape[2])#3
# print(y.shape[0])#23
# print(y.shape[1])#26

# create and fit the model
model = Sequential()
model.add(LSTM(units=32, input_shape=(X.shape[1], X.shape[2])))  # 32 LSTM units
model.add(Activation(relu))
model.add(Dense(y.shape[1], activation='softmax'))  # fully connected output layer
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X, y, epochs=500, batch_size=1, verbose=2)  # nb_epoch was removed in Keras 2; batch_size=1 keeps the run slow on purpose
# summarize performance of the model
scores = model.evaluate(X, y, verbose=0)
print("Model Accuracy: %.2f%%" % (scores[1]*100))
# demonstrate some model predictions
for pattern in dataX:
    x = numpy.reshape(pattern, (1, 1, len(pattern)))
    x = x / float(len(alphabet))
    prediction = model.predict(x, verbose=0)
    index = numpy.argmax(prediction)
    result = int_to_char[index]
    seq_in = [int_to_char[value] for value in pattern]
    # print(seq_in, "->", result)

#####   the key part
import time
sentence = "WY love LQP"
for char in sentence.split():
    allChar = []
    for y in range(12, -12, -1):
        lst_con = ''
        for x in range(-30, 30):
            # implicit heart curve: a point is inside the heart when
            # (x'^2 + y'^2 - 1)^3 - x'^2 * y'^3 <= 0, with x' = 0.05x, y' = 0.1y
            formula = ((x * 0.05) ** 2 + (y * 0.1) ** 2 - 1) ** 3 - (x * 0.05) ** 2 * (y * 0.1) ** 3
            if formula <= 0:
                lst_con += char[x % len(char)]
            else:
                lst_con += ' '
        allChar.append(lst_con)
    print('\n'.join(allChar))
    time.sleep(1)

The first 75 or so lines are a Keras LSTM demo (that part is the cover). The dataset is just the alphabet string "ABCDEFG...", so the script runs on any machine that has the keras package. Deep-learning code was chosen precisely because it iterates for a while: ask the programmer girl to help you train the neural network, then ask her what accuracy it reaches. By the time she reckons the epochs are about done, this is what she sees:

[screenshot: a heart shape printed in the console, filled with the words of the sentence]

The sentence string after line 75 controls what fills the heart shape; feel free to change it to your own message~
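If you want to tweak the size or density of the heart, the only knobs are the two scale factors (0.05 and 0.1) and the loop ranges: a character is printed at (x, y) exactly when ((0.05x)^2 + (0.1y)^2 - 1)^3 - (0.05x)^2 * (0.1y)^3 <= 0. Below is a minimal parameterized sketch of the same idea; the function name print_heart and its default arguments are my own additions, not part of the original script:

import time

def print_heart(message, width=60, height=24, x_scale=0.05, y_scale=0.1):
    # print each word of `message` repeated inside the implicit heart curve
    # (x'^2 + y'^2 - 1)^3 - x'^2 * y'^3 <= 0, with x' = x*x_scale, y' = y*y_scale
    for word in message.split():
        rows = []
        for y in range(height // 2, -height // 2, -1):
            row = ''
            for x in range(-width // 2, width // 2):
                xs, ys = x * x_scale, y * y_scale
                inside = (xs ** 2 + ys ** 2 - 1) ** 3 - xs ** 2 * ys ** 3 <= 0
                row += word[x % len(word)] if inside else ' '
            rows.append(row)
        print('\n'.join(rows))
        time.sleep(1)

print_heart("WY love LQP")

A larger width/height combined with smaller scale factors gives a bigger, smoother heart; the defaults reproduce the original 60x24 grid.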
