A Python "Plane War" game written my own way

I started teaching myself Python about half a year ago, just to play around. I bought an introductory book and watched a few video tutorials online; for Plane War I mainly followed a certain "-ma" training outfit's tutorial, the one with roughly 600 episodes. But after walking you through a bare shell of the Plane War game the tutorial just stops, with no follow-up, and a lot of what I wanted to learn was never covered, presumably to get you to buy their paid course. Still, I did pick up the basic syntax and the general way of thinking about programming.

The game code follows my own design, so it differs in places from both the tutorial and what you find online, and the animation effects are things I figured out on my own. Learning while writing like this is quite satisfying: I write, debug, track down and fix my own bugs, and when something doesn't turn out the way I expected, I work out how to revise and polish the code myself. It's good fun, though my code certainly still has plenty of room for optimization.
Let me share the results first. Apart from version 1, every later version includes the source code; the game assets were found online.
Netdisk link
Compared with the tutorial, version 1 adds an animated exhaust trail for the player's plane and explosion animations when the player's plane or an enemy plane is destroyed. The bigger enemy now takes 5 hits to destroy. The player has 3 lives, shown in the bottom-right corner: each death removes one, after which the player respawns at the spawn point, and hitting an enemy on the last life ends the game. None of this was taught in the tutorial.
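For readers following along, here is a minimal sketch of how this kind of hit-point and lives logic can be expressed with pygame sprites. The class names Enemy and PlayerPlane and their fields are simplified illustrations, not the actual game code:

# Sketch only: illustrative classes, not the real game code.
import pygame

class Enemy(pygame.sprite.Sprite):
    def __init__(self, image, hp, speed):
        super().__init__()
        self.image = image
        self.rect = image.get_rect()
        self.hp = hp            # e.g. 5 hits for the bigger enemy in version 1
        self.speed = speed

    def hit(self):
        """Take one bullet hit; remove the sprite only when hp reaches zero."""
        self.hp -= 1
        if self.hp <= 0:
            self.kill()         # this is where an explosion animation would start

class PlayerPlane(pygame.sprite.Sprite):
    def __init__(self, image, spawn_pos, lives=3):
        super().__init__()
        self.image = image
        self.rect = image.get_rect(center=spawn_pos)
        self.spawn_pos = spawn_pos
        self.lives = lives      # shown in the bottom-right corner

    def crash(self):
        """Called when the plane collides with an enemy; returns True on game over."""
        self.lives -= 1
        if self.lives > 0:
            self.rect.center = self.spawn_pos   # respawn at the spawn point
            return False                        # game continues
        return True                             # last life used up: game over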

Version 2 adds background music and sound effects. Version 3 adds a scoring system and a large enemy type, and restructures the main program by turning the main program itself into a class. The new large enemy has 12 hit points and flies at half the speed of the medium enemy, whose hit points were raised from 5 to 6. The first time you run the game and the high score gets updated after all three lives are used up, a highscore.txt file is created in the same directory as the game program to store the record. Every later run reads the historical high score from that file and displays it in the game; if the current score exceeds the historical record, the displayed high score updates in sync with the current score, but it is only written back to the file once the three lives are gone. Destroying a small enemy is worth 100 points, a medium enemy 500, and a large enemy 1000; the player's plane getting hit also adds points.
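Here is a minimal sketch of how the highscore.txt handling and the score table can be written. The function names and the SCORES dictionary are simplified illustrations, not the actual game code:

# Sketch only: illustrative high-score persistence, not the real game code.
import os

HIGHSCORE_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              "highscore.txt")

# points per destroyed enemy, by size
SCORES = {"small": 100, "medium": 500, "large": 1000}

def load_highscore():
    """Read the historical record; 0 if the file does not exist yet."""
    try:
        with open(HIGHSCORE_FILE) as f:
            return int(f.read().strip() or 0)
    except (FileNotFoundError, ValueError):
        return 0

def save_highscore(score):
    """Write the record back; called only once all three lives are used up."""
    if score > load_highscore():
        with open(HIGHSCORE_FILE, "w") as f:
            f.write(str(score))

def displayed_highscore(current_score, highscore):
    """During play the displayed record follows the current score in real time."""
    return max(current_score, highscore)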

There is no difficulty progression yet; I'll get to it gradually. I want to work out the design and logic on my own, because that is how this knowledge really becomes mine. A screenshot of the game is attached.
[Game screenshot]
Interested beginners are welcome to discuss this with me. I know most people here are experts, so please don't hold back with your advice. Thanks!
