How to Play Pong with Logic Circuits / jQuery 3D Pong: Let's Have Fun!


jQuery 3D Pong

Yes, that's right, folks! Amazing. This Pong game was built with jQuery rather than Flash or Java! Who said coding was boring?

How to play: "Just move your cursor over the field below to control the paddle's movement. Click to hit the ball, then watch it fly toward your opponent. Don't forget to hit it back, or your opponent scores a point."
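The demo itself implements this control scheme with jQuery in the browser. Purely as an illustration of the mechanic described above, here is a minimal sketch of the same idea in Python with pygame; the window, paddle dimensions, and the `print` on click are hypothetical stand-ins, not the demo's actual code:

```python
import pygame

# Minimal sketch: the paddle tracks the cursor's vertical position,
# and a mouse click stands in for "hitting" the ball.
pygame.init()
screen = pygame.display.set_mode((640, 480))
clock = pygame.time.Clock()
paddle = pygame.Rect(20, 200, 10, 80)

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        elif event.type == pygame.MOUSEBUTTONDOWN:
            print("hit!")  # a real game would test paddle/ball overlap here
    # Paddle follows the cursor, clamped inside the window
    paddle.centery = pygame.mouse.get_pos()[1]
    paddle.clamp_ip(screen.get_rect())
    screen.fill((0, 0, 0))
    pygame.draw.rect(screen, (255, 255, 255), paddle)
    pygame.display.flip()
    clock.tick(60)
pygame.quit()
```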

Play jQuery 3D jPong and have fun!

(Screenshot: jPong, a no-blocks Pong game built with jQuery)

Source: http://demo.marcofolio.net/jpong/

Translated from: https://www.sitepoint.com/jpong-3d-pong-created-jquery/

How to play Pong with logic circuits

OK, here is complete code that uses PyTorch to implement the DQN algorithm for playing Pong; you can use it as a reference:

```python
import gym
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import random
import numpy as np
from collections import deque

# Hyperparameters
BATCH_SIZE = 32
GAMMA = 0.99
EPS_START = 1.0
EPS_END = 0.02
EPS_DECAY = 1000000
TARGET_UPDATE = 1000
MEMORY_CAPACITY = 100000  # full frame stacks are stored, so the buffer needs several GB of RAM
LR = 1e-4
ENV_NAME = "PongNoFrameskip-v4"  # frameskip-free variant, as required by AtariPreprocessing

# Set up the environment. AtariPreprocessing converts the raw 210x160 RGB frames
# into the 84x84 grayscale observations the network below expects.
# (This code assumes the classic gym API, i.e. gym < 0.26.)
env = gym.wrappers.AtariPreprocessing(gym.make(ENV_NAME))
n_actions = env.action_space.n

# Select the device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Q-network: three conv layers over a stack of 4 frames, then two fully connected layers
class DQN(nn.Module):
    def __init__(self):
        super(DQN, self).__init__()
        self.conv1 = nn.Conv2d(4, 32, kernel_size=8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self.fc1 = nn.Linear(7 * 7 * 64, 512)
        self.fc2 = nn.Linear(512, n_actions)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        return self.fc2(x)

# Experience replay buffer
class ReplayMemory(object):
    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = deque(maxlen=capacity)

    def push(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        batch = random.sample(self.memory, batch_size)
        state, action, reward, next_state, done = zip(*batch)
        return state, action, reward, next_state, done

    def __len__(self):
        return len(self.memory)

# DQN agent: policy network, target network, and the training loop
class DQNAgent(object):
    def __init__(self):
        self.policy_net = DQN().to(device)
        self.target_net = DQN().to(device)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=LR)
        self.memory = ReplayMemory(MEMORY_CAPACITY)
        self.steps_done = 0
        self.episode_durations = []
        self.episode_rewards = []

    def select_action(self, state):
        # Epsilon-greedy exploration with an exponentially decaying threshold
        sample = random.random()
        eps_threshold = EPS_END + (EPS_START - EPS_END) * \
            np.exp(-1. * self.steps_done / EPS_DECAY)
        self.steps_done += 1
        if sample > eps_threshold:
            with torch.no_grad():
                # Scale uint8 pixels to [0, 1] before feeding the network
                state = torch.FloatTensor(np.array(state)).unsqueeze(0).to(device) / 255.0
                q_value = self.policy_net(state)
                action = q_value.max(1)[1].view(1, 1)
        else:
            action = torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long)
        return action

    def optimize_model(self):
        if len(self.memory) < BATCH_SIZE:
            return
        state, action, reward, next_state, done = self.memory.sample(BATCH_SIZE)
        state_batch = torch.FloatTensor(np.array(state)).to(device) / 255.0
        action_batch = torch.LongTensor(action).unsqueeze(1).to(device)
        reward_batch = torch.FloatTensor(reward).to(device)
        next_state_batch = torch.FloatTensor(np.array(next_state)).to(device) / 255.0
        done_batch = torch.BoolTensor(done).to(device)  # bool, so it can be used as a mask
        # Q(s, a) for the actions actually taken
        q_values = self.policy_net(state_batch).gather(1, action_batch)
        # max_a' Q_target(s', a') for non-terminal next states only
        next_q_values = torch.zeros(BATCH_SIZE, device=device)
        next_q_values[~done_batch] = self.target_net(next_state_batch[~done_batch]).max(1)[0].detach()
        expected_q_values = (next_q_values * GAMMA) + reward_batch
        loss = F.smooth_l1_loss(q_values, expected_q_values.unsqueeze(1))
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

    def train(self, num_episodes):
        for i_episode in range(num_episodes):
            state = env.reset()
            # Initial state: the first frame repeated 4 times
            state = np.stack((state, state, state, state), axis=0)
            episode_reward = 0
            for t in range(10000):
                action = self.select_action(state)
                next_state, reward, done, _ = env.step(action.item())
                episode_reward += reward
                # Slide the 4-frame window: newest frame first
                next_state = np.append(np.expand_dims(next_state, 0), state[:3, :, :], axis=0)
                self.memory.push(state, action.item(), reward, next_state, done)
                state = next_state
                self.optimize_model()
                if done:
                    self.episode_durations.append(t + 1)
                    self.episode_rewards.append(episode_reward)
                    if i_episode % 10 == 0:
                        print("Episode: {}, Reward: {}".format(i_episode, episode_reward))
                    break
            # Periodically sync the target network with the policy network
            if i_episode % TARGET_UPDATE == 0:
                self.target_net.load_state_dict(self.policy_net.state_dict())
        env.close()

if __name__ == "__main__":
    agent = DQNAgent()
    agent.train(1000)
```

Note: this code requires the gym library (with its Atari extras, e.g. `pip install gym[atari]` for classic gym) and PyTorch. Make sure you have installed them before running the code.
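For intuition about the exploration schedule, here is a tiny standalone sketch of how the epsilon-greedy threshold used in `select_action` decays with the step count. The constants mirror the hyperparameters above; the helper `eps_threshold` is introduced purely for illustration:

```python
import numpy as np

EPS_START, EPS_END, EPS_DECAY = 1.0, 0.02, 1000000

def eps_threshold(step):
    # Same exponential decay as select_action above
    return EPS_END + (EPS_START - EPS_END) * np.exp(-step / EPS_DECAY)

for step in (0, 100000, 500000, 1000000, 3000000):
    print("step={:>8d}  epsilon={:.3f}".format(step, eps_threshold(step)))
# step=       0  epsilon=1.000
# step=  100000  epsilon=0.907
# step=  500000  epsilon=0.614
# step= 1000000  epsilon=0.381
# step= 3000000  epsilon=0.069
```

In other words, the agent still acts almost entirely at random after 100,000 steps; with EPS_DECAY = 1000000 it takes a few million steps to approach the EPS_END floor of 0.02, which is a typical pace for Atari DQN training.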