Learning_log—vector

Subscripting Does Not Add Elements

 

Programmers new to C++ sometimes think that subscripting a vector adds elements; it does not:

 

vector<int> ivec;   // empty vector

for (vector<int>::size_type ix = 0; ix != 10; ++ix)

    ivec[ix] = ix;  // disaster: ivec has no elements

This code is intended to insert 10 new elements into ivec, giving the elements the values 0 through 9.

 

However, ivec is an empty vector, and subscripts can only be used to fetch existing elements.
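To make the failure visible rather than silent, a minimal sketch (not part of the original log) can use at(), which range-checks and throws std::out_of_range, instead of operator[], which does no checking and is undefined behavior on an empty vector:

#include <iostream>
#include <stdexcept>
#include <vector>
using std::vector;

int main()
{
    vector<int> ivec;                     // empty vector
    try {
        ivec.at(0) = 0;                   // at() checks the index and throws
    } catch (const std::out_of_range &e) {
        std::cout << "no element at index 0: " << e.what() << '\n';
    }
    // ivec[0] = 0;                       // operator[] would be undefined behavior here
}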

 

The right way to write this loop would be:

 

for (vector<int>::size_type ix = 0; ix != 10; ++ix)

    ivec.push_back(ix); // ok: adds a new element with value ix

NOTE: An element must exist in order to subscript it; elements are not added when we assign through a subscript.
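Another correct option (a sketch, not from the original log) is to give the vector its elements up front by constructing it with a size; once the elements exist, assigning through a subscript is fine:

#include <vector>
using std::vector;

int main()
{
    vector<int> ivec(10);                 // ten elements, value-initialized to 0
    for (vector<int>::size_type ix = 0; ix != ivec.size(); ++ix)
        ivec[ix] = static_cast<int>(ix);  // ok: element ix already exists
}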
