Python for Industrial Application Development: Secondary Development of the Code Soft Software (1)

Introduction:

While doing full-stack development of a lightweight MES system in Python, I needed to drive the Code Soft industrial printing software from my code. When I looked into feasibility, I could only find Java and C# demo samples online and no Python examples. So I inspected its ActiveX component myself and, following the usage shown in the C# demos, put together the Python sample below for reference.
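A note on how I inspected the component: pywin32 can generate early-binding wrappers for a registered ActiveX/COM library, which makes its objects and methods browsable from Python. The snippet below is only a rough sketch and assumes Code Soft is installed and has registered the "Lppx2.Application" ProgID on the machine; running it starts a CodeSoft automation instance just so that dir() can show what the Application object exposes.

# Hedged sketch: browse the CodeSoft (Lppx2) COM interface with pywin32.
# Assumes CodeSoft is installed and "Lppx2.Application" is a registered ProgID.
import win32com.client

app = win32com.client.gencache.EnsureDispatch("Lppx2.Application")  # generates early-binding wrappers on first use
print(type(app))   # the generated wrapper class under win32com.gen_py
print(dir(app))    # methods/properties exposed by the Application object

# Alternatively, run `python -m win32com.client.makepy` and pick the CodeSoft
# type library from the list to generate the same wrappers without starting the app.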

Mini example

"""
    CodeSoft软件调用的最简Demo

by mark 2020
"""
import win32com.client as winc  # 主要功能库 用来调用codesoft软件的核心部分

if __name__ == '__main__':
    my_printer = winc.DispatchEx("Lppx2.Application")   # 调用CodeSoft软件的核心组件
    # 无传参式打印
    my_lab_doc = my_printer.ActiveX.Documents.Open(
                        # 规范路径 转路径为windows路径
                        os.path.normpath('codesoft软件的lab文件的路径'),False)  # 实例打开.lab文件
    my_lab_doc.PrintLabelEx(2)     # 传输该.lab文件的模板内容和打印指令给打印机 Function参数为打印张数
    my_lab_doc.FormFeed     # 控制打印机执行上述动作

"""
    # 传参式打印  用不用参数是由codesoft软件创建的.lab模板本身决定的
    my_lab_doc_arg = my_printer.ActiveX.Documents.Open(
                        # 规范路径 转路径为windows路径
                        os.path.normpath('label document dispath'),False)

    my_lab_doc_arg.Variables.FreeVariables(i).Value = 'arg value'  # 指定第i个参数接受参数
    ...
    my_lab_doc_arg.PrintLabelEx(number)
    my_lab_doc_arg.FormFeed
"""
