Reading notes on 《TensorFlow实战Google深度学习框架》 — visualizing embeddings and building TensorFlow's jar and .so files

I have recently been reading 《TensorFlow实战Google深度学习框架》 and am excerpting and annotating the key parts for later reference. Some of this reflects my own understanding; if anything is wrong, please point it out, and if anything infringes a copyright, please contact me to have it removed. Please credit the source when reposting. Thanks.


Visualizing the trained embeddings

# Step 6: Visualize the embeddings.

# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
  assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
  plt.figure(figsize=(18, 18))  # in inches
  for i, label in enumerate(labels):
    x, y = low_dim_embs[i, :]
    plt.scatter(x, y)
    plt.annotate(label,
                 xy=(x, y),
                 xytext=(5, 2),
                 textcoords='offset points',
                 ha='right',
                 va='bottom')

  plt.savefig(filename)


try:
  # pylint: disable=g-import-not-at-top
  import os
  from tempfile import gettempdir
  from sklearn.manifold import TSNE
  import matplotlib.pyplot as plt

  # Set the default font so Chinese labels render correctly.
  plt.rcParams['font.sans-serif'] = ['SimHei']
  plt.rcParams['font.family'] = 'sans-serif'
  # Keep minus signs from being rendered as boxes.
  plt.rcParams['axes.unicode_minus'] = False

  # Reduce the embeddings to 2-D with t-SNE and plot the first 500 words.
  tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
  plot_only = 500
  low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
  labels = [reverse_dictionary[i] for i in range(plot_only)]
  plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png'))

except ImportError as ex:
  print('Please install sklearn, matplotlib, and scipy to show embeddings.')
  print(ex)
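
This snippet assumes two names defined earlier in the word2vec tutorial script it is excerpted from: final_embeddings (the trained embedding matrix, L2-normalized row by row) and reverse_dictionary (a map from integer word IDs back to word strings). A minimal sketch of how they are typically produced (an assumption based on the standard word2vec_basic example, with placeholder data):

import numpy as np

# Minimal sketch (assumption: mirrors the word2vec_basic tutorial this excerpt
# comes from). `embeddings` stands in for the trained [vocab_size, dim] weight
# matrix and `dictionary` maps each word string to its integer ID.
embeddings = np.random.randn(500, 128)              # placeholder for trained weights
dictionary = {'word%d' % i: i for i in range(500)}  # placeholder vocabulary

# L2-normalize each row so that cosine similarity reduces to a dot product.
norm = np.sqrt(np.sum(np.square(embeddings), axis=1, keepdims=True))
final_embeddings = embeddings / norm

# Invert the vocabulary: integer ID -> word string, as plot_with_labels expects.
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))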

Building TensorFlow's jar and .so files on Linux

Run the build commands below from the TensorFlow source root; all file paths are relative to that root.

Build the .so:

 

bazel build -c opt //tensorflow/contrib/android:libtensorflow_inference.so \
  --crosstool_top=//external:android/crosstool \
  --host_crosstool_top=@bazel_tools//tools/cpp:toolchain \
  --cpu=armeabi-v7a

 

Output location:

 

bazel-bin/tensorflow/contrib/android/libtensorflow_inference.so
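
To target a different Android ABI, change the --cpu value; for example (an assumption based on the standard Bazel Android CPU names, e.g. arm64-v8a or x86_64):

bazel build -c opt //tensorflow/contrib/android:libtensorflow_inference.so \
  --crosstool_top=//external:android/crosstool \
  --host_crosstool_top=@bazel_tools//tools/cpp:toolchain \
  --cpu=arm64-v8a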

 

Build the jar:

 

bazel build //tensorflow/contrib/android:android_tensorflow_inference_java

 

Output location:

 

bazel-bin/tensorflow/contrib/android/libandroid_tensorflow_inference_java.jar
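
Both artifacts are only the runtime; on the device they load a frozen GraphDef (.pb) in which all variables have been baked into constants. A minimal sketch of that export step, assuming a live TF 1.x session sess and a hypothetical output node named 'output':

import tensorflow as tf

# Minimal sketch: freeze a trained TF 1.x graph so the Android .so/.jar can
# load it. `sess` and the node name 'output' are hypothetical placeholders.
def freeze_to_pb(sess, output_node_name, pb_path):
    # Bake every variable in the graph into the GraphDef as a constant.
    frozen_graph_def = tf.graph_util.convert_variables_to_constants(
        sess, sess.graph_def, [output_node_name])
    # Serialize the frozen GraphDef to a .pb file for the Android app's assets.
    with tf.gfile.GFile(pb_path, 'wb') as f:
        f.write(frozen_graph_def.SerializeToString())

# Example usage (hypothetical names):
# freeze_to_pb(sess, 'output', '/tmp/frozen_model.pb')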



Author: 追逐丶
Link: https://www.jianshu.com/p/d3e2eced12b2
Source: Jianshu (简书)

Copyright belongs to the author. For commercial reposting, please contact the author for authorization; for non-commercial reposting, please credit the source.



