import math

import torch
import torch.nn.functional as F
print('sigmoid'.center(60, '-'))

def sigmoid(x):
    return 1 / (1 + math.exp(-x))

print(torch.sigmoid(torch.tensor([1., 2.])))
print([sigmoid(1.), sigmoid(2.)])
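# Added sanity check (a minimal sketch, not in the original listing): the
# handwritten scalar sigmoid should agree with torch.sigmoid up to float32
# precision.
xs = torch.tensor([1., 2.])
assert torch.allclose(torch.sigmoid(xs), torch.tensor([sigmoid(1.), sigmoid(2.)]), atol=1e-6)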
print('softmax'.center(60, '-'))

def softmax(xs):
    # Exponentiate each entry, then normalise by the sum of exponentials.
    z_exp = [math.exp(x) for x in xs]
    sum_z_exp = sum(z_exp)
    return [z / sum_z_exp for z in z_exp]

print(torch.softmax(torch.tensor([1., 2., 7.]), dim=-1))
print(softmax([1., 2., 7.]))
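# Added variant (an illustrative sketch, not in the original listing): the
# naive version overflows once math.exp(x) exceeds the double range
# (x > ~709). Subtracting max(xs) first is the standard fix; softmax is
# invariant to shifting every input by the same constant.
def softmax_stable(xs):
    m = max(xs)
    z_exp = [math.exp(x - m) for x in xs]
    sum_z_exp = sum(z_exp)
    return [z / sum_z_exp for z in z_exp]

print(softmax_stable([1., 2., 7.]))    # same values as softmax above
print(softmax_stable([1000., 1001.]))  # the naive softmax would overflow here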
print('tanh'.center(60, '-'))

def tanh(x):
    return (math.exp(x) - math.exp(-x)) / (math.exp(x) + math.exp(-x))

print(torch.tanh(torch.tensor([1., 2.])))
print([tanh(1.), tanh(2.)])
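# Added check (an illustrative sketch): tanh relates to sigmoid via the
# identity tanh(x) = 2*sigmoid(2x) - 1.
print([2 * sigmoid(2 * x) - 1 for x in (1., 2.)])  # matches tanh(1.), tanh(2.)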
print('cross_entropy'.center(60, '-'))

def my_cross_entropy(input, target, reduction="mean"):
    # Softmax over the class dimension, then the negative log of the
    # probability assigned to each row's target class.
    exp = torch.exp(input)
    tmp1 = exp.gather(1, target.unsqueeze(-1)).squeeze()
    tmp2 = exp.sum(1)
    softmax = tmp1 / tmp2
    log = -torch.log(softmax)
    if reduction == "mean":
        return log.mean()
    return log.sum()
input = torch.tensor([[0.1, 0.9], [0.9, 0.1]])
target = torch.tensor([1, 0])

loss1_mean = F.cross_entropy(input, target)
loss2_mean = my_cross_entropy(input, target)
print(loss1_mean)
print(loss2_mean)

loss1_sum = F.cross_entropy(input, target, reduction="sum")
loss2_sum = my_cross_entropy(input, target, reduction="sum")
print(loss1_sum)
print(loss2_sum)
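# Added checks (illustrative sketches, not in the original listing).
# F.cross_entropy is log_softmax followed by nll_loss:
loss3_mean = F.nll_loss(F.log_softmax(input, dim=1), target)
print(loss3_mean)  # equals loss1_mean and loss2_mean

# my_cross_entropy exponentiates raw logits, which overflows for large
# inputs. A stabler rewrite uses torch.logsumexp, since
# log softmax(x)[t] = x[t] - logsumexp(x):
def my_cross_entropy_stable(input, target, reduction="mean"):
    log_probs = input - torch.logsumexp(input, dim=1, keepdim=True)
    loss = -log_probs.gather(1, target.unsqueeze(-1)).squeeze()
    return loss.mean() if reduction == "mean" else loss.sum()

print(my_cross_entropy_stable(input, target))  # same value again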
def bce_loss_with_logit(y_pred, y_true, reduction='mean'):
    # Squash the logits with torch.sigmoid (the scalar math.exp version
    # above would fail on tensors), then apply the binary cross-entropy
    # formula element-wise.
    y_pred = torch.sigmoid(y_pred)
    loss = -y_true * torch.log(y_pred) - (1 - y_true) * torch.log(1 - y_pred)
    if reduction == 'mean':
        return torch.mean(loss)
    raise NotImplementedError
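# Added usage check (a minimal sketch; the example logits and labels are
# arbitrary): compare against the built-in, which fuses the sigmoid and the
# log for numerical stability.
y_pred = torch.tensor([0.8, -1.2, 2.5])
y_true = torch.tensor([1., 0., 1.])
print(F.binary_cross_entropy_with_logits(y_pred, y_true))
print(bce_loss_with_logit(y_pred, y_true))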