endswith()
Checks whether a string ends with the given suffix; returns True if it does, otherwise False.
str.endswith(".txt")  # checks whether the string str ends with ".txt"
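A small runnable sketch; endswith() also accepts a tuple of suffixes, which is handy when checking several extensions at once:
filename = "report.txt"
print(filename.endswith(".txt"))            # True
print(filename.endswith((".csv", ".txt")))  # True -- a tuple of suffixes also works
print(filename.endswith(".csv"))            # False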
x.issubset(y)
Checks whether every element of set x is also contained in set y; returns True if so, otherwise False.
x = {"a", "b", "c"}
y = {"f", "e", "d", "c", "b", "a"}
z = x.issubset(y)
print(z)  # prints True
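The same check can also be written with the set comparison operators; a small sketch:
x = {"a", "b"}
y = {"a", "b", "c"}
print(x.issubset(y))  # True
print(x <= y)         # True, subset check with the operator form
print(x < y)          # True, proper subset (x != y)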
copy.deepcopy()
Copies the value of one variable into another variable (the two variables then refer to different objects), recursively copying any nested objects as well, so the copy is fully independent of the original.
import copy
a = [1, 2, 3]
d = copy.deepcopy(a)  # a and d refer to different objects
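To see why the deep copy matters, a short sketch contrasting copy.copy() (shallow) with copy.deepcopy() on nested lists:
import copy

a = [[1, 2], [3, 4]]
shallow = copy.copy(a)    # outer list copied, inner lists still shared with a
deep = copy.deepcopy(a)   # everything copied recursively

a[0].append(99)
print(shallow[0])  # [1, 2, 99] -- the shallow copy sees the change
print(deep[0])     # [1, 2]     -- the deep copy is unaffected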
round()
round(x, n) returns the floating-point number x rounded to n decimal digits.
x = round(80.23456, 2)  # 80.23
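Worth noting: Python 3 rounds ties to the nearest even digit ("banker's rounding"), and floating-point representation can also affect the result; a small sketch:
print(round(0.5))       # 0
print(round(1.5))       # 2
print(round(2.5))       # 2 -- ties go to the even neighbour
print(round(2.675, 2))  # 2.67, because 2.675 is actually stored as 2.67499...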
eval()
Evaluates a string as a Python expression, which lets you build objects such as lists, tuples, and dicts from a string.
Converting a string to a dict:
a = "{1: 'a', 2: 'b'}"
print(type(a))
b = eval(a)
print(type(b))
print(b)
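Since eval() executes arbitrary code, for plain literal data like the dict string above the standard-library ast.literal_eval() is a safer alternative; a small sketch:
import ast

a = "{1: 'a', 2: 'b'}"
b = ast.literal_eval(a)  # parses literals only, never runs arbitrary code
print(type(b), b)        # <class 'dict'> {1: 'a', 2: 'b'}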
Creating an object from a string:
class Life():
    animal = 'cat'
    food = 'banana'
    sport = 'run'
    language = 'python'
    def __init__(self):
        pass

life1 = Life()
life2 = eval("Life()")
# both lines create an instance of the Life class
In effect, eval() strips the quotes from the string and drops the text straight into the corresponding spot in the code. For example, the following two snippets are equivalent:
class Life():
    animal = 'cat'
    food = 'banana'
    sport = 'run'
    language = 'python'
    def __init__(self):
        pass

a = eval("Life()")
class Life():
    animal = 'cat'
    food = 'banana'
    sport = 'run'
    language = 'python'
    def __init__(self):
        pass

a = Life()
numel()
Returns the total number of elements contained in a tensor.
import torch
x = torch.randn(3,3)
print("number elements of x is ",x.numel())
y = torch.randn(3,10,5)
print("number elements of y is ",y.numel())
Output:
number elements of x is 9
number elements of y is 150
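A common use is counting a model's parameters; a small sketch using a single nn.Linear layer as a stand-in for a real model:
import torch
import torch.nn as nn

model = nn.Linear(10, 5)  # stand-in model: weight 5x10 plus bias 5
total = sum(p.numel() for p in model.parameters())
print(total)              # 55 = 50 weights + 5 biases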
extend()
Appends all items from another sequence to the end of a list in one call (extends the original list with the new sequence).
A = [1, 2, 3]
B = [['a', 'b']]
A.extend([4])
A.extend([5, 6])
B.extend(['c', 'd'])
B.extend([['e', 'f']])
print(A)
print(B)
Result:
[1, 2, 3, 4, 5, 6]
[['a', 'b'], 'c', 'd', ['e', 'f']]
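For contrast, append() adds the whole sequence as a single element instead of unpacking it; a small sketch:
A = [1, 2, 3]
A.append([4, 5])   # append adds the list itself as one element
print(A)           # [1, 2, 3, [4, 5]]

B = [1, 2, 3]
B.extend([4, 5])   # extend adds each element of the sequence
print(B)           # [1, 2, 3, 4, 5]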
torch.argmax()
Returns the index of the maximum value. With no dim argument the index refers to the flattened tensor; with dim, the maximum is taken along that dimension.
x = torch.randn(3, 5)
print(x)
print(torch.argmax(x))
print(torch.argmax(x, dim=0))
print(torch.argmax(x, dim=-2))
print(torch.argmax(x, dim=1))
print(torch.argmax(x, dim=-1))
Output:
tensor([[-1.0214, 0.7577, -0.0481, -1.0252, 0.9443],
[ 0.5071, -1.6073, -0.6960, -0.6066, 1.6297],
[-0.2776, -1.3551, 0.0036, -0.9210, -0.6517]])
tensor(9)
tensor([1, 0, 2, 1, 1])
tensor([1, 0, 2, 1, 1])
tensor([4, 4, 2])
tensor([4, 4, 2])
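A typical use in classification is picking the predicted class per sample from a batch of logits; a small sketch with made-up logit values:
import torch

logits = torch.tensor([[0.1, 2.0, 0.3],
                       [1.5, 0.2, 0.1]])
preds = torch.argmax(logits, dim=1)  # index of the largest logit in each row
print(preds)                         # tensor([1, 0])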
torch.cat()
Concatenates tensors along the given dimension; commonly used for concat layers in deep learning, typically:
z = [x, y]
z = torch.cat(z, 1)  # dim 0 is batch_size and dim 1 is usually the channel dim: [batch_size, ch, w, h]
import torch
x=torch.arange(1,25).reshape([2,3,4])
y=torch.arange(101,125).reshape([2,3,4])
z=[x,y]
print(z)
z=torch.cat(z,1)
print(z)
print(z.shape)
Output:
[tensor([[[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]],
[[13, 14, 15, 16],
[17, 18, 19, 20],
[21, 22, 23, 24]]]), tensor([[[101, 102, 103, 104],
[105, 106, 107, 108],
[109, 110, 111, 112]],
[[113, 114, 115, 116],
[117, 118, 119, 120],
[121, 122, 123, 124]]])]
tensor([[[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12],
[101, 102, 103, 104],
[105, 106, 107, 108],
[109, 110, 111, 112]],
[[ 13, 14, 15, 16],
[ 17, 18, 19, 20],
[ 21, 22, 23, 24],
[113, 114, 115, 116],
[117, 118, 119, 120],
[121, 122, 123, 124]]])
torch.Size([2, 6, 4])
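Related: torch.stack() also combines tensors, but it creates a new dimension instead of growing an existing one; a small sketch:
import torch

x = torch.zeros(2, 3)
y = torch.ones(2, 3)
print(torch.cat([x, y], dim=0).shape)    # torch.Size([4, 3]) -- an existing dim grows
print(torch.stack([x, y], dim=0).shape)  # torch.Size([2, 2, 3]) -- a new dim is created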
view()
Similar to reshape(): returns a tensor with the same data but a new shape (view() requires the tensor's memory to be contiguous, while reshape() copies when necessary).
x = torch.randn(4, 4)
print(x.size())
y = x.view(16)
print(y.size())
z = x.view(-1, 8)  # -1 means this dimension is inferred from the others, i.e. (4*4) / 8
print(z.size())
m = x.view(2, 2, 4)  # can also reshape into more dimensions
print(m.size())
Result:
torch.Size([4, 4])
torch.Size([16])
torch.Size([2, 8])
torch.Size([2, 2, 4])
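One caveat: because view() only works on contiguous memory, a transposed tensor needs .contiguous() first (or reshape(), which copies when needed); a small sketch:
import torch

x = torch.randn(4, 4)
t = x.t()                  # transpose returns a non-contiguous view
# t.view(16)               # this would raise a RuntimeError
print(t.contiguous().view(16).size())  # torch.Size([16])
print(t.reshape(16).size())            # torch.Size([16]), reshape copies if needed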