path = "D:/"
os
import os
# split a path into base name and extension, e.g. for "file.txt": "file" is the base name, ".txt" is the extension
1.os.path.splitext("123.jpg")
[out]: ("123", ".jpg")
# get the size of a file in bytes
2.os.path.getsize(path)
if os.path.isfile(path):
    print(os.path.getsize(path))
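A runnable sketch tying the two calls together (the path below is hypothetical):

import os

path = "D:/cat.jpg"  # hypothetical file
name, ext = os.path.splitext(path)  # -> ("D:/cat", ".jpg")
if os.path.isfile(path):
    # getsize returns the file size in bytes
    print(name, ext, os.path.getsize(path))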
Path
from pathlib import Path
1.Path(path) / "case"
[out]: D:/case
str(Path(path)) + os.sep + "case"
[out]: D:/case
tail = Path("D:/cat.jpg").suffix
[out]: .jpg
head = Path("D:/cat.jpg").stem
[out]: cat
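The same calls as one runnable sketch (the file name is hypothetical):

from pathlib import Path

p = Path("D:/") / "case" / "cat.jpg"  # "/" joins path segments
print(p.suffix)  # .jpg  (the extension, like os.path.splitext(...)[1])
print(p.stem)    # cat   (file name without the extension)
print(p.name)    # cat.jpg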
str
# insert str1 between every element of str2 (each character, for a string)
1.str1.join(str2)
str1 = "-"
str2 = "123"
[out]: 1-2-3
# x is the separator, number is the maximum number of splits, counted from the right
2.str1.rsplit(x,number)
str1 = "123"
x = '1'
number = 1
[out]: ["","23"]
# combined: replaces the last occurrence of x in str2 with str1 (see the sketch after this section)
3.str1.join(str2.rsplit(x,number))
str1 = os.sep+"labels"+os.sep
str2 = "123"
[out]: "/labels/23"
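This rsplit/join combination is the usual trick for swapping one path segment, e.g. rewriting an image path into a label path (the directory names here are hypothetical):

import os

img_path = os.sep.join(["data", "images", "train", "0001.jpg"])
sa = os.sep + "images" + os.sep
sb = os.sep + "labels" + os.sep
# replace the last occurrence of /images/ with /labels/
label_path = sb.join(img_path.rsplit(sa, 1))
print(label_path)  # data/labels/train/0001.jpg (with "/" as os.sep)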
yaml
import yaml
# dict_variable is a dictionary; sort_keys defaults to True, which sorts the keys alphabetically
1.yaml.dump(dict_variable,yaml_file,sort_keys)
safe_dump does the same as dump, but only serializes standard YAML types
with open("test.yaml",'w') as f:
    yaml.dump(dict_variable,f,sort_keys=False)
2.yaml.load(yaml_file,Loader)
with open("test.yaml",'r') as f:
    files = yaml.load(f,Loader=yaml.FullLoader)
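A minimal dump/load round trip, assuming a plain dict:

import yaml

cfg = {"lr": 0.01, "epochs": 30, "names": ["cat", "dog"]}
with open("test.yaml", "w") as f:
    yaml.dump(cfg, f, sort_keys=False)  # sort_keys=False keeps insertion order
with open("test.yaml") as f:
    loaded = yaml.load(f, Loader=yaml.FullLoader)
assert loaded == cfg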
vars(opt) -> converts the argparse Namespace opt into a dict
3.vars(opt)
import argparse
parse = argparse.ArgumentParser()
# a store_true flag must be optional (start with '-'); argparse rejects store_true on positionals
parse.add_argument('--adam', action='store_true', help='123')
parse.add_argument('-n_a', nargs='?', type=int, default=1)
parse.add_argument('--w-b', nargs='+', type=str, default='oo')
opt = parse.parse_args(['--adam'])  # passing --adam explicitly so the output below matches
opt.epoch = 30
opt.global_rank = 1
opt.b = opt.adam
[out]: {'adam': True, 'n_a': 1, 'w_b': 'oo', 'epoch': 30, 'global_rank': 1, 'b': True}
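A common reason vars() shows up next to yaml: dumping the parsed options to a config file. A sketch, reusing opt from the snippet above:

import yaml

with open("opt.yaml", "w") as f:
    # vars(opt) turns the Namespace into a plain dict that yaml can serialize
    yaml.dump(vars(opt), f, sort_keys=False)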
glob
import glob
test_a = glob.glob("D:"+os.sep+"*.*")
test_b = glob.iglob("D:"+os.sep+"*.*")
glob.glob returns a list
glob.iglob returns a generator (an _iglob generator object), which is lazier and saves memory
for val in test_b:
    pass
# note: test_b is a generator and is now exhausted; re-create it before iterating again
for idx,val in enumerate(glob.iglob("D:"+os.sep+"*.*")):
    pass
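A sketch of the list-vs-generator difference (the folder is hypothetical):

import glob
import os

pattern = os.path.join("data", "*.jpg")  # hypothetical folder
files = glob.glob(pattern)   # list, fully built in memory, reusable
lazy = glob.iglob(pattern)   # generator, yields one path at a time
print(len(files))
print(sum(1 for _ in lazy))  # consumes the generator
print(sum(1 for _ in lazy))  # 0: a generator is exhausted after one pass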
@contextmanager
a context manager wraps a block of code with setup and teardown logic
from contextlib import contextmanager
import torch.distributed

@contextmanager
def torch_distributed_zero_first(local_rank: int):
    # non-main processes block here until the main process reaches the second barrier
    # requires a torch.distributed backend; NCCL runs only on Linux, not on Windows
    if local_rank not in [-1, 0]:
        torch.distributed.barrier()
    yield
    # the main process enters the barrier, releasing the waiting processes
    if local_rank == 0:
        torch.distributed.barrier()
with torch_distributed_zero_first(rank):
    attempt_download(weights)
execution order: 1. the code before yield runs on entering the with block (if local_rank not in [-1,0])
2. the with-block body runs at the yield (attempt_download(weights))
3. the code after yield runs on leaving the with block (if local_rank == 0)
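A minimal sketch (no torch required) that makes the enter/body/exit order visible:

from contextlib import contextmanager

@contextmanager
def trace():
    print("1. before yield runs on entering the with block")
    yield
    print("3. after yield runs on leaving the with block")

with trace():
    print("2. the with-block body runs at the yield")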
zip
zip and unzip
zip() takes iterables as arguments, packs their corresponding elements into tuples, and returns an iterator over those tuples, which saves memory
zip(x,y)
# zip(x,y) -> pairs the i-th element of x with the i-th element of y
x = [1, 2, 3, 4, 5]
y = [6, 7, 8, 9, 10]
zip_list = list(zip(x,y))
[out]: [(1, 6), (2, 7), (3, 8), (4, 9), (5, 10)]
zip(*zip_list)
# zip(*zip_list) -> unpacks the pairs and zips them back together, i.e. a transpose that recovers the original rows
x = [1, 2, 3, 4, 5]
y = [6, 7, 8, 9, 10]
unzip_2_array = list(zip(*zip_list))
[out]: [(1, 2, 3, 4, 5), (6, 7, 8, 9, 10)]
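The same star-unpacking acts as a transpose for any list of rows:

matrix = [[1, 2, 3],
          [4, 5, 6]]
print(list(zip(*matrix)))  # [(1, 4), (2, 5), (3, 6)]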
*args
def add(*num):
    print(num)
    print(type(num))
    a = num[0]
    print(a)

if __name__ == "__main__":
    num = [1, 2, 3, 5]
    # passes num as a single argument: inside add, num == ([1, 2, 3, 5],)
    add(num)
    # *num unpacks the list into four separate arguments: inside add, num == (1, 2, 3, 5)
    add(*num)
EasyDict
EasyDict lets you access a dictionary's keys as attributes
from easydict import EasyDict as edict
import argparse
parse = argparse.ArgumentParser()
parse.add_argument("--one", default=1, type=int)
parse.add_argument("--two", default=2, type=int)
parse.add_argument("--three", default=3, type=int)
opt = parse.parse_args()
args = vars(opt)
out = edict(args)
out.five = 5
print(out)
print(out.three)
[out]:{'one': 1, 'two': 2, 'three': 3, 'five': 5}
3
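EasyDict also converts nested dicts recursively, so deeper keys chain as attributes (the config values here are hypothetical):

from easydict import EasyDict as edict

cfg = edict({"model": {"backbone": "resnet50", "depth": 50}})
print(cfg.model.backbone)  # resnet50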
colon type annotation
a : int = 10
The int after variable a is a type annotation. It is only a hint: the Python interpreter does not check that a's value actually matches the annotated type, so the annotation can be anything, e.g. a: str = 10 also runs without error.
a : int = 10
[out]:10
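The annotation is still recorded, though, in the module-level __annotations__ dict:

a: int = 10
print(__annotations__)  # {'a': <class 'int'>}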
hasattr
checks whether an object has an attribute with the given name
class Test:
    a = 10
    b = 11

t = Test()
print(hasattr(t,"a"))
print(hasattr(t,"c"))
[out]:
True
False
numpy
import numpy as np

np.transpose(x,axes)
# permute the axes of an array
n_array = np.arange(30).reshape(2,3,5) # shape (2,3,5)
n_array.transpose(1,0,2) # swap axes 0 and 1 -> shape (3,2,5)
# or, equivalently:
np.transpose(n_array,(1,0,2)) # shape (3,2,5)
np.expand_dims(x,axis)
n_array = np.arange(10) # shape (10,)
np.expand_dims(n_array,axis=0) # shape (1,10)
np.expand_dims(n_array,axis=1) # shape (10,1)
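Both shape operations, verified in one runnable sketch:

import numpy as np

n_array = np.arange(30).reshape(2, 3, 5)
assert n_array.transpose(1, 0, 2).shape == (3, 2, 5)
v = np.arange(10)                                  # shape (10,)
assert np.expand_dims(v, axis=0).shape == (1, 10)
assert np.expand_dims(v, axis=1).shape == (10, 1)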
pytorch
import numpy as np
import torch

torch.unsqueeze(tensor,dim)
# insert a new dimension of size 1: dim=0 adds it at position 0, dim=1 at position 1
tensor = torch.as_tensor(np.arange(10))
torch.unsqueeze(tensor,dim=1) # torch.Size([10, 1])
torch.unsqueeze(tensor,dim=0) # torch.Size([1, 10])
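The method form works too, and squeeze is the inverse:

import torch

t = torch.arange(10)       # shape (10,)
t2 = t.unsqueeze(0)        # shape (1, 10), same as torch.unsqueeze(t, dim=0)
assert t2.squeeze(0).shape == t.shape  # squeeze removes the size-1 dimension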