python
字符串find()
查找成功,返回首index;
查找失败,返回-1;
# str.find returns the index of the first occurrence, or -1 when absent.
s = "Hello, World!"
index = s.find("World")
print(index) # Output: 7
列表操作
lt = [1, 2, 3]
# Fixed NameError: the original sliced an undefined name `ls` instead of `lt`.
# Slicing never raises for an out-of-range stop; it is clamped to len(lt).
temp = lt[0:10]
单元素元组
当我们只有一个元素的元组时,需要在这个元素后面加上逗号,以表示这是一个元组。如果不加逗号,Python就会把这个元素当做其他数据类型,而不是元组。
# A one-element tuple needs a trailing comma; without it the parentheses
# are plain grouping and the value keeps its own type.
my_tuple = (3,)
my_tuple = (3)  # no comma: this is just the int 3 (rebinds my_tuple)
print(type(my_tuple)) # Output: <class 'int'>
序列化,反序列化,pickle协议
序列化:将数据结构或对象,转换为一系列字节,以便于文件存储,内存存储,网络传输等
在python中,使用pickle协议可以实现序列化和反序列化。注意:切勿对不受信任的数据调用pickle.loads,反序列化恶意数据可以执行任意代码。
os
os.path.splitext()
import os

# os.path.splitext splits a path into (root, extension) at the last dot.
path = '/path/to/file.txt'
name, ext = os.path.splitext(path)
print(name) # /path/to/file
print(ext) # .txt
# str.split breaks a delimited string into a list of fields.
string = 'a,b,c,d'
delimiter = ','
substrings = string.split(delimiter)
print(substrings) # ['a', 'b', 'c', 'd']
os.path.basename()
import os

# os.path.basename returns the final component of a path.
path = "/home/user/documents/file.txt"
filename = os.path.basename(path)
print(filename)
输出
file.txt
多线程
Queue()
线程安全,适用于多个线程共享的数据结构
创建进程(下面的代码使用multiprocessing.Process,而不是线程)
# Spawns a worker running bg_target(self.queue). NOTE(review): this is
# multiprocessing.Process (a new OS process), not a thread.
Process(target=bg_target, args=(self.queue,))
Process(target=bg_target, args=(self.queue,)) 创建了一个新的进程,其中target参数为bg_target函数,args参数为一个元组,包含了bg_target函数所需要的参数。这里将self.queue作为参数传递给了bg_target函数。新创建的进程会运行bg_target函数。
启动进程
# Start every previously created worker process in self.process.
for p in self.process: p.start()
深度学习
使用多块gpu
简单结构深度学习项目,完整代码
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
import torch.nn.functional as F
from torch.nn.parallel import DataParallel
# Select the GPU when one is available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class MyModel(nn.Module):
    """Small CNN for 3x32x32 inputs (e.g. CIFAR-10).

    Two 3x3 conv layers (both 64 channels, padding keeps the 32x32 spatial
    size), then two fully connected layers producing 10 class logits.
    """

    def __init__(self):
        super(MyModel, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.fc1 = nn.Linear(64 * 32 * 32, 512)
        self.fc2 = nn.Linear(512, 10)

    def forward(self, x):
        """Return (batch, 10) logits for a (batch, 3, 32, 32) input."""
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        # flatten(1) keeps the batch dimension intact; unlike the original
        # view(-1, 64 * 32 * 32) it raises on a spatial-size mismatch instead
        # of silently regrouping samples across the batch.
        x = x.flatten(1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
class MyLoss(nn.Module):
    """Thin module wrapper around cross-entropy loss.

    Wrapping the functional form in an nn.Module lets the criterion be moved
    with .to(device) alongside the model.
    """

    def __init__(self):
        super(MyLoss, self).__init__()

    def forward(self, output, target):
        """Return the mean cross-entropy of `output` logits vs. `target` labels."""
        return F.cross_entropy(output, target)
model = MyModel().to(device)
loss_fn = MyLoss().to(device)

# Fixed: the original unconditionally did DataParallel(model, device_ids=[0, 1]),
# which crashes on CPU-only or single-GPU hosts. Wrap only when >1 GPU exists.
if torch.cuda.device_count() > 1:
    model = DataParallel(model, device_ids=list(range(torch.cuda.device_count())))

optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# Fixed: `epochs` was never defined in the original (NameError).
epochs = 10

# NOTE(review): train_loader / test_loader were undefined in the original.
# They must be torch.utils.data.DataLoader instances defined before this point.

# Training loop
model.train()  # ensure dropout/batchnorm are in training mode
for epoch in range(epochs):
    for inputs, targets in train_loader:
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = loss_fn(outputs, targets)
        loss.backward()
        optimizer.step()

# Evaluation loop
model.eval()
total_loss = 0.0
with torch.no_grad():  # no gradients needed during evaluation
    for inputs, targets in test_loader:
        inputs, targets = inputs.to(device), targets.to(device)
        outputs = model(inputs)
        # Fixed: the original computed the eval loss but discarded it.
        total_loss += loss_fn(outputs, targets).item()
默认dtype
numpy的ndarray:float64(np.zeros/np.ones等创建函数默认dtype为float64;np.array由输入推断)
pytorch的tensor:float32