Commonly used Python code snippets (a continuously updated collection)

Aspect-ratio-preserving (letterbox) resize of an image for deep learning, then reversing the resize to restore the original size; verified to work

import cv2
import numpy as np


def restore_image(image, original_size):

    # original width and height of the image before letterbox resizing
    height, width = original_size

    # scale factor that was used when letterboxing
    scale = min(image.shape[1] / width, image.shape[0] / height)

    # size of the valid (non-padded) region inside the letterboxed image
    new_width = int(width * scale)
    new_height = int(height * scale)

    # offsets of the valid region (it was centered when padding)
    x_offset = (image.shape[1] - new_width) // 2
    y_offset = (image.shape[0] - new_height) // 2

    # crop the valid region out of the padded image
    restored_image = image[y_offset:y_offset + new_height, x_offset:x_offset + new_width]

    # resize back to the original size
    # restored_image = cv2.resize(restored_image, None, None, 1/scale, 1/scale, interpolation=cv2.INTER_LANCZOS4)
    restored_image = cv2.resize(restored_image, (width, height), interpolation=cv2.INTER_LANCZOS4)

    return restored_image



def resize_image(image, target_size, fill_value):
    # original width and height of the image
    height, width = image.shape[:2]

    # scale factor that keeps the aspect ratio
    scale = min(target_size[0] / width, target_size[1] / height)

    # width and height after scaling
    new_width = int(width * scale)
    new_height = int(height * scale)
    # print(" new_width = {}, new_height = {} ".format(new_width, new_height))

    # resize the image
    resized_image = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_LANCZOS4)

    # create a blank image of the target size filled with a constant value
    padded_image = np.full((target_size[1], target_size[0], image.shape[2]), fill_value, dtype=np.uint8)

    # padding offsets
    x_offset = (target_size[0] - new_width) // 2
    y_offset = (target_size[1] - new_height) // 2
    # print(" x_offset = {}, y_offset = {} ".format(x_offset, y_offset))

    # place the resized image at the center of the padded image
    padded_image[y_offset:y_offset + new_height, x_offset:x_offset + new_width] = resized_image

    return padded_image


# read the image
image = cv2.imread('1.jpg')

# target size (here 512x512)
target_size = (512, 512)

# padding value
fill_value = 114

# letterbox resize and pad
resized_image = resize_image(image, target_size, fill_value)

# restore the resized image back to the original size
restored_image = restore_image(resized_image, (image.shape[0], image.shape[1]))

cv2.imwrite("1_resize.jpg", resized_image)
save_params = [cv2.IMWRITE_JPEG_QUALITY, 100]
cv2.imwrite("1_restored.jpg", restored_image, save_params)
# # show the original image and the resized image
# cv2.imshow('Original Image', image)
# cv2.imshow('Resized Image', resized_image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
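A minimal sketch to sanity-check the round trip, assuming 1.jpg exists and using the two helpers above; the restored image should match the original size, with only small interpolation differences:

check_original = cv2.imread('1.jpg')
check_resized = resize_image(check_original, (512, 512), 114)
check_restored = restore_image(check_resized, check_original.shape[:2])

# same shape as the original; pixel values differ only by interpolation error
assert check_restored.shape == check_original.shape
diff = np.abs(check_original.astype(np.float32) - check_restored.astype(np.float32)).mean()
print("mean absolute difference:", diff)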

 

Traversing folders and files

import os

# Walk a directory tree and return the image file paths together with the file names;
# adjust the extension filter as needed for your application.
def get_img_file(file_name):
    imagelist = []
    namelist = []
    for parent, dirnames, filenames in os.walk(file_name):
        for filename in filenames:
            if filename.lower().endswith(('.bmp', '.dib', '.png', '.jpg', '.jpeg', '.pbm', '.pgm', '.ppm', '.tif', '.tiff')):
                imagelist.append(os.path.join(parent, filename))
                namelist.append(filename)
    return imagelist, namelist

Listing only the files, or only the sub-directories, of a single folder

def GetDirectory(path):
    # return the immediate sub-directories of path (non-recursive)
    directory = os.listdir(path)
    directory_list = []
    for dirs in directory:
        if not os.path.isfile(os.path.join(path, dirs)):
            directory_list.append(os.path.join(path, dirs))
    return directory_list


def GetFile(path):
    # return the files directly under path (non-recursive)
    files = os.listdir(path)
    file_list = []
    for file in files:
        if os.path.isfile(os.path.join(path, file)):
            file_list.append(os.path.join(path, file))
    return file_list
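A quick usage sketch for the traversal helpers above (the directory paths are hypothetical):

if __name__ == '__main__':
    images, names = get_img_file(r"D:\dataset\images")   # hypothetical path
    sub_dirs = GetDirectory(r"D:\dataset")                # hypothetical path
    files = GetFile(r"D:\dataset")
    print(len(images), len(sub_dirs), len(files))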


Similar in function to the code above: it walks all folders and files under a root directory, but level by level, so it is easy to adjust and modify by hand:

import os
import shutil


def preprocess(src_root, dst_root):
    """
    :param src_root: source root directory
    :param dst_root: destination root directory
    :return:
    """
    if not os.path.isdir(src_root):
        print("[Err]: invalid source root")
        return

    if not os.path.isdir(dst_root):
        os.makedirs(dst_root)
        print("{} made".format(dst_root))

    # create the directory structure used for MOT training
    dst_img_dir_train = dst_root + '/images/train'
    dst_img_dir_test = dst_root + '/images/test'
    dst_labels_with_ids = dst_root + '/labels_with_ids'
    if not os.path.isdir(dst_img_dir_train):
        os.makedirs(dst_img_dir_train)
    if not os.path.isdir(dst_img_dir_test):
        os.makedirs(dst_img_dir_test)
    if not os.path.isdir(dst_labels_with_ids):
        os.makedirs(dst_labels_with_ids)

    # walk src_root, build the training directory tree and copy the files
    for x in os.listdir(src_root):
        x_path = src_root + '/' + x
        if os.path.isdir(x_path):
            for y in os.listdir(x_path):
                if y.endswith('.jpg'):
                    y_path = x_path + '/' + y
                    if os.path.isfile(y_path):
                        # create the target image directory for training
                        dst_img1_dir = dst_img_dir_train + '/' + x + '/img1'
                        if not os.path.isdir(dst_img1_dir):
                            os.makedirs(dst_img1_dir)

                        # copy image to train image dir
                        dst_f_path = dst_img1_dir + '/' + y
                        if not os.path.isfile(dst_f_path):
                            shutil.copy(y_path, dst_img1_dir)
                            print('{} cp to {}'.format(y, dst_img1_dir))
                        else:
                            print('{} already exists.'.format(dst_f_path))
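A usage sketch (the paths below are hypothetical examples):

if __name__ == '__main__':
    preprocess('/data/mot/raw_sequences', '/data/mot/formatted')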

Aspect-ratio-preserving resize

import cv2
import matplotlib.pyplot as plt

# wrap the ratio-preserving resize in a function
def resize_img_keep_ratio(img_name, target_size):
    img = cv2.imread(img_name)  # read the image
    old_size = img.shape[0:2]  # original image size (h, w)
    ratio = min(float(target_size[i]) / (old_size[i]) for i in range(len(old_size)))  # ratio between target and original size; take the smaller one
    new_size = tuple([int(i * ratio) for i in old_size])  # size that keeps the aspect ratio
    img = cv2.resize(img, (new_size[1], new_size[0]))  # resize with that ratio
    pad_w = target_size[1] - new_size[1]  # number of pixels to pad along the width
    pad_h = target_size[0] - new_size[0]  # number of pixels to pad along the height
    top, bottom = pad_h // 2, pad_h - (pad_h // 2)
    left, right = pad_w // 2, pad_w - (pad_w // 2)
    img_new = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, None, (0, 0, 0))
    return img_new

if __name__ == "__main__":
    img = r'D:\SAMM\crop\006_1\1.jpg'  # path of the image to process; replace with your own
    target_size = [224, 224]  # target image size
    resized_img = resize_img_keep_ratio(img, target_size)
    # OpenCV loads images as BGR; convert to RGB before showing with matplotlib
    plt.imshow(cv2.cvtColor(resized_img, cv2.COLOR_BGR2RGB))
    plt.show()

Generating a video from a set of images

import os
import cv2

def generate_video(path, size, fps=25):
    # fps: frame rate, size: (width, height) of the output video
    videowriter = cv2.VideoWriter("./resultout/test.mp4", cv2.VideoWriter_fourcc(*'mp4v'), fps, size)
    # path = r'F:/data/predict_landmark/'
    for rootpath, dirs, names in os.walk(path):
        names.sort()
        for name in names:
            img = cv2.imread(os.path.join(rootpath, name))
            if img is None:  # skip files that are not readable images
                continue
            print("--", name)
            videowriter.write(img)
    videowriter.release()

generate_video('./resultout/frame/', (1920, 1080), 25)
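A variant sketch that infers the frame size from the first image, so the size argument does not have to be passed by hand (the output path here is an assumed example):

def generate_video_auto(path, out_file="./resultout/test_auto.mp4", fps=25):
    names = sorted(n for n in os.listdir(path) if n.lower().endswith(('.jpg', '.png')))
    if not names:
        return
    first = cv2.imread(os.path.join(path, names[0]))
    h, w = first.shape[:2]
    writer = cv2.VideoWriter(out_file, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
    for name in names:
        img = cv2.imread(os.path.join(path, name))
        if img is not None and img.shape[:2] == (h, w):  # frames must match the writer size
            writer.write(img)
    writer.release()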

Deleting files and folders

Sometimes a folder needs to be emptied before running some code. Deleting its files one by one with os.remove() can fail when a file is in use. A simple workaround is to forcibly delete the whole folder first and then recreate a folder with the same name:

import os
import shutil

shutil.rmtree('folder_to_clear')
os.mkdir('folder_to_clear')
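A slightly more defensive sketch: tolerate the folder not existing yet and create intermediate directories as needed (the folder name is a hypothetical example).

import os
import shutil

def clear_dir(path):
    # remove the folder and everything inside it if it exists, then recreate it
    shutil.rmtree(path, ignore_errors=True)
    os.makedirs(path, exist_ok=True)

clear_dir('cache_dir')  # hypothetical folder name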

Moving a file from one folder to another and renaming it at the same time is also easy with shutil:

shutil.move('source_folder/source_name', 'target_folder/target_name')

Similarly, copying a file from one folder to another can be done with the code below:

shutil.copy("source_dir/filename", "dest_dir/filename")
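To copy an entire folder rather than a single file, shutil.copytree can be used; a brief sketch (the directory names are hypothetical):

import shutil

# copies the whole directory tree; dirs_exist_ok requires Python 3.8+
shutil.copytree("source_dir", "dest_dir", dirs_exist_ok=True)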

Splitting a dataset

There are several ways to do this; here is a simple one first, followed by a more complete one.

import os
import random


def main():
    random.seed(0)  # fix the random seed so the split is reproducible

    files_path = "F:\\车道线大图"
    assert os.path.exists(files_path), "path: '{}' does not exist.".format(files_path)

    val_rate = 0.5
    # list the files, keep the part of each name before the first '.', and sort
    files_name = sorted([file.split(".")[0] for file in os.listdir(files_path)])
    files_num = len(files_name)
    # sample k indices without replacement to form the validation set
    val_index = random.sample(range(0, files_num), k=int(files_num * val_rate))
    train_files = []
    val_files = []
    for index, file_name in enumerate(files_name):
        if index in val_index:
            val_files.append(file_name)
        else:
            train_files.append(file_name)

    try:
        train_f = open("train.txt", "x")
        eval_f = open("val.txt", "x")
        train_f.write("\n".join(train_files))
        eval_f.write("\n".join(val_files))
        train_f.close()
        eval_f.close()
    except FileExistsError as e:
        print(e)
        exit(1)


if __name__ == '__main__':
    main()
'''
Description: for learning purposes
Version: 1.0
Author: 赵守风
Email: 1583769112@qq.com
Date: 2021-03-24 14:17:19
LastEditors: zsf
LastEditTime: 2021-03-24 17:06:01
'''

import os
import random
import shutil
import math



'''
@description: split the dataset directly, copying the data into the corresponding
              training, validation and test sets; class labels are not considered here
@param {*} datapath: path of the original data
@param {*} rootpath: root path where the split data is saved
@param {*} train_rate: fraction of the data used for training
@param {*} val_rate: fraction of the data used for validation
@param {*} test_rate: fraction of the data used for testing
@return {*}
@author: zsf
'''
def data_split(datapath, rootpath="./data_split", train_rate=0.8, val_rate=0.1, test_rate=0.1):
    for parent, dirnames, filenames in os.walk(datapath):
        for dirname in dirnames:
            filepaths = os.listdir(os.path.join(parent, dirname))
            # keep only image files
            filepaths = [x for x in filepaths if x.lower().endswith(('.bmp', '.dib', '.png', '.jpg', '.jpeg', '.pbm', '.pgm', '.ppm', '.tif', '.tiff'))]
            # start splitting
            random.shuffle(filepaths)
            for i in range(len(filepaths)):
                if i < math.floor(train_rate * len(filepaths)):
                    sub_path = os.path.join(rootpath, 'train_set', dirname)
                elif i < math.floor((train_rate + val_rate) * len(filepaths)):
                    sub_path = os.path.join(rootpath, 'val_set', dirname)
                elif i < len(filepaths):
                    sub_path = os.path.join(rootpath, 'test_set', dirname)
                if not os.path.exists(sub_path):
                    os.makedirs(sub_path)
                shutil.copy(os.path.join(datapath, dirname, filepaths[i]), os.path.join(sub_path, filepaths[i]))  # copy the image from source to destination
        break  # only the class folders directly under datapath are handled




'''
@description: same split, but only writes txt index files without moving the images
@param {*} datapath: path of the original data
@param {*} rootpath: root path where the split lists are saved
@param {*} train_rate: fraction of the data used for training
@param {*} val_rate: fraction of the data used for validation
@param {*} test_rate: fraction of the data used for testing
@return {*}
@author: zsf
'''
def data_split_txt(datapath, rootpath="./data_split", train_rate=0.8, val_rate=0.1, test_rate=0.1):
    if not os.path.exists(rootpath):
        os.makedirs(rootpath)
    for parent, dirnames, filenames in os.walk(datapath):
        index = 0
        for dirname in dirnames:
            filepaths = os.listdir(os.path.join(parent, dirname))
            # keep only image files
            filepaths = [x for x in filepaths if x.lower().endswith(('.bmp', '.dib', '.png', '.jpg', '.jpeg', '.pbm', '.pgm', '.ppm', '.tif', '.tiff'))]
            # start splitting
            random.shuffle(filepaths)
            for i in range(len(filepaths)):
                if i < math.floor(train_rate * len(filepaths)):
                    txt_path = 'train_set.txt'
                elif i < math.floor((train_rate + val_rate) * len(filepaths)):
                    txt_path = 'val_set.txt'
                elif i < len(filepaths):
                    txt_path = 'test_set.txt'
                with open(os.path.join(rootpath, txt_path), mode='a') as file:
                    file.write(str(index) + ' ' + os.path.join(datapath, dirname, filepaths[i]) + '\n')
            index += 1
        break  # only the class folders directly under datapath are handled
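A usage sketch, assuming a class-per-folder layout such as ./dataset/<class_name>/*.jpg (the paths are hypothetical):

if __name__ == '__main__':
    data_split('./dataset', rootpath='./data_split', train_rate=0.8, val_rate=0.1, test_rate=0.1)
    data_split_txt('./dataset', rootpath='./data_split')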

Evenly splitting one image into tiles

import os
from PIL import Image

path = "D:\\SR\\USRNet-master\\USRNet-master\\testsets\\image\\20.jpg"
smallpath = 'D:\\SR\\USRNet-master\\USRNet-master\\testsets\\small'

def splitimage(src, rownum, colnum, dstpath):
    img = Image.open(src)
    w, h = img.size
    if rownum <= h and colnum <= w:
        print('Original image info: %sx%s, %s, %s' % (w, h, img.format, img.mode))
        print('Splitting the image, please wait...')

        s = os.path.split(src)
        if dstpath == '':
            dstpath = s[0]
        fn = s[1].split('.')
        basename = fn[0]
        ext = fn[-1]

        num = 0
        rowheight = h // rownum
        colwidth = w // colnum
        for r in range(rownum):
            for c in range(colnum):
                box = (c * colwidth, r * rowheight, (c + 1) * colwidth, (r + 1) * rowheight)
                p = os.path.join(dstpath, basename + '_' + str(num) + '.' + ext)
                img.crop(box).save(p)
                num = num + 1
                print("all = {}, num = {}".format(rownum * colnum, num))

        print('Done, %s tiles were generated.' % num)
    else:
        print('Invalid row/column split parameters!')

splitimage(path, 10, 10, smallpath)

Converting a video to images

import cv2

cap = cv2.VideoCapture('D:/SR/src/2.mp4')
i = 0
while True:
    ret, frame = cap.read()
    if not ret:  # stop when the video ends or a frame cannot be read
        break
    # cv2.imshow("image", frame)
    cv2.imwrite("D:/SR/USRNet-master/USRNet-master/testsets/image/" + str(i) + '.jpg', frame)
    i += 1
cap.release()
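A variant sketch that saves only every Nth frame; the function name and parameters are illustrative:

import os
import cv2

def video_to_frames(video_path, out_dir, every_n=10):
    os.makedirs(out_dir, exist_ok=True)
    cap = cv2.VideoCapture(video_path)
    idx = saved = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        if idx % every_n == 0:  # keep one frame out of every_n
            cv2.imwrite(os.path.join(out_dir, "{}.jpg".format(saved)), frame)
            saved += 1
        idx += 1
    cap.release()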

Path string handling

def save_image(output_dir, image_path):
    # build an output path in output_dir that reuses the original file name
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    image_name = os.path.split(image_path)[-1]   # file name with extension
    name, ext = os.path.splitext(image_name)     # split name and extension
    return os.path.join(output_dir, "{}".format(name)) + ext
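A small usage sketch (the paths are hypothetical):

out_path = save_image('./results', './data/frame_001.jpg')
print(out_path)  # './results/frame_001.jpg' on POSIX systems; the separator depends on the OS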

Mutual deletion of unpaired files: e.g. .jpg images and their annotation files (.xml or .json). Sometimes either the image or the annotation is missing, and the leftover file needs to be removed; the code below pairs .jpg with .json.

import os

# remove images that have no matching .json annotation
for parent, dirnames, filenames in os.walk("E:\\MOTdata\\helmet_20210908\\data1"):
    for filename in filenames:
        if filename.lower().endswith(('.bmp', '.dib', '.png', '.jpg', '.jpeg', '.pbm', '.pgm', '.ppm', '.tif', '.tiff')):
            path = os.path.join(parent, filename)
            stem = os.path.splitext(filename)[0]
            print(stem + ".json")
            if stem + ".json" not in filenames:
                os.remove(path)

# remove .json files that have no matching .jpg image
for parent, dirnames, filenames in os.walk("E:\\MOTdata\\helmet_20210908\\data1"):
    for filename in filenames:
        if filename.lower().endswith('.json'):
            path = os.path.join(parent, filename)
            stem = os.path.splitext(filename)[0]
            if stem + ".jpg" not in filenames:
                os.remove(path)
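A non-destructive sketch to preview the unmatched files before deleting anything (using the same root directory as above):

import os

root = "E:\\MOTdata\\helmet_20210908\\data1"
for parent, dirnames, filenames in os.walk(root):
    jpg_stems = {os.path.splitext(f)[0] for f in filenames if f.lower().endswith('.jpg')}
    json_stems = {os.path.splitext(f)[0] for f in filenames if f.lower().endswith('.json')}
    print(parent, "jpg without json:", jpg_stems - json_stems)
    print(parent, "json without jpg:", json_stems - jpg_stems)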

Python: saving a dictionary to a file

Using the pickle module's dump function to save a dictionary to a file:

import pickle

my_dict = {'Apple': 4, 'Banana': 2, 'Orange': 6, 'Grapes': 11}
# save to file
with open("myDictionary.pkl", "wb") as tf:
    pickle.dump(my_dict, tf)
# load from file (note the "rb" mode for reading)
with open("myDictionary.pkl", "rb") as tf:
    new_dict = pickle.load(tf)

print(new_dict)

Using the NumPy save function to save a dictionary to a file:

import numpy as np

my_dict = {'Apple': 4, 'Banana': 2, 'Orange': 6, 'Grapes': 11}
# save to file
np.save('file.npy', my_dict)
# load from file; np.load returns a 0-d object array, .item() recovers the dict
new_dict = np.load('file.npy', allow_pickle=True).item()
print(new_dict)

Using the json module's dump function to save a dictionary to a file:

import json

my_dict = {'Apple': 4, 'Banana': 2, 'Orange': 6, 'Grapes': 11}
# save to file
tf = open("myDictionary.json", "w")
json.dump(my_dict, tf)
tf.close()
# load from file
tf = open("myDictionary.json", "r")
new_dict = json.load(tf)
print(new_dict)
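One caveat worth noting: json cannot serialize NumPy scalars or arrays directly; a hedged sketch of converting them with a default hook (the dictionary contents and file name are illustrative):

import json
import numpy as np

metrics = {'score': np.float32(0.95), 'count': np.int64(3)}  # illustrative values
with open("metrics.json", "w") as tf:
    # .item() turns a NumPy scalar into a plain Python number
    json.dump(metrics, tf, default=lambda o: o.item() if isinstance(o, np.generic) else str(o))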

Traversing a large dataset (splitting the data into chunks and processing them in parallel processes)

import os
from pathlib import Path
import shutil
import pandas as pd
from multiprocessing import Process, Queue
from time import sleep


# root = "/data/FaceData/FaceTrainData"
# relative_path = "/data/FaceData/FaceTrainData_Mask"

# file_path = os.path.join(root,"FaceTrainDataMask.txt")

# with open(file_path) as files:
#     for item in files:   
#         temp = item.strip('\n').split(" ")            
#         p = Path(item).parts
#         p1 = os.path.join(relative_path,p[0])
#         if(not os.path.exists(p1)):
#             os.mkdir(p1)
#         p2 = os.path.join(p1,p[1])
#         if(not os.path.exists(p2)):
#             os.mkdir(p2)

#         name = p[2].strip('\n').split(' ')

#         image_path = os.path.join(root, temp[0])
#         save_path = os.path.join(p2, name[0])
#         shutil.copy(image_path,save_path)



def process_data(my_queue):
    root = "/data/FaceData/FaceTrainData"
    relative_path = "/data/FaceData/FaceTrainData_NotMask"
    # print(df)
    file_path = os.path.join(root, "FaceTrainDataNotMask.txt")

    # for row in df.itertuples():
    #     print(row[1])
    # with open(file_path) as files:
    df = my_queue.get()
    i = 0
    for row in df.itertuples():
        item = row[1]
        temp = item.strip('\n').split(" ")
        p = Path(item).parts
        p1 = os.path.join(relative_path, p[0])
        if not os.path.exists(p1):
            os.mkdir(p1)
        p2 = os.path.join(p1, p[1])
        if not os.path.exists(p2):
            os.mkdir(p2)

        name = p[2].strip('\n').split(' ')

        image_path = os.path.join(root, temp[0])
        save_path = os.path.join(p2, name[0])
        # print(image_path)
        shutil.copy(image_path, save_path)
        i += 1
    print("-------------------------------------i={}--------------------------".format(i))


root = "/data/FaceData/FaceTrainData"
file_path = os.path.join(root,"FaceTrainDataNotMask.txt")

x = int(20979331/40)
chunksize = x   # 每100万行处理一次数据
reader = pd.read_table(file_path, encoding = 'utf-8', iterator=True, chunksize=chunksize,names=["A"])
chunk_num  = 0
for chunk in reader:
    my_queue = Queue()
    print("Chunk: " + str(chunk_num) + ' >'*30 + '\n')
    df = chunk
    my_queue.put(df)
    # print(df)
    # for row in df.itertuples():
    #     print(row[1])
    p = Process(target=process_data,args=(my_queue,))
    p.start()


    chunk_num += 1

while True:
    sleep(10)
    print("------------------")
