1.指定一个源文件,实现copy到目标目录
# Exercise 1: create a sample source file, then copy it to a destination.
fn1 = 'test.txt'
fn2 = 'test2.txt'

# Create the source file with three lines of sample content, then seek back
# to the start and echo it so the content can be eyeballed.
with open(fn1, 'w+') as f:
    f.writelines('\n'.join(['abc', '123', 'stone']))
    f.seek(0)
    print(f.read())
# Fix: the original called f.close() after the `with` block, but the context
# manager has already closed the file — the extra close() was a redundant no-op.
def copy(src, dst, buffer_size=16 * 1024):
    """Copy the file *src* to *dst* in binary chunks.

    Parameters:
        src: path of the source file.
        dst: path of the destination file (created or overwritten).
        buffer_size: chunk size in bytes; defaults to 16 KiB.

    Fix: the original buffer size was 16*1027 — almost certainly a typo for
    16*1024 (a 16 KiB power-of-two buffer).
    """
    with open(src, 'rb') as fsrc, open(dst, 'wb') as fdst:
        while True:
            chunk = fsrc.read(buffer_size)
            if not chunk:  # empty bytes => EOF reached
                break
            fdst.write(chunk)
# Run the exercise: duplicate test.txt into test2.txt.
copy(fn1,fn2)
2.复制目录,选择一个已存在的目录作为当前工作目录,在其下创建a/b/c/d这样的子目录结构,并在这些子目录的不同层级生成50个普通文件,要求文件名由4个随机小写字母构成。将a目录下所有内容复制到当前工作目录dst目录下去,要求复制的普通文件的文件名必须是x,y,z开头
import shutil
from pathlib import Path
from string import ascii_lowercase
import random
# Exercise 2 setup: build an a/b/c/d directory tree under basedir and scatter
# 50 files with random 4-letter lowercase names across its levels.
# NOTE(review): basedir is hard-coded to a Windows path — adjust per machine.
basedir = Path('d:/temp')
sub = Path('a/b/c/d')
# Candidate target directories: a/b/c/d plus its ancestors a/b/c, a/b, a.
# parents[:-1] drops the final '.' entry of Path.parents.
dirs = [sub]+list(sub.parents)[:-1]
(basedir/sub).mkdir(parents=True,exist_ok=True)
# 50 random 4-char lowercase names (duplicates are possible, so fewer than 50
# distinct files may actually be created).
filenames = ("".join(random.choices(ascii_lowercase,k=4)) for i in range(50))
for filename in filenames:
    (basedir/random.choice(dirs)/filename).touch()
def ignore_files(src, names, exclude=frozenset('xyz')):
    """``ignore`` callback for shutil.copytree.

    Returns the subset of *names* that should NOT be copied: regular files
    whose first character is not one of *exclude*.  Despite its name,
    *exclude* holds the initials of files to KEEP (x/y/z by default) — every
    other regular file is ignored.  Directories are never returned, so the
    directory structure is always reproduced.

    Parameters:
        src: directory being visited (path string, supplied by copytree).
        names: entries of that directory.
        exclude: first characters of file names to keep.

    Fix: default changed from the mutable ``set('xyz')`` to an immutable
    ``frozenset`` (mutable defaults are shared across calls); the
    filter/lambda pair is replaced by an equivalent set comprehension.
    """
    return {name for name in names
            if name[0] not in exclude and Path(src, name).is_file()}
# Copy a/ into dst/, skipping regular files that do not start with x, y or z.
# NOTE(review): copytree raises FileExistsError if dst already exists — rerunning
# this script requires removing basedir/'dst' first.
shutil.copytree(str(basedir/'a'),str(basedir/'dst'),ignore=ignore_files)
3.单词统计,对一个指定文件进行单词统计,不区分大小写,并显示单词重复最多的10个单词
# Translation table mapping every punctuation character we split on to a space.
# Hoisted to module level so it is built once, not on every call.
_PUNCT_TO_SPACE = str.maketrans({c: ' ' for c in r"""!()-=*'"/\#.[]{},"""})


def makekey(s: str) -> list:
    """Split *s* into lowercase word tokens.

    Lowercases the string, replaces the punctuation characters above with
    spaces, and splits on whitespace.

    Returns:
        List of word strings; an empty list for an empty or
        punctuation/whitespace-only input.
    """
    # str.translate performs the per-character replacement in one C-level
    # pass, replacing the original Python loop that built a character list.
    return s.lower().translate(_PUNCT_TO_SPACE).split()
# Exercise 3 driver: count words (case-insensitive) in *filename* and print
# the 10 most frequent ones.
# NOTE(review): *filename* is not defined at this point in the file — it is
# only assigned later (exercise 4 sets it to 'test.ini'); bind it to the file
# to analyse before running this section.
from collections import Counter

word_counts = Counter()
with open(filename, encoding='utf-8') as f:
    for line in f:
        word_counts.update(makekey(line))

# Counter.most_common replaces the hand-rolled dict + sorted(...)[:10] slice.
for word, count in word_counts.most_common(10):
    print(word, count)
# 运行结果
path 138
the 136
is 60
a 59
os 50
if 43
and 40
to 34
on 33
of 33
4.有一个test.ini内容如下,将其转换为json格式
[DEFAULT]
a = test
[mysql]
default-character-set = utf8
a = 1000
[mysqld]
datadir = /dbserver/data
port = 33060
character-set-server = utf8
sql_mode = NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES
代码如下:
from configparser import ConfigParser
import json
# Exercise 4: convert an INI file to JSON.
filename = 'test.ini'
jsonname = 'mysql.json'

cfg = ConfigParser()
# Read explicitly as UTF-8 so the result does not depend on the platform's
# default encoding (fix: the original relied on the locale default, which is
# not UTF-8 on Windows).  cfg.read silently skips a missing file.
cfg.read(filename, encoding='utf-8')

# One JSON object per section.  Note: ConfigParser merges [DEFAULT] values
# into every section's items, and the [DEFAULT] section itself is not listed
# by sections() — so its keys appear inside each section, not as their own.
dest = {section: dict(cfg.items(section)) for section in cfg.sections()}

with open(jsonname, 'w', encoding='utf-8') as f:
    # ensure_ascii=False keeps any non-ASCII option values readable as-is.
    json.dump(dest, f, ensure_ascii=False)