# Requirement: a dict with 3000+ keys must be split into chunks, with the chunks stored in a list.
# -*- coding: UTF-8 -*-
import multiprocessing # 加载多进程库
import itertools #加载字典切割库
import time
from requests.adapters import HTTPAdapter
import requests
# Build the url -> dbnum mapping (3000+ entries) from the pipe-delimited
# input file.  Each line is assumed to look like "<ids>|...|<dbnum>|..."
# where <ids> is a comma-separated list of shard numbers -- confirm
# against the actual /root/carl11.txt format.
domains = ".x.xxx.com/data/newworldkill"
url_dict = {}
with open("/root/carl11.txt", "r") as f:
    for line in f:  # iterate lazily instead of f.readlines()
        fields = line.strip().split("|")  # renamed: don't shadow builtin `list`
        for i in fields[0].split(","):
            url = "http://s" + i + domains
            # map each shard url to its db number (3rd pipe-field)
            url_dict[url] = fields[2]
def splitDict(d, parts=100):
    """Split dict *d* into *parts* sub-dicts, preserving insertion order.

    The previous implementation took ``len(d) // 100`` items per chunk,
    which silently dropped the last ``len(d) % 100`` entries.  Here the
    remainder is distributed one extra item to each leading chunk, so
    the union of the chunks always equals *d*.

    Args:
        d: the dict to split.
        parts: number of chunks to produce (default 100, matching the
            number of worker processes started in ``__main__``).

    Returns:
        A list of ``parts`` dicts.
    """
    base, extra = divmod(len(d), parts)
    it = iter(d.items())
    chunks = []
    for idx in range(parts):
        # the first `extra` chunks carry one additional item
        size = base + (1 if idx < extra else 0)
        chunks.append(dict(itertools.islice(it, size)))
    return chunks
def run(url_dicts):
    """Probe every url in *url_dicts* and print its health status.

    Args:
        url_dicts: mapping of url -> db number (one chunk produced by
            ``splitDict``).

    Side effects: prints ``"<url>|<dbnum>"`` for an HTTP 200 response,
    otherwise ``"<dbnum>区不正常"``.  Request failures are caught so a
    single unreachable host cannot kill the whole worker process.
    """
    headers = {
        'Connection': 'close',
    }
    # Original code unpacked this as `value, key`, which was inverted:
    # the dict key is the url and the value is the db number.
    for url, dbnum in url_dicts.items():
        try:
            # timeout so one hung host cannot stall the worker forever
            status_code = requests.get(url, headers=headers, timeout=10).status_code
        except requests.RequestException:
            print(str(dbnum) + u"区不正常")
            continue
        # status_code is already an int -- compare it as one
        if status_code == 200:
            print(url + "|" + str(dbnum))
        else:
            print(str(dbnum) + u"区不正常")
if __name__ == '__main__':
    # Split the work ONCE -- the original called splitDict inside the
    # loop, recomputing all 100 chunks on every iteration (100x work).
    chunks = splitDict(url_dict)
    workers = []
    for chunk in chunks:
        p = multiprocessing.Process(target=run, args=(chunk,))
        p.start()
        workers.append(p)
    # Join the workers so the parent does not exit while they run.
    for p in workers:
        p.join()