import csv
import time
from concurrent.futures import ThreadPoolExecutor
from threading import Lock

import requests
# CSV column names — one per field of the rows returned by the xinfadi price API.
head = [
    'id', 'prodName', 'prodCatid', 'prodCat', 'prodPcatid', 'prodPcat',
    'lowPrice', 'highPrice', 'avgPrice', 'place', 'specInfo', 'unitInfo',
    'pubDate', 'status', 'userIdCreate', 'userIdModified', 'userCreate',
    'userModified', 'gmtCreate', 'gmtModified',
]
def run1(i):  # fetch one page of data
    """Fetch page *i* (20 rows) of price data from the xinfadi API.

    Args:
        i: 1-based page number, sent as the ``current`` form field.

    Returns:
        The ``list`` portion of the JSON response — one dict per product row.

    Raises:
        requests.HTTPError: if the server answers with an error status.
        requests.Timeout: if the request takes longer than 10 seconds.
    """
    star = time.time()
    url = 'http://www.xinfadi.com.cn/getPriceData.html'
    data = {'limit': 20, 'current': i}
    # timeout keeps a stalled request from hanging its worker thread forever;
    # data= passed by keyword for clarity (positional also lands on `data`).
    ret = requests.post(url, data=data, timeout=10)
    ret.raise_for_status()  # fail loudly on HTTP errors instead of on .json()
    ret_list = ret.json()['list']
    print(f'{i}----{time.time()-star}')
    return ret_list
def run2(n):  # write one page of rows to the CSV
    """Write an iterable of row dicts to the shared CSV writer.

    Serialized with the module-level ``lock`` so pages written from
    concurrent threads are never interleaved mid-row.

    Args:
        n: iterable of dicts, as returned by :func:`run1`.
    """
    # `with` guarantees the lock is released even if writerow raises;
    # bare acquire()/release() would leave it held forever on error.
    with lock:
        for row in n:
            writer.writerow(dict(row))
star1 = time.time()
# newline='' is required by the csv module; utf-8 keeps non-ASCII place names intact.
with open('q.csv', 'w', newline='', encoding='utf-8') as q:
    lock = Lock()  # shared by run2 threads to serialize CSV writes
    writer = csv.DictWriter(q, head)
    writer.writeheader()
    for n in range(10):
        # A fresh 20-worker pool per batch of 10 pages (pages n*10+1 .. n*10+10);
        # the `with` block joins all workers before the next batch starts.
        with ThreadPoolExecutor(20) as pool:
            # map() yields results in submission order, so rows stay ordered.
            a = pool.map(run1, range(n * 10 + 1, n * 10 + 11))
            # Submitting run2 over `a` blocks until the run1 results arrive.
            # Iterating the returned iterator (instead of discarding it)
            # re-raises any exception run2 hit, rather than swallowing it.
            for _ in pool.map(run2, a):
                pass
print(time.time() - star1)
# Source: CSDN blog post "Fixing out-of-order results when fetching data with
# a multithreaded Python crawler", first published 2023-01-09 13:21:09.