Python Parallelization
1) Introduction to parallelization
- [x] Multiple threads process tasks at the same time
- [x] Efficient
- [x] Fast
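Note that multiprocessing.dummy exposes the same Pool API as multiprocessing, but backs it with threads instead of processes. Threads suit I/O-bound work like crawling, where workers mostly wait on the network. The comparison below is a sketch of the two imports, not code from the course:

# Same Pool API, two backends:
from multiprocessing import Pool as ProcessPool        # processes: suits CPU-bound work
from multiprocessing.dummy import Pool as ThreadPool   # threads: suits I/O-bound work like crawling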
2) Using map
- The map function takes care of the whole sequence in one call: it iterates over the input, passes each element to the worker function, and collects the results (a toy sketch follows this list).
- from multiprocessing.dummy import Pool
- pool = Pool(number_of_cpu_cores)
- results = pool.map(crawl_function, url_list)
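A minimal runnable sketch of the pattern, using a toy function rather than a crawler (the names here are illustrative only):

from multiprocessing.dummy import Pool as ThreadPool

def square(x):
    return x * x

pool = ThreadPool(4)                          # 4 worker threads
results = pool.map(square, [1, 2, 3, 4, 5])   # results keep input order: [1, 4, 9, 16, 25]
pool.close()
pool.join()
print(results)

The benchmark below applies the same pattern to 20 Tieba pages, timing a plain loop against pool.map: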
from multiprocessing.dummy import Pool as ThreadPool
import requests
import time

def getsource(url):
    # fetch the page; the response is unused, we only measure the request time
    html = requests.get(url)

urls = []
for i in range(1, 21):
    newpage = 'http://tieba.baidu.com/p/3522395718?pn=' + str(i)
    urls.append(newpage)

# single-threaded baseline: fetch the 20 pages one after another
time1 = time.time()
for i in urls:
    print(i)
    getsource(i)
time2 = time.time()
print('Single-threaded time: ' + str(time2 - time1))

# parallel version: a pool of 2 worker threads fetches the same pages
pool = ThreadPool(2)
time3 = time.time()
results = pool.map(getsource, urls)
pool.close()
pool.join()
time4 = time.time()
print('Parallel time: ' + str(time4 - time3))
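The benchmark hardcodes a pool of 2 threads. A common variant, shown here as an assumption rather than part of the original, sizes the pool from the machine's core count (as the list above suggests) and has the worker return a value so pool.map hands back one result per URL:

from multiprocessing import cpu_count
from multiprocessing.dummy import Pool as ThreadPool
import requests

def getsource(url):
    # returning the status code gives pool.map something to collect
    return requests.get(url).status_code

urls = ['http://tieba.baidu.com/p/3522395718?pn=' + str(i) for i in range(1, 21)]

pool = ThreadPool(cpu_count())   # size the pool from the number of CPU cores
status_codes = pool.map(getsource, urls)
pool.close()
pool.join()
print(status_codes)              # e.g. [200, 200, ...]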
3) Hands-on: a Baidu Tieba spider
tiebaspider.py
from lxml import etree
from multiprocessing.dummy import Pool as ThreadPool
import requests
import json

def towrite(contentdict):
    # f is the shared output file opened in the main block below;
    # it is written to from pool threads (see the locking note after this script)
    f.write('Reply time: ' + str(contentdict['topic_reply_time']) + '\n')
    f.write('Reply content: ' + str(contentdict['topic_reply_content']) + '\n')
    f.write('Replied by: ' + str(contentdict['user_name']) + '\n\n')

def spider(url):
    html = requests.get(url)
    selector = etree.HTML(html.text)
    content_field = selector.xpath('//div[@class="l_post l_post_bright "]')
    for each in content_field:
        # data-field holds the reply metadata as JSON; lxml has already
        # decoded the HTML entities, so it can be parsed directly
        reply_info = json.loads(each.xpath('@data-field')[0])
        author = reply_info['author']['user_name']
        content = each.xpath('div[@class="d_post_content_main"]/div/cc/div[@class="d_post_content j_d_post_content "]/text()')[0]
        reply_time = reply_info['content']['date']
        print(content)
        print(reply_time)
        print(author)
        item = {}
        item['user_name'] = author
        item['topic_reply_content'] = content
        item['topic_reply_time'] = reply_time
        towrite(item)

if __name__ == "__main__":
    pool = ThreadPool(2)
    f = open('content.txt', 'a', encoding='utf-8')
    page = []
    for i in range(1, 21):
        newpage = 'http://tieba.baidu.com/p/3522395718?pn=' + str(i)
        page.append(newpage)
    results = pool.map(spider, page)
    pool.close()
    pool.join()
    f.close()
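One caveat about the script above: spider runs in two pool threads at once, and both write to the shared file f through towrite, so the three lines of one reply can interleave with another thread's output. A defensive sketch, assuming we add a module-level lock (the lock is not part of the original script):

import threading

write_lock = threading.Lock()

def towrite(contentdict):
    # hold the lock so one reply's three lines land in the file as a unit;
    # f is the shared file opened in the main block of tiebaspider.py
    with write_lock:
        f.write('Reply time: ' + str(contentdict['topic_reply_time']) + '\n')
        f.write('Reply content: ' + str(contentdict['topic_reply_content']) + '\n')
        f.write('Replied by: ' + str(contentdict['user_name']) + '\n\n')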