import os
from queue import Queue
from threading import Thread

import requests
from lxml import etree
class BaiduImageSpider:
    """Multi-threaded crawler that downloads images from a Baidu Tieba forum.

    Producer/consumer pipeline over three queues:
      params_queue  -- listing-page query params  (filled by getUrl)
      t_link_queue  -- post page URLs             (filled by homePageUrl)
      t_image_queue -- image URLs                 (filled by getImageUrl,
                                                   drained by writeImage)
    """

    def __init__(self, tieba_name, stop_page):
        # tieba_name: forum keyword to crawl; stop_page: number of listing pages.
        self.headers = {"User-Agent": "Mozilla/5.0"}
        self.baseurl = "http://tieba.baidu.com"
        self.pageurl = "http://tieba.baidu.com/f?"
        self.search = tieba_name
        self.stop_page = stop_page
        self.params_queue = Queue()
        self.t_link_queue = Queue()
        self.t_image_queue = Queue()

    def getUrl(self):
        """Enqueue the query params of each forum listing page (50 posts/page)."""
        for i in range(self.stop_page):
            params = {
                "kw": self.search,
                "pn": i * 50
            }
            self.params_queue.put(params)

    def homePageUrl(self):
        """Worker: fetch a listing page and enqueue the full URL of every post."""
        while True:
            params = self.params_queue.get()
            try:
                res = requests.get(self.pageurl, params=params, headers=self.headers)
                res.encoding = "utf-8"
                parseHtml = etree.HTML(res.text)
                # Relative post links, e.g. ['/p/233432', '/p/2039820', ...]
                t_list = parseHtml.xpath('//div[@class="t_con cleafix"]/div/div/div/a/@href')
                for t_link in t_list:
                    self.t_link_queue.put(self.baseurl + t_link)
            finally:
                # Always mark the task done, even when the request or parse
                # fails; otherwise params_queue.join() in run() blocks forever
                # and the program never exits.
                self.params_queue.task_done()
            print('homePage')

    def getImageUrl(self):
        """Worker: fetch a post page and enqueue every image URL found in it."""
        while True:
            t_link = self.t_link_queue.get()
            try:
                res = requests.get(t_link, headers=self.headers)
                res.encoding = "utf-8"
                parseHtml = etree.HTML(res.text)
                img_list = parseHtml.xpath('//img[@class="BDE_Image"]/@src')
                for img_link in img_list:
                    print(img_link)
                    self.t_image_queue.put(img_link)
            finally:
                # Guarantee task_done() so t_link_queue.join() can complete.
                self.t_link_queue.task_done()
            print('getImage')

    def writeImage(self):
        """Worker: download an image and save its bytes under ./photo/."""
        while True:
            img_link = self.t_image_queue.get()
            try:
                res = requests.get(img_link, headers=self.headers)
                # Name the file after the URL's last path segment; slicing the
                # last 10 characters could include a '/' and break the path.
                filename = os.path.join('./photo', img_link.split('/')[-1])
                # Create the target directory on demand (exist_ok handles the
                # race between concurrent writer threads).
                os.makedirs('./photo', exist_ok=True)
                with open(filename, "wb") as f:
                    f.write(res.content)
                print("%s下载成功" % filename)
            finally:
                # Guarantee task_done() so t_image_queue.join() can complete.
                self.t_image_queue.task_done()
            print('writeimage')

    def run(self):
        """Start all worker threads and wait for the three queues to drain."""
        t_list = []
        t_list.append(Thread(target=self.getUrl))
        t_list.append(Thread(target=self.homePageUrl))
        for _ in range(10):
            t_list.append(Thread(target=self.getImageUrl))
        for _ in range(10):
            t_list.append(Thread(target=self.writeImage))
        for t in t_list:
            # Daemon threads let the process exit once the queues are joined;
            # t.setDaemon(True) is deprecated since Python 3.10.
            t.daemon = True
            t.start()
        for q in [self.params_queue, self.t_link_queue, self.t_image_queue]:
            q.join()
        print('over')
if __name__ == '__main__':
    # Arguments: target Tieba forum name, and how many listing pages to crawl.
    crawler = BaiduImageSpider('美女', 2)
    crawler.run()
# 今天尝试了一下多线程爬虫，对贴吧进行图片的爬取。出现的问题：程序最终无法停止，原因继续寻找中。