Python crawler hangs at runtime: multithreaded crawler works fine when debugged in PyCharm but hangs when run normally

import requests
from fake_useragent import UserAgent
import os
import threading
from lxml import etree
from queue import Queue, Empty
import time
import re

class Producer(threading.Thread):
    headers = {
        "Referer": "https://www.隐藏.com",
        "User-Agent": UserAgent().random
    }

    def __init__(self, page_queue, img_queue):
        threading.Thread.__init__(self)
        self.page_queue = page_queue
        self.img_queue = img_queue

    def get_url(self, page_url):
        # Always pass a timeout: requests.get() without one can block forever,
        # which is one common way a crawler "hangs" outside the debugger.
        re1 = requests.get(page_url, headers=self.headers, timeout=5)
        e1 = etree.HTML(re1.text)
        title_urls_half = e1.xpath("//div[@class='box list channel']/ul/li/a/@href")
        zhu_url = get_zhuye()
        for title_urls in title_urls_half:
            title_url = zhu_url + title_urls
            re2 = requests.get(title_url, headers=self.headers, timeout=5)
            e2 = etree.HTML(re2.text)
            img_urls = e2.xpath("//div[@class='content']/p/img/@src")
            for img_url in img_urls:
                self.img_queue.put(img_url)  # img_url is the image address

    def run(self):
        while True:
            # Checking empty() and then calling a blocking get() is a race:
            # another producer can drain the queue in between and this
            # thread then blocks on get() forever. Use a non-blocking get.
            try:
                page_url = self.page_queue.get(block=False)
            except Empty:
                break
            self.get_url(page_url)

class Consumer(threading.Thread):
    headers = {
        "Referer": "https://www.隐藏.com",
        "User-Agent": UserAgent().random
    }

    def __init__(self, page_queue, img_queue):
        threading.Thread.__init__(self)
        self.page_queue = page_queue
        self.img_queue = img_queue

    def run(self):
        while True:
            time.sleep(2)
            # Timing-dependent shutdown: if the producers need more than
            # 2 s per batch, the consumers all quit early and the bounded
            # img_queue eventually makes the producers block on put().
            if self.img_queue.empty():
                break
            try:
                img_url = self.img_queue.get(block=False)
            except Empty:
                break
            filename = img_url.split("/")[-1]
            try:
                response = requests.get(img_url, headers=self.headers, timeout=5)
                print(img_url)
                with open(filename, "wb") as f:
                    f.write(response.content)
            except requests.RequestException:  # a bare except would also swallow KeyboardInterrupt
                print("request failed or timed out")

def get_zhuye():
    headers = {
        "Referer": "https://www.隐藏.com",
        "User-Agent": UserAgent().random
    }
    base_url = "https://www.隐藏.com"
    response = requests.get(base_url, headers=headers, timeout=5)
    # The landing page redirects via JavaScript; extract the real homepage
    # from window.location.href and strip the trailing slash.
    zhu_url = re.findall(r'window\.location\.href="(.+?)"', response.text)[0][:-1]
    return zhu_url

def main():
    zhu_url = get_zhuye()
    page_queue = Queue(50)
    img_queue = Queue(1000)  # bounded: put() blocks once 1000 URLs pile up
    a = int(input("First page to download (>= 2): "))
    b = int(input("Last page to download (>= 2): "))
    for x in range(a, b + 1):
        page_url = zhu_url + "/pic/2/index_%d.html" % x
        page_queue.put(page_url)
    for x in range(5):
        t = Producer(page_queue, img_queue)
        t.start()
        # t.join()  # joining inside the loop would run the threads one at a time
    for x in range(5):
        t = Consumer(page_queue, img_queue)
        t.start()
        # t.join()

if __name__ == '__main__':
    # Download into an "images" folder next to this script
    images_path = os.path.join(os.path.dirname(__file__), "images")
    if not os.path.exists(images_path):
        os.mkdir(images_path)
    os.chdir(images_path)
    main()
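
The symptom in the title (fine under the PyCharm debugger, stuck at full speed) usually points at the shutdown logic rather than the parsing: single-stepping slows the threads down so the queues never look empty at the wrong moment, while at full speed a consumer can exit early or a thread can block on a queue forever. Below is a minimal sketch of the usual fix, a sentinel-based shutdown; the producer, consumer and run_crawl helpers are illustrative stand-ins for the classes above, not the original code.

import threading
from queue import Queue

SENTINEL = None  # special value that tells a thread to exit

def producer(page_queue, img_queue):
    while True:
        page_url = page_queue.get()  # blocking get is safe: a sentinel always arrives
        if page_url is SENTINEL:
            break
        # ... fetch page_url here and img_queue.put(...) each image URL ...

def consumer(img_queue):
    while True:
        img_url = img_queue.get()
        if img_url is SENTINEL:
            break
        # ... download img_url here ...

def run_crawl(page_urls, n_producers=5, n_consumers=5):
    page_queue = Queue()  # unbounded, so put() never blocks
    img_queue = Queue()
    for url in page_urls:
        page_queue.put(url)
    for _ in range(n_producers):
        page_queue.put(SENTINEL)  # one sentinel per producer thread
    producers = [threading.Thread(target=producer, args=(page_queue, img_queue))
                 for _ in range(n_producers)]
    consumers = [threading.Thread(target=consumer, args=(img_queue,))
                 for _ in range(n_consumers)]
    for t in producers + consumers:
        t.start()
    for t in producers:
        t.join()  # all pages are processed once every producer has exited
    for _ in range(n_consumers):
        img_queue.put(SENTINEL)  # only now tell the consumers to stop
    for t in consumers:
        t.join()

With sentinels there is no empty()-then-get() race and no time.sleep(2) guess: every get() may block, because a sentinel is guaranteed to arrive, so the program terminates deterministically no matter how fast or slow the network is.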
