from multiprocessing import Process, Queue, Lock
from queue import Empty
import requests
import time
import random
import multiprocessing as mp
import re
from lxml import etree
from selenium import webdriver
import more_itertools as mi
from lxml.html import fromstring, tostring
from models import *
from sqlalchemy import *
import json
from googletranslate import Trans
from concurrent.futures import ProcessPoolExecutor
class Crawler(Process):
    def __init__(self, url_queue):
        super(Crawler, self).__init__()
        self.url_queue = url_queue
        # Pool of User-Agent strings; each worker picks one at random so the
        # requests don't all look identical.
        agentlist = [
            "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
            "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
            "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
            "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
            "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
            "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
            "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
            "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
            "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
            "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
        ]
        self.headers = {'User-Agent': random.choice(agentlist),
                        "accept-encoding": "gzip, deflate, br",
                        "accept-language": "zh-CN,zh;q=0.9",
                        }
    def R_get_text(self, url):
        try:
            Response = requests.get(url=url, headers=self.headers)
            # raise_for_status() throws on 4xx/5xx (e.g. a 404 patent page).
            # Catching it here keeps the worker process alive; an uncaught
            # exception kills the worker and leaves the queue undrained.
            Response.raise_for_status()
        except Exception as e:
            print('e:', e)
            return
        Response.encoding = Response.apparent_encoding
        print('---:', Response.status_code)
        html = Response.text
        return self.parse_page(html, url, '1')
    def parse_page(self, html, url, status):
        # parsing logic elided in this excerpt
        ...
    def replace(self, con):
        # Clean up HTML entities left in the scraped text: drop &lt;/&gt; and
        # unescape &amp; and &quot;.
        for old, new in [['&lt;', ''], ['&gt;', ''], ['&amp;', '&'], ['&quot;', '"']]:
            con = con.replace(old, new)
        return con
    def get_lrc_link_list(self):
        while True:
            try:
                # get_nowait() avoids the race between .empty() and .get()
                # when several workers drain the same queue.
                value = self.url_queue.get_nowait()
            except Empty:
                break
            qsize = self.url_queue.qsize()
            if len(value) == 2:
                # (url, project) tuple: report remaining queue size to a local
                # progress endpoint, then fetch the page.
                requests.get(f'http://127.0.0.1:8083/qsize?qsize={qsize}&project={value[1]}')
                val = self.R_get_text(value[0])
            else:
                val = self.R_get_text(value)
            # print('val:', val)
            time.sleep(3)  # throttle requests

    def run(self):
        self.get_lrc_link_list()
class Mains:
    def main(self, pubs):
        print('pubs:', type(pubs))
        url_queue = Queue()
        for public in pubs:
            print('public:', public)
            if isinstance(public, str):
                url_queue.put('https://patents.google.com/patent/' + public)
            else:
                # (publication number, project) pair
                url_queue.put(('https://patents.google.com/patent/' + public[0], public[1]))
        process = []
        for _ in range(4):
            process.append(Crawler(url_queue))
        for p in process:
            p.start()
            time.sleep(3)  # stagger worker start-up
        for p in process:
            p.join()
def translist(arr):
    try:
        if isinstance(arr, list):
            # arr = [Trans("EN").translate(i.strip()) for i in arr if len(i.strip()) > 0]
            arr = [i.strip() for i in arr if len(i.strip()) > 0]
        else:
            # Wrap a single string so pool.map translates it whole,
            # not character by character.
            arr = [arr]
        # Translate the snippets in parallel across worker processes.
        with ProcessPoolExecutor(max_workers=4) as pool:
            arr = list(pool.map(Trans('EN').translate, arr))
        return arr
    except Exception as e:
        print('e:', e)
        return ''  # callers must handle this empty-string fallback
While crawling pages with multiple processes, I suddenly got BrokenPipeError: [WinError 232] The pipe is being closed. After digging through a lot of material, I found that every worker process had raised an error (the requested pages returned 404), the workers died, and the whole program deadlocked. The fix was to wrap Response.raise_for_status() in try..except: with the error caught, each process keeps running to the end of the queue and the deadlock never occurs.
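A minimal, self-contained sketch of that pattern (the httpbin URLs are stand-ins for the real patent pages, not part of the original project): each worker swallows per-URL failures and keeps draining the queue, so every worker exits normally and the parent's join() calls return.

from multiprocessing import Process, Queue
from queue import Empty
import requests

def worker(q):
    while True:
        try:
            url = q.get_nowait()
        except Empty:
            break                               # queue drained: exit cleanly
        try:
            resp = requests.get(url, timeout=10)
            resp.raise_for_status()             # raises on 4xx/5xx, e.g. a 404 page
        except Exception as e:
            print('skip:', url, e)              # swallow the error; the worker stays alive
            continue
        print('ok:', url, len(resp.text))       # stand-in for the real parsing step

if __name__ == '__main__':                      # required on Windows (spawn start method)
    q = Queue()
    q.put('https://httpbin.org/status/404')     # stand-in URL that returns 404
    q.put('https://httpbin.org/html')           # stand-in URL that returns 200
    workers = [Process(target=worker, args=(q,)) for _ in range(2)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()                                # returns, because no worker dies mid-queue

With the exception handled inside the loop, a 404 costs one skipped URL instead of a dead worker; the if __name__ == '__main__' guard matters on Windows, where multiprocessing spawns fresh interpreters that re-import the module.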