Goal: crawl the regulatory records for each company.
Target page: http://www.ipe.org.cn/IndustryRecord/Regulatory.html
Findings from basic testing:
1. The home-page listing cannot be paged through, whether or not you are logged in.
2. Searching for a specific company requires a logged-in account.
3. Account registration is guarded by a slider CAPTCHA plus a click-the-characters CAPTCHA.
4. Once an account has run a certain number of queries, it is pushed into a step-by-step verification mode.
5. Registering too many accounts from the same IP gets the IP banned.
The workaround used here is to register several accounts by hand, log each one in, and rotate their .ASPXAUTH cookies; a quick way to check that a cookie works is sketched right after this list.
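Before feeding a cookie into the crawler it is worth checking it by hand. The snippet below is a minimal sketch of such a check, not part of the crawler itself: the endpoint, keycode and parameter names are the ones used by the full script further down (they may have changed on the site since), and YOUR_ASPXAUTH_VALUE is a placeholder you copy from a logged-in browser session.

# -*- coding:utf-8 -*-
# Minimal cookie check (sketch): one getRecords request with a single .ASPXAUTH cookie.
import requests

check_url = 'http://www.ipe.org.cn/data_ashx/GetAirData.ashx?xx=getRecords'
check_headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:74.0) Gecko/20100101 Firefox/74.0',
    'X-Requested-With': 'XMLHttpRequest',
    'Referer': 'http://www.ipe.org.cn/IndustryRecord/Regulatory.html?keycode=4543j9f9ri334233r3rixxxyyo12'
}
check_cookies = {'.ASPXAUTH': 'YOUR_ASPXAUTH_VALUE'}  # placeholder: copy the value from a logged-in browser
check_params = {
    'cmd': 'getRecords', 'keycode': '4543j9f9ri334233r3rixxxyyo12',
    'pageSize': '15', 'pageIndex': '1', 'countryId': '1', 'provinceId': '-1', 'cityId': '-1',
    'startYear': '-1', 'endYear': '-1', 'professionId': '-1', 'itemType': '0', 'companyType': '0',
    'indusName': '河南晋开化工投资控股集团有限责任公司一分公司',
    'fengxian': '0', 'ishistory': '0', 'hasvg': '0', 'code': '', 'index': '0'
}
res = requests.post(check_url, params=check_params, headers=check_headers, cookies=check_cookies, timeout=10)
# An empty body, or a negative isSuccess in the JSON-like response, means the cookie is not (or no longer) usable.
print(res.status_code, res.text[:200])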
The full implementation is as follows:
# -*- coding:utf-8 -*-
import time
import re, uuid, oss2, json, os
import traceback
import requests
from bson import ObjectId
from pymongo.mongo_client import MongoClient
from bs4 import BeautifulSoup
from urllib.parse import unquote, quote
count = 0
constructionDB_db = None
def conn_DB():
global constructionDB_db
while True:
try:
            conn_open=MongoClient("127.0.0.1",27017) # fill in your MongoDB host here
constructionDB_db=conn_open.get_database('constructionDB')
            # username, password, auth database
            # (Database.authenticate() exists only in PyMongo < 4; on newer versions pass the credentials to MongoClient instead)
constructionDB_db.authenticate('constructionDB','ConstructionDB1408~','constructionDB')
break
        except Exception:
            time.sleep(3)  # wait a little before retrying instead of busy-looping
def removeTag(pre_str):
return re.sub("[\s\\\.!/_$,%^*()()::+\"\'—!,。??、~@#¥%…&“”‘’;;\|{}\{\}【】\[\]=\-《》<>]+", "",pre_str)
# Switch proxy IP
# def changeIP():
# global constructionDB_db
# while True:
# try:
#             print ('========== switching IP ==========')
#             IP_PORT = constructionDB_db.IP_PORT # connect to the IP-pool collection
# ip_result =IP_PORT.find_and_modify({"IPE" : False},{"$set":{"IPE":True}},safe=True,new=True)
# if ip_result == None:
# IP_PORT.update_many({},{"$set":{"IPE" : False}})
# ip_result =IP_PORT.find_and_modify({"IPE" : False},{"$set":{"IPE":True}},safe=True,new=True)
# # return ip_result['ip']
# return {"http": "http://"+ip_result['ip']}
# except:
# conn_DB()
# # when adding cookies to phantomjs, drop the cookies from redundant domains
# def getPureDomainCookies(cookies):
# if not cookies:
# return
#     domain2cookie={} # build a mapping from each domain to its cookies
# for cookie in cookies:
# domain=cookie['domain']
# if domain in domain2cookie:
# domain2cookie[domain].append(cookie)
# else:
# domain2cookie[domain]=[]
# maxCnt=0
# ansDomain=''
# for domain in domain2cookie.keys():
# cnt=len(domain2cookie[domain])
# if cnt > maxCnt:
# maxCnt=cnt
# ansDomain=domain
# ansCookies=domain2cookie[ansDomain]
# return ansCookies
# Connect to the database
while True:
try:
        IpeDB = constructionDB_db.IpeDB # collection where the scraped records are stored
break
except:
conn_DB()
# // JS from the site: builds the URL that opens a company's regulatory-record detail page (Python port commented out below)
# function locationUrl_Records(companyId, dataType,showtype) {
# var hd_type = $('#hd_type').val();
# window.open('regulatory-record.aspx?companyId=' + companyId + '&dataType=' + dataType + '&isyh=' + hd_type+ '&showtype=' + showtype);
# }
# def locationUrl_Records(companyId, dataType, showtype):
# hd_type = 0
# return 'http://www.ipe.org.cn/IndustryRecord/regulatory-record.aspx?companyId=' + str(companyId) + '&dataType=' + str(dataType) + '&isyh=' + str(hd_type) + '&showtype=' + str(showtype)
# parse the response body into a dict (it is JSON-like but not valid JSON)
def cur_htmlToJson(content):
if content:
json_txt = re.sub(r'\{|\}|\"', '', content.decode('utf-8').replace("\'", '\"'))
all_mes = json_txt.split(',')
js_txt = {}
for mes in all_mes:
mes = mes.split(':')
if len(mes) > 1:
js_txt[mes[0]] = mes[1]
return js_txt
return ''
class getIPE():
def __init__(self):
self.all_cookies = [
            # '.ASPXAUTH': fill in your .ASPXAUTH values here; rotating the .ASPXAUTH of several accounts keeps the crawl going
#'8CCB9E5310E27C8585664F0FFABD3B70B451CBC9B80CCA14D519C032F1F05E2CE11EC2C81D9EA1134B78E15A0FCCD3350F36420673125A0A4027EAA776B245939596E00D9E029B4CB458D644E8FB493B686906E6233CCAF6E37275CC8D96E051EAE2BA36F9D22DACA3D46E5D1508BCCCD3600768889EA7CE0DB1065FEC492D87D3A9E84FA385498C7DE3FC6B1529F4394D8BCF85F0A7798E8D8F0E0F',
]
self.index = 0
self.headers = {
'Host': 'www.ipe.org.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:74.0) Gecko/20100101 Firefox/74.0',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With': 'XMLHttpRequest',
'Origin': 'http://www.ipe.org.cn',
'DNT': '1',
'Referer': 'http://www.ipe.org.cn/IndustryRecord/Regulatory.html?keycode=4543j9f9ri334233r3rixxxyyo12'
}
self.cookie = {
'.ASPXAUTH':self.all_cookies[self.index]
}
        # self.proxyIP = changeIP() # get a proxy IP
def __del__(self):
pass
def getHtml(self, method, url, params = None):
while True:
try:
if method == 'post':
                    # res = requests.post(url, headers = self.headers, cookies = self.cookie, params = params, proxies = self.proxyIP, timeout = 10) # request through the proxy
res = requests.post(url, headers = self.headers, cookies = self.cookie, params = params, timeout = 10)
else:
# res = requests.get(url, cookies = self.cookie, params = params, headers = self.headers, proxies = self.proxyIP, timeout = 10)
res = requests.get(url, cookies = self.cookie, params = params, headers = self.headers, timeout = 10)
# requests.get(url = url, params = params, headers = headers, proxies = staticIndex.proxy_auth,timeout = 10)
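                # an empty body is treated as the current cookie no longer being accepted, so switch to the next one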
if res.text == '':
self.set_cookie()
continue
if res.status_code == 200:
return res
except:
traceback.print_exc()
time.sleep(1)
                # self.proxyIP = changeIP() # switch to a new proxy
    # Switch to the next cookie / account
def set_cookie(self):
self.index += 1
if self.index >= len(self.all_cookies):
# self.index = 0
os._exit(0)
        print ('===== switching cookie')
        print ('===== using cookie #%d' % (self.index + 1))
self.cookie = {
'.ASPXAUTH':self.all_cookies[self.index]
}
    # Store one record (skipped if an identical name/source/html document already exists)
def save_data(self, item):
while True:
try:
message_date = IpeDB.find_one({"name":item['name'], "source":item['source'], "html":item['html']})
if message_date != None :
                    # print ('updating this record -----')
                    # IpeDB.update({"_id":ObjectId(message_date['_id'])},item)
                    print ('record already exists -----')
                else:
                    print ("inserting this record ----")
IpeDB.insert_one(item)
break
except:
traceback.print_exc()
conn_DB()
def get_mess(self,company):
        # listing/search request
post_url = 'http://www.ipe.org.cn/data_ashx/GetAirData.ashx?xx=getRecords'
params = {
'cmd': "getRecords",
'keycode': "4543j9f9ri334233r3rixxxyyo12",
'pageSize': "15",
'pageIndex': "1",
'countryId': "1",
'provinceId': "-1",
'cityId': "-1",
'startYear': "-1",
'endYear': "-1",
'professionId': "-1",
'itemType': "0",
'companyType': "0",
'indusName': str(company),
'fengxian': "0",
'ishistory': "0",
'hasvg': "0",
'code': "",
'index': "0"
}
res = self.getHtml('post', post_url, params=params)
        # because of quoting/escaping issues in the response, json.loads() cannot parse it; use cur_htmlToJson instead
js_txt = cur_htmlToJson(res.content)
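        # isSuccess semantics (as inferred from responses): 0 with a Msg field means no data for this company;
        # 0 without Msg means retry the request; a negative value means the current cookie/account is rejected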
while int(js_txt['isSuccess']) <= 0:
if js_txt['isSuccess'] == '0':
try:
                    print (js_txt['Msg']) # no data (message returned by the site)
return
except:
while js_txt['isSuccess'] == '0':
                        # self.proxyIP = changeIP() # need to switch the proxy
res = self.getHtml('post', post_url, params=params)
js_txt = cur_htmlToJson(res.content)
if int(js_txt['isSuccess']) < 0:
while int(js_txt['isSuccess']) < 0:
self.set_cookie()
                    # self.proxyIP = changeIP() # switch the proxy as well as the cookie
res = self.getHtml('post', post_url, params=params)
js_txt = cur_htmlToJson(res.content)
content = js_txt['content']
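        # 'content' is URL-encoded HTML: unquote() decodes the %XX bytes as GBK, and the remaining
        # %uXXXX escapes are turned into \uXXXX and decoded with unicode_escape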
content = unquote(content, encoding="gbk").replace('%', '\\').encode('utf-8').decode('unicode_escape')
soup = BeautifulSoup(content, 'lxml')
all_tr = soup.find_all('tr')
for tr in all_tr:
print (str(tr))
            lcUrl_fun = re.search(r'locationUrl_Records\((\d*),(\d*),(\d*)\);', str(tr))
            if lcUrl_fun is None:
                continue  # defensive: skip rows that carry no locationUrl_Records link (e.g. header rows)
# detail_url = locationUrl_Records(lcUrl_fun.group(1), lcUrl_fun.group(2), lcUrl_fun.group(3))
title = re.search(r'title="(.+?)"', str(tr)).group(1)
loc = re.search(r'<td>([\u4e00-\u9fa5]+?) / <span class="text-prov">([\u4e00-\u9fa5]+?)</span>', str(tr))
loc = loc.group(1) + '/' + loc.group(2)
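            # the row title ends with the registration status (存续/在营/开业/在册); strip it and any brackets
            # before comparing with the company we searched for, and skip rows that do not match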
name = re.sub(r'存续|在营|开业|在册|、|\(|\)|(|)', '', title)
if re.sub(r'\(|\)|(|)','',company) != name:
continue
# detail_res = self.getHtml('get', detail_url)
# detail_soup = BeautifulSoup(detail_res.text, 'lxml')
            # fetch all the years that have records for this company
all_years_url = 'http://www.ipe.org.cn/data_ashx/GetAirData.ashx?xx=getrecord123eachyear1&keycode=4543j9f9ri334233r3rixxxyyo12'
params_year = {
'cmd': "getrecord123eachyear1",
'keycode': "4543j9f9ri334233r3rixxxyyo12",
'companyId': lcUrl_fun.group(1)
}
res_years = self.getHtml('post', all_years_url, params=params_year)
year_txt = cur_htmlToJson(res_years.content)
year_content = year_txt['yearContent']
            # URL-encoded GBK -> text, same decoding trick as for the listing content
year_content = unquote(year_content, encoding="gbk").replace('%', '\\').encode('utf-8').decode('unicode_escape')
year_soup = BeautifulSoup(year_content, 'lxml')
all_years = year_soup.find_all('li')
for year in all_years:
all_a = year.find('div', class_ = 'record-select').find_all('a', recursive = False)
for a in all_a:
recordId = re.search(r"getRecordInfo\((\d*?),\'\d*?\'\)", str(a)).group(1)
cur_year = re.search(r'getRecordInfo\(\d*?,\'(\d*?)\'\)', str(a)).group(1)
source = re.search(r'title=\"(.*?)\"', str(a), re.M|re.S).group(1)
params_t = {
'cmd': "getRecordInfo",
'keycode': "4543j9f9ri334233r3rixxxyyo12",
'recordId': str(recordId)
}
                    # fetch the full text of this single record
post_url_t = 'http://www.ipe.org.cn/data_ashx/GetAirData.ashx?xx=getRecordInfo&keycode=4543j9f9ri334233r3rixxxyyo12'
res_t = self.getHtml('post', post_url_t, params=params_t)
js_t = cur_htmlToJson(res_t.content)
detail_html = js_t['content']
try:
detail_html = unquote(detail_html, encoding="gbk").replace('%', '\\').encode('utf-8').decode('unicode_escape')
except:
traceback.print_exc()
detail_html = unquote(detail_html, encoding="gbk").replace('%', '\\')
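                    # the image file paths matched below would be corrupted by the unicode_escape decode,
                    # so swap them out for numeric placeholders first and put them back afterwards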
all_path = re.findall(r'file:.+?.png', detail_html)
for index, path in enumerate(all_path):
detail_html = detail_html.replace(path, '0000000000' + str(index))
# print (detail_html[7460:7500])
detail_html = detail_html.encode('utf-8').decode('unicode_escape')
for index, path in enumerate(all_path):
detail_html = detail_html.replace('0000000000' + str(index), path)
item = {}
                    item['name'] = name # matched company name from the listing
                    item['companyName'] = company # name used for the search
                    item['loc'] = loc # location of the penalized company
                    item['year'] = cur_year # year of the record
                    item['html'] = detail_html # detail page HTML
                    item['source'] = source # source / record title
                    item['is_clear'] = False # cleaning flag, not cleaned yet
# print (item)
                    print ('company name:', company)
                    print ('record title:', source)
                    print ('year:', cur_year)
                    # store the record
self.save_data(item)
print ('')
if __name__ == "__main__":
getIPE().get_mess('河南晋开化工投资控股集团有限责任公司一分公司')
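To cover every company instead of the single name hard-coded above, the __main__ block can be replaced with a small driver loop. The sketch below assumes a hypothetical company_list.txt file with one company name per line; adapt it to wherever your company list actually lives.

# Hypothetical driver: iterate over a list of company names and crawl each one.
# 'company_list.txt' (one name per line, UTF-8) is an assumed input file, not part of the original script.
if __name__ == "__main__":
    spider = getIPE()
    with open('company_list.txt', encoding='utf-8') as f:
        for line in f:
            name = line.strip()
            if name:
                spider.get_mess(name)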
Unresolved issues:
1. The CAPTCHAs (the slider and click-the-characters checks at registration).