点击上方“Python爬虫与数据挖掘”,进行关注
回复“书籍”即可获赠Python从入门到进阶共10本电子书
今日鸡汤
随风潜入夜,润物细无声。什么样的面包品牌最好卖?什么样的口感最受欢迎?相信每一位喜欢面包的朋友都会关心这些问题。本文通过爬取京东面包类数据,一方面回答大家此前关于面包的各种问题,另一方面也带大家了解一份完整的数据报告,可以从中有所借鉴。
构建解析详情页的代理
def disguiser():
    """Build a urllib opener that routes traffic through a freshly rented proxy IP.

    Queries the xdaili proxy-rental API for one IP, extracts the ip/port pair
    from the JSON reply and wraps it in a ``ProxyHandler``-based opener.

    If the first attempt fails (the paid API rate-limits back-to-back calls),
    sleep 6 seconds and retry once, exactly as the original did — but the
    retry now catches only ``Exception`` instead of a bare ``except:``, and
    the opener is returned on the success path too (the original printed it
    in the ``try`` branch and only returned from the ``except`` branch).

    Returns:
        urllib.request.OpenerDirector: opener whose HTTP requests go through
        the rented proxy.
    """
    try:
        return _rent_proxy_opener()
    except Exception:
        # The proxy API rejects rapid repeat calls; back off once and retry.
        time.sleep(6)
        return _rent_proxy_opener()


def _rent_proxy_opener():
    """Single attempt: fetch one proxy IP from the API and build an opener.

    The original duplicated this entire body in both the ``try`` and the
    ``except`` branches of ``disguiser``; it is factored out here.
    """
    api_url = ('http://www.agent.cn/xdaili-api//greatRecharge/getGreatIp'
               '?spiderId=8f75fb741de34cfb95adf347910db7a9'
               '&orderno=YZ20191169208Yi1jmu&returnType=2&count=1')
    resp = request.urlopen(request.Request(api_url))
    json_ip = resp.read().decode()
    # Strip whitespace so the quote-delimited field regexes match reliably.
    # (The article's extraction dropped the pattern; presumably r'\s' — TODO confirm.)
    json_ip = re.sub(r'\s', '', json_ip)
    ip_list = re.findall(r'"ip":"(.*?)"', json_ip)
    port_list = re.findall(r'"port":"(.*?)"', json_ip)
    values = [ip + ':' + port for ip, port in zip(ip_list, port_list)]
    # The API is called with count=1, so exactly one "ip:port" is expected;
    # mirror the original's {scheme: "ip:port"} mapping with key 'http'.
    keys = ['http']
    ip_dict = {keys[index]: values[index] for index in range(len(keys))}
    print(ip_dict)
    # 1. ProxyHandler takes {scheme: "ip:port"}.
    handler = request.ProxyHandler(ip_dict)
    # 2. build_opener wires the handler into an opener.
    return request.build_opener(handler)
解析详情页的内容
# NOTE(review): the giant "line" below is the whole parser() generator
# collapsed onto one line by the article-extraction pipeline; the same
# function is then re-printed statement-by-statement after it.  String
# quotes and regex backslashes were stripped everywhere (e.g.
# `re.search( d+ , ...)` was presumably `re.search(r'\d+', ...)`), and
# indentation is lost, so none of this runs as-is.  Kept byte-identical
# and only annotated.
def parser(pageQueue, uaPool, priceRequestDoc, PRICEBASEURL, detailRequestDoc, open): 解析详情页的内容 detailUrl = pageQueue.get()[1]print(detailUrl)# 价格PRICEURL = PRICEBASEURL + re.search( d+ ,detailUrl).group()priceRequestDoc = re.sub(r , ,priceRequestDoc)headers_for_price = dict(re.findall( ([-wd]*?):(.*) ,priceRequestDoc))headers_for_price.update(uaPool[random.randint(0,len(uaPool)-1)]) # 获取商品价格信息请求的headers信息req = request.Request(PRICEURL, headers = headers_for_price)resp = open(req) #第一次响应print(PRICEURL, 商品价格页请求响应码:,resp.getcode())if resp.getcode() == 200:info = resp.read().decode()elif SERVER_ERROR_MIN <= response.status_code < SERVER_ERROR_MAX:for i in range(5):time.sleep(i**i) #可以继续优化,第一次1秒,第二次10秒,第三次100秒...resp = open(req)if resp.getcode() == 200:breakelif SERVER_ERROR_MIN <= response.status_code < SERVER_ERROR_MAX:if response.status_code == 404:print( page not found )elif response.status_code == 403:print( have no right )else:passinfo = json.loads(info)item_price = info[0][ p ]# 名称 品牌 是否含糖 保质期 配料 包装 商品产地...detailRequestDoc = re.sub(r , ,detailRequestDoc)headers_for_detail = dict(re.findall( ([-wd:]*):(.*) ,detailRequestDoc))headers_for_detail.update(uaPool[random.randint(0,9)]) # 获取商品价格信息请求的headers信息req = request.Request(detailUrl, headers = headers_for_detail)resp = open(req) # 第二个响应print(detailUrl, 详情页请求响应:,resp.getcode())if resp.getcode() == 200:passelif SERVER_ERROR_MIN <= response.status_code < SERVER_ERROR_MAX:for i in range(5):time.sleep(i**i) #可以继续优化,第一次1秒,第二次10秒,第三次100秒...resp = open(req)if resp.getcode() == 200:breakelif SERVER_ERROR_MIN <= response.status_code < SERVER_ERROR_MAX:if response.status_code == 404:print(detailUrl, page not found )elif response.status_code == 403:print(detailUrl, have no right )else:passparser = etree.HTMLParser(encoding = gbk )html = etree.parse(resp, parser = parser)print(html)elements = html.xpath("//ul[@class= parameter2 p-parameter-list ]//text() | //dl[@class= clearfix ]//text()")detailInfo = list(filter(lambda msg : 
len(msg.strip()) > 0 and msg, elements))detailInfo = ( # ).join(detailInfo)try:item_name = re.search( 商品名称:(.*?)# ,detailInfo).group(1)except AttributeError:# print( 商品没有 item_name 信息 )item_name = ntry:item_id = re.search( d+ ,detailUrl).group()except AttributeError:# print( 商品没有 item_id 信息 )item_id = n# 大商品名称elementTitle = html.xpath("//title//text()")[0]elementTitle = elementTitle.strip()item_fullName = re.search( (【.*】)*(.*)?【 ,elementTitle).group(2)# 品牌elementBrand = html.xpath("//*[@id= crumb-wrap ]/div/div[1]/div[7]/div/div/div[1]/a/text()")elementBrand = list(filter(lambda msg : len(msg.strip()) > 0 and msg, elementBrand))try:item_brand = elementBrand[0]except IndexError:item_brand = nplyield { item_id :item_id, item_fullName :item_fullName, item_name :item_name, item_price :item_price, item_brand :item_brand, gross_weight :gross_weight, item_origin :item_origin, item_certification :item_certification, processing_technology :processing_technology, packing_unit :packing_unit, is_suger :is_suger}
# --- Expanded (statement-by-statement) copy of the same function follows. ---
# Stripped docstring — originally the function docstring
# """解析详情页的内容""" ("parse the content of the detail page").
解析详情页的内容
# Pull the next work item off the shared queue; [1] is the detail-page URL
# (element [0] is presumably a priority/sequence value — TODO confirm).
detailUrl = pageQueue.get()[1]
print(detailUrl)
# --- Price ---
# JD serves the price from a separate endpoint keyed by the numeric item id
# embedded in the detail URL; pattern was presumably r'\d+' before the
# backslash was stripped.
PRICEURL = PRICEBASEURL + re.search( d+ ,detailUrl).group()
# Normalize the captured raw request text so the header-splitting regex
# below can turn it into name:value pairs (substitution args were stripped).
priceRequestDoc = re.sub(r , ,priceRequestDoc)
headers_for_price = dict(re.findall( ([-wd]*?):(.*) ,priceRequestDoc))
headers_for_price.update(uaPool[random.randint(0,len(uaPool)-1)]) # rotate in a random User-Agent from the pool for the price request
req = request.Request(PRICEURL, headers = headers_for_price)
# NOTE(review): `open` is the proxy opener passed in as a parameter and
# shadows the builtin open().
resp = open(req) # first request/response
print(PRICEURL, 商品价格页请求响应码:,resp.getcode())
if resp.getcode() == 200:
info = resp.read().decode()
# NOTE(review): `response` / `response.status_code` is undefined here — the
# code mixes urllib (`resp.getcode()`) with requests-style attributes; this
# branch would raise NameError if reached.  SERVER_ERROR_MIN/MAX are module
# constants not shown in this excerpt.
elif SERVER_ERROR_MIN <= response.status_code < SERVER_ERROR_MAX:
# Retry up to 5 times with growing (i**i seconds) back-off on 5xx.
for i in range(5):
time.sleep(i**i) # could be tuned further: 1s the first time, 10s the second, 100s the third...
resp = open(req)
if resp.getcode() == 200:
break
elif SERVER_ERROR_MIN <= response.status_code < SERVER_ERROR_MAX:
if response.status_code == 404:
print( page not found )
elif response.status_code == 403:
print( have no right )
else:
pass
# NOTE(review): if the 200 branch above was never taken, `info` is unbound
# here — only the happy path is actually handled.
info = json.loads(info)
item_price = info[0][ p ]
# name / brand / sugar content / shelf life / ingredients / packaging / origin...
detailRequestDoc = re.sub(r , ,detailRequestDoc)
headers_for_detail = dict(re.findall( ([-wd:]*):(.*) ,detailRequestDoc))
# NOTE(review): hard-codes randint(0,9) instead of len(uaPool)-1 as above —
# assumes the UA pool has exactly 10 entries; TODO confirm.
headers_for_detail.update(uaPool[random.randint(0,9)]) # rotate in a random User-Agent for the detail request
req = request.Request(detailUrl, headers = headers_for_detail)
resp = open(req) # second request/response
print(detailUrl, 详情页请求响应:,resp.getcode())
if resp.getcode() == 200:
pass
# NOTE(review): same undefined-`response` issue as the price request above.
elif SERVER_ERROR_MIN <= response.status_code < SERVER_ERROR_MAX:
for i in range(5):
time.sleep(i**i) # could be tuned further: 1s the first time, 10s the second, 100s the third...
resp = open(req)
if resp.getcode() == 200:
break
elif SERVER_ERROR_MIN <= response.status_code < SERVER_ERROR_MAX:
if response.status_code == 404:
print(detailUrl, page not found )
elif response.status_code == 403:
print(detailUrl, have no right )
else:
pass
# Parse the detail page with lxml; JD pages are GBK-encoded.
# NOTE(review): the local `parser` shadows this function's own name.
parser = etree.HTMLParser(encoding = gbk )
html = etree.parse(resp, parser = parser)
print(html)
# Product-parameter text nodes from the spec list and the definition list.
elements = html.xpath("//ul[@class= parameter2 p-parameter-list ]//text() | //dl[@class= clearfix ]//text()")
# Drop whitespace-only nodes, then join on '#' so fields can be regex-cut.
detailInfo = list(filter(lambda msg : len(msg.strip()) > 0 and msg, elements))
detailInfo = ( # ).join(detailInfo)
try:
# '商品名称' = "product name"; fields are '#'-delimited after the join above.
item_name = re.search( 商品名称:(.*?)# ,detailInfo).group(1)
except AttributeError:
# print( 商品没有 item_name 信息 )
# NOTE(review): bare `n` is mangled — presumably `None` (or np.nan)
# before the extraction stripped it; TODO confirm against the full code.
item_name = n
try:
item_id = re.search( d+ ,detailUrl).group()
except AttributeError:
# print( 商品没有 item_id 信息 )
item_id = n
# Full product title (from the <title> tag)
elementTitle = html.xpath("//title//text()")[0]
elementTitle = elementTitle.strip()
# Title looks like "【...】name【...", group(2) is the bare name.
item_fullName = re.search( (【.*】)*(.*)?【 ,elementTitle).group(2)
# Brand (from the breadcrumb bar)
elementBrand = html.xpath("//*[@id= crumb-wrap ]/div/div[1]/div[7]/div/div/div[1]/a/text()")
elementBrand = list(filter(lambda msg : len(msg.strip()) > 0 and msg, elementBrand))
try:
item_brand = elementBrand[0]
except IndexError:
# NOTE(review): `npl` is mangled — likely np.nan/None originally.
item_brand = npl
# NOTE(review): gross_weight, item_origin, item_certification,
# processing_technology, packing_unit and is_suger are never assigned in
# this excerpt — the article says the extraction code was cut for length,
# so as shown this yield would raise NameError.
yield {
item_id :item_id,
item_fullName :item_fullName,
item_name :item_name,
item_price :item_price,
item_brand :item_brand,
gross_weight :gross_weight,
item_origin :item_origin,
item_certification :item_certification,
processing_technology :processing_technology,
packing_unit :packing_unit,
is_suger :is_suger
}
由于公众号篇幅有限,无法展示本文全部代码,我们已将代码放入百度云盘。后台回复“面包”,可以获取本文代码。
公众号后台回复“面包”,可以获取本文代码
------------------- End -------------------
后台回复"学习"加入Python学习群
万水千山总是情,点个「在看」行不行
往期精彩文章推荐:
网络爬虫过程中5种网页去重方法简要介绍
Python环境搭建—安利Python小白的Python和Pycharm安装详细教程
Python大佬分析了15万歌词,告诉你民谣歌手们到底在唱什么
如何利用Python词云和wordart可视化工具对朋友圈数据进行可视化展示
看完本文有收获?请转发分享给更多的人
Python爬虫与数据挖掘
入群请在微信后台回复【入群】
欢迎大家点赞,留言,转发,转载,感谢大家的相伴与支持。
做知识的传播者,随手转发,Python进阶者与您同行