代码方面,我们使用scrapy框架爬取酒店信息,经过测试发现,使用这种方法不会被封ip和cookie。
思路:
1.得到城市的编号
2.通过编号,进入酒店列表,并且得到酒店总数
3.计算酒店页数,构造得到网址
4.爬取相关的信息
结果截图:
爬取字段:
'id', '酒店名称', '地址', '评分', '入住总数', '类型', '简称', '纬度', '经度', '城市', 'ur'
items.py
import scrapy
class XiechengItem(scrapy.Item):
    """Container for one hotel record scraped from Ctrip (携程)."""

    # Identity
    id = scrapy.Field()         # hotel id
    name = scrapy.Field()       # full hotel name
    shortName = scrapy.Field()  # abbreviated hotel name

    # Location
    address = scrapy.Field()    # street address
    lat = scrapy.Field()        # latitude
    lon = scrapy.Field()        # longitude
    cityname = scrapy.Field()   # city the hotel belongs to

    # Ratings / metadata
    score = scrapy.Field()      # review score
    dpcount = scrapy.Field()    # number of reviews / stays
    stardesc = scrapy.Field()   # hotel type / star description
    ur = scrapy.Field()         # source url
pipelines.py
import csv
class xiechengPipeline:
    """Item pipeline that appends every scraped hotel item to 携程1.csv.

    The file is opened once when the pipeline is created and closed when
    the spider finishes.
    """

    # (CSV header text, item field name) in output column order.
    # Keeping header and row construction driven by one table means the
    # two can never drift out of sync.
    COLUMNS = [
        ('id', 'id'),
        ('酒店名称', 'name'),
        ('地址', 'address'),
        ('评分', 'score'),
        ('入住总数', 'dpcount'),
        ('类型', 'stardesc'),
        ('简称', 'shortName'),
        ('维度', 'lat'),
        ('经度', 'lon'),
        ('城市', 'cityname'),
        ('ur', 'ur'),
    ]

    def __init__(self):
        # newline='' is required so the csv module controls line endings.
        self.f = open("携程1.csv", "w", encoding='utf-8', newline='')
        self.writer = csv.writer(self.f)
        self.writer.writerow([header for header, _ in self.COLUMNS])

    def process_item(self, item, spider):
        """Write one item as a CSV row and pass it to the next pipeline."""
        self.writer.writerow([item[field] for _, field in self.COLUMNS])
        return item

    def close_spider(self, spider):
        # Release the file handle when the spider is closed.
        self.f.close()
settings.py
# -*- coding: utf-8 -*-
# Scrapy settings for xiecheng project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html