Scraping Qunar Attraction Data
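The short script below walks the Qunar (piao.qunar.com) ticket-search results for the keyword 热门景点 ("popular attractions") page by page, and appends each attraction's name, address, price, monthly sales, and one-line summary as a row in a CSV file.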

import requests
from bs4 import BeautifulSoup
import csv

# Column headers: name, address, price, monthly sales, summary
sd = ['名字', '地址', '价格', '月销量', '景点概述']
with open('C:\\Users\\惠普\\Desktop\\ac2.csv', 'a+', newline='', encoding='utf-8') as f:
    writer = csv.writer(f)
    writer.writerow(sd)
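A side note on the encoding: Excel on Windows does not auto-detect plain UTF-8, so the Chinese headers will look garbled if the file is opened there. Writing with the BOM-prefixed variant avoids this (my suggestion, not part of the original script):

# 'utf-8-sig' prepends a byte-order mark so Excel recognizes the file as UTF-8
with open('C:\\Users\\惠普\\Desktop\\ac2.csv', 'a+', newline='', encoding='utf-8-sig') as f:
    writer = csv.writer(f)
    writer.writerow(sd)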

header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest',
}
base = '&subject=&sku='
for i in range(1, 80):  # pages 1-79 of the 热门景点 (popular attractions) results
    url = 'http://piao.qunar.com/ticket/list.htm?keyword=%E7%83%AD%E9%97%A8%E6%99%AF%E7%82%B9&region=&from=mpl_search_suggest&page={}'.format(i)
    url = url + base
    response = requests.get(url, headers=header)
    soup = BeautifulSoup(response.text, 'html.parser')  # explicit parser silences the bs4 warning
    # Five parallel lists, one element per attraction on the page
    name = soup.find_all('h3', {'class': 'sight_item_caption'})
    address = soup.find_all('p', {'class': 'address color999'})
    price = soup.find_all('span', {'class': 'sight_item_price'})
    xiaoliang = soup.find_all('span', {'class': 'hot_num'})  # monthly sales count
    note = soup.find_all('div', {'class': 'intro color999'})
    with open('C:\\Users\\惠普\\Desktop\\ac2.csv', 'a+', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        for j in range(len(name)):  # j, so the page counter i is not shadowed
            # Strip the currency mark and the trailing 起 ("from") off prices like ¥20起
            c = price[j].text.replace('¥', '').replace('起', '').strip()
            print(c)
            if c == '免费':  # "free" attractions list no price and no sales figure
                row = [name[j].text, address[j].text.replace('地址:', ''), '0', '0', note[j].text]
            else:
                row = [name[j].text, address[j].text.replace('地址:', ''), c, xiaoliang[j].text, note[j].text]
            writer.writerow(row)
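One caveat about the five parallel find_all lists: free attractions carry no hot_num span, so xiaoliang comes back shorter than name, and every paid attraction after the first free one picks up the wrong sales figure. Parsing each attraction's container as a unit sidesteps this. A minimal sketch; the sight_item container class is an assumption inferred from the sight_item_caption and sight_item_price child classes above, not verified against the live page:

from bs4 import BeautifulSoup

def parse_page(html):
    """Yield one (name, address, price, sales, summary) tuple per attraction."""
    soup = BeautifulSoup(html, 'html.parser')
    # 'sight_item' is an assumed container class, inferred from the
    # sight_item_caption / sight_item_price child classes used above.
    for item in soup.find_all('div', class_='sight_item'):
        name = item.find('h3', class_='sight_item_caption')
        address = item.find('p', class_='address')
        price = item.find('span', class_='sight_item_price')
        sales = item.find('span', class_='hot_num')  # absent for free attractions
        note = item.find('div', class_='intro')
        yield (
            name.text if name else '',
            address.text.replace('地址:', '') if address else '',
            price.text.replace('¥', '').replace('起', '').strip() if price else '0',
            sales.text if sales else '0',  # a missing hot_num no longer shifts columns
            note.text if note else '',
        )

With per-item parsing, a missing field degrades to a default for that one attraction instead of corrupting every later row. It would also be courteous to add a short time.sleep between the 79 page requests.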

 

Reposted from: https://www.cnblogs.com/persistence-ok/p/10982403.html
