在首页中获取所有地点信息
正常的网页解析
from bs4 import BeautifulSoup
import requests
# Fetch the New York attractions list page and print one record per place.
url = 'http://www.tripadvisor.cn/Attractions-g60763-Activities-New_York_City_New_York.html'
wb_data = requests.get(url)
soup = BeautifulSoup(wb_data.text, 'lxml')  # .text is the decoded HTML body

titles = soup.select('div.property_title > a[target="_blank"]')
images = soup.select('img[width="160"]')
cates = soup.select('div.p13n_reasoning_v2')

# The three selector hit-lists are aligned; walk them in lockstep.
for title, image, cate in zip(titles, images, cates):
    record = {
        'title': title.get_text(),
        'image': image.get('src'),
        'cate': list(cate.stripped_strings),
    }
    print(record)
通过传入headers获取收藏夹的列表信息
需要headers的网页解析
from bs4 import BeautifulSoup
import requests
# This page requires a logged-in session: supply a real browser User-Agent
# and the session Cookie copied from the browser.
headers = {
    'User-Agent': '',
    'Cookie': '',
}
url_saves = 'http://www.tripadvisor.cn/Saves#1'

wb_data = requests.get(url_saves, headers=headers)
soup = BeautifulSoup(wb_data.text, 'lxml')

titles = soup.select('a.location-name')
images = soup.select('img.photo_image')
metas = soup.select('span.format_address')

# One saved place per aligned triple of selector hits.
for title, image, meta in zip(titles, images, metas):
    record = {
        'title': title.get_text(),
        'image': image.get('src'),
        'meta': list(meta.stripped_strings)
    }
    print(record)
连续爬取30页的列表信息
from bs4 import BeautifulSoup
import requests
import time
# Entry-point URLs: the saved-items page and the first attractions page.
url_saves = 'http://www.tripadvisor.com/Saves#37685322'
url = 'https://cn.tripadvisor.com/Attractions-g60763-Activities-New_York_City_New_York.html'

# The attractions list is paginated in steps of 30 (oa30, oa60, ..., oa900),
# giving 30 follow-up pages in total.
urls = [
    'https://cn.tripadvisor.com/Attractions-g60763-Activities-oa{}-New_York_City_New_York.html#ATTRACTION_LIST'.format(offset)
    for offset in range(30, 930, 30)
]

# Fill in real browser values before requesting pages that need a login.
headers = {
    'User-Agent': '',
    'Cookie': ''
}
# Function that scrapes every attraction entry from one list page.
def get_attractions(url, data=None):
    """Fetch one TripAdvisor attractions list page and print a record per place.

    Args:
        url: Absolute URL of an attractions list page.
        data: When None (the default) the scraped records are printed;
            passing any other value suppresses the output.
    """
    wb_data = requests.get(url)
    time.sleep(4)  # crude rate limiting between consecutive page fetches
    soup = BeautifulSoup(wb_data.text, 'lxml')
    titles = soup.select('div.property_title > a[target="_blank"]')
    imgs = soup.select('img[width="160"]')
    cates = soup.select('div.p13n_reasoning_v2')
    if data is None:  # fix: compare with None by identity (PEP 8), not ==
        for title, img, cate in zip(titles, imgs, cates):
            data = {
                'title' :title.get_text(),
                'img' :img.get('src'),
                'cate' :list(cate.stripped_strings),
            }
            print(data)
# Step that crawls every paginated list URL in sequence.
for page_url in urls:
    get_attractions(page_url)
###########################################################
#和这个函数没有关系,没有使用这个函数
#通过传入headers获取收藏好的列表信息的函数
def get_favs(url, data=None):
    """Fetch the user's saved-places page and print one record per entry.

    Requires the module-level ``headers`` dict to carry a valid User-Agent
    and login Cookie.

    Args:
        url: URL of the TripAdvisor "Saves" page.
        data: When None (the default) the scraped records are printed;
            passing any other value suppresses the output.
    """
    wb_data = requests.get(url, headers=headers)  # headers carry the session cookie
    soup = BeautifulSoup(wb_data.text, 'lxml')
    titles = soup.select('a.location-name')
    imgs = soup.select('div.photo > div.sizedThumb > img.photo_image')
    metas = soup.select('span.format_address')
    if data is None:  # fix: compare with None by identity (PEP 8), not ==
        for title, img, meta in zip(titles, imgs, metas):
            data = {
                'title' :title.get_text(),
                'img' :img.get('src'),
                'meta' :list(meta.stripped_strings)
            }
            print(data)
####################################################
通过伪造手机客户端爬取网页
因为网站进行了反爬虫处理,所以从手机端爬取比较方便
from bs4 import BeautifulSoup
import requests
# Spoof a mobile browser: the mobile site bypasses the desktop
# anti-crawling measures, so scraping it is simpler.
headers = {
    'User-Agent': '',  # paste a mobile-device user agent copied from Chrome devtools
}
url = ''

mb_data = requests.get(url, headers=headers)
soup = BeautifulSoup(mb_data.text, 'lxml')

# Print the source URL of every thumbnail image on the page.
for img in soup.select('div.thumb.thumbLLR.soThumb > img'):
    print(img.get('src'))