Lesson 11, Session 3: Parsing Web Pages in the Real World

Getting all attraction listings from the front page

Normal page parsing (no headers required)

from bs4 import BeautifulSoup
import requests

url = 'http://www.tripadvisor.cn/Attractions-g60763-Activities-New_York_City_New_York.html'
wb_data = requests.get(url)

soup = BeautifulSoup(wb_data.text, 'lxml') # .text gives the response body as a string, which BeautifulSoup then parses
#print(soup)
titles = soup.select('div.property_title > a[target="_blank"]')  # attraction names
images = soup.select('img[width="160"]')                         # thumbnail images
cates = soup.select('div.p13n_reasoning_v2')                     # category tags

for title, image, cate in zip(titles, images, cates):
    data = {
        'title':title.get_text(),
        'image':image.get('src'),
        'cate':list(cate.stripped_strings),
    }
    print(data)
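
One caveat in the pattern above: zip() silently truncates to the shortest of the three lists, so if one selector matches fewer elements than the others, fields can fall out of alignment without any error. A minimal sketch of a more robust variant that scopes each lookup to a single listing container first (note: div.listing is an assumed container selector, not verified against TripAdvisor's actual markup):

# Sketch of per-container extraction, so one missing image or category
# cannot shift the alignment of the whole result set.
# NOTE: 'div.listing' is an assumed selector, not taken from the real page.
for listing in soup.select('div.listing'):
    title = listing.select_one('div.property_title > a')
    img = listing.select_one('img[width="160"]')
    cate = listing.select_one('div.p13n_reasoning_v2')
    data = {
        'title': title.get_text() if title else None,
        'image': img.get('src') if img else None,
        'cate': list(cate.stripped_strings) if cate else [],
    }
    print(data)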

Fetching your saved-places list by passing headers

Page parsing that requires headers

from bs4 import BeautifulSoup
import requests

headers = {
    'User-Agent':'', # copy from your browser's request headers
    'Cookie':'',     # login cookie, copied from the browser after signing in
}
url_saves = 'http://www.tripadvisor.cn/Saves#1'
wb_data = requests.get(url_saves, headers=headers)
soup = BeautifulSoup(wb_data.text, 'lxml')
titles = soup.select('a.location-name')
images = soup.select('img.photo_image')
metas = soup.select('span.format_address')

for title, image, meta in zip(titles, images, metas):
    data = {
        'title':title.get_text(),
        'image':image.get('src'),
        'meta':list(meta.stripped_strings)
    }
    print(data)
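
If the three lists come back empty, the request most likely received a login page rather than your saves; the server often returns HTTP 200 either way, so checking the parsed results is a more reliable signal than the status code. A minimal sanity check:

# Sanity check: an empty selection usually means the Cookie was missing
# or expired and the server responded with a login page instead.
if not titles:
    print('No saved items parsed - check that the Cookie header is filled in and still valid.')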

Scraping 30 consecutive pages of listings

from bs4 import BeautifulSoup
import requests
import time

url_saves = 'http://www.tripadvisor.com/Saves#37685322'
url = 'https://cn.tripadvisor.com/Attractions-g60763-Activities-New_York_City_New_York.html'
urls = ['https://cn.tripadvisor.com/Attractions-g60763-Activities-oa{}-New_York_City_New_York.html#ATTRACTION_LIST'.format(str(i)) for i in range(30,930,30)]
# list comprehension: one URL per page, stepping the 'oa' offset by 30

headers = {
    'User-Agent':'', # copy from your browser's request headers
    'Cookie':''      # login cookie, needed only by get_favs below
}

# Function that gets all attraction info from one listing page
def get_attractions(url, data=None):
    wb_data = requests.get(url)
    time.sleep(4)  # pause between requests so as not to hammer the site
    soup = BeautifulSoup(wb_data.text, 'lxml')
    titles = soup.select('div.property_title > a[target="_blank"]')
    imgs = soup.select('img[width="160"]')
    cates = soup.select('div.p13n_reasoning_v2')

    if data is None:
        for title, img, cate in zip(titles, imgs, cates):
            data = {
                'title': title.get_text(),
                'img': img.get('src'),
                'cate': list(cate.stripped_strings),
            }
            print(data)  # print inside the loop, otherwise only the last listing is shown
# Step that crawls the listings from each page in turn
for single_url in urls:
    get_attractions(single_url)
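
# A quick check of what the list comprehension above generates: range(30, 930, 30)
# yields offsets 30, 60, ..., 900, i.e. 30 page URLs (oa30 is page 2, oa900 is
# page 31; the un-offset base url covers page 1).
print(len(urls))   # 30
print(urls[0])     # ...Activities-oa30-New_York_City_New_York.html...
print(urls[-1])    # ...Activities-oa900-New_York_City_New_York.html...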


###########################################################
# Not related to the loop above -- this function is never called in this script
# Function that fetches the saved-places list by passing headers
def get_favs(url, data=None):
    wb_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    titles = soup.select('a.location-name')
    imgs = soup.select('div.photo > div.sizedThumb > img.photo_image')
    metas = soup.select('span.format_address')

    if data is None:
        for title, img, meta in zip(titles, imgs, metas):
            data = {
                'title': title.get_text(),
                'img': img.get('src'),
                'meta': list(meta.stripped_strings)
            }
            print(data)
####################################################
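
The lesson defines get_favs but never calls it; under the script above, invoking it against the saves page would look like this:

# Hypothetical invocation of get_favs, reusing the url_saves and headers
# defined at the top of the script (the Cookie must be filled in first).
get_favs(url_saves)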

Scraping a page by spoofing a mobile client

Because the site has anti-scraping measures in place, it is easier to scrape the mobile version of the page.

from bs4 import BeautifulSoup
import requests

headers = {
    'User-Agent':'', # mobile device User-Agent, copied from Chrome's device emulation
}
url = '' # URL of the mobile version of the page
mb_data = requests.get(url,headers=headers)
soup = BeautifulSoup(mb_data.text,'lxml')
imgs = soup.select('div.thumb.thumbLLR.soThumb > img')
for i in imgs:
    print(i.get('src'))
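
For reference, a mobile User-Agent can be copied from Chrome DevTools (toggle device emulation, reload, and read the User-Agent from the Network panel). A sketch of what such a header looks like, using an older iPhone Safari UA string purely as an illustration; copy a current one from your own browser instead:

# Illustrative mobile User-Agent (an older iPhone Safari string);
# substitute whatever your browser's device emulation reports.
headers = {
    'User-Agent': ('Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) '
                   'AppleWebKit/601.1.46 (KHTML, like Gecko) '
                   'Version/9.0 Mobile/13B143 Safari/601.1'),
}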