Scraping TripAdvisor (猫途鹰) reviews with Python

'''
Purpose: scrape the English reviews of an attraction from TripAdvisor (using the Mutianyu Great Wall as an example)
URL: https://www.tripadvisor.cn/Attraction_Review-g294212-d325811-Reviews-Mutianyu_Great_Wall-Beijing.html
'''
# Import modules
import time
import requests
from bs4 import BeautifulSoup
from lxml import etree

# 1. We only want English reviews. This URL supports filtering reviews by language, so a POST request (with filterLang=en in the form data) is the most reliable way to fetch English reviews.
# 2. Each English review shows a "More" button that expands the full text, so a second request is needed to retrieve the complete content of every review.
# Fetch the English review list pages
def get_reports():
    # Build the paginated review URLs (each page holds 10 reviews; widen the range to crawl more pages)
    urls = [
        'https://www.tripadvisor.cn/Attraction_Review-g294212-d325811-Reviews-or{}-Mutianyu_Great_Wall-Beijing.html'.format(i + 10) for i in range(0, 10, 10)
    ]
    data = {
        'preferFriendReviews': 'FALSE',
        't': '',
        'q': '',
        'filterSeasons': '',
        'filterLang': 'en',
        'filterSafety': 'FALSE',
        'filterSegment': '',
        'trating': '',
        'reqNum': '1',
        'isLastPoll': 'false',
        'paramSeqId': '1',
        'waitTime': '107',
        'changeSet': 'REVIEW_LIST',
        'puid': 'Xkvrr8CoASkABrD8V34AAAGL'
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Mobile Safari/537.36',
    }
    # Loop over the paginated URLs and fetch the review list on each page
    for url in urls:
        response = requests.post(url=url, data=data, headers=headers)
        if response.status_code != 200:
            print('Unexpected response status, please check the request parameters')

        else:
            html = response.text
            # Method 1 (commented out): extract the review IDs with lxml's etree and XPath
            # elements = etree.HTML(response.text)
            # report_id = elements.xpath('//*[@class="reviewSelector"]/@data-reviewid')
            # reviewid = ",".join(report_id)
            # print(reviewid)

            # Method 2: extract the review IDs with BeautifulSoup's find_all
            reviewid_list = []
            soup = BeautifulSoup(html, 'lxml')
            reports = soup.find_all('div', attrs={"class":"review-container"})
            for report in reports:
                report_id = report['data-reviewid']
                reviewid_list.append(report_id)
            reviewid = ",".join(reviewid_list)

            # Fetch the full text of the reviews on this page
            get_all_report(reviewid)
            time.sleep(2)  # brief pause between pages to avoid hammering the server

# Fetch the full review text shown after clicking "More"
def get_all_report(reviews):
    url = 'https://www.tripadvisor.cn/OverlayWidgetAjax?Mode=EXPANDED_HOTEL_REVIEWS_RESP&metaReferer='
    data = {
        'reviews': reviews,
        'contextChoice': 'DETAIL',
        'loadMtHeader': 'true',
        'haveJses': 'earlyRequireDefine,amdearly,promise-polyfill-standalone,global_error,long_lived_global,apg-Attraction_Review,apg-Attraction_Review-in,bootstrap,responsive-calendar-templates-dust-zh_CN,@ta/common.global,@ta/tracking.interactions,@ta/public.maps,@ta/overlays.pieces,@ta/overlays.shift,@ta/overlays.internal,@ta/overlays.attached-overlay,@ta/overlays.managers,@ta/overlays.attached-arrow-overlay,@ta/overlays.popover,social.share-cta,attractions.tab-bar-commerce,@ta/overlays.fullscreen-overlay,@ta/overlays.modal,attractions.attraction-detail-about-card,@ta/daodao.mobile-app-smartbutton,@ta/platform.import,@ta/platform.runtime,masthead_search_late_load,p13n_masthead_search__deferred__lateHandlers',
        'haveCsses': 'apg-Attraction_Review-in,responsive_calendars_control',
        'Action': 'install'
    }
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'}
    resp = requests.post(url=url, data=data, headers=headers)
    if resp.status_code != 200:
        print('Unexpected response status, please check the request parameters')
    else:
        html = resp.text
        #print(html)
        soup = BeautifulSoup(html, 'lxml')
        report_alls = soup.find_all('p', attrs={"class": "partial_entry"})
        for report_all in report_alls:
            report_text = report_all.get_text()
            print(report_text)

if __name__ == "__main__":
    get_reports()
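
The script above simply prints each expanded review to the console. If you want to keep the results, a small persistence helper is easy to bolt on. The following is a minimal sketch, assuming you collect the review texts into a list inside get_all_report and pass them to a hypothetical save_reviews_csv helper (the function name and output filename are illustrative, not part of the original script):

import csv

def save_reviews_csv(review_texts, filename='mutianyu_reviews.csv'):
    # Append one review per row to a UTF-8 CSV file (hypothetical helper)
    with open(filename, 'a', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        for text in review_texts:
            writer.writerow([text])

# Usage inside get_all_report, replacing the print loop:
#   texts = [p.get_text() for p in report_alls]
#   save_reviews_csv(texts)

Opening the file in append mode lets each paginated batch of reviews be written as it arrives, so a partially completed crawl still leaves usable data on disk.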