Web Crawler Case Studies (Part 4)

Getting the URL of the target content

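The review data is not in the page HTML: it is returned by a JSONP request to rate.tmall.com/list_detail_rate.htm, which can be found in the browser DevTools Network panel while browsing the review section. Before running the full crawler below, a minimal sketch like this one (reusing the itemId and sellerId from the captured URL, no cookie) can confirm the endpoint and show that the jsonpNNN(...) wrapper can be stripped and parsed as plain JSON; the rateDetail.rateList layout is an assumption inferred from the fields the full script extracts with regular expressions.

"""Sketch only: fetch one page of the rate API and parse it as JSON instead of regex."""
import json
import re
import requests

url = ('https://rate.tmall.com/list_detail_rate.htm'
       '?itemId=616280047803&sellerId=1114511827&currentPage=1&callback=jsonp664')
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
    'referer': 'https://detail.tmall.com/item.htm?id=616280047803',
}
text = requests.get(url, headers=headers).text
# The body looks like jsonp664({...}); strip the callback wrapper first.
m = re.search(r'jsonp\d+\((.*)\)', text, re.S)
if m:
    data = json.loads(m.group(1))
    # Assumed response layout: rateDetail.rateList holds the individual reviews,
    # each carrying the displayUserNick / rateDate / rateContent fields that the
    # full script below pulls out with regular expressions.
    for rate in data.get('rateDetail', {}).get('rateList', []) or []:
        print(rate.get('displayUserNick'), rate.get('rateDate'))

The full script below sticks to regex extraction on the raw response text, which also works and does not depend on the exact JSON layout (but does require a valid cookie).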

"""爬取TMAO评论"""
"""导入需要的库"""
import requests
from bs4 import  BeautifulSoup as bs
import json
import csv
import re
import time
import random
import pandas as pd
#存储目标js的URL列表
comment_page_url = []

# build the list of page URLs
def Get_Url(num):
    urlFront = 'https://rate.tmall.com/list_detail_rate.htm?itemId=616280047803&spuId=1606165105&sellerId=1114511827&order=3&currentPage='
    urlRear = '&append=0&content=1&tagId=&posi=&picture=&groupId=&ua=098%23E1hvNvvnvPOvUpCkvvvvvjiWP25w0jY8R2cy0jivPmPhzjDUP2qOgjn2nLspgjYRPuQCvvyvhjZ2cDgvR2mVvpvhvvpvvvgCvvpvvvvvKvhv8vvvvvCvpvvvvvv2UZCvC26vvUnvphvpgvvv96CvpCvOvvm2phCvhCkUvpvVmvvC9jaPuvhvmvvv9buNeLOimvhvLvsGPpvjfiVvQRA1%2B2n79RLIAfUTnZJt9b7gRbIs7T2UlnA4DVQCKWjxsLpZwxkQrfFCKdyIvWmy%2BE7re169rWoQABoXfHTQD7zwdipvvpvVvmvvvhCvRvhvCvvvvvmevpvhvvmv9uQCvvyvmnu22p6vLRevvpvZ7Dsfh8Pw7Di4fic5jWn4KldXz69gvpvhvvvvvv%3D%3D&needFold=0&_ksTS=1605764555984_663&callback=jsonp664'
    for i in range(0,num):
        comment_page_url.append(urlFront+str(1+i)+urlRear)
# fetch the review data page by page
def GetInfo(num):
    all_content = pd.DataFrame()  # DataFrame that will hold the scraped content
    nickname = []     # stores user nicknames
    auctionSku = []   # stores item SKUs
    ratecontent = []  # stores review text
    ratedate = []     # stores review dates
    for i in range(num):          # fetch each page of reviews in turn
        headers = {  # request headers copied from the browser; the cookie is session-bound and will expire
            'cookie':'cna=dWpBF2Pv9QECAbfFDOopLfUH; hng=CN%7Czh-CN%7CCNY%7C156; lid=t_1502455534824_0412; xlly_s=1; sgcookie=E8oBT%2FERviagKafirFG9C; t=8dc5969b72f63bf782d40cc5c00cdc45; tracknick=t_1502455534824_0412; _tb_token_=eeee3798860; cookie2=198a442f144dea14cbcf08dc47bf049a; _m_h5_tk=b1fd32d1537844ad93a3921c0c96cf21_1605772786693; _m_h5_tk_enc=fd3d580ac0c2eb70a8ee9c1fbef957a7; tfstk=cWyGBQmRU5l6M4G3NOM_2BNm-bycZ5pqr-yQLJJ-k31QuReFijRea1tOxVYPJI1..; l=eBIDPwQRQC9XXOGvBOfwhurza77OGIRfguPzaNbMiOCPO2fWPu81WZ7Po78XCnGVHsT6R3rEQAfYByYOiyIVokb4d_BkdlkmndC..; isg=BK-vep1MLJCIFCmlT2yTDUWVPsO5VAN2jtz_BcE8Jp4IEM4SySYTxsiGkgAuaNvu',
            'user-agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
            'referer': 'https://detail.tmall.com/item.htm?spm=a220m.1000858.1000725.1.481761e9rsLHLx&id=616280047803&skuId=4478591848253&user_id=1114511827&cat_id=2&is_b=1&rn=439c1b3c72a8a5fd26a2d53c7a3f6bad',
            'accept': '*/*',
            'accept-encoding':'gzip, deflate, br',
            'accept-language': 'zh-CN,zh;q=0.9'
        }
        # parse the JSONP response body with regular expressions
        content = requests.get(comment_page_url[i], headers=headers).text
        nickname.extend(re.findall('"displayUserNick":"(.*?)"', content))
        auctionSku.extend(re.findall('"auctionSku":"(.*?)"', content))
        ratecontent.extend(re.findall('"rateContent":"(.*?)"', content))
        ratedate.extend(re.findall('"rateDate":"(.*?)"', content))
        # rebuild the DataFrame from the accumulated lists and write it out;
        # DataFrame.append() is deprecated, and re-appending the growing lists
        # on every pass would duplicate the earlier pages in the CSV
        all_content = pd.DataFrame({'displayUserNick': nickname,
                                    'auctionSku': auctionSku,
                                    'rateDate': ratedate,
                                    'rateContent': ratecontent})
        all_content.to_csv('E:\\TmallContent.csv', index=False, encoding='gb18030')
        time.sleep(random.randint(2, 3))   # pause between pages to avoid being blocked
        print("Page {} crawled".format(i + 1))
# entry point
if __name__ == "__main__":
    Page_Num = 5            # number of review pages to crawl
    Get_Url(Page_Num)
    GetInfo(Page_Num)
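
After the run finishes, the exported file can be sanity-checked by loading it back with pandas; a minimal sketch, assuming the same path and gb18030 encoding used above:

import pandas as pd

# Read the exported reviews back (same path and encoding as in GetInfo above).
reviews = pd.read_csv('E:\\TmallContent.csv', encoding='gb18030')
print(reviews.shape)    # (rows, columns) actually written
print(reviews.head())   # first rows: nickname, SKU, date and review text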
