JD Water Heater Review Sentiment Analysis: Scraping the Data

Scraping Target

The goal of this article is to scrape, from JD.com, the following fields for Midea electric water heaters: product name, price, review count, positive-review rate, negative-review rate, neutral-review rate, review tags, review text, review time, reviewer nickname, and purchase time.

Scraping Process

import requests
from bs4 import BeautifulSoup
import json
import re
import pymysql
import random
from multiprocessing import Pool

# Connect to the local MySQL database that will store the scraped data.
connection = pymysql.connect(host='localhost', user='root',
                             password='0102003', database='spider')
cursor = connection.cursor()

First, import the required packages and connect to the local MySQL database so that the scraped data can be stored there.
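As a quick sanity check (a minimal sketch; it assumes the spider database already exists and that the credentials above match your local setup), you can confirm the connection works before any scraping starts:

# Hypothetical smoke test: confirm MySQL is reachable before scraping.
cursor.execute("SELECT VERSION()")
print(cursor.fetchone())  # e.g. ('5.7.30',)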

# NB: requests does not consult this module-level attribute; the actual
# retry on failure is done manually inside get_html below.
requests.DEFAULT_RETRIES = 5

# Pool of user-agent strings to rotate between requests.
uas = ['Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1',
       'Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11',
       'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6',
       'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6',
       'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1',
       'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
       'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5',
       'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3',
       'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3',
       'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3',
       'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3',
       'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3',
       'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
       'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
       'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
       'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3',
       'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24',
       'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24']

def get_html(url):
    """
    Fetch the HTML of a page.
    :param url: URL of the page
    :return: HTML text
    """
    ua = random.choice(uas)  # rotate the user-agent to reduce blocking
    head = {'user-agent': ua,
            'authority': 'item.jd.com',
            'method': 'GET',
            'path': '/1106432.',
            'scheme': 'https'}
    try:
        r = requests.get(url, headers=head)
        return r.text
    except Exception as e:
        print(e)
        return get_html(url)  # retry on failure (e.g. 502 responses)

The get_html function sends the request and returns the response body. random.choice() picks a user-agent at random from uas, and the try-except block catches any exception and prints it. Since 502 errors came back occasionally during scraping, the request is simply retried on failure.
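The recursive retry above has no depth limit; if JD keeps failing, it recurses until Python's stack limit. A bounded alternative (a sketch, not the author's code) is to mount urllib3's Retry on a requests Session:

from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = requests.Session()
# Retry up to 5 times on connection errors and on 502/503/504 responses,
# with exponential backoff between attempts.
retries = Retry(total=5, backoff_factor=0.5, status_forcelist=[502, 503, 504])
session.mount('https://', HTTPAdapter(max_retries=retries))

def get_html_bounded(url):
    return session.get(url, headers={'user-agent': random.choice(uas)}).text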

def get_detail_urls():
    """
    Collect the detail-page URL and name of every Midea water heater.
    :return: list of (url, name) tuples
    """
    detail_url_list = []
    for i in range(1, 14):  # 13 pages of listing results
        url = 'https://list.jd.com/list.html?cat=737,13297,13690&ev=exbrand%5F12380&page={}&' \
              'sort=sort_totalsales15_desc&trans=1&JL=6_0_0#J_main'.format(i)
        soup = BeautifulSoup(get_html(url), 'lxml')
        for j in range(1, len(soup.select('#plist > ul > li')) + 1):
            url = "https:" + soup.select('#plist > ul > li:nth-child({}) > div > div.p-name > a'.format(j))[0] \
                .attrs['href']
            name = soup.select('#plist > ul > li:nth-child({}) > div > div.p-name > a > em'.format(j))[0] \
                .get_text().strip()
            detail_url_list.append((url, name))
    return detail_url_list

get_detail_urls() was originally meant to collect detail-page URLs so the data could be scraped from each detail page. However, most of the detail-page data is loaded via JavaScript and cannot be read directly from the HTML, and fetching the detail pages also requires a simulated login. So in practice get_detail_urls() collects each product's name and URL; the useful part of the URL is the product id, which lets us skip visiting the detail pages entirely.
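A JD detail URL embeds the SKU id right before the .html suffix, so two splits are enough to recover it (the URL below is illustrative):

url = 'https://item.jd.com/1106432.html'   # hypothetical detail URL
sku_id = url.split('/')[-1].split('.')[0]  # -> '1106432'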

def save_to_sql(l):
    # Parameterised insert: the driver escapes quotes and other special
    # characters that frequently appear in review text.
    sql = "insert into media values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
    try:
        cursor.execute(sql, l)
        connection.commit()
    except Exception as e:
        print(e)

save_to_sql inserts one row of scraped data into the media table in MySQL.
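The article never shows the definition of the media table; a plausible schema matching the eleven fields inserted above could look like this (the column names and types are assumptions):

# Hypothetical DDL for the 'media' table; adjust types and lengths as needed.
cursor.execute("""
    CREATE TABLE IF NOT EXISTS media (
        name          VARCHAR(255),  -- product name
        price         DECIMAL(10,2), -- product price
        comment_count INT,           -- total number of reviews
        good_rate     FLOAT,         -- positive-review rate
        general_rate  FLOAT,         -- neutral-review rate
        poor_rate     FLOAT,         -- negative-review rate
        tag           VARCHAR(1000), -- hot review tags with counts
        comment       TEXT,          -- review text
        comment_time  VARCHAR(40),   -- when the review was posted
        nickname      VARCHAR(100),  -- reviewer nickname
        bought_time   VARCHAR(40)    -- when the item was bought
    )
""")
connection.commit()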

def get_info(url, title):
    id = url.split('/')[-1].split('.')[0]  # product (SKU) id
    price_url = 'https://p.3.cn/prices/mgets?skuIds=J_' + id
    comment_info_url = 'https://club.jd.com/comment/productCommentSummaries.action?referenceIds={}'.format(id)
    comment_tag_url = 'https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&' \
                      'productId={}&score=0&sortType=5&page=0&pageSize=10&isShadowSku=0&fold=1'.format(id)
    comment_info = json.loads(get_html(comment_info_url)).get('CommentsCount')[0]
    r = re.compile(r".*?\((.*)\)")
    comment_tag = json.loads(r.findall(get_html(comment_tag_url))[0])
    tag = []  # review tags
    for i in comment_tag.get('hotCommentTagStatistics'):
        tag.append(i.get('name') + "(" + str(i.get('count')) + ")")
    tag = str(tag)
    price = float(json.loads(get_html(price_url))[0].get('p'))  # product price
    CommentCount = comment_info.get('CommentCount')  # total number of reviews
    GoodRate = comment_info.get('GoodRate')  # positive-review rate
    PoorRate = comment_info.get('PoorRate')  # negative-review rate
    GeneralRate = comment_info.get('GeneralRate')  # neutral-review rate
    name = title
    out = False
    for i in range(100):  # JD keeps at most 100 pages of reviews
        comment_url = 'https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&' \
                      'productId={}&score=0&sortType=5&page={}&pageSize=10&isShadowSku=0&fold=1'.format(id, i)
        r = re.compile(r".*?\((.*)\);")
        try:
            comment = json.loads(r.findall(get_html(comment_url))[0]).get('comments')
        except Exception as e:
            print(id, i)
            print(e)
            break
        for j in range(10):  # at most 10 reviews per page
            try:
                com = comment[j].get('content')
            except IndexError as e:  # fewer than 10 reviews on the last page
                print(e)
                print(id, i, j)
                out = True
                break
            except TypeError as e:  # response carried no 'comments' list
                print(e)
                print(id, i, j)
                break
            comment_time = comment[j].get('creationTime')
            color = comment[j].get('productColor')  # extracted but not stored
            bought_time = comment[j].get('referenceTime')
            nickname = comment[j].get('nickname')
            l = [name, price, CommentCount, GoodRate, GeneralRate, PoorRate,
                 tag, com, comment_time, nickname, bought_time]
            save_to_sql(l)
    if out:
        break

get_info collects all of the information for one water heater model.

First, the product id is extracted from the URL; it is used to build the requests for the price, the reviews, and the rest of the data. price_url, constructed from the product id, returns a JSON document containing the product's price; json.loads() decodes it into Python objects, from which the price is read.
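For illustration, the price endpoint returns a small JSON array; the payload below is fabricated, but its shape matches what the parsing code expects:

raw = '[{"id": "J_1106432", "p": "1299.00", "m": "1999.00"}]'
price = float(json.loads(raw)[0].get('p'))  # -> 1299.0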

comment_info_url returns the aggregate review statistics for the product (review count, positive-review rate, and so on) as JSON; the same figures can also be read from the comment_tag_url response below.

comment_tag_url is simply page 0 of comment_url. JD keeps at most 100 pages of reviews per product, with at most ten reviews per page, so we loop over the page numbers to build the URLs. The response is not a plain JSON document but a JSONP callback, so a regular expression extracts the JSON payload from the wrapper. When a product has few reviews and the page number runs past the last page, the outer loop breaks; likewise, the inner loop breaks when a page holds fewer than ten reviews. Each assembled row is then inserted into the MySQL database.
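To make the JSONP handling concrete, here is a minimal sketch of the unwrapping step (the sample payload is fabricated):

import json
import re

# The endpoint wraps its JSON in a callback: fetchJSON_comment98({...});
raw = 'fetchJSON_comment98({"comments": [{"content": "加热快", "nickname": "j***n"}]});'
payload = json.loads(re.compile(r".*?\((.*)\);").findall(raw)[0])
print(payload.get('comments')[0].get('content'))  # -> 加热快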

def job(z):
    return get_info(z[0], z[1])

def main():
    urls = get_detail_urls()
    print(len(urls))
    pool = Pool()          # one worker per CPU core by default
    pool.map(job, urls)    # scrape the products in parallel
    pool.close()
    pool.join()

The main function only needs to wire these pieces together; multiprocessing's Pool scrapes the products across several processes. The complete code follows:

import requests
from bs4 import BeautifulSoup
import json
import re
import pymysql
import random
from multiprocessing import Pool

connection = pymysql.connect(host='localhost', user='root',
                             password='0102003', database='spider')
cursor = connection.cursor()

# NB: requests does not consult this attribute; retries are manual below.
requests.DEFAULT_RETRIES = 5

uas = ["Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
       "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
       "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
       "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
       "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
       "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
       "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
       "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
       "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
       "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
       "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
       "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
       "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
       "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
       "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
       "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
       "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
       "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"]

def get_html(url):
    """
    Fetch the HTML of a page.
    :param url: URL of the page
    :return: HTML text
    """
    ua = random.choice(uas)
    head = {"user-agent": ua,
            'authority': 'item.jd.com',
            'method': 'GET',
            'path': '/1106432.',
            'scheme': 'https'}
    try:
        r = requests.get(url, headers=head)
        return r.text
    except Exception as e:
        print(e)
        return get_html(url)

def get_detail_urls():
    """
    Collect the detail-page URL and name of every Midea water heater.
    :return: list of (url, name) tuples
    """
    detail_url_list = []
    for i in range(1, 14):
        url = 'https://list.jd.com/list.html?cat=737,13297,13690&ev=exbrand%5F12380&page={}&' \
              'sort=sort_totalsales15_desc&trans=1&JL=6_0_0#J_main'.format(i)
        soup = BeautifulSoup(get_html(url), 'lxml')
        for j in range(1, len(soup.select('#plist > ul > li')) + 1):
            url = "https:" + soup.select('#plist > ul > li:nth-child({}) > div > div.p-name > a'.format(j))[0] \
                .attrs['href']
            name = soup.select('#plist > ul > li:nth-child({}) > div > div.p-name > a > em'.format(j))[0] \
                .get_text().strip()
            detail_url_list.append((url, name))
    return detail_url_list

def save_to_sql(l):
    sql = "insert into media values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
    try:
        cursor.execute(sql, l)
        connection.commit()
    except Exception as e:
        print(e)

def get_info(url, title):
    id = url.split('/')[-1].split('.')[0]  # product (SKU) id
    price_url = 'https://p.3.cn/prices/mgets?skuIds=J_' + id
    comment_info_url = 'https://club.jd.com/comment/productCommentSummaries.action?referenceIds={}'.format(id)
    comment_tag_url = 'https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&' \
                      'productId={}&score=0&sortType=5&page=0&pageSize=10&isShadowSku=0&fold=1'.format(id)
    comment_info = json.loads(get_html(comment_info_url)).get('CommentsCount')[0]
    r = re.compile(r".*?\((.*)\)")
    comment_tag = json.loads(r.findall(get_html(comment_tag_url))[0])
    tag = []  # review tags
    for i in comment_tag.get('hotCommentTagStatistics'):
        tag.append(i.get('name') + "(" + str(i.get('count')) + ")")
    tag = str(tag)
    price = float(json.loads(get_html(price_url))[0].get('p'))  # product price
    CommentCount = comment_info.get('CommentCount')  # total number of reviews
    GoodRate = comment_info.get('GoodRate')  # positive-review rate
    PoorRate = comment_info.get('PoorRate')  # negative-review rate
    GeneralRate = comment_info.get('GeneralRate')  # neutral-review rate
    name = title
    out = False
    for i in range(100):
        comment_url = 'https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&' \
                      'productId={}&score=0&sortType=5&page={}&pageSize=10&isShadowSku=0&fold=1'.format(id, i)
        r = re.compile(r".*?\((.*)\);")
        try:
            comment = json.loads(r.findall(get_html(comment_url))[0]).get('comments')
        except Exception as e:
            print(id, i)
            print(e)
            break
        for j in range(10):
            try:
                com = comment[j].get('content')
            except IndexError as e:
                print(e)
                print(id, i, j)
                out = True
                break
            except TypeError as e:
                print(e)
                print(id, i, j)
                break
            comment_time = comment[j].get('creationTime')
            color = comment[j].get('productColor')
            bought_time = comment[j].get('referenceTime')
            nickname = comment[j].get('nickname')
            l = [name, price, CommentCount, GoodRate, GeneralRate, PoorRate,
                 tag, com, comment_time, nickname, bought_time]
            save_to_sql(l)
        if out:
            break

def job(z):
    return get_info(z[0], z[1])

def main():
    urls = get_detail_urls()
    print(len(urls))
    pool = Pool()
    pool.map(job, urls)
    pool.close()
    pool.join()

if __name__ == '__main__':
    main()
