# -*- coding:utf-8 -*-
# Scrape short comments for Douban's Top 250 books
# author: MrLuo
import random

import requests
from bs4 import BeautifulSoup
from lxml import etree

# Pool of User-Agent strings; a random one is sent with each request
headers = [
    {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0'},
    {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'},
    {'User-Agent': 'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11'},
    {'User-Agent': 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)'},
    {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:40.0) Gecko/20100101 Firefox/40.0'},
    {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/44.0.2403.89 Chrome/44.0.2403.89 Safari/537.36'},
    {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'},
]

# Send a request and return the HTML source of the page
def get_html(url):
    # Pick a random User-Agent so the request looks like an ordinary browser;
    # the timeout keeps the script from hanging on a dead connection
    resp = requests.get(url, headers=random.choice(headers), timeout=10)
    return resp.text
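
# A minimal usage sketch (the URL is Douban's Top 250 list page):
#   page = get_html('https://book.douban.com/top250?start=0')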

# Earlier draft: collect each book's short-comment URL from the list page
# (superseded by the logic inside html_parse below)
# def get_comments_url():
#     for item in soup.select('div[class="pl2"] a'):
#         book_urls = item.get('href')
#         book_urls = book_urls + 'comments/?start='
#         book_names.append(book_urls)
#     return book_names

# Fetch the short comments for one book and join them into a single string
def get_comments(books):
    comments = []
    # Only the first page of 20 comments is fetched; widen the range for more
    # (each page is selected via the ?start= offset, 20 comments per page)
    for i in range(1):
        url = books + str(i * 20)
        page = requests.get(url, headers=random.choice(headers)).text
        html = etree.HTML(page)
        # XPath expressions follow Douban's comment-page layout
        titles = html.xpath('//*[@id="comments"]/div[1]/ul/li/div[2]/p/span/text()')
        users = html.xpath('//*[@id="comments"]/div[1]/ul/li/div[2]/h3/span[2]/a/text()')
        for title, user in zip(titles, users):
            comments.append(user + ':' + title + '\n')
    # Join the list into one string (the items are already strings)
    return ''.join(comments)
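
# Usage sketch: the argument is the book's subject URL plus 'comments/?start='
# (the subject ID below is only an illustration):
#   text = get_comments('https://book.douban.com/subject/1007305/comments/?start=')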

# Parse every list page and print each book's details
def html_parse():
    # all_page() yields the URL of each Top 250 list page
    for url in all_page():
        # Parse the page with BeautifulSoup
        soup = BeautifulSoup(get_html(url), 'lxml')
        # Book titles
        alldiv = soup.find_all('div', class_='pl2')
        names = [a.find('a')['title'] for a in alldiv]
        # Authors
        allp = soup.find_all('p', class_='pl')
        authors = [p.get_text() for p in allp]
        # Ratings
        starspan = soup.find_all('span', class_='rating_nums')
        scores = [s.get_text() for s in starspan]
        # One-line blurbs; books without one are simply missing from this list,
        # which can shift the zip() pairing below out of alignment
        sumspan = soup.find_all('span', class_='inq')
        sums = [i.get_text() for i in sumspan]
        # Original-text excerpts (earlier draft, kept for reference)
        # excerpts_list = soup.find_all('li', class_='hd')
        # excerpts = [m.get_text() for m in excerpts_list]
        # Build each book's comments URL and fetch its short comments
        comments_url = soup.select('div[class="pl2"] a')
        book_urls = [t.get('href') for t in comments_url]
        comments = []
        for book_url in book_urls:
            books = book_url + 'comments/?start='
            comments.append(get_comments(books))
        # 'summary' avoids shadowing the built-in sum()
        for name, author, score, summary, com in zip(names, authors, scores, sums, comments):
            name = 'Title: ' + str(name) + '\n'
            author = 'Author: ' + str(author) + '\n'
            score = 'Rating: ' + str(score) + '\n'
            summary = 'Blurb: ' + str(summary) + '\n'
            comm = 'Short comments: ' + str(com) + '\n'
            # excerpt = 'Excerpts: ' + str(excerpt) + '\n'
            data = name + author + score + summary + comm
            print(data)
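
# A more robust pairing would walk each book row so that a missing blurb stays
# attached to its own book instead of shifting the whole list. A minimal
# sketch, assuming Douban's current markup of one <tr class="item"> per book
# (untested against layout changes):
# def parse_rows(soup):
#     for row in soup.find_all('tr', class_='item'):
#         title = row.find('div', class_='pl2').find('a')['title']
#         inq = row.find('span', class_='inq')
#         blurb = inq.get_text() if inq else ''
#         yield title, blurb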

# Build the URL of every list page
def all_page():
    base_url = 'https://book.douban.com/top250?start='
    urllist = []
    # Offsets from 0 to 225 in steps of 25: ten pages of 25 books each
    for page in range(0, 250, 25):
        allurl = base_url + str(page)
        urllist.append(allurl)
    return urllist
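
# For reference, all_page() returns:
#   ['https://book.douban.com/top250?start=0',
#    'https://book.douban.com/top250?start=25',
#    ...,
#    'https://book.douban.com/top250?start=225']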

# # To save the output to a file instead of printing it (note: html_parse()
# # would need to call f.write(data) in place of print(data)):
# filename = 'DoubanBooksTop250.txt'
# f = open(filename, 'w', encoding='utf-8')
# html_parse()
# f.close()
# print('Saved successfully.')

if __name__ == '__main__':
    html_parse()