from urllib import request
import codecs
import re
import json
import time
'''
1. get_html(url): fetch the target data; if the request is for the full-review
   JSON endpoint, store the response in the `json` attribute, otherwise store
   it in the `html` attribute.
2. parse_list(): regex-parse every movie link on the current list page, build
   the full reviews URL, request it and parse the review data; then find the
   next-page link, request it and call parse_list() again on the next page...
3. parse_comments(): regex-parse every review on the current page; for each
   review id, build the full-review URL, request it, parse the complete
   review, extract the score, and print/save it... then find the next-page
   link, build the URL, request it and call parse_comments() again...
'''
class DBSpider(object):
    """Crawl the Douban Top250 movie list and print the full review data for
    every movie, following pagination on both the Top250 list pages and each
    movie's reviews pages.
    """

    def __init__(self):
        # HTML of the most recently fetched regular page.
        # (The commented-out codecs.open() block that read a saved local page
        # for offline testing has been removed.)
        self.html = ''
        # JSON payload of the most recently fetched full-review endpoint.
        # Fix: previously this attribute was only created lazily inside
        # get_html(), so reading self.json before any '/full' request raised
        # AttributeError.
        self.json = ''
        self.headers = {
            'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Cookie':'ll="128517"; bid=JWYJzKhS_Vc; __yadk_uid=t8sJyKbxnVnF3NFso9uRLV8WW0oJDKxH; _vwo_uuid_v2=D639114FFF67EC089E7D03E92504EBEC5|8ba743c5c00bd36a97c0fec4da764061; ps=y; dbcl2="175108542:665ypl9UG1Q"; ck=MI-6; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1520490928%2C%22https%3A%2F%2Fwww.douban.com%2Faccounts%2Flogin%3Fredir%3Dhttps%253A%252F%252Fmovie.douban.com%252Ftop250%253Fqq-pf-to%253Dpcqq.group%22%5D; _pk_ses.100001.4cf6=*; push_noty_num=0; push_doumail_num=0; _pk_id.100001.4cf6=b7a8892258d1a5dd.1517454912.4.1520493388.1520478251.',
            'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'
        }

    def get_html(self, url):
        """Fetch *url* and store the decoded body.

        Full-review endpoints (URL contains '/full') return JSON and are
        stored in self.json; every other page is HTML and is stored in
        self.html.
        """
        req = request.Request(url, headers=self.headers)
        response = request.urlopen(req)
        if '/full' in url:
            self.json = response.read().decode('utf-8')
        else:
            self.html = response.read().decode('utf-8')
        # Throttle requests so we do not hammer the server.
        time.sleep(2)

    def parse_list(self):
        """Parse the Top250 list page currently held in self.html.

        For every movie link found, fetch and parse its reviews pages, then
        follow the list page's own next-page link recursively.
        """
        # Snapshot the list page now: get_html() calls inside the loop
        # overwrite self.html with reviews pages, so the original code's
        # post-loop next-page search ran against the wrong page (bug fix).
        list_html = self.html
        movie_links = re.findall(
            re.compile(r'<div class="hd.*?href="(.*?)"', re.S), list_html)
        for link in movie_links:
            # Movie links end with '/', so appending yields '.../reviews'.
            reviews_url = link + 'reviews'
            self.get_html(reviews_url)
            self.parse_comments(reviews_url)
        # Pagination of the Top250 list itself.
        next_match = re.search(re.compile(r'<link rel="next" href="(.*?)"'),
                               list_html)
        if next_match:
            # NOTE(review): the captured href may itself start with '?', which
            # would double the '?' here — behavior kept from the original.
            next_href = 'https://movie.douban.com/top250?' + next_match.group(1)
            self.get_html(next_href)
            self.parse_list()
        else:
            print('没有下一页')

    def parse_comments(self, url):
        """Parse every review on the reviews page currently in self.html.

        :param url: the reviews-list URL of the current movie; used as the
                    base when building the next-page link.
        """
        # Snapshot the reviews page: the '/full' fetches below only touch
        # self.json, but keeping an explicit local makes that invariant clear.
        page_html = self.html
        pattern = re.compile(
            r'<div class="main review-item.*?id="(.*?)".*?v:reviewer.*?>(.*?)</a>'
            r'.*?<span.*?class="(.*?)".*?title="(.*?)".*?<span.*?>(.*?)</span',
            re.S)
        for ID, name, star, suggest, date in re.findall(pattern, page_html):
            # Fetch the complete review body as JSON.  Fix: the original
            # rebound the loop-local name `url`, which then corrupted the
            # next-page URL built after the loop.
            full_url = 'https://movie.douban.com/j/review/%s/full' % ID
            self.get_html(full_url)
            com_dict = json.loads(self.json)
            # Guard against a missing 'html' key instead of crashing in re.sub.
            comment = com_dict.get('html') or ''
            # Turn <br> into newlines, then strip remaining tags/whitespace.
            comment = re.sub(re.compile('<br>'), '\n', comment)
            comment = re.sub(re.compile(r'<.*?>|\n| ', re.S), '', comment)
            # The star rating is embedded in the class name, e.g. 'allstar40'.
            score = re.search(re.compile(r'\d+'), star).group()
            print('作者:{}\n作者ID:{}\n分数:{}\n建议:{}\n发布日期:{}\n评价内容:{}'.format(name, ID, score, suggest, date, comment))
        # Follow the reviews pagination relative to the reviews-list URL
        # (bug fix: the original concatenated onto the '/full' JSON URL).
        next_match = re.search(re.compile(r'<link rel="next" href="(.*?)"'),
                               page_html)
        if next_match:
            next_href = url + next_match.group(1)
            self.get_html(next_href)
            self.parse_comments(url)
        else:
            print('没有下一页')

    def start(self):
        """Entry point: fetch the first Top250 page and start the crawl."""
        self.get_html('https://movie.douban.com/top250?qq-pf-to=pcqq.group')
        self.parse_list()
# Run the spider only when executed as a script, not on import.
if __name__ == '__main__':
    spider = DBSpider()
    spider.start()
import codecs
import re
import json
import time
'''
(Duplicate copy of the design notes above.)
1. get_html(url): fetch the target data; if the request is for the full-review
   JSON endpoint, store the response in the `json` attribute, otherwise store
   it in the `html` attribute.
2. parse_list(): regex-parse every movie link on the current list page, build
   the full reviews URL, request it and parse the review data; then find the
   next-page link, request it and call parse_list() again on the next page...
3. parse_comments(): regex-parse every review on the current page; for each
   review id, build the full-review URL, request it, parse the complete
   review, extract the score, and print/save it... then find the next-page
   link, build the URL, request it and call parse_comments() again...
'''
# NOTE(review): this whole class is a duplicated copy of the DBSpider defined
# earlier in the file (likely an accidental paste); this later definition is
# the one that wins, so it receives the same fixes.
class DBSpider(object):
    """Crawl the Douban Top250 movie list and print the full review data for
    every movie, following pagination on both the Top250 list pages and each
    movie's reviews pages.
    """

    def __init__(self):
        # HTML of the most recently fetched regular page.
        self.html = ''
        # JSON payload of the most recently fetched full-review endpoint.
        # Fix: previously this attribute was only created lazily inside
        # get_html(), so reading self.json before any '/full' request raised
        # AttributeError.
        self.json = ''
        self.headers = {
            'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Cookie':'ll="128517"; bid=JWYJzKhS_Vc; __yadk_uid=t8sJyKbxnVnF3NFso9uRLV8WW0oJDKxH; _vwo_uuid_v2=D639114FFF67EC089E7D03E92504EBEC5|8ba743c5c00bd36a97c0fec4da764061; ps=y; dbcl2="175108542:665ypl9UG1Q"; ck=MI-6; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1520490928%2C%22https%3A%2F%2Fwww.douban.com%2Faccounts%2Flogin%3Fredir%3Dhttps%253A%252F%252Fmovie.douban.com%252Ftop250%253Fqq-pf-to%253Dpcqq.group%22%5D; _pk_ses.100001.4cf6=*; push_noty_num=0; push_doumail_num=0; _pk_id.100001.4cf6=b7a8892258d1a5dd.1517454912.4.1520493388.1520478251.',
            'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'
        }

    def get_html(self, url):
        """Fetch *url* and store the decoded body.

        Full-review endpoints (URL contains '/full') return JSON and are
        stored in self.json; every other page is HTML and is stored in
        self.html.
        """
        req = request.Request(url, headers=self.headers)
        response = request.urlopen(req)
        if '/full' in url:
            self.json = response.read().decode('utf-8')
        else:
            self.html = response.read().decode('utf-8')
        # Throttle requests so we do not hammer the server.
        time.sleep(2)

    def parse_list(self):
        """Parse the Top250 list page currently held in self.html.

        For every movie link found, fetch and parse its reviews pages, then
        follow the list page's own next-page link recursively.
        """
        # Snapshot the list page now: get_html() calls inside the loop
        # overwrite self.html with reviews pages, so the original code's
        # post-loop next-page search ran against the wrong page (bug fix).
        list_html = self.html
        movie_links = re.findall(
            re.compile(r'<div class="hd.*?href="(.*?)"', re.S), list_html)
        for link in movie_links:
            # Movie links end with '/', so appending yields '.../reviews'.
            reviews_url = link + 'reviews'
            self.get_html(reviews_url)
            self.parse_comments(reviews_url)
        # Pagination of the Top250 list itself.
        next_match = re.search(re.compile(r'<link rel="next" href="(.*?)"'),
                               list_html)
        if next_match:
            # NOTE(review): the captured href may itself start with '?', which
            # would double the '?' here — behavior kept from the original.
            next_href = 'https://movie.douban.com/top250?' + next_match.group(1)
            self.get_html(next_href)
            self.parse_list()
        else:
            print('没有下一页')

    def parse_comments(self, url):
        """Parse every review on the reviews page currently in self.html.

        :param url: the reviews-list URL of the current movie; used as the
                    base when building the next-page link.
        """
        # Snapshot the reviews page: the '/full' fetches below only touch
        # self.json, but keeping an explicit local makes that invariant clear.
        page_html = self.html
        pattern = re.compile(
            r'<div class="main review-item.*?id="(.*?)".*?v:reviewer.*?>(.*?)</a>'
            r'.*?<span.*?class="(.*?)".*?title="(.*?)".*?<span.*?>(.*?)</span',
            re.S)
        for ID, name, star, suggest, date in re.findall(pattern, page_html):
            # Fetch the complete review body as JSON.  Fix: the original
            # rebound the loop-local name `url`, which then corrupted the
            # next-page URL built after the loop.
            full_url = 'https://movie.douban.com/j/review/%s/full' % ID
            self.get_html(full_url)
            com_dict = json.loads(self.json)
            # Guard against a missing 'html' key instead of crashing in re.sub.
            comment = com_dict.get('html') or ''
            # Turn <br> into newlines, then strip remaining tags/whitespace.
            comment = re.sub(re.compile('<br>'), '\n', comment)
            comment = re.sub(re.compile(r'<.*?>|\n| ', re.S), '', comment)
            # The star rating is embedded in the class name, e.g. 'allstar40'.
            score = re.search(re.compile(r'\d+'), star).group()
            print('作者:{}\n作者ID:{}\n分数:{}\n建议:{}\n发布日期:{}\n评价内容:{}'.format(name, ID, score, suggest, date, comment))
        # Follow the reviews pagination relative to the reviews-list URL
        # (bug fix: the original concatenated onto the '/full' JSON URL).
        next_match = re.search(re.compile(r'<link rel="next" href="(.*?)"'),
                               page_html)
        if next_match:
            next_href = url + next_match.group(1)
            self.get_html(next_href)
            self.parse_comments(url)
        else:
            print('没有下一页')

    def start(self):
        """Entry point: fetch the first Top250 page and start the crawl."""
        self.get_html('https://movie.douban.com/top250?qq-pf-to=pcqq.group')
        self.parse_list()
# Script entry point (duplicate guard from the pasted second copy).
if __name__ == '__main__':
    spider = DBSpider()
    spider.start()