import lxml.html
import pymongo
import requests
'''
1. Crawl the forum for the given topic and parse out every thread
   (extracting the thread title, author and creation time).
2. Download the first-floor content of each thread's detail page
   (text only, no multimedia).
3. Be able to click "next page" to paginate.
4. Store the parsed results in a database (MongoDB).
'''
# Connect to the local MongoDB server; the 'tieba' database and its
# 'tiezi' collection are created lazily on first insert.
client = pymongo.MongoClient(host='localhost', port=27017)
collection = client['tieba']['tiezi']
# Fetch the forum index page and parse it with lxml; page inspection shows
# each thread is rendered as an <li class=" j_thread_list clearfix"> element.
# A timeout is mandatory: requests.get without one can block forever.
response = requests.get('https://tieba.baidu.com/f?kw=lol&ie=utf-8&pn=0',
                        timeout=10)
response.raise_for_status()  # fail fast on an HTTP error instead of parsing an error page
parse_result = lxml.html.fromstring(response.text)
tiezis = parse_result.xpath('//li[@class=" j_thread_list clearfix"]')
# For each thread <li>: extract title/author/creation time, build the absolute
# thread URL, fetch the detail page, pull the first-floor text, and insert one
# document per thread into MongoDB.
for tiezi in tiezis:
    # xpath() returns a (possibly empty) list; guard every field instead of
    # indexing [0] blindly — pinned/ad entries can lack any of these and
    # would otherwise crash the whole crawl with IndexError.
    titles = tiezi.xpath('.//a[@class="j_th_tit "]/text()')            # thread title
    authors = tiezi.xpath('.//span[@data-field]/@title')               # author name
    created = tiezi.xpath('.//span[@title="创建时间"]/text()')          # creation time
    hrefs = tiezi.xpath('.//a[@class="j_th_tit "]/@href')              # relative thread link
    if not (titles and authors and created and hrefs):
        continue  # skip malformed/special entries

    lianjie = 'https://tieba.baidu.com' + hrefs[0]  # absolute thread URL
    # Timeout so one slow detail page cannot hang the crawler.
    details = requests.get(lianjie, timeout=10)
    deta_html = lxml.html.fromstring(details.text)
    contents = deta_html.xpath(
        '//div[@class="d_post_content_main d_post_content_firstfloor"]//div[starts-with(@id,"post_content_")]/text()')

    # Build the document and store it. (The original dead `info = author`
    # assignment was immediately overwritten and has been removed.)
    info = {
        'title': titles[0],
        'author': authors[0],
        'time': created[0],
        'lianjie': lianjie,
        # A first floor that is pure media has no text nodes; store ''.
        'content': contents[0] if contents else '',
    }
    collection.insert_one(info)