# coding=utf-8
"""
author: lei
function: scrape thread titles and authors from a Baidu Tieba forum list page by page.
"""
import requests
from lxml import etree
import json
class TieBaBaSa(object):
    """Scraper for a Baidu Tieba forum's thread list.

    Crawls the thread list of the given tieba (forum), following the
    "next page" link until the last page, appending the scraped
    title/author records to ``tieba.json`` as JSON Lines.
    """

    def __init__(self, name):
        # ``name`` is the tieba keyword (forum name) interpolated into the search URL.
        self.url = "https://tieba.baidu.com/f?ie=utf-8&kw={}".format(name)
        # Desktop UA so the server returns the full desktop markup.
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36"}

    def get_data(self, url):
        """Fetch ``url`` and return its decoded HTML.

        Tieba wraps the real thread-list markup in HTML comments for
        non-JS clients; stripping the comment markers exposes that
        markup to the XPath queries in ``parse_data``.
        """
        response = requests.get(url, headers=self.headers).content.decode()
        return response.replace("<!--", "").replace("-->", "")

    def parse_data(self, data):
        """Parse one page of thread-list HTML.

        Returns:
            tuple: (list of ``{"title", "writer"}`` dicts,
                    absolute next-page URL, or ``None`` on the last page).
        """
        html = etree.HTML(data)
        el_list = html.xpath('//ul[@id="thread_list"]//li'
                             '//div[@class="col2_right j_threadlist_li_right"]')
        if not el_list:
            # Some renderings emit the class attribute with a trailing space.
            el_list = html.xpath('//ul[@id="thread_list"]//li'
                                 '//div[@class="col2_right j_threadlist_li_right "]')

        data_list = []
        for el in el_list:
            temp = {}
            temp["title"] = el.xpath("./div/div/a/text()")[0]
            temp["writer"] = el.xpath("./div/div[2]/span/@title")[0]
            data_list.append(temp)

        # BUG FIX: on the last page the "next page" anchor is absent and the
        # original ``[0]`` raised IndexError; return None so run() can stop.
        next_links = html.xpath("//a[contains(text(), '下一页>')]/@href")
        next_url = "https:" + next_links[0] if next_links else None
        return data_list, next_url

    def save_data(self, data_list):
        """Append one page of results to ``tieba.json``."""
        with open("tieba.json", "a", encoding="utf-8") as f:
            # BUG FIX: append a newline so successive runs produce valid
            # JSON Lines instead of concatenated, unparseable JSON.
            f.write(json.dumps(data_list, ensure_ascii=False) + "\n")
        print("保存成功!")

    def run(self):
        """Crawl page after page until parse_data reports no next page."""
        next_url = self.url
        while True:
            data = self.get_data(next_url)
            data_list, next_url = self.parse_data(data)
            self.save_data(data_list)
            if next_url is None:
                break
# BUG FIX: the mangled guard ``if name == 'main':`` referenced an undefined
# name; the standard script-entry guard is ``__name__ == '__main__'``.
if __name__ == '__main__':
    # Scrape the "巴塞罗那" (Barcelona) tieba.
    tieba = TieBaBaSa("巴塞罗那")
    tieba.run()