Python 爬取百度贴吧帖子列表（requests + lxml）
import requests
from lxml import etree
import json
class Tieba(object):
    """Crawler that walks the thread-list pages of a Baidu Tieba forum.

    Fetches each listing page, extracts thread titles and links, appends
    them to a local file, then follows the "next page" link until the
    last page is reached.
    """

    def __init__(self, name):
        """:param name: forum keyword (``kw``) to crawl, e.g. '小米'."""
        self.url = 'https://tieba.baidu.com/f?ie=utf-8&kw={}'.format(name)
        # Desktop UA so Baidu serves the full (non-mobile) markup.
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'}

    def get_data(self, url):
        """Download *url* and return the raw response body as bytes."""
        response = requests.get(url, headers=self.headers)
        print(url)
        return response.content

    def parse_data(self, data):
        """Parse one listing page.

        :param data: raw page bytes as returned by :meth:`get_data`.
        :return: ``(data_list, next_url)`` where *data_list* is a list of
            ``{'title': ..., 'link': ...}`` dicts and *next_url* is the
            absolute URL of the next page, or ``None`` on the last page.
        """
        # Tieba wraps the thread list in HTML comments; strip the comment
        # markers so lxml can see the real markup.
        data = data.decode().replace("<!--", "").replace("-->", "")
        html = etree.HTML(data)
        el_list = html.xpath('//li[@class=" j_thread_list clearfix thread_item_box"]/div/div[2]/div[1]/div[1]/a')
        data_list = []
        for el in el_list:
            temp = {}
            temp['title'] = el.xpath('./text()')[0]
            # BUG FIX: use this thread's own href (a site-relative path such
            # as /p/123...), not the pager's "下一页" link, which the original
            # code stored for every single thread.
            temp['link'] = 'https://tieba.baidu.com' + el.xpath('./@href')[0]
            data_list.append(temp)
        try:
            # Pager hrefs are protocol-relative (//tieba.baidu.com/...),
            # so only the scheme needs prepending here.
            next_url = 'https:' + html.xpath('//*[@id="frs_list_pager"]/a[10]/@href')[0]
        except IndexError:
            # No 10th pager link -> we are on the last page.
            next_url = None
        return data_list, next_url

    def save(self, data_list):
        """Append one page of results to the 'news' file, one JSON object per line."""
        # BUG FIX: open in append mode; mode 'w' truncated the file on every
        # page so only the last page survived.  One record per line keeps
        # the output readable as JSON Lines.
        with open('news', 'a', encoding='utf-8') as f:
            for data in data_list:
                f.write(json.dumps(data, ensure_ascii=False))
                f.write('\n')
                print(data)

    def run(self):
        """Crawl page by page until there is no next page."""
        next_url = self.url
        while True:
            data = self.get_data(next_url)
            data_list, next_url = self.parse_data(data)
            self.save(data_list)
            print(next_url)
            if next_url is None:
                break
if __name__ == '__main__':
    # Entry point: crawl the '小米' forum from page one onward.
    spider = Tieba('小米')
    spider.run()

628

被折叠的评论
为什么被折叠?



