import re
import requests
# import os
# Desktop-Chrome User-Agent so the site serves the normal HTML page
# instead of blocking the default python-requests identity.
headers = {'user-agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/81.0.4044.92 Safari/537.36')}
def parse_page(url, output_path='D:/program/untitled/douban/joke.txt'):
    """Fetch one listing page and append its extracted jokes to *output_path*.

    Parameters
    ----------
    url : str
        URL of one Qiushibaike text-section page to scrape.
    output_path : str
        Path of the text file the numbered jokes are appended to
        (adjust the default to your own machine as needed).

    Side effects
    ------------
    Appends to *output_path*; each joke is written as its (per-page)
    number, a newline, the joke text, then a blank line.
    """
    response = requests.get(url, headers=headers)
    text = response.text
    # Each joke body sits inside <div class="content"> ... <span>...</span>;
    # re.DOTALL lets '.' cross newlines inside the div.
    contents = re.findall(r'<div\sclass="content".*?<span>(.*?)</span>', text, re.DOTALL)
    # Strip any leftover inline HTML tags (e.g. <br/>) from each joke.
    jokes = [re.sub(r'<.*?>', "", content).strip() for content in contents]
    # BUG FIX: the original opened with mode 'w', truncating the file on
    # every call — a multi-page run kept only the LAST page. Append instead.
    with open(output_path, 'a', encoding='utf-8') as fp:
        # Numbering restarts at 1 for each page, as in the original.
        for num, joke in enumerate(jokes, start=1):
            fp.write(str(num) + '\n' + joke.strip() + '\n' * 2)
def main(start_page=1, end_page=19):
    """Scrape pages *start_page* through *end_page* (inclusive).

    Parameters
    ----------
    start_page : int
        First page number to fetch (defaults preserve the original 1-19 run).
    end_page : int
        Last page number to fetch, inclusive.
    """
    base_url = "https://www.qiushibaike.com/text/page/{}/"
    for page in range(start_page, end_page + 1):
        parse_page(base_url.format(page))
# os.system("pause")
# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
# Scrape jokes (duanzi) from Qiushibaike using regular expressions.