正则表达式爬取古诗文（gushiwen.org）示例
完整代码如下
import re
import requests
import time
# url = 'https://www.gushiwen.org/default_1.aspx'
def parse_page(url):
    """Fetch one gushiwen.org listing page, print each poem and append it to main.txt.

    Each record is formatted as ``title@title,dynasty,author,poem`` with ``·``
    replaced by ``.`` and ``/`` replaced by ``-``.

    :param url: full URL of a listing page (e.g. ``.../default_1.aspx``).
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.92 Safari/537.36'
    }
    # Bug fix: the original passed `headers` positionally, which requests.get()
    # interprets as `params` — the User-Agent header was never actually sent.
    response = requests.get(url, headers=headers)
    text = response.text
    titles = re.findall(r'<div\sclass="cont">.*?<b>(.*?)</b>', text, re.DOTALL)
    dynasties = re.findall(r'<p\sclass="source".*?<a.*?>(.*?)</a>', text, re.DOTALL)
    authors = re.findall(r'<p\sclass="source".*?</a>.*?<a.*?>(.*?)</a>', text, re.DOTALL)
    contents = re.findall(r'<div\sclass="contson".*?>(.*?)</div>', text, re.DOTALL)
    # Strip any residual HTML tags from each poem body.
    poems = [re.sub(r'<.*?>', '', content).strip() for content in contents]
    # Bug fix: the original opened main.txt on every iteration, never wrote to
    # it, and never closed it (resource leak). Open once and actually persist.
    with open('main.txt', 'a', encoding='utf-8') as out:
        for title, dynasty, author, poem in zip(titles, dynasties, authors, poems):
            # NOTE(review): `title` appears twice in the original record format;
            # preserved as-is, but confirm whether a filename/ID was intended
            # before the '@'.
            record = title + '@' + title + ',' + dynasty + ',' + author + ',' + poem
            record = record.replace('·', '.').replace('/', '-')
            print(record)
            out.write(record + '\n')
    # Polite crawl delay between pages (hoisted out of the per-poem loop,
    # where it throttled ~2 s per poem instead of per request).
    time.sleep(2)
def main(start=1, end=10):
    """Scrape listing pages ``start`` through ``end`` (inclusive).

    Generalized from the original hard-coded 1–10 range; calling ``main()``
    with no arguments behaves exactly as before.

    :param start: first page number to fetch (default 1).
    :param end: last page number to fetch, inclusive (default 10).
    """
    base_url = 'https://www.gushiwen.org/default_{}.aspx'
    for page in range(start, end + 1):
        parse_page(base_url.format(page))


if __name__ == '__main__':
    main()