The code is as follows:
import re
import requests
from lxml import etree
import csv
from urllib.parse import urljoin
url_list = "https://www.woniuxy.com/note/page-{}"
base_url = "https://www.woniuxy.com/"
content_list=[]
# Collect the URL of every article from the list pages
def parse_list_page():
    for i in range(1, 9):   # list pages 1 through 8
        print("Processing list page {}, please wait...".format(i))
        resp = requests.get(url_list.format(i))
        html = etree.HTML(resp.text)
        page_urls = html.xpath("//div[@class='title']/a")
        for url in page_urls:
            # Treat yield like return: the function pauses here, hands one value back to the
            # caller, and resumes from this exact spot on the next iteration (it is a generator).
            # urljoin(base_url, url.get('href')) turns each relative href into an absolute address.
            yield urljoin(base_url, url.get('href'))
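The comment on the yield line is worth seeing in isolation. Below is a short standalone sketch, not part of the scraper, with made-up relative paths, showing how a generator hands back one absolute URL at a time and how urljoin combines the base address with a relative href:

from urllib.parse import urljoin

def demo_links():
    for href in ["note/1001", "note/1002"]:   # hypothetical relative paths for illustration
        # execution pauses at yield and resumes here on the caller's next iteration
        yield urljoin("https://www.woniuxy.com/", href)

for link in demo_links():
    print(link)
# https://www.woniuxy.com/note/1001
# https://www.woniuxy.com/note/1002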
# Extract the content of a single article page
def page_note_page(url):
    print("Extracting content from {} ...".format(url))
    resp = requests.get(url)
    html = etree.HTML(resp.text)
    # Article title
    title = html.xpath("//div[contains(@class,'title')]")[0].text.strip()
    # Block that holds the author and other metadata
    info_object = html.xpath("//div[contains(@class,'info')]")[1].text.strip()
    # Pull the individual fields out of the metadata text with a regular expression
    result = re.findall(r'作者:(.*?)\s+类型:(.*?)\s+类别:(.*?)\s+日期:(.*?)\s+阅读:(.*?)\s+消耗积分:(.*?)\s', info_object)
    # Unpack the first match into separate variables
    (author, texst_type, article_type, date, read_num, score) = result[0]
    # Full text of the article body
    content = html.xpath("//div[@id='content']")[0].xpath("string(.)").strip()
    # Collect the image URLs that appear in the body
    img_list = []
    for pic in html.xpath("//div[@id='content']//img/@src"):
        img_list.append(pic)
    content_list.append([url, title, author, texst_type, article_type, date, read_num, score, content, img_list])
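As a quick aside, the metadata regex above relies on re.findall returning a list of tuples, one tuple per match, so result[0] can be unpacked into six variables. A minimal sketch with an invented sample string (the real text comes from the page's info div):

import re

info = "作者:Tom 类型:原创 类别:Python 日期:2021-01-01 阅读:100 消耗积分:0 "   # invented sample
fields = re.findall(r'作者:(.*?)\s+类型:(.*?)\s+类别:(.*?)\s+日期:(.*?)\s+阅读:(.*?)\s+消耗积分:(.*?)\s', info)
print(fields[0])   # ('Tom', '原创', 'Python', '2021-01-01', '100', '0')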
if __name__ == '__main__':
    # newline="" keeps csv.writer from inserting blank rows between records
    with open('woniuwang.csv', "w", newline="", encoding="utf-8") as f:
        csv_writer = csv.writer(f, dialect="excel")
        csv_writer.writerow(['url', 'title', 'author', 'texst_type', 'article_type', 'date', 'read_num', 'score', 'content', 'img_list'])
        for url in parse_list_page():
            page_note_page(url)
        print("Extraction finished, writing the rows to the CSV file")
        for page in content_list:
            csv_writer.writerow(page)
        print("All done!")