写在前面:
本文已获得原博主授权转载,转载链接:Python爬虫分析CSDN个人博客数据_Caso_卡索的博客-CSDN博客
代码来源Caso_卡索博主的github:https://github.com/xiaoma101017/ParseCSDNBlog
转载目的仅供存档学习及日常使用,请勿做商业用途。
Step one:检查网页源代码:
爬取代码运行过程:
爬取结果:
附:完整代码(有部分修改)
import requests
from bs4 import BeautifulSoup
import pandas as pd
import os
import re
Host = "lddwarehouse.blog.csdn.net" # Host header value used on every request
User_Agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36 "  # browser UA string sent with every request
Source = 'html.txt' # temp file: concatenated html of the blog list pages
EachSource = 'each.txt' # temp file: html of the article currently being parsed
OUTPUT = "蓝多多博客汇总.csv" # CSV file the scraped blog metadata is exported to
results = [] # accumulates one metadata row per scraped article
#相关函数定义
def parseEachBlog(link):
    """Fetch one blog article page and extract its metadata.

    Parameters:
        link: absolute URL of a single blog article.
    Returns:
        A 7-item list: [title, link, read_count, collection_count,
        publish_time, first_column, article_type]. The two counts are
        digit-only strings stripped from the page markup.
    """
    # Bug fix: the Referer header VALUE must be the bare URL; the original
    # prepended the literal text "Referer: " into the value itself.
    headers = {"Referer": link, "User-Agent": User_Agent}
    # Bug fix: article pages are fetched with GET, not POST.
    r = requests.get(link, headers=headers)
    html = r.text
    # Keep a copy of the raw page for debugging, but parse the in-memory
    # text directly instead of re-reading the file via an unclosed handle.
    with open(EachSource, 'w', encoding='UTF-8') as f:
        f.write(html)
    soup = BeautifulSoup(html, features="html.parser")
    readcontent = soup.select('.bar-content .read-count')
    collection = soup.select('.bar-content .get-collection')
    # Strip every non-digit character, leaving just the numeric count.
    readcounts = re.sub(r'\D', "", str(readcontent[0]))
    collections = re.sub(r'\D', "", str(collection[0]))
    print(soup.select('.title-article')[0])
    blogname = soup.select('.title-article')[0].text
    time = soup.select('.bar-content .time')[0].text
    programa = soup.select('.blog-tags-box .tag-link')[0].text
    print(programa)
    # NOTE(review): on some pages the article type is rendered as an image,
    # so this selector yields wrong text — the original author worked around
    # it with OCR (renamed from `type`, which shadowed the builtin).
    article_type = soup.select('.text-center .font')[0].text
    print(article_type)
    return [blogname, link, readcounts, collections, time, programa, article_type]
def getBlogList(pages):
    """Collect the unique article URLs from the first *pages* list pages.

    Parameters:
        pages: number of blog-list pages to scan (int or numeric string).
    Returns:
        List of article-detail URLs in first-seen order, deduplicated.
    """
    listhome = "https://" + Host + "/article/list/"
    page_htmls = []
    for number in range(1, int(pages) + 1):
        url = listhome + str(number) + "?t=1"
        headers = {"Referer": url, "Host": Host, "User-Agent": User_Agent}
        # Bug fix: list pages are plain GETs; the original issued POSTs.
        response = requests.get(url, headers=headers)
        page_htmls.append(response.text)
    html = "".join(page_htmls)
    # Bug fix: write (not append) so a leftover html.txt from a previous,
    # possibly crashed run cannot pollute this run's link extraction.
    with open(Source, 'w', encoding='UTF-8') as f:
        f.write(html)
    soup = BeautifulSoup(html, features="html.parser")
    # Raw string avoids the invalid-escape warning on \d; derive the host
    # from the Host constant instead of duplicating it in the pattern.
    re_pattern = r"^https://" + re.escape(Host) + r"/article/details/\d+$"
    hrefs = []
    seen = set()  # O(1) dedup instead of hrefs.count() per link
    for a in soup.find_all('a', href=True):
        if a.get_text(strip=True):
            href = a['href']
            if re.match(re_pattern, href) and href not in seen:
                seen.add(href)
                hrefs.append(href)
    return hrefs
def parseData(data=None, output=None):
    """Sort the scraped rows by view count (descending) and export to CSV.

    Parameters:
        data: list of 7-item rows; defaults to the module-level `results`
              (backward compatible — calling parseData() behaves as before).
        output: destination CSV path; defaults to the module-level OUTPUT.
    """
    if data is None:
        data = results
    if output is None:
        output = OUTPUT
    # row[2] is the digit-string view count — sort numerically, most-read first.
    data.sort(key=lambda row: int(row[2]), reverse=True)
    # Bug fix: pass the header via columns= so an empty run no longer raises
    # ValueError from assigning 7 column names to a 0-column frame.
    dataframe = pd.DataFrame(
        data=data,
        columns=['文章标题', '文章链接', '浏览量', '收藏量', '发布时间', '第一所属专栏', '文章类型'],
    )
    # gbk keeps the file readable in Chinese-locale Excel, as the original did.
    dataframe.to_csv(output, index=False, sep=',', encoding='gbk')
def delTempFile():
    """Remove the temporary html dumps, skipping any that never got created."""
    for temp_path in (Source, EachSource):
        if os.path.exists(temp_path):
            os.remove(temp_path)
if __name__ == '__main__':
    # Ask how many list pages to scan, scrape every article found on them,
    # export the collected rows to CSV, then clean up the temp files.
    page_count = input("请输入博客列表页数: ")
    article_links = getBlogList(page_count)
    print("开始获取数据...")
    for article_link in article_links:
        print("当前获取: %s"%(article_link))
        results.append(parseEachBlog(article_link))
    parseData()
    delTempFile()
附:文章类型的图像识别
川川大佬:python菜鸟_川川菜鸟_CSDN博客 帮蓝多多解答了关于文章类型是图片如何处理的问题,非常感谢!
Step one:先使用soup.select获取该篇文章类型的图片描述:
Step two:使用save函数将这个图保存到本地(在下面的识别完成以后再删除)
Step three:读取图片并提取文字:
# OCR workaround for article types rendered as images (third-party ddddocr):
# read the previously saved image file and convert it to text.
import ddddocr
ocr = ddddocr.DdddOcr()
# 'c.png' is the article-type image saved to disk in the previous step.
with open('c.png', 'rb') as f:
    img_bytes = f.read()
res = ocr.classification(img_bytes)  # recognized text of the article type
print(res)
即可获得该文章类型的text:
交流:
(代码还没有修改,有兴趣可以自己先试试嗷)