爬虫、爬取CSDN博主文章标题和简介
代码比较简单,主要是库比较好用。爬虫重点在于解析网页,难点在于反爬、网页登录验证、滑窗、翻页等操作。
from time import sleep
from selenium.webdriver.common.by import By
from selenium import webdriver
import os
def main():
    """Scrape a CSDN blogger's article titles and summaries into blogs.txt.

    Prompts for a CSDN username on stdin, opens the blogger's page with
    Edge via Selenium, collects the title and summary text of each listed
    post, and writes them as numbered entries to ./blogs.txt.
    """
    print('请输入用户名(如:lxh248866):')
    name = input()
    url_path = 'https://blog.csdn.net/{}?type=blog'.format(name)
    print('爬取中...')
    driver = webdriver.Edge()
    try:
        driver.implicitly_wait(2)
        driver.minimize_window()
        driver.get(url_path)
        sleep(3)  # give dynamically loaded blog entries time to render
        # Titles and summaries come from parallel element lists on the page.
        blogs_top = driver.find_elements(By.CSS_SELECTOR, ".blog-list-box-top")
        blogs_content = driver.find_elements(By.CSS_SELECTOR, '.blog-list-content')
        titles = [el.text for el in blogs_top]
        summaries = [el.text for el in blogs_content]
    finally:
        # Always release the browser, even if scraping raises; the original
        # never quit the driver and leaked an Edge process per run.
        driver.quit()
    # Mode 'w' truncates any existing file, so no need to delete it first.
    with open("./blogs" + ".txt", "w", encoding='utf8') as txtfile:
        for idx, (title, summary) in enumerate(zip(titles, summaries), start=1):
            txtfile.write(str(idx) + '. ' + title + '\n')
            txtfile.write(summary + '\n')
            txtfile.write('\n')
        print('爬取博客完成!')


if __name__ == '__main__':
    main()
# 爬取CSDN博主网页文章标题和简介
# 一、各文件介绍
1. msedgedriver.exe 为 Edge 浏览器驱动
2. main.py 为爬取博客的代码,输入为要爬取的用户名,如:lxh248866
3. blogs.txt 为生成的存放内容的文档
# 二、环境配置
```
pip install selenium
```
注:os、time 为 Python 标准库,无需 pip 安装。
# 三、运行
```
python3 main.py
```