基于python爬虫的数据存储
1.安装python环境
2.安装 requests库和beautifulsoup4
PS E:\PythonProjects\pythonProject> $env:Path += ";E:\PythonProjects\pythonProject\.venv\Scripts"
PS E:\PythonProjects\pythonProject> pip install requests
E:\PythonProjects\pythonProject\.venv\Scripts\python.exe -m pip install beautifulsoup4
3.将爬取的数据存储到 MongoDB 数据库中,需要使用 pymongo 库
pip install pymongo
4.开启 MongoDB 数据库,并连接到 Navicat
5.写个程序测试 Python 连接到 MongoDB 数据库是否成功
import logging
from pymongo import MongoClient
def connect_to_mongodb():
    """Connect to the local MongoDB server and return the 'yjh' database handle.

    Raises:
        Exception: re-raised after logging if the client/database cannot be obtained.
    """
    try:
        # Build a client for the MongoDB instance on the default local port
        # and select (or implicitly create) the target database.
        return MongoClient('mongodb://localhost:27017/')['yjh']
    except Exception as e:
        # Record the failure, then let the caller decide how to handle it.
        logging.error(f"连接失败: {e}")
        raise
def main():
    """Set up logging and check whether the MongoDB connection succeeds."""
    logging.basicConfig(level=logging.INFO)
    try:
        database = connect_to_mongodb()
        if database is not None:
            logging.info("连接成功")
            # Follow-up work (queries, inserts, ...) would go here.
    except Exception as e:
        # Any failure raised while connecting is reported here.
        logging.error(f"发生错误: {e}")


if __name__ == "__main__":
    main()
6.对象设计
爬取对象选择豆瓣电影 Top250 榜单
代码:
import requests
from bs4 import BeautifulSoup
import time
from pymongo import MongoClient
def connect_to_mongodb():
    """Connect to the local MongoDB server and return the 'yjh' database.

    Returns:
        The pymongo database object for 'yjh'.

    Raises:
        Exception: re-raised after logging if the client cannot be created.
    """
    # Local import: this script does not import logging at the top level.
    import logging
    try:
        # Connect to the MongoDB instance on the default local port.
        client = MongoClient('mongodb://localhost:27017/')
        # Select (or implicitly create) the target database.
        db = client['yjh']
        return db
    except Exception as e:
        # Bug fix: the original handler promised to log the connection error
        # but only re-raised; actually record it before propagating,
        # consistent with the connection-test script above.
        logging.error(f"连接失败: {e}")
        raise
def save_to_mongodb(db, top, name, score, link):
    """Insert one movie record into the 'douban' collection of *db*.

    Parameters:
        db: database handle supporting collection lookup by name.
        top: movie rank, as scraped text.
        name: movie title.
        score: rating, as scraped text.
        link: URL of the movie's detail page.
    """
    # Assemble the document and write it to the 'douban' collection
    # (created on first use, like a table in a relational database).
    record = {'top': top, 'name': name, 'score': score, 'link': link}
    db['douban'].insert_one(record)
# Fetch a web page.
def page_request(url, us, timeout=10):
    """Download *url* and return the response body decoded as UTF-8.

    Parameters:
        url: the page URL to fetch.
        us: dict of HTTP headers (carries the User-Agent).
        timeout: seconds to wait for the server before giving up.
            New backward-compatible parameter — requests.get without a
            timeout can otherwise block indefinitely.
    """
    response = requests.get(url, headers=us, timeout=timeout)
    html = response.content.decode('utf-8')
    return html
# Parse one Top-250 listing page.
def page_parse(html):
    """Extract rank, title, score and detail link for every movie on one
    Douban Top-250 page, save each record to MongoDB, and print it.

    Parameters:
        html: the page source as a UTF-8 string.
    """
    soup = BeautifulSoup(html, 'html.parser')
    # Each <li> under the ordered list is one movie entry.
    movies = soup.select("#content > div > div.article > ol > li")
    db = connect_to_mongodb()
    # Iterate the entries directly instead of indexing via range(len(...)).
    for movie in movies:
        # Movie title
        name = movie.select(".hd > a > span")[0].get_text()
        # Rank within the Top 250
        top = movie.select(".pic > em")[0].get_text()
        # Rating value
        score = movie.select(".bd > div > span.rating_num")[0].get_text()
        # Detail-page URL
        link = movie.select(".hd > a")[0].get('href')
        # Persist, then echo the record for progress feedback.
        save_to_mongodb(db, top, name, score, link)
        print(top + "-" + name + "-" + score + "-" + link)
if __name__ == '__main__':
    print("开始爬取")
    # Pretend to be a normal desktop browser so the site serves the page.
    ua = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.103 Safari/537.36",
    }
    # Walk all ten pages; each Douban page holds 25 movies, offset by 'start'.
    for page in range(10):
        target = f'https://movie.douban.com/top250?start={page * 25}'
        # Pause between requests to avoid hammering the server.
        time.sleep(1)
        page_parse(page_request(target, ua))