一、Create a crawler class
import json
import requests

from util.getConnection import GetConnection

db = GetConnection()


class GetDouBanMovie(object):
    def __init__(self, start):  # start: offset of the first item to fetch
        self.url = "https://movie.douban.com/j/search_subjects"
        self.params = {
            "type": "movie",
            "tag": "科幻",          # the "sci-fi" tag
            "sort": "recommend",
            "page_limit": 20,       # number of items per request
            "page_start": start
        }
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'
        }

    def get_data(self):
        response = requests.get(url=self.url, params=self.params, headers=self.headers)
        return response

    def run(self):
        resp = self.get_data()
        # Parse the response body into a JSON object (a dict)
        data = resp.json()
        self.parse_data(data)
        print(data)
        # Remember to close the response connection
        resp.close()

    def parse_data(self, dict_data):
        # Get the value of the key "subjects"
        movies = dict_data['subjects']
        for movie in movies:  # each movie is a dict with one movie's info
            movie_title = movie['title']
            movie_url = movie['url']
            movie_img = movie['cover']
            data = [movie_title, movie_url, movie_img]
            db.save_data(data)
if __name__ == "__main__":
    start = int(input("Enter the start page: "))
    end = int(input("Enter the end page: "))
    for i in range(start, end):  # note: the end page itself is excluded
        # page_start is an item offset rather than a page number,
        # so convert: page 1 -> offset 0, page 2 -> offset 20, and so on
        get_movie = GetDouBanMovie((i - 1) * 20)
        get_movie.run()
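For reference, each element of the subjects list returned by this API is a dict roughly shaped like the sketch below. The keys are exactly the ones read in parse_data above; the values are placeholders, not real response data:

# Rough shape of one element of data['subjects'] (placeholder values only)
movie = {
    "title": "Some sci-fi movie",                          # movie title
    "url": "https://movie.douban.com/subject/xxxxxxx/",    # detail-page URL
    "cover": "https://img1.doubanio.com/.../xxxx.jpg",     # poster image URL
    # ...the response contains a few more fields, which this crawler ignores
}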
二、Build a utility class: getConnection
Just like Java's getConnection method, whenever you need to connect to MySQL later you can simply call this class instead of writing the same connection code over again.
import threading

import pymysql


class GetConnection:
    # Implement the singleton pattern
    _instance_lock = threading.Lock()

    def __new__(cls, *args, **kwargs):
        if not hasattr(GetConnection, "_instance"):
            with GetConnection._instance_lock:
                # Check again inside the lock so two threads cannot both create an instance
                if not hasattr(GetConnection, "_instance"):
                    GetConnection._instance = object.__new__(cls)
        return GetConnection._instance

    def __init__(self):
        # __init__ runs on every GetConnection() call even though __new__ always
        # returns the same instance, so only open the connection the first time
        if hasattr(self, "conn"):
            return
        # Open the database connection (replace the password with your own)
        self.conn = pymysql.connect(host='127.0.0.1', user='root', password='your_password',
                                    database='pymysql', charset='utf8')
        self.cursor = self.conn.cursor()

    def save_data(self, data):
        # Insert the scraped data into MySQL; the primary key is auto-increment,
        # so only the three data columns need to be passed
        sql = 'insert into douban_science_movie(movie_title,movie_url,movie_img_url) values(%s,%s,%s)'
        try:
            self.cursor.execute(sql, data)
            self.conn.commit()
        except Exception as e:
            print('Insert failed:', e)
            self.conn.rollback()

    def __del__(self):
        self.cursor.close()
        self.conn.close()
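To see the singleton behaviour in action, here is a quick check (not part of the crawler itself): constructing GetConnection twice returns the same object, so only one database connection is opened no matter how many modules call it.

from util.getConnection import GetConnection

db1 = GetConnection()
db2 = GetConnection()
# Both names refer to the exact same instance, so only one connection exists
print(db1 is db2)  # True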
三、Create the corresponding table in MySQL
Since the scraped data is stored in MySQL, the corresponding table has to be created in MySQL first.
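The table definition itself isn't included here, so below is a minimal sketch of what it might look like, inferred from the columns used in save_data() (movie_title, movie_url, movie_img_url) plus an auto-increment primary key as mentioned in the comments above. The column types and lengths are assumptions; adjust them to your data.

import pymysql

# Connection parameters assumed to match those used in GetConnection above
conn = pymysql.connect(host='127.0.0.1', user='root', password='your_password',
                       database='pymysql', charset='utf8')
try:
    with conn.cursor() as cursor:
        # Column names come from the INSERT statement in save_data();
        # the id column and the VARCHAR lengths are assumptions
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS douban_science_movie (
                id INT AUTO_INCREMENT PRIMARY KEY,
                movie_title VARCHAR(255),
                movie_url VARCHAR(255),
                movie_img_url VARCHAR(500)
            ) DEFAULT CHARSET = utf8
        """)
    conn.commit()
finally:
    conn.close()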
四、Summary
When scraping data, first analyze the structure of the site to see which parameters are needed. Take Douban as an example: its data is loaded by partial refresh (AJAX), so I found the corresponding parameters directly under the XHR tab of the Network panel and could then quickly crawl the data I wanted. This is my first blog post, so please point out anything that could be improved.