分享一个爬虫小项目
豆瓣网电影top250
适用对象:初学者
要点知识:
① 数据过滤(正则)
② 分页
③ 数据保存
细节请自行阅读代码及注释
# -*- coding: utf-8 -*-
#@Time: 2021/1/28 14:32
#@Author:小魁
#@File:DouBan_Test.py
#@Software:PyCharm
import sys
# 正则表达式,进行文字匹配
import re
# 指定url,获取网页数据
import urllib.request,urllib.error,urllib.response
# 进行excel操作
import xlwt
# 获取数据解析网页
from bs4 import BeautifulSoup
# 数据入库
import sqlite3
def main():
    """Scrape the Douban Top250 movie list and persist it to SQLite.

    Fetches all 10 pages via GetData(), then writes the rows into
    movie.db via SaveDatas2DB(). Saving to Excel is kept as a
    commented-out alternative.
    """
    baseurl = "https://movie.douban.com/top250?start="
    datalist = GetData(baseurl)
    # savepath = "豆瓣电影Top250.xls"
    dbpath = "movie.db"
    # Alternative: save to an Excel sheet instead of SQLite.
    # SavePath(datalist, savepath)
    # Save everything to the SQLite database.
    SaveDatas2DB(datalist, dbpath)
    # BUGFIX: removed the trailing AskUrl("https://movie.douban.com/top250?start=")
    # call — it re-fetched the first page after all work was done and
    # discarded the result.
# Module-level compiled regex patterns used by GetData() to pull fields
# out of each movie's HTML snippet.
# Detail-page link
findLink = re.compile(r'<a href="(.*?)">')
# Poster image URL; re.S lets `.` match across newlines
findImgSrc = re.compile(r'<img.*src="(.*?)"',re.S)
# Movie title (may match twice: Chinese + foreign title)
findTitle = re.compile(r'<span class="title">(.*)</span>')
# Rating score
findRating = re.compile(r'<span class="rating_num" property="v:average">(.*)</span>')
# Number of raters
findJudge = re.compile(r'<span>(\d*)人评价</span>')
# One-line movie summary
findInq = re.compile(r'<span class="inq">(.*)</span>')
# Related info paragraph (director, year, genre, ...)
findDb = re.compile(r'<p class="">(.*?)</p>',re.S)
# 获取数据
def GetData(baseurl):
    """Scrape all 250 movies (10 pages x 25) from the Douban Top250 list.

    baseurl: the listing URL ending in `start=` (page offset is appended).
    Returns a list of per-movie lists:
        [link, img_src, cname, ename, rating, judge_count, inq, info]
    """
    datalist = []
    for page in range(10):  # 10 pages, 25 movies per page => 250 total
        url = baseurl + str(page * 25)
        html = AskUrl(url)  # raw page source
        soup = BeautifulSoup(html, "html.parser")
        # `class_` (trailing underscore) avoids the `class` keyword.
        for item in soup.find_all('div', class_="item"):
            data = []  # all fields for one movie
            item = str(item)
            data.append(re.findall(findLink, item)[0])    # detail-page link
            data.append(re.findall(findImgSrc, item)[0])  # poster image URL
            # A movie may have both a Chinese and a foreign title, or only one.
            title = re.findall(findTitle, item)
            if len(title) == 2:
                data.append(title[0])                    # Chinese title
                data.append(title[1].replace("/", ""))   # strip leading "/"
            else:
                data.append(title[0])
                data.append('')  # no foreign title
            data.append(re.findall(findRating, item)[0])  # rating score
            data.append(re.findall(findJudge, item)[0])   # rater count
            # Not every movie has a one-line summary.
            inq = re.findall(findInq, item)
            if inq:
                data.append(inq[0].replace("。", ""))
            else:
                data.append("")
            db = re.findall(findDb, item)[0]
            # BUGFIX: raw strings so `\s` is a regex character class, not an
            # invalid string escape (SyntaxWarning on modern Python).
            db = re.sub(r'<br(\s+)?/>(\s+)', "", db)  # drop <br/> tags
            db = re.sub(r'/', "", db)                 # drop "/" separators
            data.append(db.strip())                   # trim whitespace
            # One finished movie record.
            datalist.append(data)
    return datalist
# 获取指定页面的内容
def AskUrl(url):
    """Fetch `url` and return the page body decoded as UTF-8.

    On any URLError the error is printed and "" is returned, so callers
    that feed the result straight to BeautifulSoup keep working.
    """
    head = {
        # Pretend to be a normal browser; Douban rejects the default
        # urllib User-Agent.
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36"
    }
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        # BUGFIX: the original returned e.code / e.reason here (an int or
        # a reason object), which crashed callers expecting an HTML str.
        # Report the failure and fall through to return "" instead.
        if hasattr(e, "code"):
            print("HTTP error code:", e.code)
        if hasattr(e, "reason"):
            print("Failure reason:", e.reason)
    return html
# 保存数据到excel
def SavePath(datalist, savepath):
    """Write the scraped movie rows to an .xls workbook at `savepath`.

    datalist: list of 8-field movie rows produced by GetData().
    """
    workbook = xlwt.Workbook(encoding='utf-8', style_compression=0)
    worksheet = workbook.add_sheet('豆瓣电影Top250', cell_overwrite_ok=True)
    # Header row.
    col = ("电影详情链接","图片链接","中文名","外文名","评分","评价数","概括","相关信息")
    for i, name in enumerate(col):
        worksheet.write(0, i, name)
    # BUGFIX: iterate the rows we actually scraped instead of a
    # hard-coded range(250), which raised IndexError whenever fewer
    # rows were collected (e.g. a failed page fetch).
    for i, data in enumerate(datalist):
        print("第%d条" % (i + 1))  # progress: row counter
        for j, value in enumerate(data):
            worksheet.write(i + 1, j, value)  # +1: row 0 is the header
    workbook.save(savepath)
# 保存数据到数据库sqlite
def SaveDatas2DB(datalist, dbpath):
    """Insert every scraped movie row into the movie250 table at `dbpath`.

    Creates the table first via init_db(). Each row in `datalist` must
    have the 8 fields produced by GetData().
    """
    init_db(dbpath)
    conn = sqlite3.connect(dbpath)
    try:
        cur = conn.cursor()
        # BUGFIX: use a parameterized query instead of splicing values
        # into the SQL string. The old code wrapped fields in double
        # quotes by hand, which broke on any value containing a quote
        # and also mutated the caller's lists in place.
        sql = '''
            insert into movie250(
            infou_link, pic_link, cname, ename, score, rated, instroduction, info
            ) values (?, ?, ?, ?, ?, ?, ?, ?)
        '''
        for data in datalist:
            cur.execute(sql, data)
        conn.commit()
        cur.close()
    finally:
        # Always release the connection, even if an insert fails.
        conn.close()
# 创建数据库
def init_db(dbpath):
    """Create the movie250 table in the SQLite database at `dbpath`.

    Safe to call repeatedly: uses `if not exists`, so a second run no
    longer crashes on the already-existing table (BUGFIX — the original
    plain `create table` raised OperationalError on rerun).
    """
    sql = '''
        create table if not exists movie250
        (
        id integer primary key autoincrement,
        infou_link text,
        pic_link text,
        cname varchar ,
        ename varchar ,
        score numeric ,
        rated numeric ,
        instroduction text,
        info text
        )
    '''
    conn = sqlite3.connect(dbpath)
    try:
        cursor = conn.cursor()
        cursor.execute(sql)
        conn.commit()
        cursor.close()
    finally:
        # Close the connection even if execute() raises.
        conn.close()
# 当程序被执行时,调用函数,使流程都在以下可控范围内被执行
if __name__ == "__main__":
main()
# init_db("Tmov250.db")
print("爬取完毕")