# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup       # parse the page and extract data
import re                           # regular expressions for text matching
import urllib.request, urllib.error # open a URL and fetch the page data
import xlwt                         # Excel output
import sqlite3                      # SQLite database access
def main():
    baseurl = "https://movie.douban.com/top250?start="
    # 1. Scrape the pages (step 2, per-item parsing, happens inside getData)
    datalist = getData(baseurl)
    # savepath = ".\\Douban_Movie_Top250.xls"
    dbpath = "movie.db"
    # 3. Save the data: to Excel via saveData, or to SQLite via saveData2Db
    # saveData(datalist, savepath)
    saveData2Db(datalist, dbpath)
# Rule for a film's detail-page link
findLink = re.compile(r'<a href="(.*?)">')  # a compiled pattern object describing the string rule
# Film poster image
findImgSrc = re.compile(r'<img.*src="(.*?)"', re.S)  # re.S lets "." match newlines too
# Film title
findTitle = re.compile(r'<span class="title">(.*)</span>')
# Film rating
findRating = re.compile(r'<span class="rating_num" property="v:average">(.*)</span>')
# Number of people who rated the film
findJudge = re.compile(r'<span>(\d*)人评价</span>')
# One-line summary
findInq = re.compile(r'<span class="inq">(.*)</span>')
# Related details (director, cast, year, genre, ...)
findBd = re.compile(r'<p class="">(.*?)</p>', re.S)
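# A quick sanity check you can run in a REPL (the fragments below are made-up
# examples in the Douban markup style, not real page output):
#   >>> re.findall(findTitle, '<span class="title">肖申克的救赎</span>')
#   ['肖申克的救赎']
#   >>> re.findall(findRating, '<span class="rating_num" property="v:average">9.7</span>')
#   ['9.7']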
# Scrape and parse all ten pages
def getData(baseurl):
    datalist = []
    for i in range(0, 10):  # call the page-fetching function ten times
        url = baseurl + str(i * 25)
        html = askURL(url)  # the page source we fetched
        # 2. Parse the page, one film entry at a time
        soup = BeautifulSoup(html, "html.parser")
        for item in soup.find_all('div', class_="item"):  # every matching <div class="item"> block, as a list
            data = []  # all fields for one film
            item = str(item)
            # Detail-page link
            link = re.findall(findLink, item)[0]  # re.findall applies the compiled rule to this block
            data.append(link)
            # Poster image
            ImgSrc = re.findall(findImgSrc, item)[0]
            data.append(ImgSrc)
            # Titles: a Chinese title, optionally followed by a foreign one
            titles = re.findall(findTitle, item)
            if len(titles) == 2:
                ctitle = titles[0]  # Chinese title
                data.append(ctitle)
                otitle = titles[1].replace("/", "")  # foreign title, stray "/" removed
                data.append(otitle)
            else:
                data.append(titles[0])
                data.append(' ')  # no foreign title: leave the field blank
            # Rating
            rating = re.findall(findRating, item)[0]
            data.append(rating)
            # Number of ratings
            judgenum = re.findall(findJudge, item)[0]
            data.append(judgenum)
            # One-line summary (not every film has one)
            inq = re.findall(findInq, item)  # note: findInq, not findBd
            if len(inq) != 0:
                inq = inq[0].replace("。", "")  # drop the trailing full stop
                data.append(inq)
            else:
                data.append(" ")
            # Related details
            bd = re.findall(findBd, item)[0]
            bd = re.sub(r'<br(\s+)?/>(\s+)?', " ", bd)  # strip <br/> tags
            data.append(bd.strip())  # trim surrounding whitespace
            datalist.append(data)  # one fully processed film record
    return datalist
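# Shape of one datalist record (illustrative values, not real output):
#   ['https://movie.douban.com/subject/.../', 'https://....jpg',
#    '肖申克的救赎', 'The Shawshank Redemption', '9.7', '2000000',
#    '希望让人自由', '导演: ... 1994 / 美国 / 犯罪 剧情']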
# Fetch the page content of a given URL
def askURL(url):
    head = {  # browser-like header sent with the request to the Douban server
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36 Edg/87.0.664.66"
    }
    # The User-Agent tells the Douban server what kind of machine/browser we
    # are (essentially, what kind of file content we are able to receive)
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html
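# Example usage (start=0 is the first page; each page holds 25 films):
#   html = askURL("https://movie.douban.com/top250?start=0")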
# Save the data to an Excel workbook
def saveData(datalist, savepath):
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)  # create a workbook object
    sheet = book.add_sheet('Douban Movie Top250', cell_overwrite_ok=True)  # create a worksheet
    col = ("Detail link", "Image link", "Chinese title", "Foreign title",
           "Rating", "Rating count", "Summary", "Related info")
    for i in range(0, 8):
        sheet.write(0, i, col[i])  # header row
    for i in range(len(datalist)):  # one row per film (250 in total)
        print("Record %d" % (i + 1))
        data = datalist[i]
        for j in range(0, 8):
            sheet.write(i + 1, j, data[j])
    book.save(savepath)  # write the workbook to disk
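# A quick spot-check of the .xls output (a sketch; assumes the xlrd package
# is installed, it is not used anywhere else in this script):
#   import xlrd
#   sheet = xlrd.open_workbook(savepath).sheet_by_index(0)
#   print(sheet.row_values(1))  # first data row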
# Save the data to a SQLite database
def saveData2Db(datalist, dbpath):
    init_db(dbpath)
    conn = sqlite3.connect(dbpath)
    cur = conn.cursor()
    # Use a parameterized insert so quotes inside titles or summaries cannot
    # break the statement
    sql = '''
        insert into movie250 (
            info_link, pic_link, cname, ename, score, rated, introduction, info)
        values (?, ?, ?, ?, ?, ?, ?, ?)'''
    for data in datalist:
        cur.execute(sql, data)
    conn.commit()
    cur.close()
    conn.close()
    print("Saved %d records to %s" % (len(datalist), dbpath))
def init_db(dbpath):
    # Create the data table (skipped if it already exists, so the script can
    # be re-run without crashing)
    sql = """
        create table if not exists movie250(
            id integer primary key autoincrement,
            info_link text,
            pic_link text,
            cname varchar,
            ename varchar,
            score numeric,
            rated numeric,
            introduction text,
            info text
        )
    """
    conn = sqlite3.connect(dbpath)
    cursor = conn.cursor()
    cursor.execute(sql)
    conn.commit()
    conn.close()
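# A minimal verification sketch (not part of the original flow); the column
# names follow the movie250 schema created in init_db above:
def queryTopRated(dbpath, limit=3):
    conn = sqlite3.connect(dbpath)
    cur = conn.cursor()
    cur.execute("select cname, score, rated from movie250 order by score desc limit ?", (limit,))
    rows = cur.fetchall()
    cur.close()
    conn.close()
    return rows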
if __name__ == "__main__":  # runs only when executed as a script
    # call the entry function
    main()
    print("Scraping finished")