The database and the spreadsheet code paths are not cleanly separated yet; I'll tidy that up later.
Code first:
import urllib.request, urllib.error
import bs4
import re
import xlwt
import sqlite3
# detail-page link
findLink = re.compile(r'<a href="(.*?)">')  # compile a regular expression object describing the extraction rule
# poster image source
findImgSrc = re.compile(r'<img.*src="(.*?)"', re.S)  # re.S lets . match newlines as well
# film title
findTitle = re.compile(r'<span class="title">(.*)</span>')
# rating
findRating = re.compile(r'<span class="rating_num" property="v:average">(.*)</span>')
# number of ratings
findJudge = re.compile(r'<span>(\d*)人评价</span>')
# one-line summary
findInq = re.compile(r'<span class="inq">(.*)</span>')
# additional details
findBd = re.compile(r'<p class="">(.*?)</p>', re.S)
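# A minimal sketch (not part of the crawler) of how these precompiled patterns are applied.
# The snippet below is a hypothetical, simplified fragment of one list item:
#   sample = '<span class="title">肖申克的救赎</span>\n<span class="rating_num" property="v:average">9.7</span>'
#   re.findall(findTitle, sample)   # -> ['肖申克的救赎']
#   re.findall(findRating, sample)  # -> ['9.7']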
def main():
    baseurl = "https://movie.douban.com/top250?start="
    # 1. Crawl the pages
    dataList = getData(baseurl)
    # savepath = "豆瓣电影Top250.xls"
    dbpath = "movie.db"
    # 3. Save the data
    # saveData(dataList, savepath)   # Excel output (disabled)
    saveDataDB(dataList, dbpath)     # SQLite output
# Crawl and parse the pages
def getData(baseurl):
    dataList = []
    for i in range(0, 250, 25):  # 10 pages, 25 films per page
        url = baseurl + str(i)
        html = askURL(url)  # fetch the page source
        # 2. Parse the data page by page
        soup = bs4.BeautifulSoup(html, "html.parser")
        for item in soup.find_all('div', class_="item"):  # every matching <div class="item"> block
            data = []  # all information about one film
            item = str(item)
            # detail-page link
            link = re.findall(findLink, item)[0]  # apply the precompiled pattern to the item HTML
            data.append(link)
            imgSrc = re.findall(findImgSrc, item)[0]
            data.append(imgSrc)
            titles = re.findall(findTitle, item)  # a film may have only a Chinese title and no foreign one
            if len(titles) == 2:
                ctitle = titles[0]
                data.append(ctitle)
                otitle = titles[1].replace("/", "").strip()  # drop the "/" separator and surrounding spaces
                data.append(otitle)
            else:
                data.append(titles[0])
                data.append(' ')  # placeholder for the missing foreign title
            rating = re.findall(findRating, item)[0]
            data.append(rating)
            judge = re.findall(findJudge, item)[0]
            data.append(judge)
            inq = re.findall(findInq, item)
            if len(inq) != 0:
                inq = inq[0].replace("。", "")  # drop the trailing full stop
                data.append(inq)
            else:
                data.append(" ")  # placeholder for a missing summary
            bd = re.findall(findBd, item)[0]
            bd = re.sub(r'<br(\s+)?/?>(\s+)?', " ", bd)  # strip <br> / <br/> tags
            bd = re.sub('/', " ", bd)  # replace the "/" separators with spaces
            data.append(bd.strip())  # trim leading/trailing whitespace
            dataList.append(data)
    return dataList
# Fetch the page content of the given URL
def askURL(url):
    # pretend to be a Windows browser
    head = {  # browser headers sent along with the request to Douban
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36"
    }
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode('utf-8')
        # print(html)
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html
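# Note (optional hardening, not used above): urlopen blocks indefinitely by default; passing a
# timeout, e.g. urllib.request.urlopen(request, timeout=10), makes a stalled request raise an
# exception instead of hanging the whole crawl.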
# 3. Save the data to an Excel workbook
def saveData(dataList, savepath):
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)  # create the workbook
    sheet = book.add_sheet('豆瓣电影top250', cell_overwrite_ok=True)  # create the worksheet
    col = ("电影详情链接", "图片链接", "中文名", "外国名", "评分", "评价数", "概述", "相关信息")
    for i in range(8):
        sheet.write(0, i, col[i])  # column headers
    for i in range(250):
        print("第%d条" % (i + 1))
        data = dataList[i]
        for j in range(0, 8):
            sheet.write(i + 1, j, data[j])  # one cell per field
    book.save(savepath)
# Save the data to a SQLite database
def saveDataDB(dataList, dbpath):
    init_db(dbpath)
    conn = sqlite3.connect(dbpath)
    cur = conn.cursor()
    for data in dataList:
        for index in range(len(data)):
            if index == 4 or index == 5:  # score and rating count stay unquoted numbers
                continue
            data[index] = '"' + data[index] + '"'  # wrap text fields in quotes so they can be spliced into the SQL string
        sql = '''
            insert into movie250(
            info_link,pic_link,cname,oname,score,rated,introduction,info)
            values(%s)''' % ",".join(data)
        cur.execute(sql)
        conn.commit()
    cur.close()
    conn.close()
def init_db(dbpath):
    sql = '''
        create table if not exists movie250
        (id integer primary key autoincrement,
        info_link text,
        pic_link text,
        cname varchar,
        oname varchar,
        score numeric,
        rated numeric,
        introduction text,
        info text);
    '''  # create the table; "if not exists" lets the script be re-run without a crash
    conn = sqlite3.connect(dbpath)
    cursor = conn.cursor()
    cursor.execute(sql)
    conn.commit()
    conn.close()
if __name__ == '__main__':
    main()
    # init_db("movietest.db")  # test
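The string-splicing INSERT in saveDataDB works on this data set, but it breaks as soon as a title or summary contains a double quote. A safer variant hands the raw values to sqlite3 as parameters and lets the driver do the quoting; the sketch below (a hypothetical saveDataDB2, not what the script above calls) shows the idea:

def saveDataDB2(dataList, dbpath):
    # hypothetical parameterized alternative to saveDataDB
    init_db(dbpath)
    conn = sqlite3.connect(dbpath)
    cur = conn.cursor()
    sql = '''insert into movie250
             (info_link, pic_link, cname, oname, score, rated, introduction, info)
             values (?, ?, ?, ?, ?, ?, ?, ?)'''
    cur.executemany(sql, dataList)  # sqlite3 escapes each of the 8 values itself
    conn.commit()
    cur.close()
    conn.close()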