import requests
import re
import pymysql
from bs4 import BeautifulSoup
def gethtml(url):
    """Fetch one Douban page and return its decoded HTML text.

    Returns '' on any request failure (connection error, timeout, or an
    HTTP error status) so callers can treat the page as simply empty.
    """
    headers = {
        'Host': 'movie.douban.com',
        'Referer': 'https://movie.douban.com/top250?start=0&filter=',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
    }
    try:
        # timeout prevents the crawler from hanging forever on a dead connection.
        r = requests.get(url, headers=headers, timeout=10)
        # Treat HTTP error responses (403, 5xx, ...) as failures instead of
        # silently parsing an error page.
        r.raise_for_status()
        r.encoding = 'utf-8'
        return r.text
    except requests.RequestException:
        # Narrow except: the original bare `except:` also swallowed
        # KeyboardInterrupt and genuine programming errors.
        return ''
def writemysql(top, title, p, infourl, score, peoples):
    """Insert one movie record into the doubanTOP250 table and commit.

    Uses a parameterized query so the driver escapes the scraped text —
    the original %-interpolated SQL broke (and was injectable) whenever a
    title or description contained a double quote.
    """
    global db
    with db.cursor() as cursor:
        # VALUES is the standard SQL keyword (the original wrote `value`,
        # which MySQL tolerates but other tools do not).
        cursor.execute(
            'INSERT INTO doubanTOP250 VALUES (%s,%s,%s,%s,%s,%s)',
            (top, title, p, infourl, score, peoples),
        )
    db.commit()
def getinfo(html):
    """Parse one Top-250 listing page and store every movie entry.

    For each ``div.item`` extracts: rank, title, the staff/metadata
    paragraph, detail-page URL, rating, and vote-count text, then prints
    the record and writes it to MySQL via writemysql().
    """
    soup = BeautifulSoup(html, 'html.parser')
    for item in soup.find_all('div', attrs={'class': 'item'}):
        top = int(item.find('em', attrs={'class': ''}).string)
        title = item.find('span', attrs={'class': 'title'}).string
        # Collapse runs of whitespace in the metadata paragraph to single spaces.
        # (Raw string: '\s' in a non-raw literal is an invalid escape in
        # modern Python.)
        p = re.sub(r'\s\s+', ' ', item.find('p', attrs={'class': ""}).text)
        infourl = item.find('a')['href']
        score = float(item.find('span', attrs={'class': "rating_num"}).string)
        # The vote count is the last <span> inside the star block
        # (e.g. "123456人评价").
        peoples = item.find('div', attrs={'class': 'star'}).find_all('span')[-1].string
        print(top, title, p, infourl, score, peoples)
        writemysql(top, title, p, infourl, score, peoples)
def main(url):
    """Crawl every Top-250 page starting from *url*, then close the DB.

    Follows the "后页" (next page) link on each page; when no such link is
    found we are on the last page and the loop ends.
    """
    while True:
        html = gethtml(url)
        getinfo(html)
        # Explicit emptiness check instead of the original bare
        # `except: break`, which silently terminated the crawl on ANY
        # error, not just a missing next-page link.
        next_links = re.findall(r'<a href="(.+?)" >后页.*?</a>', html)
        if not next_links:
            break
        url = 'https://movie.douban.com/top250' + next_links[0]
    db.commit()
    db.close()
# Entry point: connect to MySQL, create the database/table if needed, crawl.
url = 'https://movie.douban.com/top250'
# NOTE(review): credentials are hard-coded; move them to env vars/config
# before real use. utf8mb4 is MySQL's actual full-range UTF-8 encoding.
db = pymysql.connect(host='localhost', port=3306, user='root',
                     password='root', charset='utf8mb4')
# IF NOT EXISTS makes the script safely re-runnable — the original
# `CREATE DATABASE douban` crashed on every run after the first.
db.query('CREATE DATABASE IF NOT EXISTS douban')
db.query('USE douban')
db.query('CREATE TABLE IF NOT EXISTS doubanTOP250 (id int,title varchar(200),info varchar(300),url varchar(100),score decimal(3,1),peoples varchar(10))')
db.query('alter table doubanTOP250 CONVERT TO CHARACTER SET utf8mb4;')
main(url)