A simple crawler exercise
# -*- coding: utf-8 -*-
# @Time     : 2020/7/16 0:18
# @Author   : LZR
# @File     : 爬虫训练.py
# @Software : PyCharm
import re                        # regular expressions for extracting fields
import urllib.request            # fetching page source
import urllib.error
import xlwt                      # writing the Excel (.xls) output
from bs4 import BeautifulSoup    # HTML parsing
def main():
    baseurl = 'https://movie.douban.com/top250?start='
    datalist = getData(baseurl)
    savepath = 'doubantop250.xls'
    saveData(datalist, savepath)
# Compiled regular expressions, one per field we extract
findLink = re.compile(r'<a href="(.*?)">')                   # link to the detail page
findImgSrc = re.compile(r'<img.*src="(.*?)"', re.S)          # poster image URL
findTitle = re.compile(r'<span class="title">(.*)</span>')   # title
findRating = re.compile(r'<span class="rating_num" property="v:average">(.*)</span>')  # rating
findJudge = re.compile(r'<span>(\d*)人评价</span>')           # number of ratings
findInq = re.compile(r'<span class="inq">(.*)</span>')       # one-line summary
findBd = re.compile(r'<p class="">(.*?)</p>', re.S)          # related info
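
# A quick way to sanity-check these patterns against a snippet of markup
# (a minimal sketch; both snippets and the vote count are made-up examples,
# not real Douban page source):
#
#   re.findall(findTitle, '<span class="title">肖申克的救赎</span>')  # ['肖申克的救赎']
#   re.findall(findJudge, '<span>2139678人评价</span>')               # ['2139678']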
def getData(baseurl):                       # scrape and parse all pages
    datalist = []
    for i in range(0, 10):                  # 10 pages, 25 movies each: start=0,25,...,225
        url = baseurl + str(i * 25)
        html = askURL(url)                  # fetch this page's source
        # Parse the page; each <div class="item"> is one movie
        soup = BeautifulSoup(html, "html.parser")
        for item in soup.find_all('div', class_="item"):
            data = []                       # all fields of one movie
            item = str(item)
            # Link to the movie's detail page, via the regexes above
            link = re.findall(findLink, item)[0]
            data.append(link)
            imgSrc = re.findall(findImgSrc, item)[0]
            data.append(imgSrc)
            titles = re.findall(findTitle, item)
            if len(titles) == 2:            # both a Chinese and a foreign title
                data.append(titles[0])
                otitle = titles[1].replace("/", "")  # drop the separator slash
                data.append(otitle)
            else:
                data.append(titles[0])
                data.append(' ')            # no foreign title: leave it blank
            rating = re.findall(findRating, item)[0]
            data.append(rating)
            judgeNum = re.findall(findJudge, item)[0]
            data.append(judgeNum)
            inq = re.findall(findInq, item)
            if len(inq) != 0:
                data.append(inq[0].replace("。", ""))  # strip the trailing full stop
            else:
                data.append(" ")
            # bd = re.findall(findBd, item)[0]
            # bd = re.sub(r'<br(\s+)?/>(\s+)?', " ", bd)  # drop <br/> tags
            # bd = re.sub('/', " ", bd)
            # data.append(bd.strip())
            datalist.append(data)           # one movie done
    return datalist
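
# Each entry of datalist holds one movie's fields, in the same order as the
# columns written by saveData below (values made up for illustration):
#
#   ['https://movie.douban.com/subject/1292052/',   # detail link
#    'https://img.example.com/poster.jpg',          # image link
#    '肖申克的救赎', 'The Shawshank Redemption',     # Chinese / foreign title
#    '9.7', '2139678', '希望让人自由']               # rating, votes, summary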
# Save the scraped data to an Excel sheet
def saveData(datalist, savepath):
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)
    sheet = book.add_sheet('豆瓣电影Top250', cell_overwrite_ok=True)
    col = ('电影详情链接', '图片链接', '影片中文名', '影片外文名', '评分', '评价数', '概况')
    for i in range(0, 7):
        sheet.write(0, i, col[i])           # header row
    for i in range(0, len(datalist)):       # 250 movies
        print("Record %d" % (i + 1))
        data = datalist[i]
        for j in range(0, 7):
            sheet.write(i + 1, j, data[j])
    book.save(savepath)
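
# To spot-check the saved workbook you could read it back with xlrd
# (a minimal sketch, assuming xlrd is installed):
#
#   import xlrd
#   sheet = xlrd.open_workbook('doubantop250.xls').sheet_by_index(0)
#   print(sheet.nrows)          # 251: one header row plus 250 movies
#   print(sheet.row_values(1))  # fields of the first movie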
def askURL(url):                            # fetch one page and return its HTML
    head = {
        # User-Agent header: tells Douban's server what kind of client we are
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36 Edg/83.0.478.61"
    }
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html
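
# askURL can also be called on its own to fetch a single page, e.g. the
# first 25 movies (a minimal sketch; just prints the size of the source):
#
#   html = askURL('https://movie.douban.com/top250?start=0')
#   print(len(html))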
if __name__ == '__main__':
    main()
    print("Scraping finished")