# -*- coding: utf-8 -*-
import urllib.request
from bs4 import BeautifulSoup
import os
import time
import xlrd
import xlwt
from xlutils.copy import copy
def getDatas():
    """Scrape Douban Top-250 movie entries and save them to an .xls file.

    Reads a saved HTML snapshot (a local ``file://`` URL; the live site URL
    is kept in a comment), extracts rank, title and poster-image URL for
    every ``<div class="item">`` entry, and writes them below a header row
    into ``output/move<YYYY-MM-DD>.xls``.

    Returns: None. Side effects: creates the ``output`` directory if needed
    and writes/overwrites the dated .xls file.
    """
    # url="https://movie.douban.com/top250"
    url = "file:///E:/scrapy/2018-04-27/movie/movie.html"
    header = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"}
    req = urllib.request.Request(url=url, headers=header)
    # Close the HTTP/file response deterministically once it has been parsed
    # (the original leaked the response object).
    with urllib.request.urlopen(req) as res:
        response = BeautifulSoup(res, 'html.parser')
    # Each movie entry lives in a <div class="item"> container.
    datas = response.find_all('div', {'class': 'item'})

    folder_name = "output"
    # makedirs(exist_ok=True) is atomic w.r.t. the exists()+mkdir() race.
    os.makedirs(folder_name, exist_ok=True)
    current_time = time.strftime('%Y-%m-%d', time.localtime())
    # NOTE(review): "move" (not "movie") kept verbatim — callers/consumers may
    # rely on the existing file name; confirm before renaming.
    file_name = "move" + current_time + ".xls"
    file_path = folder_name + "/" + file_name

    workbook = xlwt.Workbook(encoding="utf-8")
    sheet = workbook.add_sheet(u'豆瓣数据')
    head = ['排行', '标题', '图片路径']  # rank / title / picture URL
    for col, caption in enumerate(head):
        sheet.write(0, col, caption)

    # Write every data row into the SAME in-memory workbook and save once at
    # the end.  The original re-opened the file with xlrd, copied it with
    # xlutils.copy and re-saved it for EVERY row — O(n^2) file rewriting.
    for row, item in enumerate(datas, start=1):
        rank = item.find('div', {'class': 'pic'}).find('em').get_text()
        title = (item.find('div', {'class': 'info'})
                     .find('div', {'class': 'hd'})
                     .find('a')
                     .find('span', {'class': 'title'})
                     .get_text())
        pic_url = item.find('div', {'class': 'pic'}).find('a').find('img').get('src')
        for col, value in enumerate([rank, title, pic_url]):
            sheet.write(row, col, value)
    workbook.save(file_path)
# Run the scrape only when executed as a script, not when imported.
if __name__=="__main__":
    getDatas()