爬虫新手要点:
1. 了解 requests 库的基本用法
2. XPath 的书写方法(网上搜索即可)
3. 保存文件时指定 encoding='utf-8'
import json
import requests
from lxml import etree
def getPage(n):
    """Fetch page *n* (0-based) of the Maoyan top-100 board and return its HTML.

    Each board page lists 10 movies, so the URL offset is n * 10.

    Raises:
        requests.HTTPError: if the server answers with a 4xx/5xx status.
        requests.Timeout: if the server does not respond within 10 seconds.
    """
    url = f'https://maoyan.com/board/4?offset={n*10}'
    # Spoof a desktop-browser User-Agent; the site rejects the default
    # python-requests agent. (Copy it from the Network tab of your browser's
    # developer tools.)
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.116 Safari/537.36'}
    # timeout prevents the crawler from hanging forever on a dead connection;
    # raise_for_status fails fast instead of parsing an error page as data.
    r = requests.get(url, headers=header, timeout=10)
    r.raise_for_status()
    return r.text
def parse(text):
    """Yield one ``{'name': ..., 'updatatime': ...}`` dict per movie.

    *text* is the raw HTML of a Maoyan board page.
    """
    doc = etree.HTML(text)
    titles = doc.xpath('//div[@class="movie-item-info"]/p[@class="name"]/a/@title')
    release_dates = doc.xpath('//p[@class="releasetime"]/text()')
    # zip stops at the shorter list, so titles and dates stay aligned.
    for title, released in zip(titles, release_dates):
        # NOTE(review): the key 'updatatime' looks like a typo for
        # 'updatetime', but it defines the JSON output schema, so it is
        # deliberately kept unchanged.
        yield {'name': title, 'updatatime': released}
def savefile(data):
    """Append *data* to movie.json as one JSON line (JSON-Lines style).

    ensure_ascii=False keeps Chinese characters readable in the output file.
    """
    line = json.dumps(data, ensure_ascii=False)
    with open('movie.json', 'a', encoding='utf-8') as out:
        out.write(line + '\n')
def run():
    """Crawl all 10 board pages and append every movie record to movie.json."""
    for page in range(10):
        for record in parse(getPage(page)):
            savefile(record)
# Script entry point: crawl the 10 board pages and write movie.json.
if __name__ == "__main__":
    run()
运行结果:抓取到的数据输出保存为 movie.json 文件。