import requests
from lxml import etree
from fake_useragent import UserAgent
# Scrape one NetEase Tech article: download its lead image to tu.jpg and
# save the article body text to wenj.txt.
url = 'https://tech.163.com/20/0716/07/FHL0LPK300097U7T.html'
headers = {
    # Randomized Chrome UA string to avoid trivial bot blocking.
    'User-Agent': UserAgent().chrome
}
response = requests.get(url, headers=headers)
# requests falls back to ISO-8859-1 when the Content-Type header omits a
# charset; use the content-sniffed encoding so Chinese text decodes correctly.
response.encoding = response.apparent_encoding
e = etree.HTML(response.text)

title = e.xpath('//h1/text()')
contents = e.xpath('''//div[@class='post_text']/p''')
imgs = e.xpath('''//p[@class='f_center']/img/@src''')

# Download the first centered inline image, if the page has one
# (guard against IndexError on pages without an f_center image).
if imgs:
    img = imgs[0]
    response1 = requests.get(img, headers=headers)
    print(img)
    # The payload is binary JPEG: report the status code instead of
    # printing response1.text, which would dump undecodable garbage.
    print(response1.status_code)
    with open('tu.jpg', 'wb') as f:
        f.write(response1.content)
print('---' * 80)

# Concatenate the visible text of every paragraph into one string.
info_str = ''.join(p.xpath('string(.)') for p in contents)
# Guard against a missing <h1> rather than crashing on title[0].
title = title[0] if title else ''
print(title)
print(info_str)
with open('wenj.txt', 'w', encoding='utf-8') as f:
    f.write(info_str)
# NOTE(review): the two lines below were page residue pasted in from the blog
# this script was copied from; kept as comments so the file stays runnable.
# (18) Web-scraper XPath practice
# Latest recommended article published 2023-12-02 16:35:00