import requests
import re
import json
from bs4 import BeautifulSoup
def get_html(url):
    """Fetch *url* and return the response body as text.

    Returns None when the server answers with a non-200 status or the
    request fails entirely (a message is printed in that case).
    """
    # A real browser UA string: douban rejects the default requests UA.
    # BUG FIXED: the original used the key 'User Agent' (invalid header
    # name) and passed the dict as params= (query string), so the header
    # never reached the server at all.
    headers = {
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0",
    }
    try:
        # Timeout added so a stalled connection cannot hang the crawler.
        r = requests.get(url, headers=headers, timeout=10)
        if r.status_code == 200:
            return r.text
    except requests.RequestException:
        # Narrowed from a bare except:, which also swallowed KeyboardInterrupt.
        print("爬取失败")
def parse_html(html, out_path='C:/Users/root/Desktop/py/gg.txt'):
    """Parse the douban chart page.

    Appends one JSON-encoded text line per movie table to *out_path*
    and returns the list of poster-image URLs found on the page.

    out_path defaults to the original hard-coded file so existing
    callers keep their behavior.
    """
    soup = BeautifulSoup(html, "html.parser")
    # Movie-info blocks on the chart page are <table width="100%"> with
    # an empty class attribute.
    tables = soup.body.find_all('table', attrs={'width': "100%", 'class': ""})
    with open(out_path, 'a', encoding='utf-8') as f:
        for table in tables:
            people = table.text.replace('\n', "").strip()
            f.write(json.dumps(people, ensure_ascii=False) + '\n')
    # NOTE: the original called f.close() inside the with-block; the
    # context manager already closes the file, so it was removed.
    print("文本写入完成")
    # Poster thumbnails are the 75px-wide <img> tags; repr() flattens the
    # tag list into one string for the regex scan.
    photos = repr(soup.select('img[width="75"]'))
    # BUG FIXED: dots are now escaped (the original '.jpg' matched any
    # character before 'jpg') and \S*? is non-greedy so each match stops
    # at the first '.jpg'.
    pattern = re.compile(
        r'https://img3\.doubanio\.com/view/photo/s_ratio_poster/public/p25\S*?\.jpg'
    )
    return pattern.findall(photos)
def save_photo(urls):
    """Download every image URL in *urls* into the target directory.

    Files are named after the last path segment of each URL. A failed
    download is reported and skipped instead of aborting the whole run.
    """
    # Renamed parameter: the original was called `list`, shadowing the
    # builtin; the only caller passes it positionally.
    # Same User-Agent fix as get_html: proper 'User-Agent' key, sent as
    # a header instead of a query-string parameter.
    headers = {
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0",
    }
    root = "C:/Users/root/Desktop/py/"
    for url in urls:
        path = root + url.split('/')[-1]
        try:
            resp = requests.get(url, headers=headers, timeout=10)
            resp.raise_for_status()
        except requests.RequestException:
            # One bad URL no longer kills the remaining downloads.
            print("图片下载失败:", url)
            continue
        # with-statement closes the file; the original's extra f.close()
        # was redundant and has been dropped.
        with open(path, 'wb') as f:
            f.write(resp.content)
    print("图片存储完成")
def main():
    """Crawl the douban movie chart: save the text data, then the posters."""
    chart_url = "https://movie.douban.com/chart"
    page = get_html(chart_url)
    poster_urls = parse_html(page)
    save_photo(poster_urls)
# Guarded entry point: importing this module no longer triggers a crawl.
if __name__ == "__main__":
    main()
爬虫入门实例(六)
最新推荐文章于 2024-10-29 19:06:15 发布