import csv
import time
import requests
from bs4 import BeautifulSoup
# url = 'https://movie.douban.com/top250'
# headers = {
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36'
# }
#
# resp = requests.get(url=url, headers=headers)
# print(resp.text)
# page = BeautifulSoup(resp.text, 'html.parser')
# table = page.find('table', attrs={'class': 'hq_table'})
#
# trs = table.find_all("tr")[1:]
# f = open('a.csv', mode='w', encoding='utf-8')
# writer = csv.writer(f)
# for i in trs:
# tds = i.find_all('td')
# n1 = tds[0].txt
# n2 = tds[1].txt
# writer.writerow([n1, n2])
# Scrape the wallpaper list page, follow each item's detail page, and
# download the first image found there into the current working directory.
url = 'https://www.53pic.com/bizhi/weimei/'
domain = 'https://www.53pic.com'
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'
}

resp = requests.get(url=url, headers=headers)
resp.encoding = 'utf-8'
page = BeautifulSoup(resp.text, 'html.parser')
resp.close()  # FIX: close as soon as the HTML is parsed, not at the very end

# FIX: guard against the container being absent instead of crashing on None
work_list = page.find('div', attrs={'class': 'work-list-box'})
links = work_list.find_all('a', attrs={'class': 'card-img-hover'}) if work_list else []

for link in links:
    # Fetch the detail (child) page; hrefs on the list page are site-relative.
    target = domain + link.get('href')
    # FIX: send the same headers as the list-page request (was inconsistent)
    child_page_resp = requests.get(target, headers=headers)
    child_page_resp.encoding = 'utf-8'
    page2 = BeautifulSoup(child_page_resp.text, 'html.parser')
    child_page_resp.close()  # FIX: original leaked one response per iteration

    # Locate the image on the detail page; skip pages without one.
    img = page2.find('img')
    if img is None or not img.get('src'):
        continue
    img_src = img.get('src')
    # NOTE(review): if the site serves relative src attributes, prefix the
    # domain — TODO confirm against actual page markup
    if img_src.startswith('/'):
        img_src = domain + img_src

    # Download the image (binary content) and name it after the URL's last segment.
    img_resp = requests.get(img_src, headers=headers)
    img_name = img_src.split('/')[-1]
    with open(img_name, mode='wb') as f:  # images are stored as bytes
        f.write(img_resp.content)
    img_resp.close()  # FIX: was never closed
    print("over", img_name)
    time.sleep(1)  # throttle requests to avoid being blocked by the server

print("all over")
# bs4实战
# 最新推荐文章于 2023-01-02 12:13:40 发布
# (Article-footer text from the source page, commented out so the file
# remains valid Python.)