import requests
import lxml
from bs4 import BeautifulSoup
import random
from urllib import request
import os
# Ensure the output directory for downloaded images exists.
# exist_ok=True replaces the racy check-then-create (the directory could be
# created by another process between the exists() test and makedirs()).
os.makedirs('苹果图集', exist_ok=True)
# All requests go to the same host; share the Host header value.
_HOST = 'sc.chinaz.com'

# Chrome 78 desktop user agent.
header1 = {
    'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                   '(KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'),
    'Host': _HOST,
}
# Edge 18 (EdgeHTML) user agent.
header2 = {
    'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                   ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362'),
    'Host': _HOST,
}
# Firefox 71 user agent.
header3 = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:71.0) Gecko/20100101 Firefox/71.0',
    'Host': _HOST,
}

# Pool of header sets to rotate between requests.
header_list = [header1, header2, header3]
# Scrape pages 2-6 of the "apple picture" gallery on sc.chinaz.com and save
# every image referenced inside the page's #container element as
# 苹果图集/<counter>.jpg.
#
# BUG FIX: the counter is initialised ONCE, outside the page loop. The
# original reset `name = 40` on every page, so each page overwrote the
# previous page's 40.jpg, 41.jpg, ... files.
name = 40
for page in range(2, 7):
    url = 'http://sc.chinaz.com/tupian/pingguotupian_%d.html' % page
    # Rotate User-Agent headers between requests to look less like a bot.
    header = random.choice(header_list)
    # BUG FIX: the original called requests.get(url, header), which passes the
    # header dict positionally as `params` (query string), so the custom
    # User-Agent/Host headers were never sent. It must be the `headers=` kwarg.
    req = requests.get(url, headers=header, timeout=10)
    req.encoding = 'utf-8'
    print(req.status_code)
    bf = BeautifulSoup(req.text, 'lxml')
    soup = bf.find('div', id='container')
    if soup is None:
        # Layout changed or the request was blocked; skip this page.
        continue
    for a in soup.find_all('a'):
        try:
            # Images are lazy-loaded on this site; the real image URL is kept
            # in the non-standard `src2` attribute, not `src`.
            img_url = a.find('img')['src2']
        except (AttributeError, TypeError, KeyError):
            # Anchor without an <img>, or an <img> without src2 — not an error.
            continue
        print(img_url)
        try:
            request.urlretrieve(url=img_url, filename="苹果图集/%s.jpg" % name)
        except OSError as err:
            # Network/HTTP failure for this one image: report and move on
            # instead of silently swallowing every exception type.
            print(err)
            continue
        name += 1