from bs4 import BeautifulSoup
import urllib.request
import os


def main():
    """Scrape thumbnail images from qiushibaike picture pages.

    Prompts the user for a page count, then for each page fetches
    ``http://www.qiushibaike.com/pic/page/<n>``, finds every ``div`` whose
    class is ``thumb`` or ``author clearfix``, and saves that div's first
    ``<img>`` into a local ``photos`` directory as ``1.jpg``, ``2.jpg``, ...
    """
    # Create the output directory once and work inside it so the
    # downloaded files land in ./photos relative to the start directory.
    photos_dir = os.path.join(os.getcwd(), "photos")
    if not os.path.isdir(photos_dir):
        os.mkdir(photos_dir)
    os.chdir(photos_dir)

    # A browser-ish User-Agent; the site rejects the default urllib one.
    headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}

    t = 1  # running image counter, doubles as the output file name
    n = input("请输入获取的页数:")
    for x in range(int(n)):
        req = urllib.request.Request(
            "http://www.qiushibaike.com/pic/page/" + str(x + 1),
            headers=headers)
        response = urllib.request.urlopen(req).read()
        soup = BeautifulSoup(response, "html.parser")
        for div in soup.find_all('div', {"class": {'thumb', "author clearfix"}}):
            img = div.find('img')
            # Not every matched div contains an <img>; skip instead of
            # crashing on None.get(...).
            if img is None:
                continue
            img_src = img.get('src')
            if not img_src:
                continue
            # The site often emits protocol-relative URLs ("//pic...");
            # urlretrieve needs an explicit scheme.
            if img_src.startswith("//"):
                img_src = "http:" + img_src
            urllib.request.urlretrieve(img_src, str(t) + '.jpg')
            t += 1
        print("已经获取页数" + str(x + 1))
    print("获取完成")


if __name__ == "__main__":
    main()
# NOTE(review): removed an accidental byte-for-byte duplicate of the scraper
# script above. Executing the pasted copy a second time would re-prompt for
# the page count, reset the image counter t to 1 (overwriting 1.jpg, 2.jpg,
# ...), and — because the first copy already chdir'd into ./photos — create
# and descend into a nested photos/photos directory.