# -*- coding: utf-8 -*-
"""
Created on Wed Oct 26 11:28:18 2016
"""
import requests
from bs4 import BeautifulSoup
#import urllib 已全部被requests代替
import os
import re
import time
import pyprind
url='https://bbs.sjtu.edu.cn/bbstdoc,board,PPPerson.html'
#url='https://bbs.sjtu.edu.cn/bbstdoc,board,FreeTalk.html'
def getURL(url):
    """Return a list of [title, absolute_url] pairs for post pages on a board index.

    Fetches the board index page at *url* and collects every anchor whose
    href starts with 'bbstcon,' or 'bbscon,' (post-view pages on the BBS).

    Parameters:
        url: board index page URL (a bbstdoc,board,... page).
    Returns:
        list of [title, url] lists; empty when no matching links are found.
    """
    res = requests.get(url)
    res.encoding = 'GBK'  # the BBS serves GBK-encoded pages
    soup = BeautifulSoup(res.text, "lxml")
    URL = []
    for a in soup.select('table a'):
        # .get avoids a KeyError on <a> tags that carry no href attribute
        href = a.get('href', '')
        if href.startswith(('bbstcon,', 'bbscon,')):
            URL.append([a.text, r'https://bbs.sjtu.edu.cn/' + href])  # title + absolute address
    return URL
def getPreURL(url):
    """Return the absolute URL of the '上一页' (previous page) link on *url*.

    Scans every anchor on the page; if several match, the last one wins
    (they normally point at the same target). Returns '' when the page
    has no previous-page link.
    """
    page = requests.get(url)
    page.encoding = 'GBK'  # board pages are served in GBK
    parsed = BeautifulSoup(page.text, "lxml")
    prev_link = ''
    for anchor in parsed.select('a'):
        if anchor.text == '上一页':
            prev_link = r'https://bbs.sjtu.edu.cn/' + anchor['href']
    return prev_link
def mkdir(path):
    """Create directory *path* (including parents) unless it already exists.

    Prints a status message either way.
    Returns True when the directory was newly created, False otherwise.
    """
    path = path.strip()
    if os.path.exists(path):
        print (u"名为***",path,'***的文件夹已经存在')
        return False
    print (u"新建了名字叫做***",path,u'***的文件夹')
    os.makedirs(path)
    return True
def getimg(URL):
    """Download every image found on each post page in *URL*.

    For each [title, page_url] pair: fetch the page, collect image links
    (board-internal '/file...' attachments plus absolute http(s) URLs),
    create a folder named after the sanitised title under a fixed base
    directory, and save the images numbered 1, 2, ... inside it. Pages
    whose folder already exists are skipped (assumed already downloaded).

    Parameters:
        URL: list of [title, page_url] pairs as produced by getURL().
    Side effects: network requests, directory/file creation, console output.
    """
    print('\n'+'*'*10,'Crawl at:',time.strftime('%Y-%m-%d',time.localtime(time.time())),'*'*10)
    host_pat = re.compile(r'https?://([^/:]+)')  # captures the host part of an absolute URL
    for i in range(len(URL)):
        foldername = r'G:\Python\Learning\spider\pics'
        # strip characters that are illegal or awkward in Windows folder names
        string = re.sub('[\s+\.\!\/_,$%^*(+\"\']+|[+——!,。?、~@#¥%……&*()○【】\[\]]+', '',URL[i][0])
        foldername = foldername + '\\' + string
        print('—'*80)  # separator line
        time.sleep(1)  # throttle: be polite to the server
        res = requests.get(URL[i][1])
        res.encoding = 'GBK'  # the BBS serves GBK-encoded pages
        soup = BeautifulSoup(res.text, 'lxml')
        imglink, imgsite = [], []  # image URLs and the hosts they come from
        for link in soup.select('img'):
            # .get avoids a KeyError on <img> tags that carry no src attribute
            src = link.get('src', '')
            if src.startswith('/file'):  # board-internal attachment
                imglink.append(r'https://bbs.sjtu.edu.cn' + src)
                imgsite.append('bbs.sjtu.edu.cn')
            else:
                m = host_pat.match(src)  # external absolute URL (match once, reuse)
                if m:
                    imglink.append(src)
                    imgsite.append(m.group(1))
        imglink = list(set(imglink))  # drop duplicate links
        imgsite = list(set(imgsite))  # drop duplicate hosts
        print('总进度 >>>',(i+1),'/',len(URL),'>>>')
        if len(imglink) > 0:
            if mkdir(foldername):
                print('此目录下共有 ',len(imglink),'张照片')
                for site in imgsite:
                    print("from website:",site)  # lists all sources, not per-image
            else:
                continue  # folder exists already: skip this post
        else:
            print('No pics at link:',URL[i])
            continue
        num = 1
        perc = pyprind.ProgPercent(len(imglink),track_time=False)
        for img in imglink:
            splitPath = img.split('.')
            fTail = splitPath.pop()
            if len(fTail) > 3:  # no usable extension after the last dot -> default to jpg
                fTail = "jpg"
            fileName = foldername + "/" + str(num) + "." + fTail
            with open(fileName, 'wb') as f:
                f.write(requests.get(img).content)
            perc.update(force_flush=True)
            print (u" Saving:",fileName)
            num += 1
def getimgtitle(URL):
    """Print the index and sanitised title of every entry in *URL*.

    Debug helper: shows what folder names getimg() would create after
    stripping illegal/awkward characters from each title.
    """
    for idx, entry in enumerate(URL):
        cleaned = re.sub("[\s+\.\!\/_,$%^*(+\"\']+|[+——!,。?、~@#¥%……&*()○【】\[\]]+", "",entry[0])
        print(idx,':',cleaned)
    # NOTE(review): source indentation was ambiguous; separator assumed to
    # print once after the whole list — confirm against original behavior.
    print('—'*80)
if __name__ == '__main__':
    pages_to_fetch = 4  # how many board index pages to walk backwards through
    URL = []
    page = url  # start from the newest index page
    for _ in range(pages_to_fetch):
        URL.extend(getURL(page))
        page = getPreURL(page)  # step back to the previous (older) index page
    getimg(URL)
"""
Created on Wed Oct 26 11:28:18 2016
"""
import requests
from bs4 import BeautifulSoup
#import urllib 已全部被requests代替
import os
import re
import time
import pyprind
url='https://bbs.sjtu.edu.cn/bbstdoc,board,PPPerson.html'
#url='https://bbs.sjtu.edu.cn/bbstdoc,board,FreeTalk.html'
def getURL(url):
    """Return a list of [title, absolute_url] pairs for post pages on a board index.

    NOTE(review): this whole section duplicates the definitions earlier in
    the file (the script appears to have been pasted twice) — consider
    deleting the duplicate copy.

    Parameters:
        url: board index page URL (a bbstdoc,board,... page).
    Returns:
        list of [title, url] lists; empty when no matching links are found.
    """
    res = requests.get(url)
    res.encoding = 'GBK'  # the BBS serves GBK-encoded pages
    soup = BeautifulSoup(res.text, "lxml")
    URL = []
    for a in soup.select('table a'):
        # .get avoids a KeyError on <a> tags that carry no href attribute
        href = a.get('href', '')
        if href.startswith(('bbstcon,', 'bbscon,')):
            URL.append([a.text, r'https://bbs.sjtu.edu.cn/' + href])  # title + absolute address
    return URL
def getPreURL(url):
    """Return the absolute URL of the '上一页' (previous page) link on *url*.

    Scans every anchor on the page; if several match, the last one wins
    (they normally point at the same target). Returns '' when the page
    has no previous-page link.
    """
    page = requests.get(url)
    page.encoding = 'GBK'  # board pages are served in GBK
    parsed = BeautifulSoup(page.text, "lxml")
    prev_link = ''
    for anchor in parsed.select('a'):
        if anchor.text == '上一页':
            prev_link = r'https://bbs.sjtu.edu.cn/' + anchor['href']
    return prev_link
def mkdir(path):
    """Create directory *path* (including parents) unless it already exists.

    Prints a status message either way.
    Returns True when the directory was newly created, False otherwise.
    """
    path = path.strip()
    if os.path.exists(path):
        print (u"名为***",path,'***的文件夹已经存在')
        return False
    print (u"新建了名字叫做***",path,u'***的文件夹')
    os.makedirs(path)
    return True
def getimg(URL):
    """Download every image found on each post page in *URL*.

    For each [title, page_url] pair: fetch the page, collect image links
    (board-internal '/file...' attachments plus absolute http(s) URLs),
    create a folder named after the sanitised title under a fixed base
    directory, and save the images numbered 1, 2, ... inside it. Pages
    whose folder already exists are skipped (assumed already downloaded).

    Parameters:
        URL: list of [title, page_url] pairs as produced by getURL().
    Side effects: network requests, directory/file creation, console output.
    """
    print('\n'+'*'*10,'Crawl at:',time.strftime('%Y-%m-%d',time.localtime(time.time())),'*'*10)
    host_pat = re.compile(r'https?://([^/:]+)')  # captures the host part of an absolute URL
    for i in range(len(URL)):
        foldername = r'G:\Python\Learning\spider\pics'
        # strip characters that are illegal or awkward in Windows folder names
        string = re.sub('[\s+\.\!\/_,$%^*(+\"\']+|[+——!,。?、~@#¥%……&*()○【】\[\]]+', '',URL[i][0])
        foldername = foldername + '\\' + string
        print('—'*80)  # separator line
        time.sleep(1)  # throttle: be polite to the server
        res = requests.get(URL[i][1])
        res.encoding = 'GBK'  # the BBS serves GBK-encoded pages
        soup = BeautifulSoup(res.text, 'lxml')
        imglink, imgsite = [], []  # image URLs and the hosts they come from
        for link in soup.select('img'):
            # .get avoids a KeyError on <img> tags that carry no src attribute
            src = link.get('src', '')
            if src.startswith('/file'):  # board-internal attachment
                imglink.append(r'https://bbs.sjtu.edu.cn' + src)
                imgsite.append('bbs.sjtu.edu.cn')
            else:
                m = host_pat.match(src)  # external absolute URL (match once, reuse)
                if m:
                    imglink.append(src)
                    imgsite.append(m.group(1))
        imglink = list(set(imglink))  # drop duplicate links
        imgsite = list(set(imgsite))  # drop duplicate hosts
        print('总进度 >>>',(i+1),'/',len(URL),'>>>')
        if len(imglink) > 0:
            if mkdir(foldername):
                print('此目录下共有 ',len(imglink),'张照片')
                for site in imgsite:
                    print("from website:",site)  # lists all sources, not per-image
            else:
                continue  # folder exists already: skip this post
        else:
            print('No pics at link:',URL[i])
            continue
        num = 1
        perc = pyprind.ProgPercent(len(imglink),track_time=False)
        for img in imglink:
            splitPath = img.split('.')
            fTail = splitPath.pop()
            if len(fTail) > 3:  # no usable extension after the last dot -> default to jpg
                fTail = "jpg"
            fileName = foldername + "/" + str(num) + "." + fTail
            with open(fileName, 'wb') as f:
                f.write(requests.get(img).content)
            perc.update(force_flush=True)
            print (u" Saving:",fileName)
            num += 1
def getimgtitle(URL):
    """Print the index and sanitised title of every entry in *URL*.

    Debug helper: shows what folder names getimg() would create after
    stripping illegal/awkward characters from each title.
    """
    for idx, entry in enumerate(URL):
        cleaned = re.sub("[\s+\.\!\/_,$%^*(+\"\']+|[+——!,。?、~@#¥%……&*()○【】\[\]]+", "",entry[0])
        print(idx,':',cleaned)
    # NOTE(review): source indentation was ambiguous; separator assumed to
    # print once after the whole list — confirm against original behavior.
    print('—'*80)
if __name__ == '__main__':
    pages_to_fetch = 4  # how many board index pages to walk backwards through
    URL = []
    page = url  # start from the newest index page
    for _ in range(pages_to_fetch):
        URL.extend(getURL(page))
        page = getPreURL(page)  # step back to the previous (older) index page
    getimg(URL)