xx is the real first productive force
Scraping the page http://tieba.baidu.com/p/2166231880
1. getHtml(url) fetches the page content for the given URL. Note that it passes the see_lz=1 parameter to request the "author-only" view of the thread, which keeps images posted by other users out of the results (see the sketch after this list).
2. getImgUrl(ulist, html) uses bs4's find_all() method to collect all the image links on the page into ulist.
3. downloadImg(ulist) downloads each image from its link; after the script runs, the images are stored in the D:/pics/ directory.
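
As a quick sketch of what the see_lz parameter in step 1 does at the HTTP level: requests serializes the params dict into the URL's query string, so the page actually fetched is the author-only view. Preparing the request (without sending it) makes the final URL visible:

import requests

# requests serializes the params dict into the query string;
# preparing the request shows the final URL without sending it
req = requests.Request('GET', 'http://tieba.baidu.com/p/2166231880',
                       params={'see_lz': 1}).prepare()
print(req.url)  # http://tieba.baidu.com/p/2166231880?see_lz=1
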
# Scrape the images from a Baidu Tieba thread
import os

import requests
from bs4 import BeautifulSoup


def getHtml(url):
    """Fetch the page content for the given URL."""
    kv = {'see_lz': 1}  # request the "author-only" view of the thread
    try:
        r = requests.get(url, params=kv)
        r.raise_for_status()
        r.encoding = r.apparent_encoding  # not strictly needed, since we only want the image URLs
        return r.text
    except requests.RequestException:
        return ""


def getImgUrl(ulist, html):
    """Collect every image link on the page into ulist."""
    soup = BeautifulSoup(html, 'html.parser')
    ccs = soup('cc')  # shorthand for soup.find_all('cc'); each <cc> wraps one post body
    for cc in ccs:
        for img in cc('img'):
            ulist.append(img.attrs['src'])


def downloadImg(ulist):
    """Download each image and save it under D:/pics/."""
    root = "D:/pics/"
    try:
        if not os.path.exists(root):
            os.mkdir(root)
        for url in ulist:
            path = root + url.split('/')[-1]  # keep the original file name
            if not os.path.exists(path):      # skip files we already downloaded
                r = requests.get(url)
                with open(path, 'wb') as f:
                    f.write(r.content)
    except (requests.RequestException, OSError):
        pass


def main():
    ulist = []
    url = 'http://tieba.baidu.com/p/2166231880'
    html = getHtml(url)
    getImgUrl(ulist, html)
    downloadImg(ulist)


if __name__ == '__main__':
    main()
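
One point worth calling out in getImgUrl: on the thread page each post body is wrapped in a <cc> tag, and soup('cc') is bs4's calling shorthand for soup.find_all('cc'), which is the method step 2 refers to. A minimal sketch of that selection logic, run on a hypothetical stand-in snippet (the HTML and image URL below are made up and only mimic the thread's structure):

from bs4 import BeautifulSoup

# hypothetical stand-in for one post on the thread page
html = '<cc><div><img src="http://imgsrc.baidu.com/forum/example.jpg"/></div></cc>'
soup = BeautifulSoup(html, 'html.parser')
for cc in soup('cc'):        # soup('cc') is equivalent to soup.find_all('cc')
    for img in cc('img'):    # likewise cc('img') is cc.find_all('img')
        print(img.attrs['src'])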