#!/usr/bin/python
import json
import os
import re
import sys

import requests
from bs4 import BeautifulSoup

# Python 2 only: force utf-8 as the default codec for implicit conversions.
reload(sys)
sys.setdefaultencoding('utf-8')
class PicFromWeb:
    """Tiny holder for the image-search URL being scraped."""

    def set_url(self, url_pic):
        """Store *url_pic* on the instance and echo it for logging.

        url_pic -- the image-search URL string to remember.
        """
        self.url_pic = url_pic
        # print(...) with a single argument behaves identically on
        # Python 2 and 3 (the original Py2-only statement form did not).
        print(self.url_pic)
def GetUrlList(url_pic_pattern, num_page, num_len):
    """Fetch one page of Baidu image-search JSON and return thumbnail URLs.

    url_pic_pattern -- URL template containing literal PAGE and LEN markers
    num_page        -- result offset substituted for PAGE
    num_len         -- page size substituted for LEN (upper bound on results)

    Returns a list of at most *num_len* thumbnail URL strings.
    """
    # PAGE / LEN are literal placeholders, so plain string replacement is
    # clearer and safer than re.sub (no regex metacharacter surprises).
    url_pic = url_pic_pattern.replace('PAGE', '%d' % num_page)
    url_pic = url_pic.replace('LEN', '%d' % num_len)
    r = requests.get(url_pic)
    r.encoding = 'utf-8'
    jo = json.loads(r.text)
    # The original indexed jo['data'][i] blindly for i in range(num_len),
    # which raised IndexError/KeyError when the server returned fewer
    # entries (Baidu commonly appends a trailing entry with no thumbURL).
    entries = jo.get('data', [])[:num_len]
    return [item['thumbURL'] for item in entries if 'thumbURL' in item]
if __name__ == '__main__':
    # Baidu image-search JSON endpoint.  PAGE and LEN are placeholders that
    # GetUrlList substitutes with the page offset and page size.
    # NOTE(fix): the original literal ended with ' HTTP/1.1' pasted from a
    # raw request capture; that text is not part of the URL and corrupted
    # the query string, so it has been removed.
    url_pic_pattern = ('http://image.baidu.com/search/acjson?tn=resultjson_com'
                       '&ipn=rj&ct=201326592&is=&fp=result'
                       '&queryWord=%E5%9B%BE%E7%89%87&cl=2&lm=-1'
                       '&ie=utf-8&oe=utf-8&adpicid=&st=&z=&ic='
                       '&word=%E5%9B%BE%E7%89%87&s=&se=&tab=&width=&height='
                       '&face=&istype=&qc=&nc=&fr=&pn=PAGE&rn=LEN'
                       '&gsm=5e&1465654782303=')
    num_len = 30

    # Browser-like headers; hoisted out of the download loop since they
    # never change between requests.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0',
        'Referer': 'http://image.baidu.com/search/index?tn=baiduimage&ct=201326592&lm=-1&cl=2&ie=gbk&word=%CD%BC%C6%AC&fr=ala&ala=1&alatpl=others&pos=0',
        'Cache-Control': 'max-age=0',
    }

    # Ensure the output directory exists; the original crashed on open()
    # with a missing-directory error on a fresh checkout.
    if not os.path.isdir('./baidu'):
        os.makedirs('./baidu')

    for i in range(0, 2):
        num_page = i * 30  # Baidu pages by absolute result offset.
        list_url = GetUrlList(url_pic_pattern, num_page, num_len)
        print(list_url)
        for item in list_url:
            # Derive a file name from the 'u=' token of the thumbnail URL;
            # skip non-matching URLs instead of raising IndexError on [0].
            tokens = re.findall('u=(.*?),', item)
            if not tokens:
                continue
            name = tokens[0] + '.jpg'
            r = requests.get(item, headers=headers)
            with open('./baidu/' + name, 'wb') as f:
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)
            # 'with' closes (and thereby flushes) the file; the original's
            # per-chunk flush() and explicit close() were redundant.
# Python: fetch Baidu image-search preview thumbnails.
# (Originally published as a CSDN blog article; the pasted article footer
# has been converted to this comment so the file remains runnable.)