requests模块用法举例

json 请求

#! /usr/bin/python3
import requests
import json


class url_request():
  def __init__(self):
    '''init'''

if __name__ == '__main__':
  # Declare the request body as JSON.
  headers = {'Content-Type': 'application/json'}
  payload = {'CountryName': '中国',
        'ProvinceName': '四川省',
        'L1CityName': 'chengdu',
        'L2CityName': 'yibing',
        'TownName': '',
        'Longitude': '107.33393',
        'Latitude': '33.157131',
        'Language': 'CN'}
  # BUGFIX: the keyword argument is `headers=` (was the typo `heards=`), and a
  # JSON Content-Type requires a JSON-encoded body, so serialize the dict with
  # json.dumps() instead of sending it form-encoded via `data=dict`.
  r = requests.post("http://www.xxxxxx.com/CityLocation/json/LBSLocateCity",
           headers=headers, data=json.dumps(payload))
  data = r.json()
  if r.status_code != 200:
    print('LBSLocateCity API Error' + str(r.status_code))
  print(data['CityEntities'][0]['CityID'])  # print the value of one key in the returned JSON
  print(data['ResponseStatus']['Ack'])
  # BUGFIX: json.dumps() produces a string, json.dump() writes to a file object.
  # ensure_ascii=False so Chinese text prints as characters, not \uXXXX escapes.
  print(json.dumps(data, indent=4, sort_keys=True, ensure_ascii=False))

XML请求

#! /usr/bin/python3
import requests

class url_request():
  def __init__(self):
    """init"""

if __name__ == '__main__':
  # Declare the request body as XML (SOAP envelope below).
  headers = {'Content-type': 'text/xml'}
  XML = '<?xml version="1.0" encoding="utf-8"?><soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"><soap:Body><Request xmlns="http://tempuri.org/"><jme><JobClassFullName>WeChatJSTicket.JobWS.Job.JobRefreshTicket,WeChatJSTicket.JobWS</JobClassFullName><Action>RUN</Action><Param>1</Param><HostIP>127.0.0.1</HostIP><JobInfo>1</JobInfo><NeedParallel>false</NeedParallel></jme></Request></soap:Body></soap:Envelope>'
  url = 'http://jobws.push.mobile.xxxxxxxx.com/RefreshWeiXInTokenJob/RefreshService.asmx'
  # BUGFIX: the keyword argument is `headers=`, not `heards=`.
  r = requests.post(url=url, headers=headers, data=XML)
  data = r.text
  print(data)

状态异常处理

import requests

URL = 'http://ip.taobao.com/service/getIpInfo.php' # Taobao IP-geolocation API
try:
  r = requests.get(URL, params={'ip': '8.8.8.8'}, timeout=1)
  r.raise_for_status() # raise an exception when the response status is not 200
except requests.RequestException as e:
  # RequestException covers connection errors, timeouts and bad HTTP statuses.
  print(e)
else:
  # Only parse the body when the request succeeded.
  result = r.json()
  print(type(result), result, sep='\n')

Python中json.dump() 和 json.dumps()的区别

JSON 字符串用 json.dumps / json.loads;JSON 文件用 json.dump / json.load。

以下内容摘自:《Python Cookbook》

json 模块提供了一种很简单的方式来编码和解码JSON数据。 其中两个主要的函数是 json.dumps() 和 json.loads() , 要比其他序列化函数库如pickle的接口少得多。 下面演示如何将一个Python数据结构转换为JSON:

import json

# Encode a plain Python mapping as a JSON string.
data = {'name': 'ACME', 'shares': 100, 'price': 542.23}
json_str = json.dumps(data)

下面演示如何将一个JSON编码的字符串转换回一个Python数据结构:

data = json.loads(json_str)

如果你要处理的是文件而不是字符串,你可以使用 json.dump() 和 json.load() 来编码和解码JSON数据。例如:

# Writing JSON data
with open('data.json', 'w') as f:
    json.dump(data, f)

# Reading data back
with open('data.json', 'r') as f:
    data = json.load(f)

1、Python requests模块说明

requests是使用Apache2 licensed 许可证的HTTP库。

用python编写。

比urllib2模块更简洁。

Request支持HTTP连接保持和连接池,支持使用cookie保持会话,支持文件上传,支持自动响应内容的编码,支持国际化的URL和POST数据自动编码。

在python内置模块的基础上进行了高度的封装,从而使得python进行网络请求时,变得人性化,使用Requests可以轻而易举的完成浏览器可有的任何操作。

现代,国际化,友好。

requests会自动实现持久连接keep-alive

2、Python requests模块基础入门

1)导入模块

import requests

2)发送请求的简洁

示例代码:获取一个网页(个人github)

import requests
r = requests.get('https://github.com/Ranxf')    # 最基本的不带参数的get请求
r1 = requests.get(url='http://dict.baidu.com/s', params={'wd': 'python'})   # 带参数的get请求

我们还可以使用requests模块其它请求方法

1   requests.get('https://github.com/timeline.json')                                # GET请求

2   requests.post('http://httpbin.org/post')                                        # POST请求

3   requests.put('http://httpbin.org/put')                                          # PUT请求

4   requests.delete('http://httpbin.org/delete')                                    # DELETE请求

5   requests.head('http://httpbin.org/get')                                         # HEAD请求

6   requests.options('http://httpbin.org/get')                                      # OPTIONS请求

3)为url传递参数

>>> url_params = {'key':'value'}    #  字典传递参数,如果值为None的键不会被添加到url中
>>> r = requests.get('your url',params = url_params)
>>> print(r.url)
 
  your url?key=value

4)响应的内容

r.encoding                       #获取当前的编码

r.encoding = 'utf-8'             #设置编码

r.text                           #以encoding解析返回内容。字符串方式的响应体,会自动根据响应头部的字符编码进行解码。

r.content                        #以字节形式(二进制)返回。字节方式的响应体,会自动为你解码 gzip 和 deflate 压缩。

r.headers                        #以字典对象存储服务器响应头,但是这个字典比较特殊,字典键不区分大小写,若键不存在则返回None

r.status_code                     #响应状态码

r.raw                             #返回原始响应体,也就是 urllib 的 response 对象,使用 r.raw.read()   

r.ok                              # 查看r.ok的布尔值便可以知道是否登陆成功

 #*特殊方法*#

r.json()                         #Requests中内置的JSON解码器,以json形式返回,前提返回的内容确保是json格式的,不然解析出错会抛异常

r.raise_for_status()             #失败请求(非200响应)抛出异常

post发送json请求:

import requests
import json
  
r = requests.post('https://api.github.com/some/endpoint', data=json.dumps({'some': 'data'}))
 
print(r.json())

5)定制头和cookie信息

# Custom headers and cookies are passed via `headers=` / `cookies=`; the same
# keywords work for both requests.get() and requests.post().
# BUGFIX: removed a stray '' literal after the UA string and turned the
# illustrative pseudo-line `requests.get/post(...)` (a syntax error) into a comment.
header = {'user-agent': 'my-app/0.0.1'}
cookie = {'key': 'value'}
# r = requests.get('your url', headers=header, cookies=cookie)

data = {'some': 'data'}
headers = {'content-type': 'application/json',
      'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'}

r = requests.post('https://api.github.com/some/endpoint', data=data, headers=headers)
print(r.text)

6)响应状态码

使用requests方法后,会返回一个response对象,其存储了服务器响应的内容,如上实例中已经提到的 r.text、r.status_code……

获取文本方式的响应体实例:当你访问 r.text 之时,会使用其响应的文本编码进行解码,并且你可以修改其编码让 r.text 使用自定义的编码进行解码。

r = requests.get('http://www.itwhy.org')
print(r.text, '\n{}\n'.format('*'*79), r.encoding)
r.encoding = 'GBK'
print(r.text, '\n{}\n'.format('*'*79), r.encoding)

示例:

import requests

r = requests.get('https://github.com/Ranxf')    # 最基本的不带参数的get请求
print(r.status_code)                # 获取返回状态
r1 = requests.get(url='http://dict.baidu.com/s', params={'wd': 'python'})   # 带参数的get请求
print(r1.url)
print(r1.text)    # 打印解码后的返回数据

运行结果:

/usr/bin/python3.5 /home/rxf/python3_1000/1000/python3_server/python3_requests/demo1.py

200

http://dict.baidu.com/s?wd=python

…………

Process finished with exit code 0

 r.status_code                      #如果不是200,可以使用 r.raise_for_status() 抛出异常

7)响应

r.headers                                  #返回字典类型,头信息

r.request.headers                          #返回发送到服务器的头信息

r.cookies                                  #返回cookie

r.history                                  #返回重定向信息,当然可以在请求时加上 allow_redirects=False 阻止重定向

8)超时

1

r = requests.get('url',timeout=1)      #设置秒数超时,仅对于连接有效

9)会话对象,能够跨请求保持某些参数

s = requests.Session()
s.auth = ('auth','passwd')
s.headers = {'key':'value'}
r = s.get('url')
r1 = s.get('url1') 

10)代理

1

2

proxies = {'http':'ip1','https':'ip2' }

requests.get('url',proxies=proxies)

汇总:

# HTTP请求类型
# get类型
r = requests.get('https://github.com/timeline.json')
# post类型
r = requests.post("http://m.ctrip.com/post")
# put类型
r = requests.put("http://m.ctrip.com/put")
# delete类型
r = requests.delete("http://m.ctrip.com/delete")
# head类型
r = requests.head("http://m.ctrip.com/head")
# options类型
r = requests.options("http://m.ctrip.com/get")

# 获取响应内容
print(r.content) #以字节的方式去显示,中文显示为字符
print(r.text) #以文本的方式去显示

#URL传递参数
payload = {'keyword': '香港', 'salecityid': '2'}
r = requests.get("http://m.ctrip.com/webapp/tourvisa/visa_list", params=payload) 
print(r.url) #示例为http://m.ctrip.com/webapp/tourvisa/visa_list?salecityid=2&keyword=香港

#获取/修改网页编码
r = requests.get('https://github.com/timeline.json')
print (r.encoding)


#json处理
r = requests.get('https://github.com/timeline.json')
print(r.json()) # 需要先import json  

# 定制请求头
url = 'http://m.ctrip.com'
headers = {'User-Agent' : 'Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 4 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Mobile Safari/535.19'}
r = requests.post(url, headers=headers)
print (r.request.headers)

#复杂post请求
url = 'http://m.ctrip.com'
payload = {'some': 'data'}
r = requests.post(url, data=json.dumps(payload)) #如果传递的payload是string而不是dict,需要先调用dumps方法格式化一下

# post多部分编码文件
url = 'http://m.ctrip.com'
files = {'file': open('report.xls', 'rb')}
r = requests.post(url, files=files)

# 响应状态码
r = requests.get('http://m.ctrip.com')
print(r.status_code)
  
# 响应头
r = requests.get('http://m.ctrip.com')
print (r.headers)
print (r.headers['Content-Type'])
print (r.headers.get('content-type')) #访问响应头部分内容的两种方式
  
# Cookies
url = 'http://example.com/some/cookie/setting/url'
r = requests.get(url)
r.cookies['example_cookie_name']  #读取cookies
  
url = 'http://m.ctrip.com/cookies'
cookies = dict(cookies_are='working')
r = requests.get(url, cookies=cookies) #发送cookies

#设置超时时间
r = requests.get('http://m.ctrip.com', timeout=0.001)

#设置访问代理
proxies = {
      "http": "http://10.10.1.10:3128",
      "https": "http://10.10.1.100:4444",
     }
r = requests.get('http://m.ctrip.com', proxies=proxies)


#如果代理需要用户名和密码,则需要这样:
proxies = {
  "http": "http://user:pass@10.10.1.10:3128/",
}

# HTTP请求类型
# get类型
r = requests.get('https://github.com/timeline.json')
# post类型
r = requests.post("http://m.ctrip.com/post")
# put类型
r = requests.put("http://m.ctrip.com/put")
# delete类型
r = requests.delete("http://m.ctrip.com/delete")
# head类型
r = requests.head("http://m.ctrip.com/head")
# options类型
r = requests.options("http://m.ctrip.com/get")

# 获取响应内容
print(r.content) #以字节的方式去显示,中文显示为字符
print(r.text) #以文本的方式去显示

#URL传递参数
payload = {'keyword': '香港', 'salecityid': '2'}
r = requests.get("http://m.ctrip.com/webapp/tourvisa/visa_list", params=payload) 
print(r.url) #示例为http://m.ctrip.com/webapp/tourvisa/visa_list?salecityid=2&keyword=香港

#获取/修改网页编码
r = requests.get('https://github.com/timeline.json')
print (r.encoding)


#json处理
r = requests.get('https://github.com/timeline.json')
print(r.json()) # 需要先import json  

# 定制请求头
url = 'http://m.ctrip.com'
headers = {'User-Agent' : 'Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 4 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Mobile Safari/535.19'}
r = requests.post(url, headers=headers)
print (r.request.headers)

#复杂post请求
url = 'http://m.ctrip.com'
payload = {'some': 'data'}
r = requests.post(url, data=json.dumps(payload)) #如果传递的payload是string而不是dict,需要先调用dumps方法格式化一下

# post多部分编码文件
url = 'http://m.ctrip.com'
files = {'file': open('report.xls', 'rb')}
r = requests.post(url, files=files)

# 响应状态码
r = requests.get('http://m.ctrip.com')
print(r.status_code)
  
# 响应头
r = requests.get('http://m.ctrip.com')
print (r.headers)
print (r.headers['Content-Type'])
print (r.headers.get('content-type')) #访问响应头部分内容的两种方式
  
# Cookies
url = 'http://example.com/some/cookie/setting/url'
r = requests.get(url)
r.cookies['example_cookie_name']  #读取cookies
  
url = 'http://m.ctrip.com/cookies'
cookies = dict(cookies_are='working')
r = requests.get(url, cookies=cookies) #发送cookies

#设置超时时间
r = requests.get('http://m.ctrip.com', timeout=0.001)

#设置访问代理
proxies = {
      "http": "http://10.10.1.10:3128",
      "https": "http://10.10.1.100:4444",
     }
r = requests.get('http://m.ctrip.com', proxies=proxies)


#如果代理需要用户名和密码,则需要这样:
proxies = {
  "http": "http://user:pass@10.10.1.10:3128/",
}

示例代码:

GET请求

# 1、无参数实例
 
import requests
 
ret = requests.get('https://github.com/timeline.json')
 
print(ret.url)
print(ret.text)
 
 
 
# 2、有参数实例
 
import requests
 
payload = {'key1': 'value1', 'key2': 'value2'}
ret = requests.get("http://httpbin.org/get", params=payload)
 
print(ret.url)
print(ret.text)

POST请求

# 1、基本POST实例
 
import requests
 
payload = {'key1': 'value1', 'key2': 'value2'}
ret = requests.post("http://httpbin.org/post", data=payload)
 
print(ret.text)
 
 
# 2、发送请求头和数据实例
 
import requests
import json
 
url = 'https://api.github.com/some/endpoint'
payload = {'some': 'data'}
headers = {'content-type': 'application/json'}
 
ret = requests.post(url, data=json.dumps(payload), headers=headers)
 
print(ret.text)
print(ret.cookies)

参数示例代码

def param_method_url():
  """Demonstrate the `method` and `url` arguments of requests.request()."""
  # requests.request(method='get', url='http://127.0.0.1:8000/test/')
  # requests.request(method='post', url='http://127.0.0.1:8000/test/')
  pass


def param_param():
  """Show the types accepted by requests' `params=` (query-string) argument."""
  # - a dict
  # - a string
  # - bytes (ASCII only)

  # requests.request(method='get',
  # url='http://127.0.0.1:8000/test/',
  # params={'k1': 'v1', 'k2': '水电费'})

  # requests.request(method='get',
  # url='http://127.0.0.1:8000/test/',
  # params="k1=v1&k2=水电费&k3=v3&k3=vv3")

  # requests.request(method='get',
  # url='http://127.0.0.1:8000/test/',
  # params=bytes("k1=v1&k2=k2&k3=v3&k3=vv3", encoding='utf8'))

  # Error: bytes params must not contain non-ASCII characters.
  # requests.request(method='get',
  # url='http://127.0.0.1:8000/test/',
  # params=bytes("k1=v1&k2=水电费&k3=v3&k3=vv3", encoding='utf8'))
  pass


def param_data():
  """Show the types accepted by requests' `data=` (request-body) argument."""
  # a dict
  # a string
  # bytes
  # a file object

  # requests.request(method='POST',
  # url='http://127.0.0.1:8000/test/',
  # data={'k1': 'v1', 'k2': '水电费'})

  # requests.request(method='POST',
  # url='http://127.0.0.1:8000/test/',
  # data="k1=v1; k2=v2; k3=v3; k3=v4"
  # )

  # requests.request(method='POST',
  # url='http://127.0.0.1:8000/test/',
  # data="k1=v1;k2=v2;k3=v3;k3=v4",
  # headers={'Content-Type': 'application/x-www-form-urlencoded'}
  # )

  # requests.request(method='POST',
  # url='http://127.0.0.1:8000/test/',
  # data=open('data_file.py', mode='r', encoding='utf-8'), # file content: k1=v1;k2=v2;k3=v3;k3=v4
  # headers={'Content-Type': 'application/x-www-form-urlencoded'}
  # )
  pass


def param_json():
  """POST the payload as a JSON body.

  The dict passed via `json=` is serialized with json.dumps() and sent in the
  request body with Content-Type: application/json.
  """
  payload = {'k1': 'v1', 'k2': '水电费'}
  requests.request(method='POST',
           url='http://127.0.0.1:8000/test/',
           json=payload)


def param_headers():
  """Send custom request headers to the server along with a JSON body."""
  extra_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
  payload = {'k1': 'v1', 'k2': '水电费'}
  requests.request(method='POST',
           url='http://127.0.0.1:8000/test/',
           json=payload,
           headers=extra_headers)


def param_cookies():
  """Send cookies to the server, as a plain dict and as a CookieJar."""
  # Dict form.
  requests.request(method='POST',
           url='http://127.0.0.1:8000/test/',
           data={'k1': 'v1', 'k2': 'v2'},
           cookies={'cook1': 'value1'},
           )
  # A CookieJar also works (the dict form is a wrapper built on top of it).
  from http.cookiejar import CookieJar
  from http.cookiejar import Cookie

  obj = CookieJar()
  obj.set_cookie(Cookie(version=0, name='c1', value='v1', port=None, domain='', path='/', secure=False, expires=None,
             discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False,
             port_specified=False, domain_specified=False, domain_initial_dot=False, path_specified=False)
          )
  requests.request(method='POST',
           url='http://127.0.0.1:8000/test/',
           data={'k1': 'v1', 'k2': 'v2'},
           cookies=obj)


def param_files():
  """Show the value shapes `files=` accepts for multipart uploads."""
  # Upload a file object.
  # file_dict = {
  # 'f1': open('readme', 'rb')
  # }
  # requests.request(method='POST',
  # url='http://127.0.0.1:8000/test/',
  # files=file_dict)

  # Upload a file object under a custom file name.
  # file_dict = {
  # 'f1': ('test.txt', open('readme', 'rb'))
  # }
  # requests.request(method='POST',
  # url='http://127.0.0.1:8000/test/',
  # files=file_dict)

  # Upload literal string content under a custom file name.
  # file_dict = {
  # 'f1': ('test.txt', "hahsfaksfa9kasdjflaksdjf")
  # }
  # requests.request(method='POST',
  # url='http://127.0.0.1:8000/test/',
  # files=file_dict)

  # Full tuple form: (file name, content, content type, extra headers).
  # file_dict = {
  #   'f1': ('test.txt', "hahsfaksfa9kasdjflaksdjf", 'application/text', {'k1': '0'})
  # }
  # requests.request(method='POST',
  #         url='http://127.0.0.1:8000/test/',
  #         files=file_dict)

  pass


def param_auth():
  """Authenticate a request with HTTP Basic auth (Digest works the same way)."""
  from requests.auth import HTTPBasicAuth, HTTPDigestAuth

  resp = requests.get('https://api.github.com/user', auth=HTTPBasicAuth('wupeiqi', 'sdfasdfasdf'))
  print(resp.text)

  # Router-style login (response often needs gbk decoding):
  # ret = requests.get('http://192.168.1.1',
  # auth=HTTPBasicAuth('admin', 'admin'))
  # ret.encoding = 'gbk'
  # print(ret.text)

  # Digest variant:
  # ret = requests.get('http://httpbin.org/digest-auth/auth/user/pass', auth=HTTPDigestAuth('user', 'pass'))
  # print(ret)


def param_timeout():
  """Demonstrate `timeout=`: one number, or a (connect, read) tuple."""
  # ret = requests.get('http://google.com/', timeout=1)
  # print(ret)

  # ret = requests.get('http://google.com/', timeout=(5, 1))
  # print(ret)
  pass


def param_allow_redirects():
  """Fetch a URL without following redirects (allow_redirects=False)."""
  response = requests.get('http://127.0.0.1:8000/test/', allow_redirects=False)
  print(response.text)


def param_proxies():
  """Demonstrate `proxies=` mappings, including authenticated proxies."""
  # proxies = {
  # "http": "61.172.249.96:80",
  # "https": "http://61.185.219.126:3128",
  # }

  # A per-host proxy mapping is also supported:
  # proxies = {'http://10.20.1.128': 'http://10.10.1.10:5323'}

  # ret = requests.get("http://www.proxy360.cn/Proxy", proxies=proxies)
  # print(ret.headers)


  # Proxies that require credentials use HTTPProxyAuth:
  # from requests.auth import HTTPProxyAuth
  #
  # proxyDict = {
  # 'http': '77.75.105.165',
  # 'https': '77.75.105.165'
  # }
  # auth = HTTPProxyAuth('username', 'mypassword')
  #
  # r = requests.get("http://www.google.com", proxies=proxyDict, auth=auth)
  # print(r.text)

  pass


def param_stream():
  """Download lazily with stream=True, then release the connection."""
  response = requests.get('http://127.0.0.1:8000/test/', stream=True)
  print(response.content)
  response.close()

  # contextlib.closing() can scope the connection instead:
  # from contextlib import closing
  # with closing(requests.get('http://httpbin.org/get', stream=True)) as r:
  # # process the response here
  # for i in r.iter_content():
  # print(i)


def requests_session():
  """Reuse one Session so cookies persist across requests (chouti.com vote flow)."""
  import requests

  session = requests.Session()

  # 1. Hit any page first so the server sets the initial cookie (gpsd).
  first = session.get(url="http://dig.chouti.com/help/service")

  # 2. Log in; the backend authorizes the gpsd cookie carried over from step 1.
  login = session.post(
    url="http://dig.chouti.com/login",
    data={
      'phone': "8615131255089",
      'password': "xxxxxx",
      'oneMonth': ""
    }
  )

  # 3. Vote while authenticated via the session's stored cookies.
  vote = session.post(
    url="http://dig.chouti.com/link/vote?linksId=8589623",
  )
  print(vote.text)

附:爬取百度图片

#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   spider_baidu.py    
@Contact :   476423448@qq.com
@License :   (C)Copyright 2020-2021, AIgroup-KPCQ
爬取百度图片 爬虫 输入关键字和页数即可
@Modify Time      @Author    @Version    @Desciption
------------      -------    --------    -----------
7/17/21 2:21 PM   gavin      1.0         None
'''

from threading import Thread
import re
import time
import hashlib

import requests
import os
import urllib

class Spider_baidu_image():
    """Scrape thumbnails from Baidu image search.

    Prompts for a keyword and a page count on construction (30 images per
    page), then — when the instance is called — downloads every thumbnail
    into ``<path>/<keyword>/``.
    """

    def __init__(self, path):
        # Endpoint of Baidu's image-search JSON API.
        self.url = 'http://image.baidu.com/search/acjson?'
        self.headers = {
            'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36' }
        # Image downloads carry a Referer header; Baidu rejects bare requests.
        self.headers_image = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
            'Referer': 'https://image.baidu.com/search/index?tn=baiduimage&ct=201326592&lm=-1&cl=2&ie=gb18030&word=%D3%EA%DA%A1&fr=ala&ala=1&alatpl=normal&pos=0'}

        self.keyword = input("请输入搜索图片关键字:")
        self.paginator = int(input("请输入搜索页数,每页30张图片:"))
        self.path = path

    def get_param(self):
        """Build the URL-encoded query string for each requested page.

        :return: list of query strings, one per page (pn = 30, 60, ...).
        """
        keyword = urllib.parse.quote(self.keyword)
        params = []
        for i in range(1, self.paginator + 1):
            params.append(
                'tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord={}&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=&hd=1&latest=0&copyright=0&word={}&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&force=&cg=star&pn={}&rn=30&gsm=78&1557125391211='.format(
                    keyword, keyword, 30 * i))
        return params

    def get_urls(self, params):
        """Join the API endpoint with each query string.

        :param params: list of query strings from get_param()
        :return: list of full request URLs
        """
        return [self.url + param for param in params]

    def get_image_url(self, urls):
        """Collect the 'thumbURL' of every result across all pages."""
        image_url = []
        for url in urls:
            json_data = requests.get(url, headers=self.headers).json()
            json_data = json_data.get('data')
            for i in json_data:
                if i:  # the API pads 'data' with an empty trailing object
                    image_url.append(i.get('thumbURL'))
        return image_url

    def get_image(self, image_url):
        """Save every thumbnail as ``<path>/<keyword>/<index>.jpg``.

        :param image_url: list of thumbnail URLs
        """
        file_name = os.path.join(self.path, self.keyword)
        # BUGFIX: test the actual target directory; the original checked
        # os.path.exists(self.keyword), i.e. a path relative to the CWD.
        if not os.path.exists(file_name):
            os.makedirs(file_name)
        for index, url in enumerate(image_url, start=1):
            file_save = os.path.join(file_name, '{}.jpg'.format(index))
            with open(file_save, 'wb') as f:
                f.write(requests.get(url, headers=self.headers_image).content)
            if index % 30 == 0:
                # BUGFIX: integer division so the message reads 第1页, not 第1.0页.
                print('{}第{}页下载完成'.format(self.keyword, index // 30))

    def __call__(self, *args, **kwargs):
        params = self.get_param()
        urls = self.get_urls(params)
        image_url = self.get_image_url(urls)
        self.get_image(image_url)


# if __name__ == '__main__':
#     spider = Spider_baidu_image()
#     spider()


class BaiDu(object):
    """Crawl Baidu image search for a keyword and save each hit, one thread
    per image. Results land in ``<path><name>/`` with MD5-based file names.
    """

    def __init__(self, name, page, path):
        self.start_time = time.time()  # used by __del__ to report elapsed time
        self.name = name               # search keyword
        self.page = page               # number of result pages (60 images each)
        self.url = 'https://image.baidu.com/search/acjson'
        self.header = {'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36' }# replace with your own UA
        self.num = 0                   # request/download counter
        self.path = path               # parent directory for the keyword folder

    def queryset(self):
        """Issue one API request per result page."""
        for i in range(int(self.page)):
            # BUGFIX: page i starts at offset 60*i. The original accumulated
            # `pn += 60 * i` (giving 0, 60, 180, 360, ...) and skipped pages.
            pn = 60 * i
            name = {'word': self.name, 'pn': pn, 'tn': 'resultjson_com', 'ipn': 'rj', 'rn': 60}
            self.getrequest(self.url, name)

    def getrequest(self, url, data):
        """Fetch one result page and hand the extracted image links to thread()."""
        print('[INFO]: 开始发送请求:' + url)
        ret = requests.get(url, headers=self.header, params=data)

        if str(ret.status_code) == '200':
            print('[INFO]: request 200 ok :' + ret.url)
        else:
            print('[INFO]: request {}, {}'.format(ret.status_code, ret.url))

        response = ret.content.decode()
        # Pull thumbnail URLs out of the raw JSON text; [11:] strips the
        # leading 'thumbURL":"' that the pattern also matches.
        img_links = re.findall(r'thumbURL.*?\.jpg', response)
        links = [link[11:] for link in img_links]
        self.thread(links)

    def saveimage(self, link):
        """Download one image; the file name is the MD5 hex digest of its URL."""
        print('[INFO]:正在保存图片:' + link)
        m = hashlib.md5()
        m.update(link.encode())
        name = m.hexdigest()
        ret = requests.get(link, headers=self.header)
        image_content = ret.content
        if not os.path.exists(self.path + self.name):
            os.mkdir(self.path + self.name)
        filename = self.path + self.name + '/' + name + '.jpg'
        with open(filename, 'wb') as f:
            f.write(image_content)
        print('[INFO]:保存成功,图片名为:{}.jpg'.format(name))

    def thread(self, links):
        """Start one daemonless download thread per link (threads are not joined).

        NOTE(review): self.num is bumped once per call AND once per link, so the
        final "一共进行了N次请求" figure counts pages + images together.
        """
        self.num += 1
        for i, link in enumerate(links):
            print('*' * 50)
            print(link)
            print('*' * 50)
            if link:
                # time.sleep(0.5)
                t = Thread(target=self.saveimage, args=(link,))
                t.start()
                # t.join()
            self.num += 1
        print('一共进行了{}次请求'.format(self.num))

    def __del__(self):
        # Report total runtime when the crawler object is garbage-collected.
        end_time = time.time()
        print('一共花费时间:{}(单位秒)'.format(end_time - self.start_time))

def main():
    """Crawl one weather keyword; loop over the commented list for a batch run."""
    # Batch mode: iterate over
    # ['冰雹', '降雪', '降雨', '结冰', '露', '霜', '雾霾', '雾凇', '雨凇']
    keyword, pages = '雨凇', 10
    crawler = BaiDu(keyword, pages, '/media/gavin/home/gavin/DataSet/tmp')
    crawler.queryset()


if __name__ == '__main__':
    #main()

    # Interactive run: prompts for keyword and page count, then downloads.
    spider = Spider_baidu_image('/media/gavin/home/gavin/DataSet/tmp')
    spider()

参考:

1.Python requests模块基础使用方法实例及高级应用(自动登陆,抓取网页源码)实例详解

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值