本文整理汇总了Python中requests.packages.urllib3.disable_warnings方法的典型用法代码示例。如果您正苦于以下问题:Python urllib3.disable_warnings方法的具体用法?Python urllib3.disable_warnings怎么用?Python urllib3.disable_warnings使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块requests.packages.urllib3的用法示例。
在下文中一共展示了urllib3.disable_warnings方法的25个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: download_tarball
点赞 6
# 需要导入模块: from requests.packages import urllib3 [as 别名]
# 或者: from requests.packages.urllib3 import disable_warnings [as 别名]
def download_tarball(tarball_url, verify=False, proxy_server=None):
    '''
    Download a tarball to a temporary file and return its path.

    :param tarball_url: URL of the tarball to fetch.
    :param verify: whether to verify the server's TLS certificate;
        when False, urllib3's InsecureRequestWarning is suppressed.
    :param proxy_server: optional proxy URL, applied to the http,
        https and ftp schemes.
    :returns: filesystem path of the downloaded tarball.
    :raises source_exceptions.TarballDownloadException: on any failure.
    '''
    import os  # local import: needed to wrap the mkstemp fd

    try:
        if not verify:
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        kwargs = {}
        if proxy_server:
            kwargs['proxies'] = {
                'http': proxy_server,
                'https': proxy_server,
                'ftp': proxy_server
            }
        # mkstemp returns (fd, path); the original discarded the fd,
        # leaking a file descriptor per call. Wrap it so it is closed.
        fd, tarball_filename = tempfile.mkstemp(prefix='armada')
        response = requests.get(tarball_url, verify=verify, **kwargs)
        with os.fdopen(fd, 'wb') as f:
            f.write(response.content)
        return tarball_filename
    except Exception as e:
        # Chain the underlying cause for easier debugging.
        raise source_exceptions.TarballDownloadException(tarball_url) from e
开发者ID:airshipit,项目名称:armada,代码行数:25,
示例2: download_tarball
点赞 6
# 需要导入模块: from requests.packages import urllib3 [as 别名]
# 或者: from requests.packages.urllib3 import disable_warnings [as 别名]
def download_tarball(tarball_url, verify=False):
    '''
    Download a tarball to a temporary file and return its path.

    :param tarball_url: URL of the tarball to fetch.
    :param verify: whether to verify the server's TLS certificate;
        when False, urllib3's InsecureRequestWarning is suppressed.
    :returns: filesystem path of the downloaded tarball.
    :raises source_exceptions.TarballDownloadException: on any failure.
    '''
    import os  # local import: needed to wrap the mkstemp fd

    try:
        if not verify:
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        # mkstemp returns (fd, path); the original discarded the fd,
        # leaking a file descriptor per call. Wrap it so it is closed.
        fd, tarball_filename = tempfile.mkstemp(prefix='armada')
        response = requests.get(tarball_url, verify=verify)
        with os.fdopen(fd, 'wb') as f:
            f.write(response.content)
        return tarball_filename
    except Exception as e:
        # Chain the underlying cause for easier debugging.
        raise source_exceptions.TarballDownloadException(tarball_url) from e
开发者ID:att-comdev,项目名称:armada,代码行数:19,
示例3: get_html
点赞 6
# 需要导入模块: from requests.packages import urllib3 [as 别名]
# 或者: from requests.packages.urllib3 import disable_warnings [as 别名]
def get_html(url, submit_cookies):
    '''
    Fetch ``url`` with the supplied cookies via HTTP GET and return the
    raw response object. Spoofs a desktop Chrome browser so the QQ
    login endpoints treat the request as coming from a real user.
    '''
    # TLS verification is disabled below, so silence the resulting
    # InsecureRequestWarning up front.
    urllib3.disable_warnings()
    browser_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
        'Referer' : 'http://ui.ptlogin2.qq.com/cgi-bin/login?appid=549000912&s_url=http://qun.qq.com/member.html'
    }
    return get(url, cookies=submit_cookies, headers=browser_headers, verify=False)
# post访问网页
开发者ID:shengqiangzhang,项目名称:examples-of-web-crawlers,代码行数:19,
示例4: get_profile_picture
点赞 6
# 需要导入模块: from requests.packages import urllib3 [as 别名]
# 或者: from requests.packages.urllib3 import disable_warnings [as 别名]
def get_profile_picture(self, qq_number, size=100):
    '''
    Return the avatar image bytes for ``qq_number``.

    ``size`` selects the avatar edge length; the service accepts
    40, 100 or 140 (default 100).
    '''
    # TLS verification is disabled below, so silence the resulting
    # InsecureRequestWarning up front.
    urllib3.disable_warnings()
    browser_headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Referer':'http://find.qq.com/'
    }
    avatar_url = 'http://q1.qlogo.cn/g?b=qq&nk={0}&s={1}'.format(qq_number, size)
    response = get(avatar_url, headers=browser_headers, verify=False)
    return response.content
示例5: get_quit_of_group
点赞 6
# 需要导入模块: from requests.packages import urllib3 [as 别名]
# 或者: from requests.packages.urllib3 import disable_warnings [as 别名]
def get_quit_of_group(self):
    '''
    Return the groups this account quit within the last 30 days,
    decoded from the JSON payload of the QQ group-recovery endpoint.
    '''
    # 'bkn' is an anti-forgery token derived from the 'skey' cookie
    # by the hash33_bkn helper; the endpoint rejects requests
    # without it.
    bkn = hash33_bkn(self.cookies_merge_dict_in_qun_qq_com['skey'])
    payload = {'bkn': str(bkn)}
    browser_headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
        'Content-Type': 'text/plain',
        'origin': 'https://huifu.qq.com',
        'referer' : 'https://huifu.qq.com/recovery/index.html?frag=0'
    }
    # TLS verification is disabled below, so silence the resulting
    # InsecureRequestWarning.
    urllib3.disable_warnings()
    response = post('https://huifu.qq.com/cgi-bin/gr_grouplist',
                    data=payload,
                    cookies=self.cookies_merge_dict_in_qun_qq_com,
                    headers=browser_headers,
                    verify=False)
    return loads(response.text)
开发者ID:shengqiangzhang,项目名称:examples-of-web-crawlers,代码行数:27,
示例6: get_pay_for_another
点赞 6
# 需要导入模块: from requests.packages import urllib3 [as 别名]
# 或者: from requests.packages.urllib3 import disable_warnings [as 别名]
def get_pay_for_another(self):
# 获取帮别人的代付
# 需要提交的数据
skey = str(self.cookies_merge_dict_in_qun_qq_com['skey'])
url = 'https://pay.qq.com/cgi-bin/personal/account_msg.cgi?p=0.6796416908412624&cmd=1&sck=' + get_sck(skey) + '&type=100&showitem=2&per=100&pageno=1&r=0.3177912609760205'
# 设置请求头,模拟人工
header = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
'Accept-Encoding': 'gzip, deflate',
'Referer': &#