Python request.add_header方法代码示例

本文整理汇总了Python中urllib.request.add_header方法的典型用法代码示例。如果您正苦于以下问题:Python request.add_header方法的具体用法?Python request.add_header怎么用?Python request.add_header使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块urllib.request的用法示例。

在下文中一共展示了request.add_header方法的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: get_web_page

​点赞 6

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def get_web_page(url, headers, cookies):
    """Fetch *url* and return the response body as bytes.

    Gzip-encoded responses are transparently decompressed.  On HTTPError
    the status code is returned for the two recognised cases
    (ALREADY_CLICKED_CODE, UNAUTHORIZED); otherwise None.
    """
    try:
        logging.info(f'Fetching {url}')
        req = urllib.request.Request(url, None, headers)
        req.add_header('Authorization', cookies)
        response = urllib.request.urlopen(req)
        body = response.read()
        # Inflate gzip-compressed bodies before returning them.
        if response.info().get('Content-Encoding') == 'gzip':
            body = gzip.GzipFile(fileobj=BytesIO(body)).read()
        return body
    except urllib.error.HTTPError as e:
        logging.info(f"Error processing webpage: {e}")
        if e.code in (ALREADY_CLICKED_CODE, UNAUTHORIZED):
            return e.code
        return None

开发者ID:joaopsys,项目名称:AutoChronoGG,代码行数:22,

示例2: makePut

​点赞 6

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def makePut(self, url):
    """Puts data to Canvas (passes token as header).

    Returns True when Canvas answers HTTP 200, False otherwise; prints
    and re-raises any exception.
    """
    try:
        # Tack on http://.../ to the beginning of the url if needed
        urlString = url if self.CANVAS_API in url else self.CANVAS_API + url
        print("Putting: " + urlString)
        req = urllib.request.Request(urlString, method='PUT')
        req.add_header("Authorization", "Bearer " + self.CANVAS_TOKEN)
        response = urllib.request.urlopen(req)
        json_string = response.read().decode('utf-8')
        # Parse the body to confirm Canvas returned valid JSON.
        retVal = dict(json.loads(json_string))
        return response.status == 200
    except Exception as ex:
        print(ex)
        print(sys.exc_info()[0])
        raise

开发者ID:skuhl,项目名称:autograder,代码行数:27,

示例3: get_blob

​点赞 6

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def get_blob(self, thread_id, blob_id):
    """Returns a file-like object with the contents of the given blob from
    the given thread.

    The object is described in detail here:
    https://docs.python.org/2/library/urllib2.html#urllib2.urlopen
    """
    req = Request(url=self._url("blob/%s/%s" % (thread_id, blob_id)))
    if self.access_token:
        req.add_header("Authorization", "Bearer " + self.access_token)
    try:
        return urlopen(req, timeout=self.request_timeout)
    except HTTPError as error:
        # Surface the developer-friendly message when the error body is
        # parseable JSON; otherwise re-raise the original HTTPError.
        try:
            detail = json.loads(error.read().decode())["error_description"]
        except Exception:
            raise error
        raise QuipError(error.code, detail, error)

开发者ID:quip,项目名称:quip-api,代码行数:22,

示例4: _fetch_json

​点赞 6

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def _fetch_json(self, path, post_data=None, **args):
    """Request the given API path (POST when *post_data* is given) and
    return the decoded JSON response.

    Raises QuipError with the server's error_description on HTTP errors.
    """
    req = Request(url=self._url(path, **args))
    if post_data:
        # Drop empty values, but keep ints (so 0/False survive).
        filtered = dict((k, v) for k, v in post_data.items()
                        if v or isinstance(v, int))
        encoded = urlencode(self._clean(**filtered))
        req.data = encoded.encode() if PY3 else encoded
    if self.access_token:
        req.add_header("Authorization", "Bearer " + self.access_token)
    try:
        raw = urlopen(req, timeout=self.request_timeout).read()
        return json.loads(raw.decode())
    except HTTPError as error:
        # Extract the developer-friendly error message from the response
        try:
            message = json.loads(error.read().decode())["error_description"]
        except Exception:
            raise error
        raise QuipError(error.code, message, error)

开发者ID:quip,项目名称:quip-api,代码行数:26,

示例5: download

​点赞 6

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def download(self, source: str, destination: str) -> int:
    """Download ``source`` from the storage server into the data directory
    and unpack it.

    Args:
        source: archive path relative to STORAGE_BASE_URL.
        destination: file name relative to DATA_BASE_PATH to save to.

    Returns:
        Number of bytes downloaded.
    """
    destination_path: str = os.path.join(DATA_BASE_PATH, destination)
    # NOTE(review): os.path.join on a URL yields backslashes on Windows;
    # kept as-is for compatibility, but urllib.parse.urljoin would be the
    # portable choice.
    url: str = os.path.join(STORAGE_BASE_URL, source)
    request = urllib.request.Request(url)
    request.add_header('User-Agent', self.USERAGENT)
    response = urllib.request.urlopen(request)
    with open(destination_path, 'wb') as output:
        filesize: int = 0
        # Bug fix: the original looped on ``while source:`` -- *source* is
        # an always-truthy string and never changes, so the loop only ever
        # exited via break.  ``while True`` states the actual intent.
        while True:
            chunk = response.read(self.CHUNK_SIZE)
            if not chunk:
                break
            filesize += len(chunk)
            output.write(chunk)
    # assume that we always distribute data as .tar.xz archives
    with lzma.open(destination_path) as f:
        with tarfile.open(fileobj=f) as tar:
            tar.extractall(os.path.dirname(destination_path))
    return filesize

开发者ID:bureaucratic-labs,项目名称:dostoevsky,代码行数:21,

示例6: ip_adress_proxies

​点赞 6

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def ip_adress_proxies(url='https://www.ip-adress.com/proxy_list/'):
    """Scrape 'ip:port' proxy entries from www.ip-adress.com.

    Returns a list of 'ip:port\\n' strings; an empty list when the page
    could not be downloaded.
    """
    # Downloading without proxy
    opener = urllib.request.build_opener(urllib.request.ProxyHandler())
    urllib.request.install_opener(opener)
    request = urllib.request.Request(url)
    request.add_header('user-agent', USER_AGENT)
    parsed_uri = urlparse(url)
    host = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
    request.add_header('referer', host)
    # Bug fix: initialise to '' (the original used False), otherwise
    # re.findall(pattern, s) below raises TypeError when the download fails.
    s = ''
    try:
        context = ssl._create_unverified_context()
        with urlopen(request, context=context, timeout=3000) as response:
            s = response.read().decode('utf-8')
    except Exception as er:
        print(er)
    pattern = r'\d*\.\d*\.\d*\.\d*\:\d*'
    # (removed the original's no-op str.replace('', '') call)
    found = [i + '\n' for i in re.findall(pattern, s)]
    return found

开发者ID:rendrom,项目名称:rosreestr2coord,代码行数:21,

示例7: downloadUrls

​点赞 6

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def downloadUrls(self, urls):
    """Generator yielding (relative_url, body_bytes) for each entry of *urls*.

    Logs the failing URL and re-raises on any HTTP or URL error.
    """
    for u in urls:
        url = self.base_url + u
        request = urllib.request.Request(url)
        # the .htaccess file checks for the header, and if it exists returns unprocessed data.
        request.add_header('User-agent', 'our-web-crawler')
        try:
            response = urllib.request.urlopen(request)
            data = response.read()
        # Fix: HTTPError/URLError are defined in urllib.error; reaching them
        # through urllib.request relies on an undocumented re-export.
        except urllib.error.HTTPError:
            log(url)
            raise
        except urllib.error.URLError:
            log(url)
            raise
        yield (u, data)

开发者ID:pygame,项目名称:pygameweb,代码行数:22,

示例8: perform_metadata_exchange

​点赞 6

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def perform_metadata_exchange(self, endpoint, xaddr):
    """POST a WS-Discovery GetMetadata message to *xaddr* and hand the
    reply to handle_metadata().  Silently returns on an invalid XAddr."""
    if not (xaddr.startswith('http://') or xaddr.startswith('https://')):
        logger.debug('invalid XAddr: {}'.format(xaddr))
        return
    host = None
    url = xaddr
    if self.mch.family == socket.AF_INET6:
        # Scope the IPv6 link-local address to our interface; keep the
        # unscoped form for the Host header.
        host = '[{}]'.format(url.partition('[')[2].partition(']')[0])
        url = url.replace(']', '%{}]'.format(self.mch.interface.name))
    body = self.build_getmetadata_message(endpoint)
    request = urllib.request.Request(url, data=body, method='POST')
    request.add_header('Content-Type', 'application/soap+xml')
    request.add_header('User-Agent', 'wsdd')
    if host is not None:
        request.add_header('Host', host)
    try:
        with urllib.request.urlopen(request, None, 2.0) as stream:
            self.handle_metadata(stream.read(), endpoint, xaddr)
    except urllib.error.URLError as e:
        # Bug fix: the original had a single '{}' placeholder with two
        # format() arguments, so the error itself was never logged; also
        # Logger.warn() is a deprecated alias of warning().
        logger.warning('could not fetch metadata from: {}: {}'.format(url, e))

开发者ID:christgau,项目名称:wsdd,代码行数:25,

示例9: put

​点赞 6

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def put(self, location, params=None):
    """Dispatch a PUT request to a SeaMicro chassis.

    The seamicro box has order-dependent HTTP parameters, so we build
    our own get URL, and use a list vs. a dict for data, as the order is
    implicit.
    """
    opener = urllib.request.build_opener(urllib.request.HTTPHandler)
    url = self.build_url(location, params)
    # Idiom fix: pass method='PUT' directly (supported since Python 3.3)
    # instead of monkey-patching request.get_method with a lambda.
    request = urllib.request.Request(url, method="PUT")
    request.add_header("content-type", "text/json")
    response = opener.open(request)
    json_data = self.parse_response(url, response)
    return json_data["result"]

开发者ID:maas,项目名称:maas,代码行数:18,

示例10: getHtml

​点赞 6

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def getHtml(url, headers=None, encode=None, maxError=3, timeout=10):
    """Fetch *url*, retrying up to *maxError* times.

    Returns the body as bytes, or as text decoded with *encode* when
    given; returns None when every attempt fails.
    """
    error = 0
    while error < maxError:
        try:
            if not headers:
                # Bug fix: copy the shared default header dict; the original
                # mutated c_spider.defaultHeaders in place via __setitem__,
                # leaking each call's Referer into every later request.
                headers = dict(c_spider.defaultHeaders)
            headers['Referer'] = url
            request = urllib.request.Request(url)
            for key in headers:
                request.add_header(key, headers[key])
            response = urllib.request.urlopen(request, timeout=timeout)
            html = response.read()
            return html.decode(encode) if encode else html
        except Exception:
            # Was a bare except; keep the best-effort retry but stop
            # swallowing KeyboardInterrupt/SystemExit.
            error += 1
    return None

# 获取网页源代码

开发者ID:JxiaoC,项目名称:animeMusic,代码行数:24,

示例11: plugin

​点赞 5

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def plugin(srv, item):
    """ addrs: (node, name) """
    srv.logging.debug("*** MODULE=%s: service=%s, target=%s", __file__, item.service, item.target)
    url = item.config['url']
    apikey = item.config['apikey']
    timeout = item.config['timeout']
    node = item.addrs[0]
    name = item.addrs[1]
    value = item.payload
    # Bug fix: pre-initialise so the warning in the except branch cannot
    # itself raise UnboundLocalError when the failure happens before
    # *resource* is assigned.
    resource = None
    try:
        params = {'apikey': apikey, 'node': node, 'json': json.dumps({name: value})}
        resource = url + '/input/post.json?' + urllib.parse.urlencode(params)
        request = urllib.request.Request(resource)
        request.add_header('User-agent', srv.SCRIPTNAME)
        response = urllib.request.urlopen(request, timeout=timeout)
        data = response.read()
    except Exception as e:
        srv.logging.warn("Failed to send GET request to EmonCMS using %s: %s" % (resource, e))
        return False
    return True

开发者ID:jpmens,项目名称:mqttwarn,代码行数:29,

示例12: get_raw

​点赞 5

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def get_raw(self, url):
    """GET *url* and return the decoded body, or None on failure.

    On failure, self._error / self._error_msg describe the problem and
    self._update_ready is cleared.
    """
    # print("Raw request:", url)
    request = urllib.request.Request(url)
    context = ssl._create_unverified_context()
    # setup private request headers if appropriate
    if self._engine.token is not None:  # idiom fix: was '!= None'
        if self._engine.name == "gitlab":
            request.add_header('PRIVATE-TOKEN', self._engine.token)
        else:
            if self._verbose: print("Tokens not setup for engine yet")
    # run the request
    try:
        result = urllib.request.urlopen(request, context=context)
    except urllib.error.HTTPError as e:
        self._error = "HTTP error"
        self._error_msg = str(e.code)
        self._update_ready = None
        # Consistency fix: the URLError branch returned None explicitly
        # while this branch fell through implicitly -- make it explicit.
        return None
    except urllib.error.URLError as e:
        self._error = "URL error, check internet connection"
        self._error_msg = str(e.reason)
        self._update_ready = None
        return None
    else:
        result_string = result.read()
        result.close()
        return result_string.decode()

# result of all api calls, decoded into json format

开发者ID:ndee85,项目名称:coa_tools,代码行数:32,

示例13: _make_request

​点赞 5

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def _make_request(self, url):
    """GET *url* with a desktop-Chrome User-Agent and return the body."""
    user_agent = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36')
    req = urllib.request.Request(url)
    req.add_header('User-Agent', user_agent)
    with urllib.request.urlopen(req, timeout=5) as response:
        html = response.read()
    return html

开发者ID:wkeeling,项目名称:selenium-wire,代码行数:10,

示例14: __send_request

​点赞 5

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def __send_request(self, method, uri, data):
    """Send an authenticated JSON request to the TestRail API.

    Returns the decoded JSON body ({} when the body is empty); raises
    APIError when the server responded with an HTTP error status.
    """
    url = self.__url + uri
    request = urllib.request.Request(url)
    if method == 'POST':
        request.data = bytes(json.dumps(data), 'utf-8')
    # HTTP basic-auth header built from user:password.
    auth = str(
        base64.b64encode(
            bytes('%s:%s' % (self.user, self.password), 'utf-8')
        ),
        'ascii'
    ).strip()
    request.add_header('Authorization', 'Basic %s' % auth)
    request.add_header('Content-Type', 'application/json')
    e = None
    try:
        response = urllib.request.urlopen(request).read()
    except urllib.error.HTTPError as ex:
        # Keep the body: TestRail returns error details as JSON.
        response = ex.read()
        e = ex
    if response:
        result = json.loads(response.decode())
    else:
        result = {}
    if e is not None:  # idiom fix: was '!= None'
        if result and 'error' in result:
            error = '"' + result['error'] + '"'
        else:
            error = 'No additional error message received'
        raise APIError('TestRail API returned HTTP %s (%s)' %
                       (e.code, error))
    return result

开发者ID:qxf2,项目名称:makemework,代码行数:37,

示例15: start_cron_threads

​点赞 5

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def start_cron_threads():
    """Start threads to trigger essential cron jobs."""
    request_timeout = 10 * 60  # 10 minutes.

    def trigger(period, target):
        """Hit the cron endpoint for *target* every *period* seconds, forever."""
        while True:
            time.sleep(period)
            try:
                endpoint = 'http://{host}/{target}'.format(
                    host=constants.CRON_SERVICE_HOST, target=target)
                req = urllib.request.Request(endpoint)
                req.add_header('X-Appengine-Cron', 'true')
                reply = urllib.request.urlopen(req, timeout=request_timeout)
                reply.read(60)  # wait for request to finish.
            except Exception:
                # Best-effort: a failed trigger is retried next interval.
                continue

    schedule = (
        (90, 'cleanup'),
        (60, 'triage'),
        (6 * 3600, 'schedule-progression-tasks'),
        (12 * 3600, 'schedule-corpus-pruning'),
    )
    for period, job in schedule:
        worker = threading.Thread(target=trigger, args=(period, job))
        worker.daemon = True
        worker.start()

开发者ID:google,项目名称:clusterfuzz,代码行数:33,

示例16: _fetch_url_page

​点赞 5

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def _fetch_url_page(self, url, timeout=None):
    """
    Makes a GET request to the given lyrics page URL and returns
    the HTML content in the case of a valid response.

    Raises LyricsNotFoundError when the server answers with an HTTP error.
    """
    request = urllib.request.Request(url)
    request.add_header("User-Agent", "urllib")
    try:
        response = urllib.request.urlopen(request, timeout=timeout)
    # Fix: HTTPError's documented home is urllib.error, not urllib.request.
    except urllib.error.HTTPError:
        raise LyricsNotFoundError(
            "Could not find Genius lyrics at URL: {}".format(url)
        )
    else:
        return response.read()

开发者ID:ritiek,项目名称:spotify-downloader,代码行数:17,

示例17: _fetch_search_page

​点赞 5

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def _fetch_search_page(self, url, timeout=None):
    """
    Returns search results from a given URL in JSON.

    Raises LyricsNotFoundError when the search produced no hits.
    """
    req = urllib.request.Request(url)
    req.add_header("User-Agent", "urllib")
    reply = urllib.request.urlopen(req, timeout=timeout)
    metadata = json.loads(reply.read())
    hits = metadata["response"]["sections"][0]["hits"]
    if len(hits) == 0:
        raise LyricsNotFoundError(
            "Genius returned no lyric results for the search URL: {}".format(url)
        )
    return metadata

开发者ID:ritiek,项目名称:spotify-downloader,代码行数:15,

示例18: _make_request

​点赞 5

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def _make_request(self, url):
    """Open *url* with every configured network header attached."""
    req = urllib.request.Request(url)
    # assumes self.network_headers holds (name, value) pairs -- TODO confirm
    for name, value in self.network_headers:
        req.add_header(name, value)
    return urllib.request.urlopen(req)

开发者ID:ritiek,项目名称:spotify-downloader,代码行数:7,

示例19: makeRequest

​点赞 5

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def makeRequest(self, url):
    """Makes the given request (passes token as header).

    Returns the decoded JSON reply; follows Canvas 'Link: rel="next"'
    pagination and appends the extra pages onto the result list.
    """
    try:
        # Tack on http://.../ to the beginning of the url if needed
        if self.CANVAS_API not in url:
            urlString = self.CANVAS_API + url
        else:
            urlString = url
        # print("Requesting: " + urlString)
        request = urllib.request.Request(urlString)
        request.add_header("Authorization", "Bearer " + self.CANVAS_TOKEN)
        response = urllib.request.urlopen(request)
        json_string = response.read().decode('utf-8')
        retVal = json.loads(json_string)
        # Deal with pagination:
        # https://canvas.instructure.com/doc/api/file.pagination.html
        #
        # Load the next page if needed and tack the results onto
        # the end.
        response_headers = dict(response.info())
        if "Link" not in response_headers:
            return retVal
        link_header = response_headers['Link']
        link_header_split = link_header.split(",")
        for s in link_header_split:
            # Bug fix: the scraped original used re.match('; rel="next"', s),
            # which anchors at the start, can never match a '<url>; rel=...'
            # entry, and has no capture group for the later group(1) call.
            # The Link entry has the form <url>; rel="next".
            match = re.search(r'<(.*)>; rel="next"', s)
            if not match:
                continue
            retVal.extend(self.makeRequest(match.group(1)))
            return retVal
        return retVal
    except:
        e = sys.exc_info()[0]
        print(e)
        raise

开发者ID:skuhl,项目名称:autograder,代码行数:40,

示例20: free_proxies

​点赞 5

# 需要导入模块: from urllib import request [as 别名]

# 或者: from urllib.request import add_header [as 别名]

def free_proxies(url='https://free-proxy-list.net/'):
    """Scrape 'ip:port' proxy entries from free-proxy-list.net.

    Returns a list of 'ip:port\\n' strings.
    """
    # Downloading without proxy
    opener = urllib.request.build_opener(urllib.request.ProxyHandler())
    urllib.request.install_opener(opener)
    request = urllib.request.Request(url)
    request.add_header('user-agent', USER_AGENT)
    parsed_uri = urlparse(url)
    host = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
    request.add_header('referer', host)
    f = urllib.request.urlopen(request)
    # Reconstructed: the scraped listing stripped the literal '</td><td>'
    # HTML delimiter out of both the pattern and the replace() call,
    # leaving broken multi-line string literals.  The page lists the IP
    # and port in adjacent table cells.
    pattern = r'\d*\.\d*\.\d*\.\d*</td><td>\d*'
    s = f.read().decode('utf-8')
    found = [i.replace('</td><td>', ':') + '\n'
             for i in re.findall(pattern, s)]
    return found

开发者ID:rendrom,项目名称:rosreestr2coord,代码行数:17,

注:本文中的urllib.request.add_header方法示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。

# JD (jd.com) product scraper: searches for a keyword with Selenium,
# appends each result page to a CSV, then reports the most expensive item.
#
# Fix for "ValueError: could not convert string to float: '商品价格'":
# the CSV is opened in append mode and writeheader() ran on every
# execution, so the file accumulates repeated header rows.  The reader
# must (a) open with the same gbk encoding it was written with and
# (b) skip any row whose price column is not numeric.

import csv
import os
import time

from selenium import webdriver
from selenium.webdriver.common.by import By

CSV_PATH = r'B:\京东商品数据.csv'

driver = webdriver.Chrome()
driver.get('https://www.jd.com/')


def drop_down():
    """Scroll the page down in steps so lazily loaded items render."""
    for x in range(1, 9, 2):
        time.sleep(1)
        j = x / 9
        js = 'document.documentElement.scrollTop = document.documentElement.scrollHeight * %f' % j
        driver.execute_script(js)


driver.find_element(By.CSS_SELECTOR, '#key').send_keys('燕麦')
driver.find_element(By.CSS_SELECTOR, '.button').click()

# Only write the header when the file is new/empty; with mode='a' the
# original wrote a fresh header row on every run, corrupting later parsing.
write_header = not os.path.exists(CSV_PATH) or os.path.getsize(CSV_PATH) == 0
f = open(CSV_PATH, mode='a', encoding='gbk', newline='')
csv_writer = csv.DictWriter(f, fieldnames=[
    '商品标题',
    '商品价格',
    '店铺名字',
    '标签',
    '商品详情页',
])
if write_header:
    csv_writer.writeheader()


def get_shop():
    """Scrape every product card on the current result page into the CSV."""
    time.sleep(10)
    drop_down()
    lis = driver.find_elements(By.CSS_SELECTOR, '#J_goodsList ul li')
    for li in lis:
        title = li.find_element(By.CSS_SELECTOR, '.p-name em').text.replace('\n', '')
        price = li.find_element(By.CSS_SELECTOR, '.p-price strong i').text
        shop_name = li.find_element(By.CSS_SELECTOR, '.J_im_icon a').text
        href = li.find_element(By.CSS_SELECTOR, '.p-img a').get_attribute('href')
        icons = li.find_elements(By.CSS_SELECTOR, '.p-icons i')
        icon = ','.join(i.text for i in icons)
        csv_writer.writerow({
            '商品标题': title,
            '商品价格': price,
            '店铺名字': shop_name,
            '标签': icon,
            '商品详情页': href,
        })


for page in range(1, 3):
    time.sleep(1)
    drop_down()
    get_shop()
    driver.find_element(By.CSS_SELECTOR, '.pn-next').click()

f.close()  # flush rows before re-reading the file
driver.quit()

# Find the most expensive product.  Skip header line(s) and any row whose
# price column is not numeric -- this is exactly what previously raised
# "ValueError: could not convert string to float: '商品价格'".
max_price = float('-inf')
item = name_0 = None
with open(CSV_PATH, 'r', encoding='gbk') as file:
    reader = csv.reader(file)
    for row in reader:
        if len(row) < 3:
            continue
        try:
            num = float(row[1])
        except ValueError:
            continue  # header row or malformed price
        if num > max_price:
            max_price = num
            item = row[0]
            name_0 = row[2]
print(item, max_price, name_0)
最新发布
06-11
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值