python爬虫_2

requests模块

requests.get()
import requests
# Demo: requests encodes the query string for you when you pass `params`.
query = {'wd': 'python'}
search_url = 'https://www.baidu.com/s'
get_1 = requests.get(search_url, params=query)  # extra `params` argument builds ?wd=python

print(get_1.url)
#https://www.baidu.com/s?wd=python
requests.post()
# Demo: submit form fields with POST and print the server's echo.
form_fields = {'firstname': '5', 'lastname': '5'}
post_url = 'http://pythonscraping.com/pages/files/processing.php'
post_ = requests.post(post_url, data=form_fields)

print(post_.text)
#cookies 模拟登陆
#1
# Demo: log in once, grab the cookies from the response, then replay them
# manually on a follow-up GET to stay "logged in".
credentials = {'username': 'anything', 'password': 'password'}
url_post = 'http://pythonscraping.com/pages/cookies/welcome.php'
login_resp = requests.post(url_post, data=credentials)
print(login_resp.cookies.get_dict())

welcome_resp = requests.get(url_post, cookies=login_resp.cookies)
print(welcome_resp.text)

#2
# Demo: a Session object persists cookies automatically, so the follow-up
# GET needs no explicit cookie handling.
session = requests.Session()
login_form = {'username': 'anything', 'password': 'password'}
login_result = session.post(url_post, data=login_form)
print(login_result.cookies.get_dict())

page = session.get(url_post)
print(page.text)
下载文件
#1
from urllib.request import urlretrieve
import os

# Download a file straight to disk in one call.
# NOTE(review): `image_url` is never defined in this script — it must be set
# to a real URL before this runs; confirm against the original tutorial.
os.makedirs('img', exist_ok=True)  # urlretrieve fails if the target directory is missing
urlretrieve(image_url, 'img/img1.png')

#2
# Demo: fetch the whole body into memory, then write the raw bytes to disk.
r = requests.get(image_url)
with open('img/img2.png', 'wb') as out_file:
    out_file.write(r.content)

#3
# Demo: stream the download so the whole payload never sits in memory.
r = requests.get(image_url, stream=True)    # stream loading: body fetched lazily
try:
    with open('img/img3.png', 'wb') as f:
        for chunk in r.iter_content(chunk_size=32):
            f.write(chunk)
finally:
    # A streamed response holds its connection until fully read or closed;
    # without this, the pooled connection leaks on any write error.
    r.close()
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值