AWS S3 Python SDK Code Examples

Yet another set of easy-to-understand, easy-to-use AWS S3 Python SDK code examples.

"""

Yet another s3 python sdk example.

based on boto 2.27.0

"""

import time

import os

import urllib

import boto.s3.connection

import boto.s3.key

def test():

print ‘--- running AWS s3 examples ---‘

c = boto.s3.connection.S3Connection(‘‘, ‘‘)

print ‘original bucket number:‘, len(c.get_all_buckets())

bucket_name = ‘yet.another.s3.example.code‘

print ‘creating a bucket:‘, bucket_name

try:

bucket = c.create_bucket(bucket_name)

except boto.exception.S3CreateError as e:

print ‘ ‘ * 4, ‘error occured:‘

print ‘ ‘ * 8, ‘http status code:‘, e.status

print ‘ ‘ * 8, ‘reason:‘, e.reason

print ‘ ‘ * 8, ‘body:‘, e.body

return

test_bucket_name = ‘no.existence.yet.another.s3.example.code‘

print ‘if you just want to know whether the bucket(\‘%s\‘) exists or not‘ % (test_bucket_name,), ‘and don\‘t want to get this bucket‘

try:

test_bucket = c.head_bucket(test_bucket_name)

except boto.exception.S3ResponseError as e:

if e.status == 403 and e.reason == ‘Forbidden‘:

print ‘ ‘ * 4, ‘the bucket(\‘%s\‘) exists but you don\‘t have the permission.‘ % (test_bucket_name,)

elif e.status == 404 and e.reason == ‘Not Found‘:

print ‘ ‘ * 4, ‘the bucket(\‘%s\‘) doesn\‘t exist.‘ % (test_bucket_name,)

print ‘or use lookup() instead of head_bucket() to do the same thing.‘, ‘it will return None if the bucket does not exist instead of throwing an exception.‘

test_bucket = c.lookup(test_bucket_name)

if test_bucket is None:

print ‘ ‘ * 4, ‘the bucket(\‘%s\‘) doesn\‘t exist.‘ % (test_bucket_name,)

print ‘now you can get the bucket(\‘%s\‘)‘ % (bucket_name,)

bucket = c.get_bucket(bucket_name)

print ‘add some objects to bucket ‘, bucket_name

keys = [‘sample.txt‘, ‘notes/2006/January/sample.txt‘, ‘notes/2006/February/sample2.txt‘, ‘notes/2006/February/sample3.txt‘, ‘notes/2006/February/sample4.txt‘, ‘notes/2006/sample5.txt‘]

print ‘ ‘ * 4, ‘these key names are:‘

for name in keys:

print ‘ ‘ * 8, name

filename = ‘./_test_dir/sample.txt‘

print ‘ ‘ * 4, ‘you can contents of object(\‘%s\‘) from filename(\‘%s\‘)‘ % (keys[0], filename,)

key = boto.s3.key.Key(bucket, keys[0])

bytes_written = key.set_contents_from_filename(filename)

assert bytes_written == os.path.getsize(filename), ‘ error occured:broken file‘

print ‘ ‘ * 4, ‘or set contents of object(\‘%s\‘) by opened file object‘ % (keys[1],)

fp = open(filename, ‘r‘)

key = boto.s3.key.Key(bucket, keys[1])

bytes_written = key.set_contents_from_file(fp)

assert bytes_written == os.path.getsize(filename), ‘ error occured:broken file‘

print ‘ ‘ * 4, ‘you can also set contents the remaining key objects from string‘

for name in keys[2:]:

print ‘ ‘ * 8, ‘key:‘, name

key = boto.s3.key.Key(bucket, name)

s = ‘This is the content of %s ‘ % (name,)

key.set_contents_from_string(s)

print ‘ ‘ * 8, ‘..contents:‘, key.get_contents_as_string()

# use get_contents_to_filename() to save contents to a specific file in the filesystem.
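    # Not in the original example: a minimal hedged sketch of the download direction
    # mentioned in the comment above. The target path is an assumption; './_test_dir/'
    # already exists because the upload above read './_test_dir/sample.txt' from it.
    key.get_contents_to_filename('./_test_dir/downloaded_sample.txt')
    print ' ' * 8, 'saved contents of \'%s\' to ./_test_dir/downloaded_sample.txt' % (key.name,)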

    # print 'You have %d objects in bucket %s' % (len(bucket.get_all_keys()), bucket_name)

    print 'list all objects added into \'%s\' bucket' % (bucket_name,)
    objs = bucket.list()
    for key in objs:
        print ' ' * 4, key.name

    p = 'notes/2006/'
    print 'list objects that start with \'%s\'' % (p,)
    objs = bucket.list(prefix=p)
    for key in objs:
        print ' ' * 4, key.name

    print 'list objects or key prefixes like \'%s*\', i.e. what sits at the top of the \'%s\' folder' % (p, p,)
    objs = bucket.list(prefix=p, delimiter='/')
    for key in objs:
        print ' ' * 4, key.name

    keys_per_page = 4
    print 'manually handle the results paging from s3,', 'number of keys per page:', keys_per_page
    print ' ' * 4, 'get page 1'
    objs = bucket.get_all_keys(max_keys=keys_per_page)
    for key in objs:
        print ' ' * 8, key.name

    print ' ' * 4, 'get page 2'
    last_key_name = objs[-1].name  # the last key of the previous page is the marker used to retrieve the next page.
    objs = bucket.get_all_keys(max_keys=keys_per_page, marker=last_key_name)
    for key in objs:
        print ' ' * 8, key.name

    """
    get_all_keys() is a lower-level method for listing the contents of a bucket.
    It closely models the actual S3 API and requires you to manually handle the paging of results.
    For a higher-level method that handles the details of paging for you, use the list() method.
    """

    print 'you must delete all objects in the bucket \'%s\' before deleting the bucket itself' % (bucket_name,)
    print ' ' * 4, 'you can delete objects one by one'
    bucket.delete_key(keys[0])
    print ' ' * 4, 'or you can delete multiple objects using a single HTTP request with delete_keys().'
    bucket.delete_keys(keys[1:])

    print 'now you can delete the bucket \'%s\'' % (bucket_name,)
    c.delete_bucket(bucket_name)


# References:
# [1] http://docs.pythonboto.org/
# [2] Amazon S3 API Reference


if __name__ == '__main__':
    test()

If you repost this article, please credit the author and the source [Gary's Influence] http://garyelephant.me, and do not use it for any commercial purpose.

Author: Gary Gao (garygaowork[at]gmail.com), interested in the internet, distributed systems, high performance, NoSQL, automation, and software teams.

Support my work: https://me.alipay.com/garygao

Original post: http://blog.csdn.net/gaoyingju/article/details/25392833
