Introduction
SDK usage guide
This page contains the simplest examples for exploring the features of S3.
Combined with one another, these features let users build useful and interesting functionality.
Installation
pip install 'boto>=2.0,<3.0'
pip install boto3
// Create a Maven project
// Add the following dependency to pom.xml:
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk</artifactId>
<version>1.8.11</version>
<scope>compile</scope>
</dependency>
// Install libs3-dev
sudo apt install libs3-dev # ubuntu
sudo yum install libs3-devel # centos
// Include libs3.h in your source code
#include <libs3.h>
// Link libs3 into your binary using gcc's -l flag
gcc ... -ls3
// These are the installation steps on Ubuntu; they require at least 4 GB of RAM and gcc 4.9.
// For other operating systems, see https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/setup.html
sudo apt-get install libcurl4-openssl-dev libssl-dev uuid-dev zlib1g-dev libpulse-dev
git clone https://github.com/aws/aws-sdk-cpp.git
mkdir sdk_build
cd sdk_build
cmake <path/to/sdk/source> -DBUILD_ONLY="s3"
make
sudo make install
// After installation, add the following flags to g++ when compiling your program
g++ ... -std=gnu++11 -laws-cpp-sdk-s3 -laws-cpp-sdk-core
// Install via the NuGet package manager
Install-Package AWSSDK -Version 2.3.55.2
gem 'aws-sdk-s3', '~> 1'
// There are several ways to install the PHP SDK
// 1. Using Composer
php -d memory_limit=-1 composer.phar require aws/aws-sdk-php
// require composer autoload
<?php
require '/path/to/vendor/autoload.php';
?>
// 2. Using the packaged Phar
// Download it from http://docs.aws.amazon.com/aws-sdk-php/v3/download/aws.phar and require it in your script
<?php
require '/path/to/aws.phar';
?>
// 3. Using the zip package
// Download it from http://docs.aws.amazon.com/aws-sdk-php/v3/download/aws.zip and require it in your script
<?php
require '/path/to/aws-autoloader.php';
?>
// To install the JavaScript SDK
// 1. Download an SDK release from: https://github.com/aws/aws-sdk-js/releases
// 2. Extract the dist/ directory of the release
// 3. Use the browser build of the SDK by embedding it in your HTML file with
<script src="./dist/aws-sdk.min.js"></script>
// 4. Configure CORS for the bucket following this guide: https://docs.bizflycloud.vn/simple_storage/howtos/bucket_config
npm install aws-sdk@2.x
go get "github.com/aws/aws-sdk-go/aws"
Initializing a connection
API: https://hn.ss.bfcplatform.vn
from boto.s3.connection import S3Connection
conn = S3Connection('<ACCESS_KEY_ID>', '<SECRET_KEY_ID>', host='hn.ss.bfcplatform.vn')
import boto3
s3 = boto3.resource('s3', endpoint_url='https://hn.ss.bfcplatform.vn',
aws_access_key_id = '<ACCESS_KEY_ID>',
aws_secret_access_key = '<SECRET_KEY_ID>')
s3client = boto3.client('s3', endpoint_url='https://hn.ss.bfcplatform.vn',
aws_access_key_id = '<ACCESS_KEY_ID>',
aws_secret_access_key = '<SECRET_KEY_ID>')
String accessKey = "<ACCESS_KEY_ID>";
String secretKey = "<SECRET_KEY_ID>";
AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
AmazonS3 client = new AmazonS3Client(credentials);
client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
client.setEndpoint("hn.ss.bfcplatform.vn");
#include <stdio.h>
#include <libs3.h>
// Declare the callbacks
S3Status responsePropertiesCallback(const S3ResponseProperties *properties,
void* callbackData) {
return S3StatusOK;
}
static void responseCompleteCallback(S3Status status,
const S3ErrorDetails* error,
void* callbackData) {
return;
}
S3ResponseHandler responseHandler = {
&responsePropertiesCallback,
&responseCompleteCallback
};
int main(int argc, char** argv) {
// Initialize the connection
S3_initialize("libs3", S3_INIT_ALL, "hn.ss.bfcplatform.vn");
// Do your work here
// Close the connection
S3_deinitialize();
return EXIT_SUCCESS;
}
#include <aws/core/Aws.h>
#include <aws/core/auth/AWSCredentialsProvider.h>
#include <aws/s3/S3Client.h>
int main(int argc, char** argv)
{
Aws::SDKOptions options;
Aws::InitAPI(options);
{
Aws::Client::ClientConfiguration config;
config.endpointOverride = "hn.ss.bfcplatform.vn";
const Aws::String accessKeyId = "<ACCESS_KEY_ID>";
const Aws::String secretKey = "<SECRET_KEY_ID>";
Aws::Auth::AWSCredentials credentials(accessKeyId, secretKey);
Aws::S3::S3Client s3_client(credentials, config);
// make your SDK calls here.
}
Aws::ShutdownAPI(options);
}
string accessKey = "<ACCESS_KEY_ID>";
string secretKey = "<SECRET_KEY_ID>";
AmazonS3Config config = new AmazonS3Config();
config.ServiceURL = "https://hn.ss.bfcplatform.vn";
config.ForcePathStyle = true;
AmazonS3Client client = new AmazonS3Client(
accessKey,
secretKey,
config
);
require 'aws-sdk-s3'
s3 = Aws::S3::Resource.new(
endpoint: 'https://hn.ss.bfcplatform.vn',
access_key_id: '<ACCESS_KEY_ID>',
secret_access_key: '<SECRET_KEY_ID>',
signature_version: 's3',
log_level: :info
)
<?php
$accessKey = "<ACCESS_KEY_ID>";
$secretKey = "<SECRET_KEY_ID>";
$credentials = new Aws\Credentials\Credentials($accessKey, $secretKey);
$options = [
'version'=>'latest',
'region' => 'hn',
'signature_version' => 'v4',
'credentials' => $credentials,
'endpoint' => 'https://hn.ss.bfcplatform.vn'
];
$s3Client = new Aws\S3\S3Client($options);
accessKey = "<ACCESS_KEY_ID>";
secretKey = "<SECRET_KEY_ID>";
AWS.config.update({
accessKeyId: accessKey ,
secretAccessKey: secretKey,
region: 'hn',
endpoint: 'https://hn.ss.bfcplatform.vn',
apiVersions: {
s3: '2006-03-01'
}
})
const s3 = new AWS.S3()
const AWS = require('aws-sdk')
AWS.config.update({
accessKeyId: '<ACCESS_KEY_ID>',
secretAccessKey: '<SECRET_KEY_ID>',
region: 'hn',
endpoint: 'https://hn.ss.bfcplatform.vn',
apiVersions: {
s3: '2006-03-01'
},
logger: process.stdout
})
s3Config := &aws.Config{
Credentials: credentials.NewStaticCredentials("<ACCESS_KEY_ID>", "<SECRET_KEY_ID>", ""),
Endpoint: aws.String("https://hn.ss.bfcplatform.vn"),
Region: aws.String("hn"),
}
newSession := session.New(s3Config)
Client = s3.New(newSession)
Here, <ACCESS_KEY_ID> and <SECRET_KEY_ID> are obtained from the API key management interface.
Secure Token feature
Description: this feature lets users create key/secret pairs together with a session token that has a limited lifetime (at most 24 hours). With the key, secret, and session token you can perform every bucket and file operation, just as with a regular key/secret pair.
This is very useful for frontend flows that upload many files in a row, or for handing credentials to applications for a short period of time.
Creating temporary key/secret pairs
Create the temporary key/secret pairs as follows.
sts_client = boto3.client('sts',
aws_access_key_id='<ACCESS_KEY_ID>',
aws_secret_access_key='<SECRET_KEY_ID>',
endpoint_url='https://hn.ss.bfcplatform.vn',
region_name='',
)
temp_token = sts_client.get_session_token(
DurationSeconds=43200
)
Working on it.
Working on it.
Working on it.
Working on it.
Working on it.
Working on it.
Working on it.
Working on it.
Using the temporary key/secret pairs
Use the temporary key, secret, and session token as follows.
s3client_sts = boto3.client('s3',
aws_access_key_id = temp_token['Credentials']['AccessKeyId'],
aws_secret_access_key = temp_token['Credentials']['SecretAccessKey'],
aws_session_token = temp_token['Credentials']['SessionToken'],
endpoint_url='https://hn.ss.bfcplatform.vn',
region_name='')
# Example: create a bucket and list all buckets
bucket_name = 'sts-test1'
s3client_sts.create_bucket(Bucket=bucket_name)
response = s3client_sts.list_buckets()
for bucket in response["Buckets"]:
    print("{name}\t{created}".format(
        name=bucket['Name'],
        created=bucket['CreationDate'],
    ))
Working on it.
Working on it.
Working on it.
Working on it.
Working on it.
Working on it.
Working on it.
Working on it.
Bucket management
Get, create, delete
Parameters
<BUCKET-NAME>
: the bucket name should contain only digits (0-9), lowercase letters (a-z), and hyphens "-", so the bucket can be reached through its URL without errors and configuration operations do not fail during use (a small name-check sketch follows below).
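As an illustration only (not part of any SDK), a name can be checked against the rule above before calling the API; the regular expression below is an assumption derived from the rule just stated.
import re

# Hypothetical helper: accept only digits, lowercase letters and hyphens,
# matching the bucket-naming rule described above (the exact rule is an assumption).
BUCKET_NAME_RE = re.compile(r'^[a-z0-9-]+$')

def is_valid_bucket_name(name):
    # Return True if the name only uses 0-9, a-z and '-'.
    return bool(BUCKET_NAME_RE.match(name))

print(is_valid_bucket_name('bucket-01'))   # True
print(is_valid_bucket_name('Bucket_01'))   # False: uppercase and underscore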
Listing buckets
conn.get_all_buckets()
# [<Bucket: bucket-01>, <Bucket: bucket-02>, <Bucket: bucket-03>]
for bucket in s3.buckets.all():
    print(bucket.name)
for (Bucket bucket : client.listBuckets()) {
System.out.println(" - " + bucket.getName());
}
// Callback
static S3Status listServiceCallback(const char *ownerId,
const char *ownerDisplayName,
const char *bucketName,
int64_t creationDate, void *callbackData) {
char *header_printed = callbackData;
if (!*header_printed) {
*header_printed = 1;
printf("%-22s", " Bucket");
printf(" %-20s %-12s", " Owner ID", "Display Name");
printf("\n");
printf("----------------------");
printf(" --------------------" " ------------");
printf("\n");
}
printf("%-22s", bucketName);
printf(" %-20s %-12s", ownerId ? ownerId : "", ownerDisplayName ? ownerDisplayName : "");
printf("\n");
return S3StatusOK;
}
// Main
S3ListServiceHandler listServiceHandler = {
responseHandler,
&listServiceCallback
};
char header_printed = 0;
S3_list_service(S3ProtocolHTTPS, "<ACCESS_KEY_ID>", "<SECRET_KEY_ID>", NULL, NULL, NULL,
&listServiceHandler, &header_printed);
// Include
#include <aws/s3/model/Bucket.h>
// Code
auto outcome = s3_client.ListBuckets();
if (outcome.IsSuccess())
{
Aws::Vector<Aws::S3::Model::Bucket> bucket_list =
outcome.GetResult().GetBuckets();
for (auto const &bucket : bucket_list)
{
std::cout << " * " << bucket.GetName() << std::endl;
}
}
else
{
std::cout << "ListBuckets error: "
<< outcome.GetError().GetExceptionName() << " - "
<< outcome.GetError().GetMessage() << std::endl;
}
ListBucketsResponse response = client.ListBuckets();
foreach (S3Bucket b in response.Buckets)
{
Console.WriteLine("{0}\t{1}", b.BucketName, b.CreationDate);
}
buckets = s3.buckets
buckets.each do |bucket|
puts "Creation Date: #{bucket.creation_date} - Name: #{bucket.name}"
end
<?php
$buckets = $s3Client->listBuckets();
printf("Owner ID: %s\n", $buckets['Owner']['ID']);
printf("Owner DisplayName: %s\n\n", $buckets['Owner']['DisplayName']);
echo "Bucket Name\t\tCreation Date\n";
foreach ($buckets['Buckets'] as $bucket) {
echo $bucket['Name']."\t\t". $bucket['CreationDate']."\n";
}
var s3 = new AWS.S3()
var params = {}
s3.listBuckets(params, (err, data) => {
if (err) console.log(err, err.stack)
else console.log(data)
})
const s3 = new AWS.S3()
const params = {}
s3.listBuckets(params, (err, data) => {
if (err) console.log(err, err.stack)
else console.log(data)
})
buckets, _ := Client.ListBuckets(&s3.ListBucketsInput{})
Example: get the bucket named
bucket-01
conn.get_bucket('bucket-01')
# <Bucket: bucket-01>
bucket = s3.Bucket('bucket-01')
# Not supported
// Not supported
// Working on it
# Not supported
bucket = s3.bucket('bucket-01')
bucket.name
# => "bucket-01"
bucket.creation_date
# => 2018-07-03 02:26:12 UTC
// Not supported
/* Not supported */
/* Not supported */
bucket, _ := Client.ListObjects(&s3.ListObjectsInput{
Bucket: aws.String("bucket-01"),
})
Creating a bucket
conn.create_bucket('<BUCKET-NAME>')
s3.create_bucket(Bucket='bucket-02')
client.createBucket("<BUCKET-NAME>");
/* Create a bucket */
S3_create_bucket(S3ProtocolHTTPS, "<ACCESS_KEY_ID>", "<SECRET_KEY_ID>", NULL, NULL,
"<BUCKET-NAME>", S3CannedAclPublicRead,
NULL, NULL, &responseHandler, NULL);
// Include
#include <aws/s3/model/CreateBucketRequest.h>
// Code
Aws::S3::Model::CreateBucketRequest request;
request.SetBucket("<BUCKET-NAME>");
auto outcome = s3_client.CreateBucket(request);
if (outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "CreateBucket error: "
<< outcome.GetError().GetExceptionName() << std::endl
<< outcome.GetError().GetMessage() << std::endl;
}
PutBucketRequest request = new PutBucketRequest();
request.BucketName = "<BUCKET-NAME>";
client.PutBucket(request);
s3.create_bucket({
bucket: '<BUCKET-NAME>',
create_bucket_configuration: {
location_constraint: 'hn'
}
})
<?php
$result = $s3Client->createBucket([
'Bucket' => '<BUCKET-NAME>',
'ACL' => 'private'
]);
var s3 = new AWS.S3()
var params = { Bucket: '<BUCKET-NAME>' }
s3.createBucket(params, function(err, data) {
if (err) console.log(err, err.stack); // an error occurred
else console.log(data); // successful response
});
const s3 = new AWS.S3()
s3.createBucket({ Bucket: '<BUCKET-NAME>' }).promise()
Client.CreateBucket(&s3.CreateBucketInput{
Bucket: aws.String("<BUCKET-NAME>"),
})
Delete
conn.delete_bucket('<BUCKET-NAME>')
bucket = s3.Bucket('bucket-01')
for key in bucket.objects.all():
    key.delete()
bucket.delete()
client.deleteBucket("<BUCKET-NAME>");
/* Delete a bucket. The Bucket must be empty! Otherwise it won’t work! */
S3_delete_bucket(S3ProtocolHTTPS, S3UriStylePath, "<ACCESS_KEY_ID>", "<SECRET_KEY_ID>",
NULL, NULL, "<BUCKET-NAME>", NULL, &responseHandler, NULL);
// Include
#include <aws/s3/model/DeleteBucketRequest.h>
// Code
Aws::S3::Model::DeleteBucketRequest bucket_request;
bucket_request.SetBucket("<BUCKET-NAME>");
auto outcome = s3_client.DeleteBucket(bucket_request);
if (outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "DeleteBucket error: "
<< outcome.GetError().GetExceptionName() << " - "
<< outcome.GetError().GetMessage() << std::endl;
}
DeleteBucketRequest request_delete = new DeleteBucketRequest();
request_delete.BucketName = "<BUCKET-NAME>";
client.DeleteBucket(request_delete);
bucket = s3.bucket('<BUCKET-NAME>')
bucket.delete
<?php
$result = $s3Client->deleteBucket([
'Bucket' => '<BUCKET-NAME>',
]);
var params = {
Bucket: "<BUCKET-NAME>"
};
s3.deleteBucket(params, function(err, data) {
if (err) console.log(err, err.stack); // an error occurred
else console.log(data); // successful response
});
const s3 = new AWS.S3()
s3.deleteBucket({ Bucket: '<BUCKET-NAME>' }).promise()
Client.DeleteBucket(&s3.DeleteBucketInput{
Bucket: aws.String("<BUCKET-NAME>"),
})
Listing files
List the files in a bucket; by default at most 1000 results are returned per request (see the pagination sketch at the end of this subsection for walking through more).
Example: list the files under the prefix
gach/
in bucket bucket-01
files = bucket1.list('gach/', delimiter='/')
for file in files:
    print(file.name)
# .DS_Store
# Desktop_.ini
# aXana-WC-red.png
result = s3client.list_objects(Bucket="bucket-01",
Prefix='gach/',
Delimiter='/'
)
print(result)
ObjectListing objectListing = client.listObjects(new ListObjectsRequest()
.withBucketName("bucket-01")
.withPrefix("gach/"));
for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
System.out.println(" - " + objectSummary.getKey() + " " +
"(size = " + objectSummary.getSize() + ")");
}
// Callback
static S3Status listBucketCallback(
int isTruncated,
const char *nextMarker,
int contentsCount,
const S3ListBucketContent *contents,
int commonPrefixesCount,
const char **commonPrefixes,
void *callbackData) {
printf("%-22s", " Object Name");
printf(" %-5s %-20s", "Size", " Last Modified");
printf("\n");
printf("----------------------");
printf(" -----" " --------------------");
printf("\n");
char timebuf[256];
char sizebuf[16];
int i = 0;
for (i = 0; i < contentsCount; i++) {
const S3ListBucketContent *content = &(contents[i]);
time_t t = (time_t) content->lastModified;
strftime(timebuf, sizeof (timebuf), "%Y-%m-%dT%H:%M:%SZ", gmtime(&t));
sprintf(sizebuf, "%5llu", (unsigned long long) content->size);
printf("%-22s %s %s\n", content->key, sizebuf, timebuf);
}
return S3StatusOK;
}
// main
/* List bucket's content */
S3ListBucketHandler listBucketHandler = {
responseHandler,
&listBucketCallback
};
S3BucketContext bucketContext = {
"hn.ss.bfcplatform.vn",
"<BUCKET-NAME>",
S3ProtocolHTTPS,
S3UriStylePath,
"<ACCESS_KEY_ID>",
"<SECRET_KEY_ID>"
};
S3_list_bucket(&bucketContext, "gach/", NULL, NULL,
0, NULL, &listBucketHandler, NULL);
// Include
#include <aws/s3/model/ListObjectsRequest.h>
#include <aws/s3/model/Object.h>
// Code
Aws::S3::Model::ListObjectsRequest objects_request;
objects_request.WithBucket("bucket-01").WithPrefix("gach/");
auto list_objects_outcome = s3_client.ListObjects(objects_request);
if (list_objects_outcome.IsSuccess())
{
Aws::Vector<Aws::S3::Model::Object> object_list =
list_objects_outcome.GetResult().GetContents();
for (auto const &s3_object : object_list)
{
std::cout << "* " << s3_object.GetKey() << std::endl;
}
}
else
{
std::cout << "ListObjects error: " <<
list_objects_outcome.GetError().GetExceptionName() << " " <<
list_objects_outcome.GetError().GetMessage() << std::endl;
}
ListObjectsRequest request_list_object = new ListObjectsRequest();
request_list_object.BucketName = "bucket-01";
request_list_object.Prefix = "gach/";
ListObjectsResponse response_list_object = client.ListObjects(request_list_object);
foreach (S3Object k in response_list_object.S3Objects)
{
Console.WriteLine("{0}\t{1}\t{2}", k.Key, k.Size, k.LastModified);
}
bucket = s3.bucket('bucket-01')
objects = bucket.objects(prefix: 'gach/', delimiter: '/')
objects.each do |obj|
puts obj.key
end
<?php
/* Returns some or all (up to 1000) of the objects in a bucket. */
$result = $s3Client->listObjects([
'Bucket' => '<BUCKET-NAME>',
'Prefix' => 'gach/',
]);
foreach ($result['Contents'] as $content) {
echo $content['Key'] . "\t" . $content['Size'] . "\t" . $content['LastModified'] . "\n";
}
var params = {
Bucket: "<BUCKET-NAME>",
MaxKeys: 2
};
s3.listObjectsV2(params, function(err, data) {
if (err) console.log(err, err.stack); // an error occurred
else console.log(data); // successful response
});
const s3 = new AWS.S3()
const bucketParams = {
Bucket: 'bucket-01',
Delimiter: '/',
Prefix: 'gach/',
}
s3.listObjectsV2(bucketParams, (err, data) => {
if (err) console.log("Error", err)
else console.log("Success", data)
})
listFile, _ := Client.ListObjects(&s3.ListObjectsInput{
Bucket: aws.String("bucket-01"),
Prefix: aws.String("gach/"),
})
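The listing calls above return at most 1000 keys per request. As a minimal sketch, assuming the boto3 client (s3client) created in the connection section and that the endpoint supports ListObjectsV2, a paginator can walk through every object under a prefix; the bucket and prefix names are just examples.
# Pagination sketch: iterate over all objects under a prefix, 1000 keys at a time.
paginator = s3client.get_paginator('list_objects_v2')
for page in paginator.paginate(Bucket='bucket-01', Prefix='gach/'):
    for obj in page.get('Contents', []):
        print(obj['Key'], obj['Size'], obj['LastModified'])
If ListObjectsV2 is not available on the endpoint, the 'list_objects' paginator works the same way with marker-based paging.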
Managing Bucket ACLs
ACL - Access Control List: lets you manage access to a bucket. Each bucket has an ACL that defines which users or groups of users have which kind of access to your bucket.
When a bucket is created, a default ACL (FULL_CONTROL) is attached to it, giving the bucket owner full control over that bucket.
Get
bucket.get_acl()
# <Policy: demo (owner) = FULL_CONTROL>
acl = s3client.get_bucket_acl(Bucket='bucket-1')
print(acl)
client.getBucketAcl("<BUCKET-NAME>");
// Callback
typedef struct bucket_info {
char *ownerId;
char *ownerDisplayName;
char *bucketName;
int64_t creationDate;
} bucket_info;
static S3Status listServiceCallback(const char *ownerId,
const char *ownerDisplayName,
const char *bucketName,
int64_t creationDate, void *callbackData) {
bucket_info* bucket = callbackData;
bucket->bucketName = strdup(bucketName);
bucket->ownerDisplayName = strdup(ownerDisplayName);
bucket->ownerId = strdup(ownerId);
bucket->creationDate = creationDate;
return S3StatusOK;
}
// main
S3BucketContext bucketContext = {
"hn.ss.bfcplatform.vn",
"<BUCKET-NAME>",
S3ProtocolHTTPS,
S3UriStylePath,
"<ACCESS_KEY_ID>",
"<SECRET_KEY_ID>"
};
bucket_info bucket;
memset(&bucket, 0, sizeof (bucket));
S3ListServiceHandler listServiceHandler = {
responseHandler,
&listServiceCallback
};
S3_list_service(S3ProtocolHTTPS, "<ACCESS_KEY_ID>", "<SECRET_KEY_ID>",
NULL, NULL, NULL, &listServiceHandler, &bucket);
S3AclGrant returnGrants[S3_MAX_ACL_GRANT_COUNT];
int returnGrantCount = 0;
S3_get_acl(&bucketContext, NULL, bucket.ownerId, bucket.ownerDisplayName,
&returnGrantCount, returnGrants, NULL, &responseHandler, NULL);
if (bucket.bucketName) {
free(bucket.bucketName);
}
if (bucket.ownerDisplayName) {
free(bucket.ownerDisplayName);
}
if (bucket.ownerId) {
free(bucket.ownerId);
}
// Include
#include <aws/s3/model/GetBucketAclRequest.h>
#include <aws/s3/model/Permission.h>
#include <aws/s3/model/Grant.h>
// Code
Aws::String GetPermissionString(const Aws::S3::Model::Permission p)
{
switch (p)
{
case Aws::S3::Model::Permission::NOT_SET:
return "NOT_SET";
case Aws::S3::Model::Permission::FULL_CONTROL:
return "FULL_CONTROL";
case Aws::S3::Model::Permission::WRITE:
return "WRITE";
case Aws::S3::Model::Permission::READ:
return "READ";
case Aws::S3::Model::Permission::WRITE_ACP:
return "WRITE_ACP";
case Aws::S3::Model::Permission::READ_ACP:
return "READ_ACP";
default:
return "*unknown!*";
}
}
Aws::S3::Model::GetBucketAclRequest get_request;
get_request.WithBucket("<BUCKET-NAME>");
auto outcome = s3_client.GetBucketAcl(get_request);
if (outcome.IsSuccess())
{
Aws::Vector<Aws::S3::Model::Grant> grants = outcome.GetResult().GetGrants();
for (auto it = grants.begin(); it != grants.end(); it++)
{
Aws::S3::Model::Grant grant = *it;
std::cout << grant.GetGrantee().GetDisplayName() << ": "
<< GetPermissionString(grant.GetPermission())
<< std::endl;
}
}
else
{
std::cout << "GetBucketAcl error: " <<
outcome.GetError().GetExceptionName() << " " <<
outcome.GetError().GetMessage() << std::endl;
}
GetACLRequest request_get_acl = new GetACLRequest();
request_get_acl.BucketName = "<BUCKET-NAME>";
GetACLResponse response_get_acl = new GetACLResponse();
response_get_acl = client.GetACL(request_get_acl);
Console.WriteLine(response_get_acl.AccessControlList);
foreach (S3Grant s3grant in response_get_acl.AccessControlList.Grants)
{
Console.WriteLine("{0}\t{1}\t{2}", s3grant.Grantee.DisplayName, s3grant.Grantee.URI,s3grant.Permission);
}
acl = bucket.acl
acl.owner
# => #<struct Aws::S3::Types::Owner display_name="demo", id="6fbdcf78106c4443a99b86d6a25a3260">
acl.grants
# => [#<struct Aws::S3::Types::Grant grantee=#<struct Aws::S3::Types::Grantee display_name="demo", email_address=nil, id="6fbdcf78106c4443a99b86d6a25a3260", type="CanonicalUser", uri=nil>, permission="FULL_CONTROL">]
<?php
$resp = $s3Client->getBucketAcl([
'Bucket' => '<BUCKET-NAME>'
]);
var params = {
Bucket: "<BUCKET-NAME>", /* required */
};
s3.getBucketAcl(params, function(err, data) {
if (err) console.log(err, err.stack); // an error occurred
else console.log(data); // successful response
});
const s3 = new AWS.S3()
s3.getBucketAcl({ Bucket: 'bucket-01' }, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
acl, _ := Client.GetBucketAcl(&s3.GetBucketAclInput{
Bucket: aws.String("bucket"),
})
Set
bucket.set_acl('<CANNED_ACL>')
bucket = s3.Bucket('bucket-1')
bucket.Acl().put(ACL='<CANNED_ACL>')
// Canned ACLs only support the values listed in the parameter description
import com.amazonaws.services.s3.model.CannedAccessControlList;
client.setBucketAcl("<BUCKET-NAME>", CannedAccessControlList.<CANNED_ACL>);
// Callback
typedef struct bucket_info {
char *ownerId;
char *ownerDisplayName;
char *bucketName;
int64_t creationDate;
} bucket_info;
static S3Status listServiceCallback(const char *ownerId,
const char *ownerDisplayName,
const char *bucketName,
int64_t creationDate, void *callbackData) {
bucket_info* bucket = callbackData;
bucket->bucketName = strdup(bucketName);
bucket->ownerDisplayName = strdup(ownerDisplayName);
bucket->ownerId = strdup(ownerId);
bucket->creationDate = creationDate;
return S3StatusOK;
}
// main
S3AclGrant grants[] = {
{
S3GranteeTypeAllUsers,
{
{}
},
S3PermissionRead
}
};
S3BucketContext bucketContext = {
"hn.ss.bfcplatform.vn",
"<BUCKET-NAME>",
S3ProtocolHTTPS,
S3UriStylePath,
"<ACCESS_KEY_ID>",
"<SECRET_KEY_ID>"
};
bucket_info bucket;
memset(&bucket, 0, sizeof (bucket));
S3ListServiceHandler listServiceHandler = {
responseHandler,
&listServiceCallback
};
S3_list_service(S3ProtocolHTTPS, "<ACCESS_KEY_ID>", "<SECRET_KEY_ID>",
NULL, NULL, NULL, &listServiceHandler, &bucket);
S3_set_acl(&bucketContext, NULL, bucket.ownerId, bucket.ownerDisplayName,
1, grants, NULL, &responseHandler, NULL);
if (bucket.bucketName) {
free(bucket.bucketName);
}
if (bucket.ownerDisplayName) {
free(bucket.ownerDisplayName);
}
if (bucket.ownerId) {
free(bucket.ownerId);
}
// Include
#include <aws/s3/model/PutBucketAclRequest.h>
#include <aws/s3/model/Permission.h>
#include <aws/s3/model/Grant.h>
Aws::S3::Model::PutBucketAclRequest put_request;
put_request.WithBucket("<BUCKET-NAME>");
put_request.SetACL(Aws::S3::Model::BucketCannedACL::<CANNED_ACL>);
auto outcome = s3_client.PutBucketAcl(put_request);
if (outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "PutBucketAcl error: " <<
outcome.GetError().GetExceptionName() << " " <<
outcome.GetError().GetMessage() << std::endl;
}
PutACLRequest request_put_acl = new PutACLRequest();
request_put_acl.BucketName = "<BUCKET-NAME>";
request_put_acl.CannedACL = S3CannedACL.<CANNED_ACL>;
PutACLResponse response_put_acl = client.PutACL(request_put_acl);
if (response_put_acl.HttpStatusCode == System.Net.HttpStatusCode.OK)
{
Console.WriteLine("Put bucket ACL successfully");
}
bucket.acl.put(acl: '<CANNED_ACL>')
<?php
// Sets the permissions on a bucket using access control lists (ACL).
$params = [
'ACL' => '<CANNED_ACL>',
'Bucket' => '<BUCKET-NAME>',
];
$resp = $s3Client->putBucketAcl($params);
var params = {
Bucket: '<BUCKET-NAME>',
ACL: '<CANNED_ACL>'
}
s3.putBucketAcl(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
const s3 = new AWS.S3()
const params = {
Bucket: 'bucket-01',
ACL: '<CANNED_ACL>'
}
s3.putBucketAcl(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
sess := session.Must(session.NewSessionWithOptions(session.Options{
SharedConfigState: session.SharedConfigEnable,
Config: *s3Config,
}))
svc := s3.New(sess)
result, _ := svc.GetBucketAcl(&s3.GetBucketAclInput{Bucket: aws.String("bucket-01")})
userType := "AmazonCustomerByEmail"
owner := *result.Owner.DisplayName
ownerId := *result.Owner.ID
grants := result.Grants
var newGrantee = s3.Grantee{EmailAddress: aws.String("email"), Type: &userType}
var newGrant = s3.Grant{Grantee: &newGrantee, Permission: aws.String("<CANNED_ACL>")}
grants = append(grants, &newGrant)
params := &s3.PutBucketAclInput{
Bucket: aws.String("bucket-01"),
AccessControlPolicy: &s3.AccessControlPolicy{
Grants: grants,
Owner: &s3.Owner{
DisplayName: &owner,
ID: &ownerId,
},
},
}
svc.PutBucketAcl(params)
Parameters
<CANNED_ACL>
is one of the following values:
private
Owner gets full control (FULL_CONTROL); no one else has access (default)
public-read
Owner gets full control (FULL_CONTROL); all other users get read access (READ)
public-read-write
Owner gets full control (FULL_CONTROL); all other users get read (READ) and write (WRITE) access
authenticated-read
Owner gets full control (FULL_CONTROL); all other authenticated users get read access (READ)
Managing Bucket Policies
Bucket Policy: allows fine-grained permissions to be granted to specific users within the BizflyCloud Simple Storage system.
This feature is used to share resources between different accounts.
Combined with the Payment Requester feature, it can serve as per-user access control and per-project (or per-user) cost accounting.
Get
bucket.get_policy()
bucket_policy = s3client.get_bucket_policy(Bucket='bucket-1')
print(bucket_policy)
client.getBucketPolicy("bucket-01").getPolicyText();
// Not supported
// Include
#include <aws/s3/model/GetBucketPolicyRequest.h>
// Code
Aws::S3::Model::GetBucketPolicyRequest request;
request.SetBucket("<BUCKET-NAME>");
auto outcome = s3_client.GetBucketPolicy(request);
if (outcome.IsSuccess())
{
Aws::StringStream policyStream;
Aws::String line;
while (outcome.GetResult().GetPolicy())
{
outcome.GetResult().GetPolicy() >> line;
policyStream << line;
}
std::cout << "Policy: " << std::endl << policyStream.str() << std::endl;
}
else
{
std::cout << "GetBucketPolicy error: " <<
outcome.GetError().GetExceptionName() << std::endl <<
outcome.GetError().GetMessage() << std::endl;
}
GetBucketPolicyResponse response_get_bucket_policy = client.GetBucketPolicy(bucket_name);
Console.WriteLine(response_get_bucket_policy.Policy);
policy_string = bucket.policy.data.policy.read
JSON.load(policy_string)
<?php
$resp = $s3Client->getBucketPolicy([
'Bucket' => '<BUCKET-NAME>'
]);
echo $resp->get('Policy');
var params = {
Bucket: "<BUCKET-NAME>"
};
s3.getBucketPolicy(params, function(err, data) {
if (err) console.log(err, err.stack); // an error occurred
else console.log(data); // successful response
});
const s3 = new AWS.S3()
const params = {
Bucket: 'bucket-01' /* required */
}
s3.getBucketPolicy(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
policy, _ := Client.GetBucketPolicy(&s3.GetBucketPolicyInput{
Bucket: aws.String("bucket-01"),
})
Set
policy_definition = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Principal': {
'AWS': ['arn:aws:iam:::user/<USER-UID>']
},
'Action': [
's3:<ACTION-NAME>',
's3:<ACTION-NAME>',
# ...
],
'Resource': [
'arn:aws:s3:::<BUCKET-NAME>',
'arn:aws:s3:::<BUCKET-NAME>/*'
]
}
]
}
bucket.set_policy(json.dumps(policy_definition))
bucket_name = '<BUCKET-NAME>'
bucket_policy = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Principal': {
'AWS': ['arn:aws:iam:::user/<USER-UID>']
},
'Action': [
's3:<ACTION-NAME>',
],
'Resource': [
'arn:aws:s3:::<BUCKET-NAME>',
'arn:aws:s3:::<BUCKET-NAME>/*'
]
}
]
}
bucket_policy = json.dumps(bucket_policy)
s3client.put_bucket_policy(Bucket=bucket_name, Policy=bucket_policy)
String s3_bucket_policy = "{\n"
+ " \"Version\": \"2012-10-17\",\n"
+ " \"Statement\": [{\n"
+ " \"Effect\": \"Allow\",\n"
+ " \"Principal\": {\"AWS\": [\"arn:aws:iam:::user/USER-UID\"]},\n"
+ " \"Action\": [\n"
+ " \"s3:<ACTION-NAME>\",\n"
+ " \"s3:<ACTION-NAME>\",\n"
+ " \"s3:<ACTION-NAME>\",\n"
+ " \"s3:<ACTION-NAME>\",\n"
+ " \"s3:<ACTION-NAME>\"\n"
+ " ],\n"
+ " \"Resource\": [\n"
+ " \"arn:aws:s3::*:<BUCKET-NAME>\",\n"
+ " \"arn:aws:s3::*:<BUCKET-NAME>/*\",\n"
+ " ]\n"
+ " }]\n"
+ " }";
client.setBucketPolicy("bucket-01", s3_bucket_policy);
// Not supported
// Include
#include <aws/s3/model/PutBucketPolicyRequest.h>
// Code
const Aws::String policy_string =
"{ \n"
" \"Version\":\"2012-10-17\", \n"
" \"Statement\":[ \n"
" { \n"
" \"Effect\":\"Allow\", \n"
" \"Principal\":{ \n"
" \"AWS\":[ \n"
" \"arn:aws:iam:::user/<USER-UID>\" \n"
" ] \n"
" }, \n"
" \"Action\":[ \n"
" \"s3:<ACTION-NAME>\", \n"
" \"s3:<ACTION-NAME>\", \n"
" \"s3:<ACTION-NAME>\" \n"
" ], \n"
" \"Resource\":[ \n"
" \"arn:aws:s3:::<BUCKET-NAME>\" \n"
" \"arn:aws:s3:::<BUCKET-NAME>/*\" \n"
" ] \n"
" } \n"
" ] \n"
"} \n";
auto request_body = Aws::MakeShared<Aws::StringStream>("");
*request_body << policy_string;
Aws::S3::Model::PutBucketPolicyRequest request;
request.SetBucket("new_bucket");
request.SetBody(request_body);
auto outcome = s3_client.PutBucketPolicy(request);
if (outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "PutBucketPolicy error: " <<
outcome.GetError().GetExceptionName() << std::endl <<
outcome.GetError().GetMessage() << std::endl;
}
string policy = @"{
""Version"": ""2012-10-17"",
""Statement"":[{
""Effect"":""Allow"",
""Principal"": {""AWS"":[""arn:aws:iam:::user/<USER-UID>""]},
""Action"":[""s3:<ACTION-NAME>"",""s3:<ACTION-NAME>"",""s3:<ACTION-NAME>"",""s3:<ACTION-NAME>""],
""Resource"":[""arn:aws:s3:::<BUCKET-NAME>"",""arn:aws:s3:::<BUCKET-NAME>/*""]
}]}";
PutBucketPolicyRequest request_put_bucket_policy = new PutBucketPolicyRequest();
request_put_bucket_policy.BucketName = "<BUCKET-NAME>";
request_put_bucket_policy.Policy = policy;
client.PutBucketPolicy(request_put_bucket_policy);
policy_definition = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Principal': {
'AWS': ['arn:aws:iam:::user/<USER-UID>']
},
'Action': [
's3:<ACTION-NAME>',
's3:<ACTION-NAME>',
# ...
],
'Resource': [
'arn:aws:s3:::<BUCKET-NAME>',
'arn:aws:s3:::<BUCKET-NAME>/*'
]
}
]
}
bucket = s3.bucket('<BUCKET-NAME>')
bucket.policy.put(policy: policy_definition.to_json)
<?php
$policies = '{
"Version":"2012-10-17",
"Statement":[
{
"Effect":"Allow",
"Principal":{
"AWS":[
"arn:aws:iam:::user/<USER-UID>"
]
},
"Action":[
"s3:<ACTION-NAME>",
"s3:<ACTION-NAME>",
"s3:<ACTION-NAME>"
],
"Resource":[
"arn:aws:s3:::<BUCKET-NAME>",
"arn:aws:s3:::<BUCKET-NAME>/*"
]
}
]
}';
$resp = $s3Client->putBucketPolicy([
'Bucket' => '<BUCKET-NAME>',
'Policy' => $policies,
]);
var policy_definition = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Principal': {
'AWS': ['arn:aws:iam:::user/<USER-UID>']
},
'Action': [
's3:<ACTION-NAME>',
's3:<ACTION-NAME>'
],
'Resource': [
'arn:aws:s3:::<BUCKET-NAME>',
'arn:aws:s3:::<BUCKET-NAME>/*'
]
}
]
}
var params = {
Bucket: "<BUCKET-NAME>",
Policy: JSON.stringify(policy_definition)
};
s3.putBucketPolicy(params, function(err, data) {
if (err) console.log(err, err.stack); // an error occurred
else console.log(data); // successful response
});
const s3 = new AWS.S3()
const policy_definition = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Principal': {
'AWS': ['arn:aws:iam:::user/<USER-UID>']
},
'Action': [
's3:<ACTION-NAME>',
's3:<ACTION-NAME>'
],
'Resource': [
'arn:aws:s3:::<BUCKET-NAME>',
'arn:aws:s3:::<BUCKET-NAME>/*'
]
}
]
}
const params = {
Bucket: '<BUCKET-NAME>',
Policy: JSON.stringify(policy_definition)
}
s3.putBucketPolicy(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
policy := map[string]interface{}{
"Version": "2012-10-17",
"Statement": []map[string]interface{}{
{
"Sid": "AddPerm",
"Effect": "Allow",
"Principal": "*",
"Action": []string{
"s3:GetObject",
},
"Resource": []string{
fmt.Sprintf("arn:aws:s3:::%s/<BUCKET-NAME>", "bucket-01"),
fmt.Sprintf("arn:aws:s3:::%s/<BUCKET-NAME>/*", "bucket-01")
},
},
},
}
out, _ := json.Marshal(policy)
Client.PutBucketPolicy(&s3.PutBucketPolicyInput{
Bucket: aws.String("bucket-01"),
Policy: aws.String(string(out)),
})
Parameters
Example policy
{
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Principal': {
'AWS': ['arn:aws:iam:::user/<USER-UID>']
},
'Action': [
's3:<ACTION-NAME>',
],
'Resource': [
'arn:aws:s3:::<BUCKET-NAME>',
'arn:aws:s3:::<BUCKET-NAME>/*'
]
},
{
'Effect': 'Deny',
'Principal': {
'AWS': ['arn:aws:iam:::user/<USER-UID>']
},
'Action': [
's3:<ACTION-NAME>',
's3:<ACTION-NAME>'
],
'Resource': [
'arn:aws:s3:::<BUCKET-NAME>',
'arn:aws:s3:::<BUCKET-NAME>/*'
]
}
]
}
The policy above has the following parts:
'Version': '2012-10-17'
: this specifies the policy version and cannot be changed.
Statement
: the list of access rules for the bucket's resources, for example allowing access to bucket A while denying access to bucket B; a single Statement can contain multiple rules.
Each Statement element has the following sub-elements:
Effect
: one of the two values Allow or Deny, which decides whether access to the resource is granted.
Principal
: the identity of the user being granted (or denied) the permission in Effect, where <USER-UID> is the ID of that user.
Action
: the list of operations on the bucket or its files that are allowed or denied for the user named in Principal.
- These actions can be one of, or any combination of, the following:
AbortMultipartUpload, CreateBucket, DeleteBucketPolicy, DeleteBucket, DeleteBucketWebsite, DeleteObject, DeleteObjectVersion, DeleteReplicationConfiguration, GetAccelerateConfiguration, GetBucketAcl, GetBucketCORS, GetBucketLocation, GetBucketLogging, GetBucketNotification, GetBucketPolicy, GetBucketRequestPayment, GetBucketTagging, GetBucketVersioning, GetBucketWebsite, GetLifecycleConfiguration, GetObjectAcl, GetObject, GetObjectTorrent, GetObjectVersionAcl, GetObjectVersion, GetObjectVersionTorrent, GetReplicationConfiguration, ListAllMyBuckets, ListBucketMultiPartUploads, ListBucket, ListBucketVersions, ListMultipartUploadParts, PutAccelerateConfiguration, PutBucketAcl, PutBucketCORS, PutBucketLogging, PutBucketNotification, PutBucketPolicy, PutBucketRequestPayment, PutBucketTagging, PutBucketVersioning, PutBucketWebsite, PutLifecycleConfiguration, PutObjectAcl, PutObject, PutObjectVersionAcl, PutReplicationConfiguration, RestoreObject
Resource
: the resource the rule applies to.
arn:aws:s3:::<BUCKET-NAME>
: this resource is the bucket itself and is usually combined with bucket-level actions such as ListBucket, PutBucketAcl, PutBucketCORS, ...
arn:aws:s3:::<BUCKET-NAME>/*
: this resource is every file in the bucket. It can also target a specific prefix of the bucket, e.g. arn:aws:s3:::<BUCKET-NAME>/prefix1/*, in which case the resource is the objects whose keys start with prefix1. This resource is usually combined with actions such as GetObject, PutObject, PutObjectAcl, GetObjectAcl.
Some concrete bucket policy examples
- A policy that allows user
a7d1e56edcac40d0896d2b97f414afc5
to download files from bucket bucket-dev
{
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Principal': {
'AWS': ['arn:aws:iam:::user/a7d1e56edcac40d0896d2b97f414afc5']
},
'Action': [
's3:GetObject',
's3:ListBucket'
],
'Resource': [
'arn:aws:s3:::bucket-dev',
'arn:aws:s3:::bucket-dev/*'
]
}
]
}
- A policy that allows user
a7d1e56edcac40d0896d2b97f414afc5
to upload files to bucket bucket-dev
, including the operations related to multi-part upload
{
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Principal': {
'AWS': ['arn:aws:iam:::user/a7d1e56edcac40d0896d2b97f414afc5']
},
'Action': [
's3:PutObjectAcl',
's3:PutObject',
's3:PutObjectVersionAcl',
's3:RestoreObject',
's3:GetObjectAcl',
's3:GetObjectTorrent',
's3:GetObjectVersionAcl',
's3:GetObjectVersion',
's3:ListBucketMultipartUploads',
's3:ListMultipartUploadParts',
's3:ListBucket',
's3:AbortMultipartUpload'
],
'Resource': [
'arn:aws:s3:::bucket-dev',
'arn:aws:s3:::bucket-dev/*'
]
}
]
}
Delete
bucket.delete_policy()
s3client.delete_bucket_policy(Bucket='<BUCKET-NAME>')
client.deleteBucketPolicy("bucket-01");
// Not supported
// Include
#include <aws/s3/model/DeleteBucketPolicyRequest.h>
// Code
Aws::S3::Model::DeleteBucketPolicyRequest request;
request.SetBucket("<BUCKET-NAME>");
auto outcome = s3_client.DeleteBucketPolicy(request);
if (outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "DeleteBucketPolicy error: " <<
outcome.GetError().GetExceptionName() << std::endl <<
outcome.GetError().GetMessage() << std::endl;
}
// Working on it...
bucket.policy.delete
<?php
$resp = $s3Client->deleteBucketPolicy([
'Bucket' => '<BUCKET-NAME>'
]);
var params = { Bucket: '<BUCKET-NAME>' }
s3.deleteBucketPolicy(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
const s3 = new AWS.S3()
s3.deleteBucketPolicy({ Bucket: '<BUCKET-NAME>' }, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
Client.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
Bucket: aws.String("bucket-01"),
})
Managing Bucket CORS
Bucket CORS - Cross-Origin Resource Sharing: a mechanism that lets websites on different domains access the same bucket and the resources inside it. See also CORS - Wikipedia
Get
rules = bucket.get_cors()
for rule in rules:
    print(rule.__dict__)
bucket_cors = s3client.get_bucket_cors(Bucket='bucket-1')
print(bucket_cors)
client.getBucketCrossOriginConfiguration("<BUCKET-NAME>");
// Not supported
// Include
#include <aws/s3/model/GetBucketCorsRequest.h>
// Code
Aws::S3::Model::GetBucketCorsRequest request;
request.SetBucket("<BUCKET-NAME>");
auto outcome = s3_client.GetBucketCors(request);
if (outcome.IsSuccess())
{
Aws::Vector<Aws::S3::Model::CORSRule> cors_list = outcome.GetResult().GetCORSRules();
for (auto const &cors : cors_list)
{
for (auto const &allow_org : cors.GetAllowedOrigins())
std::cout << allow_org << " ";
std::cout << "\t";
for (auto const &allow_method : cors.GetAllowedMethods())
std::cout << allow_method << " ";
std::cout << "\t";
for (auto const &allow_header : cors.GetAllowedHeaders())
std::cout << allow_header << " ";
std::cout << "\t";
for (auto const &expose_header : cors.GetExposeHeaders())
std::cout << expose_header << " ";
std::cout << "\t";
std::cout << cors.GetMaxAgeSeconds();
std::cout << std::endl;
}
}
else
{
std::cout << "GetBucketCors error: " <<
outcome.GetError().GetExceptionName() << std::endl <<
outcome.GetError().GetMessage() << std::endl;
}
client.GetCORSConfiguration("<BUCKET-NAME>");
bucket.cors.cors_rules
<?php
$result = $s3Client->getBucketCors([
'Bucket' => '<BUCKET-NAME>'
]);
s3.getBucketCors({ Bucket: "<BUCKET-NAME>" }, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
const s3 = new AWS.S3()
s3.getBucketCors({ Bucket: '<BUCKET-NAME>' }, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
cors, _ := Client.GetBucketCors(&s3.GetBucketCorsInput{
Bucket: aws.String("bucket-01"),
})
Example: set the bucket CORS configuration for the site
http://mysite.com
, with the GET method, allowing any header to be sent and received, and with the preflight response cache time set to 300 seconds.
from boto.s3.cors import CORSConfiguration
cors_definition = CORSConfiguration()
cors_definition.add_rule(['GET'], 'http://mysite.com', allowed_header=['*'], expose_header=['*'], max_age_seconds=300)
bucket.set_cors(cors_definition)
bucket = s3.Bucket('<BUCKET-NAME>')
bucket_cors = bucket.Cors()
config = {
'CORSRules': [
{
'AllowedMethods': ['GET'],
'AllowedOrigins': ['http://mysite.com'],
'AllowedHeaders': ['*'],
'MaxAgeSeconds': 300
}
]
}
bucket_cors.put(CORSConfiguration=config)
BucketCrossOriginConfiguration bucket_cors = new BucketCrossOriginConfiguration();
List<CORSRule> list_rules = new ArrayList<>();
CORSRule cors1 = new CORSRule();
cors1.setAllowedOrigins("http://mysite.com");
cors1.setAllowedMethods(CORSRule.AllowedMethods.GET);
cors1.setExposedHeaders("*");
cors1.setMaxAgeSeconds(300);
list_rules.add(cors1);
bucket_cors.setRules(list_rules);
client.setBucketCrossOriginConfiguration("bucket-01", bucket_cors);
// Not supported
// Include
#include <aws/s3/model/PutBucketCorsRequest.h>
// Code
Aws::S3::Model::CORSRule cors;
cors.AddAllowedOrigins("http://mysite.com")
.AddAllowedMethods("GET")
.AddAllowedHeaders("*")
.SetMaxAgeSeconds(300);
Aws::S3::Model::CORSConfiguration cors_config;
cors_config.AddCORSRules(cors);
Aws::S3::Model::PutBucketCorsRequest request;
request.SetBucket("<BUCKET-NAME>");
request.SetCORSConfiguration(cors_config);
auto outcome = s3_client.PutBucketCors(request);
if (outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "PutBucketCors error: " <<
outcome.GetError().GetExceptionName() << std::endl <<
outcome.GetError().GetMessage() << std::endl;
}
PutCORSConfigurationRequest request_put_cors = new PutCORSConfigurationRequest();
request_put_cors.BucketName = bucket_name;
CORSRule cors_rule = new CORSRule();
cors_rule.AllowedHeaders = new List<string> { "*" };
cors_rule.AllowedMethods = new List<string> { "GET" };
cors_rule.AllowedOrigins = new List<string> { "http://mysite.com" };
cors_rule.MaxAgeSeconds = 300;
CORSConfiguration cors_config = new CORSConfiguration();
cors_config.Rules = new List<CORSRule> {cors_rule};
request_put_cors.Configuration = cors_config;
PutCORSConfigurationResponse response_put_cors = new PutCORSConfigurationResponse();
response_put_cors = client.PutCORSConfiguration(request_put_cors);
Console.WriteLine("Put bucket CORS status " + response_put_cors.HttpStatusCode);
bucket.cors.put({
cors_configuration: { # Required
cors_rules: [ # Required
{
allowed_headers: ["*"],
allowed_methods: ["GET"], # Required
allowed_origins: ["http://mysite.com"], # Required
expose_headers: ["*"],
max_age_seconds: 300
}
]
}
})
<?php
$result = $s3Client->putBucketCors([
'Bucket' => '<BUCKET-NAME>', // REQUIRED
'CORSConfiguration' => [ // REQUIRED
'CORSRules' => [ // REQUIRED
[
'AllowedHeaders' => ['*'],
'AllowedMethods' => ['GET'], // REQUIRED
'AllowedOrigins' => ['http://mysite.com'], // REQUIRED
'ExposeHeaders' => ['*'],
'MaxAgeSeconds' => 300
],
],
]
]);
var params = {
Bucket: "<BUCKET-NAME>",
CORSConfiguration: {
CORSRules: [{
AllowedMethods: ['GET'],
AllowedOrigins: ['http://mysite.com'],
AllowedHeaders: ['*'],
ExposeHeaders: ['*'],
MaxAgeSeconds: 300
}]
}
}
s3.putBucketCors(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
const s3 = new AWS.S3()
const params = {
Bucket: "<BUCKET-NAME>",
CORSConfiguration: {
CORSRules: [{
AllowedMethods: ['GET'],
AllowedOrigins: ['http://mysite.com'],
AllowedHeaders: ['*'],
ExposeHeaders: ['*'],
MaxAgeSeconds: 300
}]
}
}
s3.putBucketCors(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
Client.PutBucketCors(&s3.PutBucketCorsInput{
Bucket: aws.String("bucket-01"),
CORSConfiguration: &s3.CORSConfiguration{
CORSRules: []*s3.CORSRule{&s3.CORSRule{
AllowedHeaders: aws.StringSlice([]string{"*"}),
AllowedOrigins: aws.StringSlice([]string{"http://mysite.com"}),
MaxAgeSeconds: aws.Int64(300),
AllowedMethods: aws.StringSlice([]string{"GET"}),
}},
},
})
Delete
bucket.delete_cors()
bucket = s3.Bucket('<BUCKET-NAME>')
bucket_cors = bucket.Cors()
bucket_cors.delete()
client.deleteBucketCrossOriginConfiguration("bucket-01");
// Not supported
// Include
#include <aws/s3/model/DeleteBucketCorsRequest.h>
// Code
Aws::S3::Model::DeleteBucketCorsRequest request;
request.SetBucket("<BUCKET-NAME>");
auto outcome = s3_client.DeleteBucketCors(request);
if (outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "DeleteBucketCors error: " <<
outcome.GetError().GetExceptionName() << std::endl <<
outcome.GetError().GetMessage() << std::endl;
}
client.DeleteCORSConfiguration("bucket-01");
bucket.cors.delete
<?php
$result = $s3Client->deleteBucketCors([
'Bucket' => '<BUCKET-NAME>'
]);
s3.deleteBucketCors({ Bucket: "<BUCKET-NAME>" }, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
const s3 = new AWS.S3()
s3.deleteBucketCors({ Bucket: "<BUCKET-NAME>" }, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
Client.DeleteBucketCors(&s3.DeleteBucketCorsInput{
Bucket: aws.String("bucket-01"),
})
Managing Bucket Versioning
Bucket Versioning: when enabled, VCCloud Simple Storage automatically keeps a stored version every time a file is overwritten or deleted, so users can restore a file to an earlier state (a small listing sketch is shown right below).
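As a minimal sketch, assuming versioning is already enabled and using the boto3 client (s3client) from the connection section, the stored versions of a key can be listed and an older version downloaded again; the bucket, key, and file names here are only examples.
# List the stored versions of a single key (sketch; names are examples).
versions = s3client.list_object_versions(Bucket='bucket-01', Prefix='gach/aXana-WC-red.png')
for v in versions.get('Versions', []):
    print(v['Key'], v['VersionId'], v['IsLatest'], v['LastModified'])

# An earlier version can be retrieved by passing its VersionId, e.g.:
# s3client.download_file('bucket-01', 'gach/aXana-WC-red.png', 'restored.png',
#                        ExtraArgs={'VersionId': '<VERSION-ID>'})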
Current status
bucket.get_versioning_status()
# {'Versioning': 'Enabled'}
bucket_versioning = s3.BucketVersioning('bucket-1')
print(bucket_versioning.status)
client.getBucketVersioningConfiguration("<BUCKET-NAME>").getStatus();
// Not supported
// Include
#include <aws/s3/model/GetBucketVersioningRequest.h>
// Code
Aws::String GetVersioningString(const Aws::S3::Model::BucketVersioningStatus v)
{
switch (v)
{
case Aws::S3::Model::BucketVersioningStatus::NOT_SET:
return "NOT_SET";
case Aws::S3::Model::BucketVersioningStatus::Enabled:
return "Enabled";
case Aws::S3::Model::BucketVersioningStatus::Suspended:
return "Suspended";
default:
return "unknown!";
}
}
Aws::String GetMFADeleteString(const Aws::S3::Model::MFADeleteStatus m)
{
switch (m)
{
case Aws::S3::Model::MFADeleteStatus::NOT_SET:
return "NOT_SET";
case Aws::S3::Model::MFADeleteStatus::Enabled:
return "Enabled";
case Aws::S3::Model::MFADeleteStatus::Disabled:
return "Disabled";
default:
return "unknown!";
}
}
Aws::S3::Model::GetBucketVersioningRequest request;
request.SetBucket("<BUCKET-NAME>");
auto outcome = s3_client.GetBucketVersioning(request);
if (outcome.IsSuccess())
{
std::cout << "Versioning: " << GetVersioningString(outcome.GetResult().GetStatus()) << std::endl;
std::cout << "MFADelete: " << GetMFADeleteString(outcome.GetResult().GetMFADelete()) << std::endl;
}
else
{
std::cout << "GetBucketVersioning error: " <<
outcome.GetError().GetExceptionName() << std::endl <<
outcome.GetError().GetMessage() << std::endl;
}
GetBucketVersioningResponse response_bucket_versioning = client.GetBucketVersioning("<BUCKET-NAME>");
bucket.versioning.status
$result = $s3Client->getBucketVersioning([
'Bucket' => '<BUCKET-NAME>'
]);
s3.getBucketVersioning({ Bucket: "<BUCKET-NAME>" }, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
const s3 = new AWS.S3()
s3.getBucketVersioning({ Bucket: '<BUCKET-NAME>' }, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
Client.GetBucketVersioning(&s3.GetBucketVersioningInput{
Bucket: aws.String("bucket-01"),
})
Enabling/disabling bucket versioning
bucket.configure_versioning('<STATUS>')
bucket_versioning = s3.BucketVersioning('bucket-1')
bucket_versioning.enable()
bucket_versioning.suspend()
BucketVersioningConfiguration bucket_version_config = new BucketVersioningConfiguration();
bucket_version_config.setStatus(BucketVersioningConfiguration.<STATUS>);
SetBucketVersioningConfigurationRequest bucket_versioning_request = new SetBucketVersioningConfigurationRequest(bucketName, bucket_version_config);
client.setBucketVersioningConfiguration(bucket_versioning_request);
// Not supported
// Include
#include <aws/s3/model/PutBucketVersioningRequest.h>
// Code
Aws::S3::Model::PutBucketVersioningRequest request;
request.SetBucket("<BUCKET-NAME>");
Aws::S3::Model::VersioningConfiguration versioning_config;
versioning_config.SetStatus(Aws::S3::Model::BucketVersioningStatus::<Enabled | Suspended>);
request.SetVersioningConfiguration(versioning_config);
auto outcome = s3_client.PutBucketVersioning(request);
if (outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "PutBucketVersioning error: " <<
outcome.GetError().GetExceptionName() << std::endl <<
outcome.GetError().GetMessage() << std::endl;
}
PutBucketVersioningRequest request_put_bucket_versioning = new PutBucketVersioningRequest();
request_put_bucket_versioning.BucketName = "<BUCKET-NAME>";
S3BucketVersioningConfig version_config = new S3BucketVersioningConfig();
version_config.Status = VersionStatus.<STATUS>;
request_put_bucket_versioning.VersioningConfig = version_config;
PutBucketVersioningResponse response_put_bucket_versioning = new PutBucketVersioningResponse();
response_put_bucket_versioning = client.PutBucketVersioning(request_put_bucket_versioning);
Console.WriteLine("Put bucker versioning status " + response_put_bucket_versioning.HttpStatusCode);
bucket.versioning.put({
versioning_configuration: {
status: 'Enabled', # One of two values: Enabled, Suspended
}
})
<?php
$result = $s3Client->putBucketVersioning([
'Bucket' => '<BUCKET-NAME>',
'VersioningConfiguration' => [
'Status' => 'Enabled|Suspended',
],
]);
var versioningConfiguration = {
Status: 'Enabled' // 'Suspended', 'Enabled'
}
s3.putBucketVersioning({ Bucket: "<BUCKET-NAME>", VersioningConfiguration: versioningConfiguration }, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
const s3 = new AWS.S3()
const versioningConfiguration = {
Status: 'Suspended' || 'Enabled'
}
s3.putBucketVersioning({ Bucket: '<BUCKET-NAME>', VersioningConfiguration: versioningConfiguration }, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
Client.PutBucketVersioning(&s3.PutBucketVersioningInput{
Bucket: aws.String("bucket-01"),
VersioningConfiguration: &s3.VersioningConfiguration{
Status: aws.String("<STATUS>"),
},
})
Parameters
<STATUS>
is Enabled or Suspended, corresponding to turning versioning on or off.
Managing Bucket Website
Bucket Website: when enabled, VCCloud Simple Storage turns the bucket into a static hosting endpoint for static files (HTML, CSS, JS, images, ...).
This feature is well suited to landing pages and to frontend code that generates HTML, CSS, and JS.
Checking the current configuration
bucket.get_website_configuration()
s3client.get_bucket_website(Bucket='bucket-1')
client.getBucketWebsiteConfiguration("bucket-01");
# Working on it...
// Include
#include <aws/s3/model/GetBucketWebsiteRequest.h>
// Code
Aws::S3::Model::GetBucketWebsiteRequest request;
request.SetBucket("<BUCKET-NAME>");
auto outcome = s3_client.GetBucketWebsite(request);
if (outcome.IsSuccess())
{
std::cout << " Index page: "
<< outcome.GetResult().GetIndexDocument().GetSuffix()
<< std::endl
<< " Error page: "
<< outcome.GetResult().GetErrorDocument().GetKey()
<< std::endl;
}
else
{
std::cout << "GetBucketWebsite error: " <<
outcome.GetError().GetExceptionName() << std::endl <<
outcome.GetError().GetMessage() << std::endl;
}
GetBucketWebsiteResponse response_get_bucket_website = client.GetBucketWebsite("<BUCKET-NAME>");
website_config = bucket.website
website_config.index_document
website_config.error_document
<?php
$resp = $s3Client->getBucketWebsite([
'Bucket' => '<BUCKET-NAME>'
]);
s3.getBucketWebsite({ Bucket: '<BUCKET-NAME>' }, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
const s3 = new AWS.S3()
s3.getBucketWebsite({ Bucket: '<BUCKET-NAME>' }, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
Client.GetBucketWebsite(&s3.GetBucketWebsiteInput{
Bucket: aws.String("bucket-01"),
})
Updating the configuration
bucket.configure_website(suffix='<SUFFIX>', error_key='<ERROR-KEY>')
website_configuration = {
'ErrorDocument': {'Key': '<ERROR-KEY>'},
'IndexDocument': {'Suffix': '<SUFFIX>'},
}
s3client.put_bucket_website(
Bucket='bucket-1',
WebsiteConfiguration=website_configuration
)
BucketWebsiteConfiguration bucket_website = new BucketWebsiteConfiguration();
bucket_website.setIndexDocumentSuffix("<SUFFIX>");
bucket_website.setErrorDocument("<ERROR-KEY>");
client.setBucketWebsiteConfiguration("<BUCKET-NAME>", bucket_website);
# Working on it...
// Include
#include <aws/s3/model/IndexDocument.h>
#include <aws/s3/model/ErrorDocument.h>
#include <aws/s3/model/WebsiteConfiguration.h>
#include <aws/s3/model/PutBucketWebsiteRequest.h>
// Code
Aws::S3::Model::IndexDocument index_doc;
index_doc.SetSuffix("<SUFFIX>");
Aws::S3::Model::ErrorDocument error_doc;
error_doc.SetKey("<ERROR-KEY>");
Aws::S3::Model::WebsiteConfiguration website_config;
website_config.SetIndexDocument(index_doc);
website_config.SetErrorDocument(error_doc);
Aws::S3::Model::PutBucketWebsiteRequest request;
request.SetBucket("<BUCKET-NAME>");
request.SetWebsiteConfiguration(website_config);
auto outcome = s3_client.PutBucketWebsite(request);
if (outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "PutBucketWebsite error: " <<
outcome.GetError().GetExceptionName() << std::endl <<
outcome.GetError().GetMessage() << std::endl;
}
PutBucketWebsiteRequest request_bucket_website = new PutBucketWebsiteRequest();
request_bucket_website.BucketName = "<BUCKET-NAME>";
WebsiteConfiguration website_config = new WebsiteConfiguration();
website_config.IndexDocumentSuffix = "<SUFFIX>";
website_config.ErrorDocument = "<ERROR-KEY>";
request_bucket_website.WebsiteConfiguration = website_config;
client.PutBucketWebsite(request_bucket_website);
bucket.website.put({
website_configuration: { # Required
index_document: {
suffix: '<SUFFIX>' # Required
},
error_document: {
key: '<ERROR-KEY>' # Required
}
}
})
<?php
// Setting a Bucket Website Configuration
$params = [
'Bucket' => '<BUCKET-NAME>',
'WebsiteConfiguration' => [
'ErrorDocument' => [
'Key' => '<ERROR-KEY>',
],
'IndexDocument' => [
'Suffix' => '<SUFFIX>',
],
]
];
$resp = $s3Client->putBucketWebsite($params);
var params = {
Bucket: '<BUCKET-NAME>',
WebsiteConfiguration: {
ErrorDocument: {
Key: '<ERROR-KEY>' // example: 'error.html'
},
IndexDocument: {
Suffix: '<SUFFIX>' // example: 'index.html'
}
}
}
s3.putBucketWebsite(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
const s3 = new AWS.S3()
const params = {
Bucket: '<BUCKET-NAME>',
WebsiteConfiguration: {
ErrorDocument: {
Key: '<ERROR-KEY>' // example: 'error.html'
},
IndexDocument: {
Suffix: '<SUFFIX>' // example: 'index.html'
}
}
}
s3.putBucketWebsite(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
Client.PutBucketWebsite(&s3.PutBucketWebsiteInput{
Bucket: aws.String("bucket-01"),
WebsiteConfiguration: &s3.WebsiteConfiguration{
IndexDocument: &s3.IndexDocument{
Suffix: aws.String("<SUFFIX>"),
},
},
})
Delete
bucket.delete_website_configuration()
s3client.delete_bucket_website(Bucket='bucket-1')
// Working on it...
# Working on it...
// Include
#include <aws/s3/model/DeleteBucketWebsiteRequest.h>
// Code
Aws::S3::Model::DeleteBucketWebsiteRequest request;
request.SetBucket("<BUCKET-NAME>");
auto outcome = s3_client.DeleteBucketWebsite(request);
if (outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "DeleteBucketWebsite error: " <<
outcome.GetError().GetExceptionName() << std::endl <<
outcome.GetError().GetMessage() << std::endl;
}
// Working on it...
bucket.website.delete
<?php
try {
$resp = $s3Client->deleteBucketWebsite([
'Bucket' => '<BUCKET-NAME>'
]);
} catch (AwsException $e) {
// output error message if fails
echo $e->getMessage();
echo "\n";
}
var params = {
Bucket: "<BUCKET-NAME>"
};
s3.deleteBucketWebsite(params, function(err, data) {
if (err) console.log(err, err.stack); // an error occurred
else console.log(data); // successful response
});
const params = {
Bucket: "<BUCKET-NAME>"
};
s3.deleteBucketWebsite(params, function(err, data) {
if (err) console.log(err, err.stack); // an error occurred
else console.log(data); // successful response
});
// Working on it...
Managing the Bucket Lifecycle
Bucket Lifecycle is a feature that lets users automate the following operations:
Automatically delete files after a fixed period of time (for example 3 days or 1 week); this can be used to turn a bucket into a rolling backup
Automatically move objects to another storage class, either to archive them or to place them in a storage class with higher access requirements
Automatically abort incomplete multipart uploads, cleaning up objects whose upload failed partway through
Automatically delete object versions that are not the latest version (NoncurrentVersionExpiration) after n days
Combined, these features help users reclaim storage space that is no longer needed.
Check the current configuration
bucket.get_lifecycle_config()
s3client.get_bucket_lifecycle_configuration(Bucket='bucket-1')
List<BucketLifecycleConfiguration.Rule> list_rules = client.getBucketLifecycleConfiguration(bucketName).getRules();
# Working on it...
// Include
#include <aws/s3/model/GetBucketLifecycleConfigurationRequest.h>
// Code
Aws::String GetExpirationStatusString(const Aws::S3::Model::ExpirationStatus s)
{
switch (s)
{
case Aws::S3::Model::ExpirationStatus::NOT_SET:
return "NOT_SET";
case Aws::S3::Model::ExpirationStatus::Enabled:
return "Enabled";
case Aws::S3::Model::ExpirationStatus::Disabled:
return "Disabled";
default:
return "*unknown!*";
}
}
Aws::S3::Model::GetBucketLifecycleConfigurationRequest request;
request.SetBucket("<BUCKET-NAME>");
auto outcome = s3_client.GetBucketLifecycleConfiguration(request);
if (outcome.IsSuccess())
{
Aws::Vector<Aws::S3::Model::LifecycleRule> rules = outcome.GetResult().GetRules();
for (auto const &rule : rules)
{
std::cout << rule.GetFilter().GetPrefix() << " "
<< rule.GetExpiration().GetDays() << " "
<< rule.GetNoncurrentVersionExpiration().GetNoncurrentDays() << " "
<< GetExpirationStatusString(rule.GetStatus()) << std::endl;
}
}
else
{
std::cout << "GetBucketLifecycleConfiguration error: " <<
outcome.GetError().GetExceptionName() << std::endl <<
outcome.GetError().GetMessage() << std::endl;
}
GetLifecycleConfigurationResponse response_get_lifecycle_config = client.GetLifecycleConfiguration(bucket_name);
List<LifecycleRule> all_rules = response_get_lifecycle_config.Configuration.Rules;
bucket_lifecycle = bucket.lifecycle
# => #<Aws::S3::BucketLifecycle:0x007f8c3da44450 @bucket_name="bucket-01", @data=nil, @client=#<Aws::S3::Client>>
bucket_lifecycle.data
// Working on it...
s3.getBucketLifecycleConfiguration({ Bucket: '<BUCKET-NAME>' }, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(JSON.stringify(data)) // successful response
})
const s3 = new AWS.S3()
s3.getBucketLifecycleConfiguration({ Bucket: '<BUCKET-NAME>' }, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(JSON.stringify(data)) // successful response
})
Lifecycle, _ := Client.GetBucketLifecycle(&s3.GetBucketLifecycleInput{
Bucket: aws.String("bucket-01"),
})
Update the configuration
# boto2 currently does not support AbortIncompleteMultipartUpload or NoncurrentVersionExpiration
from boto.s3.lifecycle import Lifecycle, Rule, Expiration
expiration1 = Expiration(days=<EXPIRATION-IN-DAYS>)
rule1 = Rule(prefix='<PREFIX>', status='<STATUS>', expiration=expiration1)
lifecycle = Lifecycle()
lifecycle.append(rule1)
bucket.configure_lifecycle(lifecycle)
# True
bucket_lifecycle = s3.BucketLifecycle('bucket-1')
response = bucket_lifecycle.put(
LifecycleConfiguration={
'Rules': [
{
'Expiration': {
'Days': 2
},
'ID': '',
'Prefix': '',
'Status': 'Enabled',
'Transitions': [{
'Days': 1,
'StorageClass': 'GLACIER'
},],
'NoncurrentVersionExpiration': {
'NoncurrentDays': 1
},
'AbortIncompleteMultipartUpload': {
'DaysAfterInitiation': 1
}
}
]
}
)
BucketLifecycleConfiguration bucket_lifecycle_config = new BucketLifecycleConfiguration();
AbortIncompleteMultipartUpload abort = new AbortIncompleteMultipartUpload();
abort.setDaysAfterInitiation(<DAYS-AFTER-INITIATION>);
BucketLifecycleConfiguration.Rule rule = new BucketLifecycleConfiguration.Rule();
rule.setPrefix("<PREFIX>");
rule.setStatus("<STATUS>");
rule.setExpirationInDays(<EXPIRATION-IN-DAYS>);
rule.setNoncurrentVersionExpirationInDays(<NONCURRENT-VERSION-EXPIRATION-IN-DAYS>);
rule.setAbortIncompleteMultipartUpload(abort);
List<BucketLifecycleConfiguration.Rule> rules = new ArrayList<>();
rules.add(rule);
bucket_lifecycle_config.setRules(rules);
client.setBucketLifecycleConfiguration("<BUCKET-NAME>", bucket_lifecycle_config);
# Working on it...
// Include
#include <aws/s3/model/PutBucketLifecycleConfigurationRequest.h>
// Code
Aws::S3::Model::PutBucketLifecycleConfigurationRequest request;
request.SetBucket("<BUCKET-NAME>");
Aws::S3::Model::LifecycleRuleFilter rule_filter;
rule_filter.SetPrefix("<PREFIX>");
Aws::S3::Model::LifecycleExpiration expiration;
expiration.SetDays(<EXPIRATION-IN-DAYS>);
Aws::S3::Model::NoncurrentVersionExpiration nve;
nve.SetNoncurrentDays(<NONCURRENT-VERSION-EXPIRATION-IN-DAYS>);
Aws::S3::Model::AbortIncompleteMultipartUpload aimu;
aimu.SetDaysAfterInitiation(<DAYS-AFTER-INITIATION>);
Aws::S3::Model::LifecycleRule rule;
rule.WithFilter(rule_filter).WithExpiration(expiration)
.WithNoncurrentVersionExpiration(nve)
.WithAbortIncompleteMultipartUpload(aimu)
.SetStatus(Aws::S3::Model::ExpirationStatus::<STATUS>);
Aws::S3::Model::BucketLifecycleConfiguration lifecycle_config;
lifecycle_config.AddRules(rule);
request.SetLifecycleConfiguration(lifecycle_config);
auto outcome = s3_client.PutBucketLifecycleConfiguration(request);
if (outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "PutBucketLifecycleConfiguration error: " <<
outcome.GetError().GetExceptionName() << std::endl <<
outcome.GetError().GetMessage() << std::endl;
}
LifecycleRule lifecycle_rule = new LifecycleRule();
lifecycle_rule.Status = LifecycleRuleStatus.<STATUS>;
lifecycle_rule.Expiration = new LifecycleRuleExpiration() { Days = <EXPIRATION-IN-DAYS> };
lifecycle_rule.Id = "<RULE-NAME>";
lifecycle_rule.Prefix = "<PREFIX>";
lifecycle_rule.NoncurrentVersionExpiration = new LifecycleRuleNoncurrentVersionExpiration() { NoncurrentDays = <NONCURRENT-VERSION-EXPIRATION-IN-DAYS> };
lifecycle_rule.AbortIncompleteMultipartUpload = new LifecycleRuleAbortIncompleteMultipartUpload() { DaysAfterInitiation = <DAYS-AFTER-INITIATION> };
LifecycleConfiguration lifecycle_config = new LifecycleConfiguration();
lifecycle_config.Rules = new List<LifecycleRule>();
lifecycle_config.Rules.Add(lifecycle_rule);
PutLifecycleConfigurationRequest request_lifecycle = new PutLifecycleConfigurationRequest();
request_lifecycle.BucketName = bucket_name;
request_lifecycle.Configuration = lifecycle_config;
PutLifecycleConfigurationResponse response_put_lifecycle_config = client.PutLifecycleConfiguration(request_lifecycle);
bucket.lifecycle.put({
lifecycle_configuration: {
rules: [ # Required
{
expiration: {
days: <EXPIRATION-IN-DAYS>
},
prefix: '<PREFIX>', # Required
status: '<STATUS>', # Required, one of two values: Enabled, Disabled
noncurrent_version_expiration: {
noncurrent_days: <NONCURRENT-VERSION-EXPIRATION-IN-DAYS>
},
abort_incomplete_multipart_upload: {
days_after_initiation: <DAYS-AFTER-INITIATION>
}
}
]
}
})
// Working on it...
var params = {
Bucket: '<BUCKET-NAME>',
LifecycleConfiguration: {
Rules: [{
Filter: { Prefix: '<PREFIX>' },
Expiration: { Days: <EXPIRATION-IN-DAYS> },
ID: 'TestID',
Status: '<STATUS>',
NoncurrentVersionExpiration: { NoncurrentDays: <NONCURRENT-VERSION-EXPIRATION-IN-DAYS> },
AbortIncompleteMultipartUpload: { DaysAfterInitiation: <DAYS-AFTER-INITIATION>}
}]
}
}
s3.putBucketLifecycleConfiguration(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(JSON.stringify(data)) // successful response
})
const s3 = new AWS.S3()
const params = {
Bucket: '<BUCKET-NAME>',
LifecycleConfiguration: {
Rules: [{
Filter: { Prefix: '<PREFIX>' },
Expiration: { Days: <EXPIRATION-IN-DAYS> },
ID: 'TestID',
Status: '<STATUS>',
NoncurrentVersionExpiration: { NoncurrentDays: <NONCURRENT-VERSION-EXPIRATION-IN-DAYS> },
AbortIncompleteMultipartUpload: { DaysAfterInitiation: <DAYS-AFTER-INITIATION>}
}]
}
}
s3.putBucketLifecycleConfiguration(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(JSON.stringify(data)) // successful response
})
Client.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
Bucket: aws.String("<BUCKET-NAME>"),
LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
Rules: []*s3.LifecycleRule{
{
ID: aws.String("testid"),
Filter: &s3.LifecycleRuleFilter{
Prefix: aws.String("<PREFIX>"),
},
Expiration: &s3.LifecycleExpiration{
Days: aws.Int64(<EXPIRATION-IN-DAYS>),
},
NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{
NoncurrentDays: aws.Int64(<NONCURRENT-VERSION-EXPIRATION-IN-DAYS>),
},
AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
DaysAfterInitiation: aws.Int64(<DAYS-AFTER-INITIATION>),
},
Status: aws.String("<STATUS>"),
},
},
},
})
Parameters
<PREFIX>
a prefix identifying one or more objects the rule applies to, for example test.txt, thumuc/, or ScreenShots/ScreenShot2018-.
<STATUS>
the status of the lifecycle rule, one of two values: Enabled (on) or Disabled (off).
<EXPIRATION-IN-DAYS>
the number of days after which an object is marked as expired, for example 5 (5 days).
<NONCURRENT-VERSION-EXPIRATION-IN-DAYS>
when versioning is enabled, the number of days after which object versions that are no longer the latest expire.
<DAYS-AFTER-INITIATION>
when multipart upload is used, the number of days after which incomplete multipart uploads are deleted.
Notes
- The Expiration, AbortIncompleteMultipartUpload and NoncurrentVersionExpiration settings do not have to be used together; a bucket can have one of them or all of them.
- A bucket can have more than one rule.
- When a bucket has several rules, configuring the bucket lifecycle fails with a 400 (Bad Request) error if any of those rules has an empty prefix (see the sketch below).
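To illustrate the notes above, here is a minimal boto3 sketch of a configuration with two rules, each with a non-empty prefix; the rule IDs and the prefixes 'logs/' and 'tmp/' are only assumptions for the example, and it reuses the s3client connection from the introduction.
# Hedged sketch: two lifecycle rules in one configuration, each with a non-empty prefix
s3client.put_bucket_lifecycle_configuration(
    Bucket='bucket-1',
    LifecycleConfiguration={
        'Rules': [
            {
                'ID': 'expire-logs',            # assumed rule name
                'Prefix': 'logs/',              # non-empty prefix avoids the 400 error
                'Status': 'Enabled',
                'Expiration': {'Days': 7},      # delete matching objects after 7 days
            },
            {
                'ID': 'clean-incomplete-uploads',
                'Prefix': 'tmp/',
                'Status': 'Enabled',
                'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 2},
            },
        ]
    }
)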
Delete the configuration
bucket.delete_lifecycle_configuration()
# True
bucket_lifecycle = s3.BucketLifecycle('bucket-1')
bucket_lifecycle.delete()
client.deleteBucketLifecycleConfiguration("<BUCKET-NAME>");
# Working on it...
// Include
#include <aws/s3/model/DeleteBucketLifecycleRequest.h>
// Code
Aws::S3::Model::DeleteBucketLifecycleRequest request;
request.SetBucket("new_bucket");
auto outcome = s3_client.DeleteBucketLifecycle(request);
if (outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "DeleteBucketLifecycle error: " <<
outcome.GetError().GetExceptionName() << std::endl <<
outcome.GetError().GetMessage() << std::endl;
}
// Working on it...
bucket.lifecycle.delete
// Working on it...
s3.deleteBucketLifecycle({ Bucket: '<BUCKET-NAME>' }, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(JSON.stringify(data)) // successful response
})
const s3 = new AWS.S3()
s3.deleteBucketLifecycle({ Bucket: '<BUCKET-NAME>' }, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(JSON.stringify(data)) // successful response
})
Client.DeleteBucketLifecycle(&s3.DeleteBucketLifecycleInput{
Bucket: aws.String("<BUCKET-NAME>"),
})
Managing the Bucket Payer
By default, the bucket owner pays for the storage and data transfer of that bucket. The owner can, however, configure the bucket as a Requester Pays Bucket, meaning whoever accesses the bucket pays for the requests and data transfer they generate, while the bucket owner only pays for storing the data.
Requester Pays Buckets are typically used when you want to share data and have the people you share it with pay for everything they use.
An example use of this feature: there are two kinds of accounts for two different purposes. The first kind of account stores all the files. The second kind performs health checks or resource cleanup, so that this traffic is not mixed up with the main account's uploads and downloads. When requests are made with requester payment, the usage statistics are attributed to whoever made the request.
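For illustration, a requester reading from a Requester Pays Bucket must declare that they accept the charges on each request. A minimal boto3 sketch, assuming the s3client connection from the introduction; the bucket and key names are placeholders:
# Hedged sketch: RequestPayer='requester' bills the request and transfer to the requester
response = s3client.get_object(
    Bucket='<BUCKET-NAME>',
    Key='<KEY-NAME>',
    RequestPayer='requester'
)
print(response['ContentLength'])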
Check the current configuration
bucket.get_request_payment()
# b'<?xml version="1.0" encoding="UTF-8"?><RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Payer>BucketOwner</Payer></RequestPaymentConfiguration>'
payment = s3.BucketRequestPayment('vccloud')
print(payment.payer)
// Working on it...
# Working on it...
// Include
#include <aws/s3/model/GetBucketRequestPaymentRequest.h>
// Code
Aws::String GetPayerString(const Aws::S3::Model::Payer p)
{
switch (p)
{
case Aws::S3::Model::Payer::NOT_SET:
return "NOT_SET";
case Aws::S3::Model::Payer::Requester:
return "Requester";
case Aws::S3::Model::Payer::BucketOwner:
return "BucketOwner";
default:
return "*unknown!*";
}
}
Aws::S3::Model::GetBucketRequestPaymentRequest request;
request.SetBucket("<BUCKET-NAME>");
auto outcome = s3_client.GetBucketRequestPayment(request);
if (outcome.IsSuccess())
{
std::cout << GetPayerString(outcome.GetResult().GetPayer())
<< std::endl;
}
else
{
std::cout << "GetBucketRequestPayment error: " <<
outcome.GetError().GetExceptionName() << std::endl <<
outcome.GetError().GetMessage() << std::endl;
}
GetBucketRequestPaymentResponse response_payment = client.GetBucketRequestPayment("<BUCKET-NAME>");
Console.WriteLine(response_payment.Payer);
bucket.request_payment.payer
# => "BucketOwner"
<?php
$result = $s3Client->getBucketRequestPayment([
'Bucket' => '<BUCKET-NAME>'
]);
// Working on it
const s3 = new AWS.S3()
s3.getBucketRequestPayment({ Bucket: '<BUCKET-NAME>' }, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(JSON.stringify(data)) // successful response
})
Payment, _ := Client.GetBucketRequestPayment(&s3.GetBucketRequestPaymentInput{
Bucket: aws.String("bucket-01"),
})
Set the bucket as a Requester Pays Bucket
bucket.set_request_payment(payer='Requester')
# True
payment = s3.BucketRequestPayment('vccloud')
payment.put(
RequestPaymentConfiguration={
'Payer': 'Requester'
}
)
# {'ResponseMetadata': {'RequestId': 'tx000000000000006540b19-005caaecfd-e33e71e-hn-1',
# 'HostId': '',
# 'HTTPStatusCode': 200,
# 'HTTPHeaders': {'date': 'Mon, 08 Apr 2019 06:44:31 GMT',
# 'content-length': '0',
# 'connection': 'keep-alive',
# 'x-amz-request-id': 'tx000000000000006540b19-005caaecfd-e33e71e-hn-1',
# 'server': 'ngx-01',
# 'strict-transport-security': 'max-age=15768000'},
# 'RetryAttempts': 0}}
// Working on it...
# Working on it...
// Include
#include <aws/s3/model/PutBucketRequestPaymentRequest.h>
// Code
Aws::S3::Model::RequestPaymentConfiguration payment;
payment.SetPayer(Aws::S3::Model::Payer::Requester);
Aws::S3::Model::PutBucketRequestPaymentRequest request;
request.SetBucket("<BUCKET-NAME>");
request.SetRequestPaymentConfiguration(payment);
auto outcome = s3_client.PutBucketRequestPayment(request);
if (outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "PutBucketRequestPayment error: " <<
outcome.GetError().GetExceptionName() << std::endl <<
outcome.GetError().GetMessage() << std::endl;
}
PutBucketRequestPaymentRequest request_payment = new PutBucketRequestPaymentRequest();
request_payment.BucketName = "<BUCKET-NAME>";
request_payment.RequestPaymentConfiguration = new RequestPaymentConfiguration() { Payer = "Requester" };
client.PutBucketRequestPayment(request_payment);
bucket.request_payment.put({
request_payment_configuration: { # Required
payer: 'Requester', # Required, one of two values: Requester, BucketOwner
}
})
<?php
$result = $s3Client->putBucketRequestPayment([
'Bucket' => '<BUCKET-NAME>',
'RequestPaymentConfiguration' => [
'Payer' => 'Requester',
],
]);
var params = {
Bucket: '<BUCKET-NAME>',
RequestPaymentConfiguration: { Payer: 'Requester' }
}
s3.putBucketRequestPayment(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(JSON.stringify(data)) // successful response
})
const s3 = new AWS.S3()
const params = {
Bucket: '<BUCKET-NAME>',
RequestPaymentConfiguration: { Payer: 'Requester' }
}
s3.putBucketRequestPayment(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(JSON.stringify(data)) // successful response
})
Client.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
Bucket: aws.String("<BUCKET-NAME>"),
RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
Payer: aws.String("Requester"),
},
})
Set the bucket back to the default Owner Pays Bucket
bucket.set_request_payment(payer='BucketOwner')
# True
payment = s3.BucketRequestPayment('vccloud')
payment.put(
RequestPaymentConfiguration={
'Payer': 'BucketOwner'
}
)
# output
#{'ResponseMetadata': {'RequestId': 'tx00000000000000654ab87-005caaedb0-e33e020-hn-1',
# 'HostId': '',
# 'HTTPStatusCode': 200,
# 'HTTPHeaders': {'date': 'Mon, 08 Apr 2019 06:48:10 GMT',
# 'content-length': '0',
# 'connection': 'keep-alive',
# 'x-amz-request-id': 'tx00000000000000654ab87-005caaedb0-e33e020-hn-1',
# 'server': 'ngx-02',
# 'strict-transport-security': 'max-age=15768000'},
# 'RetryAttempts': 0}}
// Working on it...
// Include
#include <aws/s3/model/PutBucketRequestPaymentRequest.h>
// Code
Aws::S3::Model::RequestPaymentConfiguration payment;
payment.SetPayer(Aws::S3::Model::Payer::BucketOwner);
Aws::S3::Model::PutBucketRequestPaymentRequest request;
request.SetBucket("<BUCKET-NAME>");
request.SetRequestPaymentConfiguration(payment);
auto outcome = s3_client.PutBucketRequestPayment(request);
if (outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "PutBucketRequestPayment error: " <<
outcome.GetError().GetExceptionName() << std::endl <<
outcome.GetError().GetMessage() << std::endl;
}
// Working on it
PutBucketRequestPaymentRequest request_payment = new PutBucketRequestPaymentRequest();
request_payment.BucketName = "<BUCKET-NAME>";
request_payment.RequestPaymentConfiguration = new RequestPaymentConfiguration() { Payer = "BucketOwner" };
client.PutBucketRequestPayment(request_payment);
bucket.request_payment.put({
request_payment_configuration: { # Required
payer: 'BucketOwner', # Required, one of two values: Requester, BucketOwner
}
})
<?php
$result = $s3Client->putBucketRequestPayment([
'Bucket' => '<BUCKET-NAME>',
'RequestPaymentConfiguration' => [
'Payer' => 'BucketOwner',
],
]);
// Not supported
// Not supported
Client.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
Bucket: aws.String("bucket-01"),
RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
Payer: aws.String("BucketOwner"),
},
})
Managing objects (files)
Upload
Upload a file to a bucket.
During upload, the user can attach metadata and tags to the object, as sketched below.
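As a complement to the SDK examples that follow, a short boto3 sketch of attaching metadata and a tag set at upload time; the metadata keys and the tag string are illustrative only, and tagging assumes the backend supports object tags:
# Hedged sketch: user metadata is stored as x-amz-meta-* headers, tags as a URL-encoded string
s3client.put_object(
    Bucket='<BUCKET-NAME>',
    Key='<KEY-NAME>',
    Body=open('<PATH-TO-LOCAL-FILE>', 'rb'),
    Metadata={'author': 'example-user', 'project': 'demo'},
    Tagging='type=backup&retention=short'
)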
from boto.s3.key import Key
bucket = conn.get_bucket('bucket-01')
# <Bucket: bucket-01>
object = Key(bucket)
object.name = '/path/to/file'
object.set_contents_from_filename('/path/to/local-file')
s3.Object('bucket-1', '/path/to/file').put(Body=open('/path/to/local-file', 'rb'), ACL='public-read')
ObjectMetadata metadata = new ObjectMetadata();
metadata.setContentType("<Content-Type>");
PutObjectRequest request = new PutObjectRequest(bucketName, "<KEY-NAME>", new File("<PATH-TO-LOCAL-FILE>"));
request.setCannedAcl(CannedAccessControlList.PublicRead);
request.setMetadata(metadata);
client.putObject(request);
// Callback
typedef struct put_object_callback_data {
int fd;
size_t remain;
} put_object_callback_data;
static int putObjectDataCallback(int bufferSize, char* buffer, void* callbackData) {
put_object_callback_data* data = callbackData;
int ret = 0;
if (data->remain) {
int toRead = MIN(data->remain, bufferSize);
ret = read(data->fd, buffer, toRead);
data->remain -= ret;
}
return ret;
}
// main
/* Create an object */
S3BucketContext bucketContext = {
"hn.ss.bfcplatform.vn",
"<BUCKET-NAME>",
S3ProtocolHTTPS,
S3UriStylePath,
"<ACCESS_KEY_ID>",
"<SECRET_KEY_ID>"
};
put_object_callback_data data;
struct stat st;
if (stat(sample_file, &st) == -1) {
fprintf(stderr, "\nERROR: Failed to stat file %s: ", sample_file);
S3_deinitialize();
return EXIT_FAILURE;
}
data.remain = st.st_size;
data.fd = open(sample_file, O_RDONLY);
if (data.fd == -1) {
fprintf(stderr, "\nERROR: Failed to open input file %s: ", sample_file);
S3_deinitialize();
return EXIT_FAILURE;
}
S3PutObjectHandler putObjectHandler = {
responseHandler,
&putObjectDataCallback
};
S3NameValue meta[3] = {
{"Author", "Tien HV"},
{"Project", "S3-Examples"},
{"Email", "tienhv@vccloud.vn"}
};
S3PutProperties prop = {
"image/jpeg", //contentType
NULL, //md5
"max-age=3600", //cacheControl
NULL, //contentDispositionFilename
NULL, //contentEncoding
0, //expires
S3CannedAclPublicRead, //cannedAcl
3, //metaDataCount
meta, //metaData
0 //useServerSideEncryption
};
S3_put_object(&bucketContext, "<KEY-NAME>", st.st_size, &prop, NULL,
&putObjectHandler, &data);
close(data.fd);
// Include
#include <aws/s3/model/PutObjectRequest.h>
#include <iostream>
#include <fstream>
// Code
Aws::S3::Model::PutObjectRequest object_request;
object_request.WithBucket("<BUCKET-NAME>").WithKey("<KEY-NAME>");
object_request.SetContentType("image/jpeg");
object_request.SetACL(Aws::S3::Model::ObjectCannedACL::public_read);
// Binary files must also have the std::ios_base::bin flag or'ed in
auto input_data = Aws::MakeShared<Aws::FStream>("PutObjectInputStream",
"<PATH-TO-LOCAL-FILE>", std::ios_base::in | std::ios_base::binary);
object_request.SetBody(input_data);
auto put_object_outcome = s3_client.PutObject(object_request);
if (put_object_outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "PutObject error: " <<
put_object_outcome.GetError().GetExceptionName() << " " <<
put_object_outcome.GetError().GetMessage() << std::endl;
}
PutObjectRequest request_put_object = new PutObjectRequest();
request_put_object.BucketName = "<BUCKET-NAME>";
request_put_object.Key = "<KEY-NAME>";
request_put_object.FilePath = @"<PATH-TO-LOCAL-FILE>";
request_put_object.CannedACL = S3CannedACL.PublicRead;
PutObjectResponse response_put = client.PutObject(request_put_object);
obj = bucket.object('<KEY-NAME>')
obj.upload_file('<PATH-TO-LOCAL-FILE>', metadata: {}, acl: 'public-read')
<?php
$result = $s3Client->putObject([
'Bucket' => '<BUCKET-NAME>',
'Key' => '<KEY-NAME>',
'ACL' => 'public-read',
'SourceFile' => '<PATH-TO-LOCAL-FILE>'
]);
var params = {Bucket: 'bucket', Key: 'key', Body: stream};
s3.upload(params, function(err, data) {
console.log(err, data);
});
Example:
var file;
var doUpload = function(file) {
var uploadParams = {
Bucket: '<BUCKET-NAME>',
Key: file.name,
Body: file,
ACL:'public-read',
ContentType: file.type
};
var uploadOptions = {
partSize: 10 * 1024 * 1024,
queueSize: 1
}
var upload = s3.upload(uploadParams, uploadOptions);
upload.send((err, data) => {
if (err) {
console.error("Upload lỗi:", err);
} else if (data) {
console.log("Upload thành công:", data);
}
});
upload.on('httpUploadProgress', function(evt) {
var progress = parseInt((evt.loaded * 100) / evt.total);
console.log(progress + '%');
})
};
const fs = require('fs')
const path = require('path')
const s3 = new AWS.S3()
const file = '<PATH-TO-LOCAL-FILE>'
const uploadParams = {Bucket: '<BUCKET-NAME>', Key: '', Body: ''}
const fileStream = fs.createReadStream(file)
fileStream.on('error', function(err) {
console.log('File Error', err)
})
uploadParams.Body = fileStream
uploadParams.Key = path.basename(file)
uploadParams.ContentType = '<TYPE>'
uploadParams.ACL = 'public-read'
uploadParams.Metadata = {
'<MetadataKey>': 'STRING_VALUE',
/* '<MetadataKey>': ... */
}
s3.upload(uploadParams, (err, data) => {
if (err) {
console.log('Error', err)
} if (data) {
console.log('Upload Success', data.Location)
}
})
sess := session.New(&s3Config)
Client := s3manager.NewUploader(sess)
file, _ := os.Open("/path/to/local-file")
Client.Upload(&s3manager.UploadInput{
Bucket: aws.String("bucket-01"),
Key: aws.String(filepath.Base("/path/to/file")),
Body: file,
ACL: "public-read"
})
Multipart upload
This feature helps users manage uploads of large files. A large file is uploaded part by part; once all the parts have been uploaded, the file is assembled from them.
The minimum size of a part is 5MB. The underlying three-step flow is sketched below, followed by the per-SDK examples.
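For reference, a minimal boto3 sketch of the raw flow (initiate, upload parts, complete), assuming the s3client connection from the introduction; the bucket name, key and local path are placeholders and the 50MB part size is just an example:
# Hedged sketch of the low-level multipart flow with the boto3 client
part_size = 50 * 1024 * 1024  # each part is 50MB (the minimum allowed is 5MB)
mpu = s3client.create_multipart_upload(Bucket='<BUCKET-NAME>', Key='<KEY-NAME>')
parts = []
with open('<PATH-TO-LOCAL-FILE>', 'rb') as f:
    part_number = 1
    while True:
        data = f.read(part_size)
        if not data:
            break
        resp = s3client.upload_part(Bucket='<BUCKET-NAME>', Key='<KEY-NAME>',
                                    PartNumber=part_number, UploadId=mpu['UploadId'],
                                    Body=data)
        parts.append({'ETag': resp['ETag'], 'PartNumber': part_number})
        part_number += 1
s3client.complete_multipart_upload(Bucket='<BUCKET-NAME>', Key='<KEY-NAME>',
                                   UploadId=mpu['UploadId'],
                                   MultipartUpload={'Parts': parts})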
import os
import math
upload_file = '<PATH-TO-LOCAL-FILE>'
filename = os.path.basename(upload_file)
mp = bucket.initiate_multipart_upload('<KEY-NAME>')
source_size = os.stat(upload_file).st_size
bytes_per_chunk = 50*1024*1024 # each chunk is 50MB
chunks_count = int(math.ceil(source_size / float(bytes_per_chunk)))
for i in range(chunks_count):
    offset = i * bytes_per_chunk
    remaining_bytes = source_size - offset
    bytes = min([bytes_per_chunk, remaining_bytes])
    part_num = i + 1
    print("uploading part {} of {}".format(part_num, chunks_count))
    fp = open(upload_file, 'rb')
    try:
        fp.seek(offset)
        mp.upload_part_from_file(fp=fp, part_num=part_num, size=bytes)
    finally:
        fp.close()
if len(mp.get_all_parts()) == chunks_count:
    mp.complete_upload()
    print("Upload succeeded")
else:
    mp.cancel_upload()
    print("Upload failed")
from boto3.s3.transfer import TransferConfig
config = TransferConfig(
multipart_threshold=50*1024*1024, # files larger than 50MB are uploaded via the multipart API
max_concurrency=10,
num_download_attempts=10
)
s3.Bucket("bucket-1").upload_file("<PATH-TO-LOCAL-FILE>", "<KEY-NAME>", Config=config)
File file = new File("<PATH-TO-LOCAL-FILE>");
long contentLength = file.length();
long partSize = 50 * 1024 * 1024; // each chunk is 50MB
List<PartETag> partETags = new ArrayList<PartETag>();
String keyName = "<KEY-NAME>";
String bucketName = "<BUCKET-NAME>";
// Initiate the multipart upload.
InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
InitiateMultipartUploadResult initResponse = client.initiateMultipartUpload(initRequest);
// Upload the file parts.
long filePosition = 0;
for (int i = 1; filePosition < contentLength; i++) {
// Because the last part could be less than 5 MB, adjust the part size as needed.
partSize = Math.min(partSize, (contentLength - filePosition));
// Create the request to upload a part.
UploadPartRequest uploadRequest = new UploadPartRequest()
.withBucketName(bucketName)
.withKey(keyName)
.withUploadId(initResponse.getUploadId())
.withPartNumber(i)
.withFileOffset(filePosition)
.withFile(file)
.withPartSize(partSize);
// Upload the part and add the response's ETag to our list.
UploadPartResult uploadResult = client.uploadPart(uploadRequest);
partETags.add(uploadResult.getPartETag());
filePosition += partSize;
}
// Complete the multipart upload.
CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName, keyName,
initResponse.getUploadId(), partETags);
client.completeMultipartUpload(compRequest);
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <malloc.h>
#define MULTIPART_CHUNK_SIZE (10 << 20) // 10M per part
#define MULTIPART_KEY "<KEY-NAME>"
#define INPUT_FILE "<PATH-TO-LOCAL-FILE>"
S3BucketContext bucketContext = {
"hn.ss.bfcplatform.vn",
"<BUCKET-NAME>",
S3ProtocolHTTPS,
S3UriStylePath,
"<ACCESS_KEY_ID>",
"<SECRET_KEY_ID>"
};
typedef struct put_object_callback_data {
int fd;
size_t remain;
} put_object_callback_data;
typedef struct UploadManager {
//used for initial multipart
char * upload_id;
//used for upload part object
char **etags;
int next_etags_pos;
int remaining;
char commit_buffer[1024 * 1024];
} UploadManager;
typedef struct MultipartPartData {
put_object_callback_data put_object_data;
int seq;
UploadManager *manager;
} MultipartPartData;
static int putObjectDataCallback(int bufferSize, char* buffer, void* callbackData) {
put_object_callback_data* data = callbackData;
int ret = 0;
if (data->remain) {
int toRead = MIN(data->remain, bufferSize);
ret = read(data->fd, buffer, toRead);
data->remain -= ret;
}
return ret;
}
static S3Status MultipartResponseProperiesCallback
(const S3ResponseProperties *properties, void *callbackData) {
responsePropertiesCallback(properties, callbackData);
MultipartPartData *data = callbackData;
int seq = data->seq;
const char *etag = properties->eTag;
data->manager->etags[seq - 1] = strdup(etag);
data->manager->next_etags_pos = seq;
return S3StatusOK;
}
static S3Status multipartInitialCallback(const char *upload_id,
void *callbackData) {
UploadManager *manager = callbackData;
manager->upload_id = strdup(upload_id);
return S3StatusOK;
}
static int multipartPutXmlCallback(int bufferSize, char *buffer,
void *callbackData) {
UploadManager *manager = callbackData;
int ret = 0;
if (manager->remaining) {
int toRead = MIN(bufferSize, manager->remaining);
memcpy(buffer, manager->commit_buffer, toRead);
ret = toRead;
}
manager->remaining -= ret;
return ret;
}
int main(int argc, char** argv) {
uint64_t contentLength;
put_object_callback_data data;
UploadManager manager;
manager.upload_id = NULL;
MultipartPartData partData;
struct stat st;
if (stat(INPUT_FILE, &st) == -1) {
fprintf(stderr, "\nERROR: Failed to stat file %s: ", INPUT_FILE);
return EXIT_FAILURE;
}
contentLength = st.st_size;
int seq;
int totalSeq = ((contentLength + MULTIPART_CHUNK_SIZE - 1) /
MULTIPART_CHUNK_SIZE);
data.fd = open(INPUT_FILE, O_RDONLY);
if (data.fd == -1) {
fprintf(stderr, "\nERROR: Failed to open input file %s: ", INPUT_FILE);
return EXIT_FAILURE;
}
manager.etags = malloc(sizeof (char*)*totalSeq);
manager.next_etags_pos = 0;
S3_initialize("libs3", S3_INIT_ALL, host);
S3MultipartInitialHandler multipartInitialHandler = {
responseHandler,
&multipartInitialCallback
};
S3PutObjectHandler putObjectHandler = {
{&MultipartResponseProperiesCallback, &responseCompleteCallback},
&putObjectDataCallback
};
S3MultipartCommitHandler multipartCommitHandler = {
responseHandler,
&multipartPutXmlCallback,
NULL
};
S3_initiate_multipart(&bucketContext, MULTIPART_KEY, NULL,
&multipartInitialHandler, NULL, &manager);
printf("Multipart init id: %s\n", manager.upload_id);
for (seq = 1; seq <= totalSeq; seq++) {
printf("upload part %d id %s\n", seq, manager.upload_id);
data.remain = MIN(contentLength, MULTIPART_CHUNK_SIZE);
memset(&partData, 0, sizeof (MultipartPartData));
partData.manager = &manager;
partData.seq = seq;
partData.put_object_data = data;
S3_upload_part(&bucketContext, MULTIPART_KEY, NULL, &putObjectHandler,
seq, manager.upload_id, data.remain, NULL, &partData);
contentLength -= MULTIPART_CHUNK_SIZE;
}
int i;
int buf_size = 0;
buf_size += sprintf(manager.commit_buffer, "<CompleteMultipartUpload>");
for (seq = 1; seq <= totalSeq; seq++) {
buf_size += snprintf(manager.commit_buffer + buf_size, 256,
"<Part><PartNumber>%d</PartNumber>"
"<ETag>%s</ETag></Part>",
seq, manager.etags[seq - 1]);
}
buf_size += sprintf(manager.commit_buffer + buf_size, "</CompleteMultipartUpload>");
manager.remaining = buf_size;
printf("Multipart complete id: %s\n", manager.upload_id);
S3_complete_multipart_upload(&bucketContext, MULTIPART_KEY,
&multipartCommitHandler, manager.upload_id, manager.remaining,
NULL, &manager);
if (manager.upload_id) {
free(manager.upload_id);
}
for (i = 0; i < manager.next_etags_pos; i++) {
free(manager.etags[i]);
}
free(manager.etags);
S3_deinitialize();
return EXIT_SUCCESS;
}
// Working on it
TransferUtilityUploadRequest upload_request = new TransferUtilityUploadRequest();
upload_request.AutoCloseStream = false;
upload_request.BucketName = bucket_name;
upload_request.FilePath = @"<PATH-TO-LOCAL-FILE>";
upload_request.Key = "<KEY-NAME>";
upload_request.PartSize = 50 * 1024 * 1024; // each chunk is 50MB
TransferUtility ut = new TransferUtility(client);
ut.Upload(upload_request);
obj = bucket.object('<KEY-NAME>')
# If the file size is larger than the `multipart_threshold` value, the file is uploaded
# through the multipart API; the default value is 15728640B (=15MB)
obj.upload_file('<PATH-TO-LOCAL-FILE>', metadata: {}, multipart_threshold: 15728640)
<?php
$uploader = new Aws\S3\MultipartUploader($s3Client, '<PATH-TO-LOCAL-FILE>', [
'bucket' => '<BUCKET-NAME>',
'key' => '<KEY-NAME>',
]);
try {
$result = $uploader->upload();
echo "Upload complete: {$result['ObjectURL']}\n";
} catch (Aws\Exception\MultipartUploadException $e) {
echo $e->getMessage() . "\n";
}
/**
* Task: Multipart upload
* 01. Break the files into many pieces
* 02. Initiate Multipart Upload
* 03. Upload individual parts
* 04. Complete Multipart Upload
*/
var BUCKET_NAME = '<BUCKET-NAME>';
var partSize = 1024 * 1024 * 5; // Minimum 5MB per chunk
var file;
var multipartMap = {
Parts: []
}
var startTime = new Date();
var numPartsLeft = 0;
var maxUploadTries = 3;
function createParts(buffer, file, callback) {
var partNum = 0
var multiPartParams = {
Bucket: BUCKET_NAME,
Key: file.name,
ContentType: file.type
}
numPartsLeft = Math.ceil(buffer.length / partSize);
s3.createMultipartUpload(multiPartParams, function(mpErr, multipart){
if (mpErr) {
console.log('Error!', mpErr)
return
}
console.log("Got upload ID", multipart.UploadId)
// Grab each partSize chunk and upload it as a part
for (var rangeStart = 0; rangeStart < buffer.length; rangeStart += partSize) {
partNum++
var end = Math.min(rangeStart + partSize, buffer.length),
partParams = {
Body: buffer.slice(rangeStart, end),
Bucket: BUCKET_NAME,
Key: file.name,
PartNumber: String(partNum),
UploadId: multipart.UploadId
}
// Send a single part
console.log('Uploading part: #', partParams.PartNumber, ', Range start:', rangeStart);
if (callback) callback(s3, multipart, partParams)
}
})
}
function uploadPart(s3, multipart, partParams, _tryNum) {
const tryNum = _tryNum || 1
s3.uploadPart(partParams, function(multiErr, mData) {
if (multiErr){
console.log('multiErr, upload part error:', multiErr)
if (tryNum < maxUploadTries) {
console.log('Retrying upload of part: #', partParams.PartNumber)
uploadPart(s3, multipart, partParams, tryNum + 1)
} else {
console.log('Failed uploading part: #', partParams.PartNumber)
}
return
}
multipartMap.Parts[this.request.params.PartNumber - 1] = {
ETag: mData.ETag,
PartNumber: Number(this.request.params.PartNumber)
}
console.log("Completed part", this.request.params.PartNumber)
console.log('mData', mData)
if (--numPartsLeft > 0) return // complete only when all parts uploaded
var doneParams = {
Bucket: BUCKET_NAME,
Key: multipart.Key,
MultipartUpload: multipartMap,
UploadId: multipart.UploadId
}
console.log("Completing upload...")
completeMultipartUpload(s3, doneParams)
})
}
function completeMultipartUpload(s3, doneParams) {
s3.completeMultipartUpload(doneParams, function(err, data) {
if (err) {
console.log("An error occurred while completing the multipart upload")
console.log(err)
} else {
const delta = (new Date() - startTime) / 1000
console.log('Completed upload in', delta, 'seconds')
console.log('Final upload data:', data)
}
})
}
// doUpload : Such as onClick button submit file input form
var doUpload = function(file) {
var reader = new FileReader();
reader.readAsArrayBuffer(file);
reader.onload = function(e) {
var buffer = new Uint8Array(e.target.result);
createParts(buffer, file, uploadPart);
}
}
const fs = require('fs')
const s3 = new AWS.S3()
// S3 Upload options
const bucket = '<BUCKET-NAME>'
// File
const fileKey = '<FILE-NAME>'
const filePath = `${<PATH-TO-LOCAL-FILE>}/${<FILE-NAME>}`
const buffer = fs.readFileSync(filePath)
// Upload
const startTime = new Date()
let partNum = 0
const partSize = 1024 * 1024 * 5 // Minimum 5MB per chunk
let numPartsLeft = Math.ceil(buffer.length / partSize)
const maxUploadTries = 3
const multiPartParams = {
Bucket: bucket,
Key: fileKey,
ContentType: '<TYPE>'
}
const multipartMap = {
Parts: []
}
function completeMultipartUpload(s3, doneParams) {
s3.completeMultipartUpload(doneParams, function(err, data) {
if (err) {
console.log("An error occurred while completing the multipart upload")
console.log(err)
} else {
const delta = (new Date() - startTime) / 1000
console.log('Completed upload in', delta, 'seconds')
console.log('Final upload data:', data)
}
})
}
function uploadPart(s3, multipart, partParams, _tryNum) {
const tryNum = _tryNum || 1
s3.uploadPart(partParams, function(multiErr, mData) {
if (multiErr){
console.log('multiErr, upload part error:', multiErr)
if (tryNum < maxUploadTries) {
console.log('Retrying upload of part: #', partParams.PartNumber)
uploadPart(s3, multipart, partParams, tryNum + 1)
} else {
console.log('Failed uploading part: #', partParams.PartNumber)
}
return
}
multipartMap.Parts[this.request.params.PartNumber - 1] = {
ETag: mData.ETag,
PartNumber: Number(this.request.params.PartNumber)
}
console.log("Completed part", this.request.params.PartNumber)
console.log('mData', mData)
if (--numPartsLeft > 0) return // complete only when all parts uploaded
const doneParams = {
Bucket: bucket,
Key: fileKey,
MultipartUpload: multipartMap,
UploadId: multipart.UploadId
}
console.log("Completing upload...")
completeMultipartUpload(s3, doneParams)
})
}
// Multipart
console.log("Creating multipart upload for:", fileKey)
s3.createMultipartUpload(multiPartParams, function(mpErr, multipart){
if (mpErr) {
console.log('Error!', mpErr)
return
}
console.log("Got upload ID", multipart.UploadId)
// Grab each partSize chunk and upload it as a part
for (let rangeStart = 0; rangeStart < buffer.length; rangeStart += partSize) {
partNum++
const end = Math.min(rangeStart + partSize, buffer.length),
partParams = {
Body: buffer.slice(rangeStart, end),
Bucket: bucket,
Key: fileKey,
PartNumber: String(partNum),
UploadId: multipart.UploadId
}
// Send a single part
console.log('Uploading part: #', partParams.PartNumber, ', Range start:', rangeStart)
uploadPart(s3, multipart, partParams)
}
})
MAX_PART_SIZE := int64(5 * 1024 * 1024) // minimum part size is 5MB
MAX_RETRIES := 3
f, _ := os.Open("<PATH-TO-LOCAL-FILE>")
fileInfo, _ := f.Stat()
size := fileInfo.Size()
buffer := make([]byte, size)
f.Read(buffer)
fileType := http.DetectContentType(buffer)
input := &s3.CreateMultipartUploadInput{
Bucket: aws.String("bucket-01"),
Key: aws.String("<KEY-NAME>"),
ContentType: aws.String(fileType),
}
resp, _ := Client.CreateMultipartUpload(input)
var curr, partLength int64
var remaining = size
var completedParts []*s3.CompletedPart
partNumber := 1
for curr = 0; remaining != 0; curr += partLength {
if remaining < MAX_PART_SIZE {
partLength = remaining
} else {
partLength = MAX_PART_SIZE
}
tryNum := 1
partInput := &s3.UploadPartInput{
Body: bytes.NewReader(buffer[curr : curr+partLength]),
Bucket: resp.Bucket,
Key: resp.Key,
PartNumber: aws.Int64(int64(partNumber)),
UploadId: resp.UploadId,
ContentLength: aws.Int64(int64(len(buffer[curr : curr+partLength]))),
}
for tryNum <= MAX_RETRIES {
uploadResult, err := Client.UploadPart(partInput)
if err == nil {
completedParts = append(completedParts, &s3.CompletedPart{
ETag: uploadResult.ETag,
PartNumber: aws.Int64(int64(partNumber)),
})
break
}
tryNum++
}
remaining -= partLength
partNumber++
}
Client.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
Bucket: resp.Bucket,
Key: resp.Key,
UploadId: resp.UploadId,
MultipartUpload: &s3.CompletedMultipartUpload{Parts: completedParts},
})
Parameters
<PATH-TO-LOCAL-FILE>
is the path of the local file to upload
<KEY-NAME>
is the path under which the file is stored in the bucket
Download
Download an object from a bucket into a local file.
object = bucket1.get_key('path/to/file.txt')
object.get_contents_to_filename("/downloads/file.txt")
with open('<PATH-TO-LOCAL-FILE>', 'wb') as data:
    s3client.download_fileobj('vccloud', '<KEY-NAME>', data)
S3Object s3object = s3client.getObject("<BUCKET-NAME>", "<KEY-NAME>");
S3ObjectInputStream inputStream = s3object.getObjectContent();
FileUtils.copyInputStreamToFile(inputStream, new File("<PATH-TO-LOCAL-FILE>"));
// The FileUtils class comes from the Apache Commons library
// Callback
static S3Status getObjectDataCallback(int bufferSize, const char *buffer, void *callbackData) {
FILE* file = callbackData;
size_t written = fwrite(buffer, 1, bufferSize, file);
return ((written < (size_t) bufferSize) ? S3StatusAbortedByCallback : S3StatusOK);
}
// Main
S3BucketContext bucketContext = {
"hn.ss.bfcplatform.vn",
"<BUCKET-NAME>",
S3ProtocolHTTPS,
S3UriStylePath,
"<ACCESS_KEY_ID>",
"<SECRET_KEY_ID>"
};
S3GetObjectHandler getObjectHandler = {
responseHandler,
&getObjectDataCallback
};
FILE *file = fopen("<PATH-TO-LOCAL-FILE>", "wb");
if (!file) {
fprintf(stderr, "\nERROR: Failed to open output file %s: ", "<PATH-TO-LOCAL-FILE>");
S3_deinitialize();
return EXIT_FAILURE;
}
S3_get_object(&bucketContext, "<KEY-NAME>", NULL, 0, 0, NULL, &getObjectHandler, file);
fclose(file);
// Include
#include <aws/s3/model/GetObjectRequest.h>
#include <fstream>
// Code
Aws::S3::Model::GetObjectRequest object_request;
object_request.WithBucket("<BUCKET-NAME>").WithKey("<KEY-NAME>");
auto get_object_outcome = s3_client.GetObject(object_request);
if (get_object_outcome.IsSuccess())
{
Aws::OFStream local_file;
local_file.open("<PATH-TO-LOCAL-FILE>", std::ios::out | std::ios::binary);
local_file << get_object_outcome.GetResult().GetBody().rdbuf();
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "GetObject error: " <<
get_object_outcome.GetError().GetExceptionName() << " " <<
get_object_outcome.GetError().GetMessage() << std::endl;
}
GetObjectRequest request_download = new GetObjectRequest();
request_download.BucketName = "<BUCKET-NAME>";
request_download.Key = "<KEY-NAME>";
GetObjectResponse response = client.GetObject(request_download);
response.WriteResponseStreamToFile("<PATH-TO-LOCAL-FILE>");
obj = bucket.object('<KEY-NAME>')
obj.download_file('<PATH-TO-LOCAL-FILE>')
<?php
$result = $s3Client->getObject([
'Bucket' => '<BUCKET-NAME>',
'Key' => '<KEY-NAME>',
'SaveAs' => '<PATH-TO-LOCAL-FILE>'
]);
var params = {
Bucket: "<BUCKET-NAME>",
Key: "<KEY-NAME>"
};
s3.getObject(params, function(err, data) {
if (err) console.log(err, err.stack); // an error occurred
else console.log(data); // successful response
});
const fs = require('fs')
const path = require('path')
const s3 = new AWS.S3()
function downloadObject() {
return new Promise((resolve, reject) => {
const BUCKET = '<BUCKET-NAME>'
const key = '<KEY-NAME>'
const destPath = '<PATH-TO-LOCAL-DIR>' // Example: `./tmp/${path.basename(key)}`
const params = {
Bucket: BUCKET,
Key: key
}
const s3Stream = s3.getObject(params).createReadStream()
const fileStream = fs.createWriteStream(destPath)
s3Stream.on('error', reject)
fileStream.on('error', reject)
fileStream.on('close', () => { resolve(destPath) })
s3Stream.pipe(fileStream)
})
}
var s3Download *s3manager.Downloader
s := session.New(s3Config)
s3Download = s3manager.NewDownloader(s)
f, _ := os.Create("path/to/file.txt")
s3Download.Download(f,
&s3.GetObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("/downloads/file.txt"),
})
Parameters
<PATH-TO-LOCAL-FILE>
is the path where the downloaded file is saved
<KEY-NAME>
is the path of the file inside the bucket
Get information about an object by its path
For example, get information about the file smile.png in the emoji directory.
Retrieve object attributes such as: name, content-type, last_modified ...
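If you only need the metadata and not the object body, a HEAD request avoids downloading the content. A small boto3 sketch, reusing the s3client connection and the example key above:
# Hedged sketch: head_object returns only headers/metadata, without the object body
info = s3client.head_object(Bucket='vccloud', Key='emoji/smile.png')
print(info['ContentType'], info['ContentLength'], info['LastModified'])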
object = bucket.get_key('emoji/smile.png')
object.name
# 'emoji/smile.png'
object.last_modified
# 'Mon, 09 Jul 2018 08:15:52 GMT'
object.content_length
# '488'
bucket = s3.Bucket('vccloud')
obj = bucket.Object('emoji/smile.png')
obj.content_length
print(obj.get())
# sample output:
{'ResponseMetadata': {'RequestId': 'tx0000000000000065673c0-005caaf09b-e33e020-hn-1',
'HostId': '',
'HTTPStatusCode': 200,
'HTTPHeaders': {'date': 'Mon, 08 Apr 2019 06:59:58 GMT',
'content-type': 'binary/octet-stream',
'content-length': '1266761',
'connection': 'keep-alive',
'accept-ranges': 'bytes',
'last-modified': 'Mon, 10 Sep 2018 10:07:37 GMT',
'x-amz-version-id': 'qFj4NFLXnRTE9G-UuD4ktam5IErzNb3',
'etag': '"d822d300080849e746782e98033cb86a"',
'x-amz-request-id': 'tx0000000000000065673c0-005caaf09b-e33e020-hn-1',
'server': 'ngx-01',
'strict-transport-security': 'max-age=15768000'},
'RetryAttempts': 0},
'AcceptRanges': 'bytes',
'LastModified': datetime.datetime(2018, 9, 10, 10, 7, 37, tzinfo=tzutc()),
'ContentLength': 1266761,
'ETag': '"d822d300080849e746782e98033cb86a"',
'VersionId': 'qFj4NFLXnRTE9G-UuD4ktam5IErzNb3',
'ContentType': 'binary/octet-stream',
'Metadata': {},
'Body': <botocore.response.StreamingBody at 0x7efe7bc15c18>}
S3Object object = client.getObject(new GetObjectRequest("bucket-01","emoji/smile.png"));
System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
# Working on it...
// Include
#include <aws/s3/model/HeadObjectRequest.h>
// Code
Aws::S3::Model::HeadObjectRequest object_request;
object_request.WithBucket("<BUCKET-NAME>").WithKey("emoji/smile.png");
auto get_object_outcome = s3_client.HeadObject(object_request);
if (get_object_outcome.IsSuccess())
{
auto result = get_object_outcome.GetResult();
std::cout << result.GetContentType() << " "
<< result.GetLastModified().ToLocalTimeString(Aws::Utils::DateFormat::ISO_8601) << std::endl;
}
else
{
std::cout << "HeadObject error: " <<
get_object_outcome.GetError().GetExceptionName() << " " <<
get_object_outcome.GetError().GetMessage() << std::endl;
}
GetObjectRequest request_download = new GetObjectRequest();
request_download.BucketName = "bucket-01";
request_download.Key = "emoji/smile.png";
GetObjectResponse response = client.GetObject(request_download);
obj = bucket.object('emoji/smile.png')
obj.exists?
# => true
obj.key
# => "emoji/smile.png"
obj.last_modified
# => 2018-08-31 03:44:48 +0000
obj.content_length
# => 401957
<?php
$result = $s3Client->headObject([
'Bucket' => '<BUCKET-NAME>',
'Key' => 'emoji/smile.png',
]);
print_r($result);
var params = {
Bucket: "<BUCKET-NAME>",
Key: "<FILE-NAME>"
};
s3.getObject(params, function(err, data) {
if (err) console.log(err, err.stack); // an error occurred
else console.log(data); // successful response
});
const s3 = new AWS.S3()
const params = {
Bucket: '<BUCKET-NAME>',
Key: '<FILE-NAME>' // 'emoji/smile.png'
}
s3.getObject(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
/*
data = {
AcceptRanges: 'bytes',
ContentLength: 3191,
ContentType: 'image/png',
ETag: '\"6805f2cfc46c0f04559748bb039d69ae\"',
LastModified: <Date Representation>,
Metadata: {},
TagCount: 2,
VersionId: 'null'
}
*/
})
file, _ := Client.GetObject(&s3.GetObjectInput{
Bucket: aws.String("bucket-01"),
Key: aws.String("emoji/smile.png"),
})
List the versions of an object
When versioning is enabled on a bucket, the user can list all versions of an object.
result = bucket.list_versions('test/1.txt', '/')
for k in result:
    print('is_latest: {} name: {} version_id: {}'.format(k.is_latest, k.name, k.version_id))
# is_latest: True name: test/1.txt version_id: y.7k9aKfKpJTVRz6Fwdzr2l088tMB4p
# is_latest: False name: test/1.txt version_id: twtEzzQS4JJhZPfkmKuj-SEQpr-eZ4S
# is_latest: False name: test/1.txt version_id: 5khvagbKvS1qmb.a1nUl17R5AUyR.o4
# is_latest: False name: test/1.txt version_id: 8Y1xI2X2p8-RREACFVq8W0FrxgwZ7Ew
# is_latest: False name: test/1.txt version_id: null
versions = s3.Bucket('vccloud').object_versions.filter(Prefix='vccloud1.txt')
for version in versions:
    print('ver_id: {} '.format(version.id))
# output
# ver_id: UxMLn4KNdk5Kx8IH.ZlD7v6xHxqHaLV
# ver_id: KTMC4VxvTGK0i0npqxdwaZy2U5fBEKa
# ver_id: uFfTITyB5D2Z8IMoB02HaxFHR-aAsqT
# ver_id: 7aH1IkSLHlHSJgSrhc-2dXH34q4Ar33
VersionListing listVersions = client.listVersions("<BUCKET-NAME>", "test/1.txt");
for (S3VersionSummary objectSummary : listVersions.getVersionSummaries()) {
System.out.printf("Retrieved object: %s, version: %s, is_lastest: %s\n",
objectSummary.getKey(),objectSummary.getVersionId(),objectSummary.isLatest());
}
# Working on it...
// Include
#include <aws/s3/model/ListObjectVersionsRequest.h>
// Code
Aws::S3::Model::ListObjectVersionsRequest request;
request.WithBucket("<BUCKET-NAME>").WithPrefix("<KEY-PREFIX>");
auto outcome = s3_client.ListObjectVersions(request);
if (outcome.IsSuccess())
{
auto versions = outcome.GetResult().GetVersions();
for (auto &version : versions)
{
std::cout << version.GetKey() << " " << version.GetVersionId() << " "
<< version.GetLastModified().ToLocalTimeString(Aws::Utils::DateFormat::ISO_8601)
<< std::endl;
}
}
else
{
std::cout << "ListObjectVersion error: " <<
outcome.GetError().GetExceptionName() << " " <<
outcome.GetError().GetMessage() << std::endl;
}
ListVersionsResponse response_list_version = client.ListVersions("bucket-01", "test/1.txt");
foreach (S3ObjectVersion entry in response_list_version.Versions)
{
Console.WriteLine("key = {0}, version_id = {1}, is_latest = {2}", entry.Key, entry.VersionId, entry.IsLatest);
}
...
key = test/1.txt, version_id = IzgiSRNJsV9xLwZNbCn6lfLUm7OenyR, is_latest = True
key = test/1.txt, version_id = Y-pYHxGOhEglY8FevivOlb4nB6vnq2G, is_latest = False
key = test/1.txt, version_id = ZwhaUkGdMS677YUQ9AjH3ha7sjVAKBP, is_latest = False
...
object_versions = bucket.object_versions({
prefix: 'test/1.txt',
delimiter: '/'
})
object_versions.each do |obj|
puts "key = #{obj.key}, version_id = #{obj.version_id}, is_latest = #{obj.is_latest}"
end
# key = test/1.txt, version_id = IzgiSRNJsV9xLwZNbCn6lfLUm7OenyR, is_latest = true
# key = test/1.txt, version_id = Y-pYHxGOhEglY8FevivOlb4nB6vnq2G, is_latest = false
# key = test/1.txt, version_id = ZwhaUkGdMS677YUQ9AjH3ha7sjVAKBP, is_latest = false
<?php
$result = $s3Client->listObjectVersions([
'Bucket' => '<BUCKET-NAME>',
'Prefix' => '<KEY-PREFIX>',
]);
print_r($result['Versions']);
var params = {
Bucket: "<BUCKET-NAME>",
Key: "<KEY-NAME>"
};
s3.listObjectVersions(params, function(err, data) {
if (err) console.log(err, err.stack); // an error occurred
else console.log(data); // successful response
});
const s3 = new AWS.S3()
const params = {
Bucket: '<BUCKET-NAME>',
Prefix: '<KEY-PREFIX>',
}
s3.listObjectVersions(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
Versions, _ := Client.ListObjectVersions(&s3.ListObjectVersionsInput{
Bucket: aws.String("bucket-01"),
Prefix: aws.String("test/1.txt"),
})
Delete
1. Delete a file normally
2. Delete a specific version of a file
Applies when bucket versioning is enabled
3. Delete a file using multi-step verification (Multi-Factor Authentication)
Used when MFA Authentication has been configured. When deleting the file, you must supply a valid MFA token generated by the MFA token manager previously set up on your device.
Delete a file normally
bucket1.delete_key('path/to/file')
# <Key: bucket-01,path/to/file>
object = s3.Object('vccloud','test2')
object.delete()
client.deleteObject("<BUCKET-NAME>", "path/to/file");
S3BucketContext bucketContext = {
"hn.ss.bfcplatform.vn",
"<BUCKET-NAME>",
S3ProtocolHTTPS,
S3UriStylePath,
"<ACCESS_KEY_ID>",
"<SECRET_KEY_ID>"
};
S3_delete_object(&bucketContext, "<KEY-NAME>", NULL, &responseHandler, NULL);
// Include
#include <aws/s3/model/DeleteObjectRequest.h>
// Code
Aws::S3::Model::DeleteObjectRequest object_request;
object_request.WithBucket("<BUCKET-NAME>").WithKey("<KEY-NAME>");
auto delete_object_outcome = s3_client.DeleteObject(object_request);
if (delete_object_outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "DeleteObject error: " <<
delete_object_outcome.GetError().GetExceptionName() << " " <<
delete_object_outcome.GetError().GetMessage() << std::endl;
}
client.DeleteObject("<BUCKET-NAME>", "path/to/file");
obj.delete
<?php
$result = $client->deleteObject([
'Bucket' => '<BUCKET-NAME>',
'Key' => '<KEY-NAME>',
]);
var params = {
Bucket: "<BUCKET-NAME>",
Key: "<KEY-NAME>"
};
s3.deleteObject(params, function(err, data) {
if (err) console.log(err, err.stack); // an error occurred
else console.log(data); // successful response
});
const s3 = new AWS.S3()
const params = {
Bucket: '<BUCKET-NAME>',
Key: '<FILE-NAME>'
}
s3.deleteObject(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
Client.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String("<BUCKET-NAME>"),
Key: aws.String("path/to/file"),
})
Delete a specific version of a file
bucket1.delete_key('path/to/file', version_id='<VERSION-ID>')
# <Key: bucket-01,path/to/file>
object = s3.Object('vccloud','test2')
object.delete(
MFA='string',
VersionId='string',
RequestPayer='requester',
BypassGovernanceRetention=True|False
)
client.deleteVersion("<BUCKET-NAME>", "path/to/file", "<VERSION-ID>");
# Working on it...
// Include
#include <aws/s3/model/DeleteObjectRequest.h>
// Code
Aws::S3::Model::DeleteObjectRequest object_request;
object_request.WithBucket("<BUCKET-NAME>").WithKey("<KEY-NAME>")
.SetVersionId("<VERSION-ID>");
auto delete_object_outcome = s3_client.DeleteObject(object_request);
if (delete_object_outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "DeleteObject error: " <<
delete_object_outcome.GetError().GetExceptionName() << " " <<
delete_object_outcome.GetError().GetMessage() << std::endl;
}
client.DeleteObject("<BUCKET-NAME>", "path/to/file", "<VERSION-ID>");
obj.delete({
version_id: '<VERSION-ID>'
})
<?php
$result = $client->deleteObject([
'Bucket' => '<BUCKET-NAME>',
'Key' => '<KEY-NAME>',
'VersionId' => '<VERSION-ID>'
]);
var params = {
Bucket: "<BUCKET-NAME>",
Key: "<KEY-NAME>",
VersionId: "<VERSION-ID>",
MFA: '<MFA-TOKEN>'
};
s3.deleteObject(params, function(err, data) {
if (err) console.log(err, err.stack); // an error occurred
else console.log(data); // successful response
/*
data = {
}
*/
});
const s3 = new AWS.S3()
const params = {
Bucket: '<BUCKET-NAME>',
Key: '<KEY-NAME>',
VersionId: '<VERSION-ID>',
MFA: '<MFA-TOKEN>'
}
s3.deleteObject(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
Client.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String("bucket-01"),
Key: aws.String("path/to/file"),
VersionId: aws.String("<VERSION-ID>"),
})
Delete a file using multi-step verification
bucket1.delete_key('path/to/file', mfa_token='<MFA-TOKEN>')
# <Key: bucket-01,path/to/file>
object = s3.Object('vccloud','test2')
object.delete(
MFA='string',
VersionId='string',
RequestPayer='requester',
BypassGovernanceRetention=True|False
)
// Working on it...
# Working on it...
// Include
#include <aws/s3/model/DeleteObjectRequest.h>
// Code
Aws::S3::Model::DeleteObjectRequest object_request;
object_request.WithBucket("<BUCKET-NAME>").WithKey("<KEY-NAME>")
.SetMFA("<MFA-TOKEN>");
auto delete_object_outcome = s3_client.DeleteObject(object_request);
if (delete_object_outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "DeleteObject error: " <<
delete_object_outcome.GetError().GetExceptionName() << " " <<
delete_object_outcome.GetError().GetMessage() << std::endl;
}
// Working on it...
obj.delete({
mfa: '<MFA-TOKEN>'
})
<?php
$result = $client->deleteObject([
'Bucket' => '<BUCKET-NAME>',
'Key' => '<KEY-NAME>',
'MFA' => '<MFA-TOKEN>',
]);
var params = {
Bucket: "<BUCKET-NAME>",
Key: "<KEY-NAME>",
MFA: "<MFA-TOKEN>"
};
s3.deleteObject(params, function(err, data) {
if (err) console.log(err, err.stack); // an error occurred
else console.log(data); // successful response
/*
data = {
}
*/
});
const s3 = new AWS.S3()
const params = {
Bucket: '<BUCKET-NAME>',
Key: '<KEY-NAME>',
MFA: '<MFA-TOKEN>'
}
s3.deleteObject(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
Client.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String("bucket-01"),
Key: aws.String("path/to/file"),
MFA: aws.String("<MFA-TOKEN>"),
})
Parameters
<VERSION-ID>
is the version_id of the object
<MFA-TOKEN>
is a multi-factor token that is valid at the time of deletion
Copy
DST_BUCKET.copy_key('<DST_OBJECT>', '<SRC_BUCKET>', '<SRC_OBJECT>', preserve_acl=True)
copy_source = {
'Bucket': '<SRC_BUCKET>',
'Key': '<SRC_OBJECT>'
}
response = s3client.copy_object(
Bucket='<DST_BUCKET>',
CopySource=copy_source,
Key='<DST_OBJECT>',
StorageClass='<STORAGE_CLASS>'
)
client.copyObject("<SRC_BUCKET>", "<SRC_OBJECT>", "<DST_BUCKET>", "<DST_OBJECT>");
S3BucketContext bucketContext = {
"hn.ss.bfcplatform.vn",
"<SRC_BUCKET>",
S3ProtocolHTTPS,
S3UriStylePath,
"<ACCESS_KEY_ID>",
"<SECRET_KEY_ID>"
};
const char destinationBucket[] = "<DST_BUCKET>";
const char destinationKey[] = "<DST_OBJECT>";
int64_t lastModifiedReturn = 0;
char eTagReturn[256];
S3ResponseHandler copyResponseHandler = {
&responsePropertiesCallback,
&responseCompleteCallback
};
S3_copy_object(&bucketContext, "<SRC_OBJECT>", destinationBucket, destinationKey,
NULL, &lastModifiedReturn, sizeof (eTagReturn), eTagReturn,
NULL, &copyResponseHandler, NULL);
// Include
#include <aws/s3/model/CopyObjectRequest.h>
// Code
Aws::S3::Model::CopyObjectRequest request;
request.WithBucket("<DST_BUCKET>").WithKey("<DST_OBJECT>")
.WithCopySource("<SRC_BUCKET>/<SRC_OBJECT>");
auto outcome = s3_client.CopyObject(request);
if (outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "CopyObject error: " <<
outcome.GetError().GetExceptionName() << " " <<
outcome.GetError().GetMessage() << std::endl;
}
client.CopyObject("<SRC_BUCKET>", "<SRC_OBJECT>", "<DST_BUCKET>", "<DST_OBJECT>");
obj.copy_to({
bucket: '<DST_BUCKET>',
key: '<DST_OBJECT>'
})
<?php
$result = $s3Client->copyObject([
'Bucket' => '<DST_BUCKET>',
'CopySource' => '/<SRC_BUCKET>/<SRC_OBJECT>',
'Key' => '<DST_OBJECT>',
'StorageClass' => '<STORAGE_CLASS>'
]);
var params = {
Bucket: "<DST_BUCKET>",
CopySource: "/<SRC_BUCKET>/<SRC_OBJECT>",
Key: "<DST_OBJECT>"
};
s3.copyObject(params, function(err, data) {
if (err) console.log(err, err.stack); // an error occurred
else console.log(data); // successful response
/*
data = {
CopyObjectResult: {
ETag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
LastModified: <Date Representation>
}
}
*/
});
const s3 = new AWS.S3()
const params = {
Bucket: '<DST_BUCKET>',
CopySource: '/<SRC_BUCKET>/<SRC_OBJECT>',
Key: '<DST_OBJECT>'
}
s3.copyObject(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
Client.CopyObject(&s3.CopyObjectInput{
Bucket: aws.String("<DST_BUCKET>"),
CopySource: aws.String("<SRC_BUCKET>/<SRC_OBJECT>"),
Key: aws.String("<DST_OBJECT>"),
})
Parameters
<DST_BUCKET>
the destination bucket to copy to; you can copy a file to another bucket or within the same bucket
<DST_OBJECT>
the destination file path to copy to
<SRC_OBJECT>
the source file path
<SRC_BUCKET>
the name of the source bucket
<STORAGE_CLASS>
the storage class name; valid values: STANDARD, COLD,...
preserve_acl
set to True so that the destination file has the same ACL as the source file
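Because the API has no rename operation, copying within the same bucket and then deleting the source key is the usual way to move a file. A minimal boto3 sketch (hypothetical paths):
import boto3

s3client = boto3.client('s3', endpoint_url='https://hn.ss.bfcplatform.vn',
                        aws_access_key_id='<ACCESS_KEY_ID>',
                        aws_secret_access_key='<SECRET_KEY_ID>')

# Copy the object to its new key in the same bucket, then delete the old key.
s3client.copy_object(
    Bucket='<BUCKET-NAME>',
    CopySource={'Bucket': '<BUCKET-NAME>', 'Key': 'old/path/file.txt'},
    Key='new/path/file.txt',
)
s3client.delete_object(Bucket='<BUCKET-NAME>', Key='old/path/file.txt')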
Managing object ACLs
Get
object.get_acl()
# <Policy: demo (owner) = FULL_CONTROL>
object_acl = s3.ObjectAcl('vccloud','test2')
AccessControlList acl = client.getObjectAcl("<BUCKET-NAME>", "<KEY-NAME>");
Set<Grant> grants = acl.getGrants();
for (Grant grant : grants) {
System.out.format(" %s: %s\n", grant.getGrantee().getIdentifier(),
grant.getPermission().toString());
}
# Working on it...
// Include
#include <aws/s3/model/GetObjectAclRequest.h>
#include <aws/s3/model/Permission.h>
#include <aws/s3/model/Grant.h>
// Code
Aws::String GetPermissionString(const Aws::S3::Model::Permission p)
{
switch (p)
{
case Aws::S3::Model::Permission::NOT_SET:
return "NOT_SET";
case Aws::S3::Model::Permission::FULL_CONTROL:
return "FULL_CONTROL";
case Aws::S3::Model::Permission::WRITE:
return "WRITE";
case Aws::S3::Model::Permission::READ:
return "READ";
case Aws::S3::Model::Permission::WRITE_ACP:
return "WRITE_ACP";
case Aws::S3::Model::Permission::READ_ACP:
return "READ_ACP";
default:
return "*unknown!*";
}
}
Aws::S3::Model::GetObjectAclRequest request;
request.WithBucket("<BUCKET-NAME>").WithKey("<KEY-NAME>");
auto outcome = s3_client.GetObjectAcl(request);
if (outcome.IsSuccess())
{
Aws::Vector<Aws::S3::Model::Grant> grants = outcome.GetResult().GetGrants();
for (auto it = grants.begin(); it != grants.end(); it++)
{
Aws::S3::Model::Grant grant = *it;
std::cout << grant.GetGrantee().GetDisplayName() << ": "
<< GetPermissionString(grant.GetPermission())
<< std::endl;
}
}
else
{
std::cout << "GetObjectAcl error: " <<
outcome.GetError().GetExceptionName() << " " <<
outcome.GetError().GetMessage() << std::endl;
}
GetACLRequest request_acl = new GetACLRequest();
request_acl.BucketName = "<BUCKET-NAME>";
request_acl.Key = "<KEY-NAME>";
GetACLResponse response = client.GetACL(request_acl);
S3AccessControlList acl_object = response.AccessControlList;
object_acl = obj.acl.data
object_acl.owner
# => #<struct Aws::S3::Types::Owner display_name="demo", id="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx">
object_acl.grants
# => [#<struct Aws::S3::Types::Grant grantee=#<struct Aws::S3::Types::Grantee display_name="demo", email_address=nil, id="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", type="CanonicalUser", uri=nil>, permission="FULL_CONTROL">]
<?php
$result = $s3Client->getObjectAcl([
'Bucket' => '<BUCKET-NAME>',
'Key' => '<KEY-NAME>'
]);
var params = {
Bucket: "<BUCKET-NAME>",
Key: "<FILE-NAME>"
};
s3.getObjectAcl(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
const s3 = new AWS.S3()
const params = {
Bucket: '<BUCKET-NAME>',
Key: '<FILE-NAME>'
};
s3.getObjectAcl(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
acl, _ := Client.GetObjectAcl(&s3.GetObjectAclInput{
Bucket: aws.String("bucket-01"),
Key: aws.String("key"),
})
Set
object.set_acl('<CANNED_ACL>')
object_acl = s3.ObjectAcl('vccloud','test2')
object_acl.put(
ACL='private'|'public-read'|'public-read-write'|'authenticated-read'|'aws-exec-read'|'bucket-owner-read'|'bucket-owner-full-control',
AccessControlPolicy={
'Grants': [
{
'Grantee': {
'DisplayName': 'string',
'EmailAddress': 'string',
'ID': 'string',
'Type': 'CanonicalUser'|'AmazonCustomerByEmail'|'Group',
'URI': 'string'
},
'Permission': 'FULL_CONTROL'|'WRITE'|'WRITE_ACP'|'READ'|'READ_ACP'
},
],
'Owner': {
'DisplayName': 'string',
'ID': 'string'
}
},
GrantFullControl='string',
GrantRead='string',
GrantReadACP='string',
GrantWrite='string',
GrantWriteACP='string',
RequestPayer='requester',
VersionId='string'
)
client.setObjectAcl("<BUCKET-NAME>", "<KEY-NAME>", CannedAccessControlList.<CANNED_ACL>);
# Working on it...
// Include
#include <aws/s3/model/PutObjectAclRequest.h>
// Code
Aws::S3::Model::PutObjectAclRequest request;
request.WithBucket("<BUCKET-NAME>").WithKey("<KEY-NAME>")
.SetACL(Aws::S3::Model::ObjectCannedACL::<CANNED_ACL>);
auto outcome = s3_client.PutObjectAcl(request);
if (outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "PutObjectAcl error: " <<
outcome.GetError().GetExceptionName() << " " <<
outcome.GetError().GetMessage() << std::endl;
}
PutACLRequest request_put_acl = new PutACLRequest();
request_put_acl.BucketName = "<BUCKET-NAME>";
request_put_acl.Key = "<KEY-NAME>";
request_put_acl.CannedACL = S3CannedACL.<CANNED_ACL>;
client.PutACL(request_put_acl);
obj.acl.put({
acl: '<CANNED_ACL>'
})
<?php
$result = $client->putObjectAcl([
'ACL' => '<CANNED_ACL>',
'Bucket' => '<BUCKET-NAME>',
'Key' => '<KEY-NAME>'
]);
var params = {
Bucket: "<BUCKET-NAME>",
Key: "<FILE-NAME>",
ACL: "<CANNED_ACL>"
};
s3.putObjectAcl(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
const s3 = new AWS.S3()
const params = {
Bucket: '<BUCKET-NAME>',
Key: '<FILE-NAME>',
ACL: '<CANNED_ACL>'
};
s3.putObjectAcl(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
sess := session.Must(session.NewSessionWithOptions(session.Options{
SharedConfigState: session.SharedConfigEnable,
Config: *s3Config,
}))
svc := s3.New(sess)
acl, _ := svc.GetObjectAcl(&s3.GetObjectAclInput{
Bucket: aws.String("bucket-01"),
Key: aws.String("key"),
})
userType := "AmazonCustomerByEmail"
owner := *acl.Owner.DisplayName
ownerId := *acl.Owner.ID
grants := acl.Grants
var newGrantee = s3.Grantee{EmailAddress: aws.String("email"), Type: &userType}
var newGrant = s3.Grant{Grantee: &newGrantee, Permission: aws.String("<CANNED_ACL>")}
grants = append(grants, &newGrant)
params := &s3.PutObjectAclInput{
Bucket: aws.String("bucket-01"),
Key: aws.String("key"),
AccessControlPolicy: &s3.AccessControlPolicy{
Grants: grants,
Owner: &s3.Owner{
DisplayName: &owner,
ID: &ownerId,
},
},
}
svc.PutObjectAcl(params)
Parameters
<CANNED_ACL>
one of the following values:
private
The owner has full control (FULL_CONTROL); no one else has any access (default)
public-read
The owner has full control (FULL_CONTROL); all other users have read (READ) access
public-read-write
The owner has full control (FULL_CONTROL); all other users have read (READ) and write (WRITE) access
authenticated-read
The owner has full control (FULL_CONTROL); all other authenticated users have read (READ) access
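To see the effect of these values in practice, a hedged boto3 sketch (placeholder bucket/key, assuming the object is reachable anonymously through the same endpoint) that applies public-read and then reads the object back with an unsigned client:
import boto3
from botocore import UNSIGNED
from botocore.config import Config

s3client = boto3.client('s3', endpoint_url='https://hn.ss.bfcplatform.vn',
                        aws_access_key_id='<ACCESS_KEY_ID>',
                        aws_secret_access_key='<SECRET_KEY_ID>')
s3client.put_object_acl(Bucket='<BUCKET-NAME>', Key='<KEY-NAME>', ACL='public-read')

# An unsigned (anonymous) client can now read the object because of public-read.
anonymous = boto3.client('s3', endpoint_url='https://hn.ss.bfcplatform.vn',
                         config=Config(signature_version=UNSIGNED))
body = anonymous.get_object(Bucket='<BUCKET-NAME>', Key='<KEY-NAME>')['Body'].read()
print(len(body), 'bytes readable without credentials')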
Tagging
With tagging, users can attach tags to objects. Tags can then be used to filter, search, and set permissions on objects.
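Since tags are read per object through the API, filtering by tag is typically done on the client side. A hedged boto3 sketch (assuming the endpoint supports object tagging, as shown for the other SDKs below, and using a hypothetical project=demo tag):
import boto3

s3client = boto3.client('s3', endpoint_url='https://hn.ss.bfcplatform.vn',
                        aws_access_key_id='<ACCESS_KEY_ID>',
                        aws_secret_access_key='<SECRET_KEY_ID>')

# Keep the keys whose tag set contains the wanted tag (hypothetical key/value).
wanted = {'Key': 'project', 'Value': 'demo'}
matching_keys = []
for obj in s3client.list_objects_v2(Bucket='<BUCKET-NAME>').get('Contents', []):
    tags = s3client.get_object_tagging(Bucket='<BUCKET-NAME>', Key=obj['Key'])['TagSet']
    if wanted in tags:
        matching_keys.append(obj['Key'])
print(matching_keys)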
Get existing tags
# Not supported
// Working on it...
# Working on it...
// Include
#include <aws/s3/model/GetObjectTaggingRequest.h>
// Code
Aws::S3::Model::GetObjectTaggingRequest request;
request.WithBucket("<BUCKET-NAME>").WithKey("<KEY-NAME>");
auto outcome = s3_client.GetObjectTagging(request);
if (outcome.IsSuccess())
{
auto tag_set = outcome.GetResult().GetTagSet();
for (auto const &tag : tag_set)
{
std::cout << tag.GetKey() << ": " << tag.GetValue() << std::endl;
}
}
else
{
std::cout << "GetObjectTagging error: " <<
outcome.GetError().GetExceptionName() << " " <<
outcome.GetError().GetMessage() << std::endl;
}
// Working on it...
# Not supported
<?php
$result = $s3Client->getObjectTagging([
'Bucket' => '<BUCKET-NAME>',
'Key' => 'KEY-NAME'
]);
var params = {
Bucket: "<BUCKET-NAME>",
Key: "<KEY-NAME>"
};
s3.getObjectTagging(params, function(err, data) {
if (err) console.log(err, err.stack); // an error occurred
else console.log(data); // successful response
});
const s3 = new AWS.S3()
const params = {
Bucket: '<BUCKET-NAME>',
Key: '<FILE-NAME>'
}
s3.getObjectTagging(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
// Working on it...
Set tags
# Not supported
object = bucket.put_object(
ACL='private',
Tagging='string',
Metadata={
'string': 'string'
},
Key='string'
)
// Working on it...
# Working on it...
// Include
#include <aws/s3/model/PutObjectTaggingRequest.h>
//Code
Aws::S3::Model::PutObjectTaggingRequest request;
request.WithBucket("<BUCKET-NAME>").WithKey("<KEY-NAME>");
Aws::S3::Model::Tag tag1;
tag1.WithKey("<TAG-KEY-1>").WithValue("<TAG-VALUE-1>");
Aws::S3::Model::Tag tag2;
tag2.WithKey("<TAG-KEY-2>").WithValue("<TAG-VALUE-2>");
Aws::S3::Model::Tagging tagging;
tagging.AddTagSet(tag1).AddTagSet(tag2);
request.SetTagging(tagging);
auto outcome = s3_client.PutObjectTagging(request);
if (outcome.IsSuccess())
{
std::cout << "Done!" << std::endl;
}
else
{
std::cout << "PutObjectTagging error: " <<
outcome.GetError().GetExceptionName() << " " <<
outcome.GetError().GetMessage() << std::endl;
}
// Working on it...
# Not supported
<?php
$result = $s3Client->putObjectTagging([
'Bucket' => '<BUCKET-NAME>',
'Key' => '<KEY-NAME>',
'Tagging' => [
'TagSet' => [
[
'Key' => '<TAG-KEY-1>',
'Value' => '<TAG-VALUE-1>',
],
[
'Key' => '<TAG-KEY-2>',
'Value' => '<TAG-VALUE-2>',
]
],
],
]);
var params = {
Bucket: '<BUCKET-NAME>',
Key: '<FILE-NAME>',
Tagging: {
TagSet: [
{
Key: "<TAG-KEY-1>",
Value: "<TAG-VALUE-1>"
},
{
Key: "<TAG-KEY-2>",
Value: "<TAG-VALUE-2>"
}
]
}
}
s3.putObjectTagging(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
const s3 = new AWS.S3()
const params = {
Bucket: '<BUCKET-NAME>',
Key: '<FILE-NAME>',
Tagging: {
TagSet: [
{
Key: "<TAG-KEY-1>",
Value: "<TAG-VALUE-1>"
},
{
Key: "<TAG-KEY-2>",
Value: "<TAG-VALUE-2>"
}
]
}
}
s3.putObjectTagging(params, (err, data) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log(data) // successful response
})
// Working on it...
Sharing
Example: generate a share link for the file emoji/smile.png that is valid for 1 hour (3600 seconds)
object.metadata = {}
object.generate_url(3600)
# 'http://hn.ss.bfcplatform.vn/bucket-01/emoji/smile.png?Signature=xxxxxxxxxxxxxxxxxxxxxxxxxxxx&Expires=1534739664&AWSAccessKeyId=XXXXXXXXXXXXXXXXXXXX'
s3client.generate_presigned_url('get_object', Params = {'Bucket': 'vccloud1', 'Key': 'vccloud.txt'}, ExpiresIn = 1000)
# 'https://hn.ss.bfcplatform.vn/vccloud1/vccloud.txt?AWSAccessKeyId=xxxxxxxxxx&Signature=o%xxxxxxxxxxxxxxxxxxxxx%3D&Expires=1554709802'
Calendar cal = Calendar.getInstance();
cal.setTime(new Date());
cal.add(Calendar.HOUR_OF_DAY, 1);
cal.getTime();
GeneratePresignedUrlRequest request = new GeneratePresignedUrlRequest("bucket-01", "emoji/smile.png");
request.setExpiration(cal.getTime());
System.out.println("Generate url : " + client.generatePresignedUrl(request));
http://hn.ss.bfcplatform.vn/bucket-01/emoji/smile.png?Signature=xxxxxxxxxxxxxxxxxxxxxxxxxxxx&Expires=1534739664&AWSAccessKeyId=XXXXXXXXXXXXXXXXXXXX
S3BucketContext bucketContext = {
"hn.ss.bfcplatform.vn",
"<BUCKET-NAME>",
S3ProtocolHTTPS,
S3UriStylePath,
"<ACCESS_KEY_ID>",
"<SECRET_KEY_ID>"
};
char url[S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE];
int64_t expires = time(NULL) + 60 * 60; // Current time + 1 hour
S3_generate_authenticated_query_string(url, &bucketContext, "emoji/smile.png", expires, NULL);
auto url = s3_client.GeneratePresignedUrl("<BUCKET-NAME>", "emoji/smile.png",
Aws::Http::HttpMethod::HTTP_GET, 3600);
std::cout << url << std::endl;
GetPreSignedUrlRequest request_generate_url = new GetPreSignedUrlRequest();
request_generate_url.BucketName = "bucket-01";
request_generate_url.Key = "emoji/smile.png";
request_generate_url.Expires = DateTime.Now.AddHours(1);
request_generate_url.Protocol = Protocol.HTTP;
string generate_url = client.GetPreSignedURL(request_generate_url);
Console.WriteLine(generate_url);
obj.presigned_url('GET', expires_in: 3600)
# => "http://bucket-01.hn.ss.bfcplatform.vn/emoji/smile.png?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX&X-Amz-Date=XXXXXXXXXXXXXXXX&X-Amz-Expires=3600&X-Amz-SignedHeaders=xxxx&X-Amz-Signature=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
<?php
//Creating a presigned URL
$cmd = $s3Client->getCommand('GetObject', [
'Bucket' => '<BUCKET-NAME>',
'Key' => 'emoji/smile.png'
]);
$request = $s3Client->createPresignedRequest($cmd, '+3600 seconds');
// Get the actual presigned-url
$presignedUrl = (string) $request->getUri();
var params = {
Bucket: '<BUCKET-NAME>',
Key: '<FILE-NAME>',
Expires: 3600
}
s3.getSignedUrl('getObject', params, (err, url) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log('The URL is:', url) // successful response
})
const s3 = new AWS.S3()
const params = {
Bucket: '<BUCKET-NAME>',
Key: '<FILE-NAME>',
Expires: 3600
}
s3.getSignedUrl('getObject', params, (err, url) => {
if (err) console.log(err, err.stack) // an error occurred
else console.log('The URL is:', url) // successful response
})
req, _ := Client.GetObjectRequest(&s3.GetObjectInput{
Bucket: aws.String("bucket-01"),
Key: aws.String("file"),
})
url, _ := req.Presign(60 * time.Minute)
Generate a link for a client to upload a file
With this feature, the URL is generated on the server side; the client then uses that link to upload an object to SimpleStorage.
Once the link generated on the server is available, clients can use a library or a command-line tool to perform the upload. The example below uses the curl command on Linux.
For a link without metadata, use the command below
curl --request PUT --upload-file <path to file> "<url>"
For a link with metadata, the extra headers must be included so that they are sent to the server.
curl --request PUT --upload-file 1.jpg -H 'Content-Type:image/jpeg' -H 'x-amz-acl:public-read' "<url>"
Note
When using presigned URLs, the headers included when the presigned URL is generated must match the headers the client sends when uploading the file to the server.
For example, if the server-side code below includes the two headers Content-Type and x-amz-acl, then the client (such as the curl command above) must also send those same two headers, Content-Type and x-amz-acl.
If the headers of the presigned URL and of the client upload do not match, one of the following two errors may occur:
- A 400 Bad Request error when using a client tool such as curl
- A CORS error in the browser for the domain hn.ss.bfcplatform.vn or .hn.ss.bfcplatform.vn when using SDKs such as javascript or nodejs
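To illustrate this rule end to end, a hedged Python sketch (placeholder names; the third-party requests library stands in for the client) that generates a presigned PUT URL containing Content-Type and x-amz-acl and then uploads with exactly those headers:
import boto3
import requests
from botocore.client import Config

s3client = boto3.client('s3', endpoint_url='https://hn.ss.bfcplatform.vn',
                        aws_access_key_id='<ACCESS_KEY_ID>',
                        aws_secret_access_key='<SECRET_KEY_ID>',
                        config=Config(signature_version='s3v4'))

# Server side: the two headers become part of the signature.
url = s3client.generate_presigned_url(
    'put_object',
    Params={'Bucket': '<BUCKET-NAME>', 'Key': '1.jpg',
            'ContentType': 'image/jpeg', 'ACL': 'public-read'},
    ExpiresIn=3600, HttpMethod='PUT')

# Client side: send exactly the same headers, otherwise the upload fails with
# 400 Bad Request (or a CORS error in the browser).
with open('1.jpg', 'rb') as f:
    resp = requests.put(url, data=f,
                        headers={'Content-Type': 'image/jpeg', 'x-amz-acl': 'public-read'})
print(resp.status_code)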
# URL containing only the bucket and file
presigned_url = conn.generate_url(
expires_in=3600,
method='PUT',
bucket='<BUCKET-NAME>',
key='<File>',
)
# URL containing the bucket, file and the ACL and Content-Type metadata
presigned_url_with_metadata = conn.generate_url(
expires_in=3600,
method='PUT',
bucket='<BUCKET-NAME>',
key='<File>',
headers={'Content-Type': 'image/jpeg', 'x-amz-acl': 'public-read'},
)
from botocore.client import Config

s3client = boto3.client(service_name='s3',
aws_access_key_id='<ACCESS_KEY_ID>',
aws_secret_access_key='<SECRET_KEY_ID>',
endpoint_url=endpoint_url, use_ssl=False, verify=False,
config=Config(signature_version='s3v4'))
# URL containing only the bucket and file
presigned_url = s3client.generate_presigned_url('put_object', Params={'Bucket': '<BUCKET-NAME>', 'Key': '<File>'}, ExpiresIn=3600, HttpMethod='PUT')
# URL containing the bucket, file and the ACL and Content-Type metadata
presigned_url_with_metadata = s3client.generate_presigned_url('put_object', Params={'Bucket': '<BUCKET-NAME>', 'Key': '<File>', 'ContentType': 'image/jpeg', 'ACL': 'public-read'}, ExpiresIn=3600, HttpMethod='PUT')
Working on it.
Working on it.
Working on it.
Working on it.
Working on it.
Working on it.
Working on it.
const presignedURL = s3.getSignedUrl('putObject', {
Bucket: '<BUCKET-NAME>',
Key: '<File>',
Expires: 60*5,
ContentType:'image/jpeg',
ACL:'public-read'
})
console.log(presignedURL)
Working on it.
Object Lock
This feature protects data on the Simple Storage system against deletion, whether accidental or intentional.
Simple Storage provides two ways to manage how long an object stays locked:
- Retention period: the data is locked temporarily for a specified period; once that period has passed, the object can be deleted. The period can be given in days or in years.
- Legal hold: the data is locked in the same way as with a retention period, except that there is no time limit; the lock stays in place until it is removed explicitly.
Simple Storage provides two modes for Object Lock within a retention period (the difference is illustrated in the sketch right after this list):
- Governance: this mode protects data against deletion by most users, but users granted the s3:BypassGovernanceRetention permission can still delete it.
- Compliance: in this mode no one can delete the data during the specified period, not even the most privileged user.
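A hedged boto3 sketch (placeholder bucket, key and version id) of how the two modes behave when a locked version is deleted; the full step-by-step walkthrough follows below.
import boto3
from botocore.exceptions import ClientError

s3_client = boto3.client('s3', endpoint_url='https://hn.ss.bfcplatform.vn',
                         aws_access_key_id='<ACCESS_KEY_ID>',
                         aws_secret_access_key='<SECRET_KEY_ID>')

try:
    # While the retention period is active, this call is rejected in both modes.
    s3_client.delete_object(Bucket='<BUCKET-NAME>', Key='<KEY-NAME>',
                            VersionId='<VERSION-ID>')
except ClientError as err:
    print('blocked by Object Lock:', err.response['Error']['Code'])

# GOVERNANCE mode only: a user allowed s3:BypassGovernanceRetention can bypass
# the retention; in COMPLIANCE mode this call is rejected as well.
s3_client.delete_object(Bucket='<BUCKET-NAME>', Key='<KEY-NAME>',
                        VersionId='<VERSION-ID>',
                        BypassGovernanceRetention=True)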
Requirements for using this feature
- Bucket versioning must be enabled
- Object Lock must be enabled when the bucket is created; it cannot be enabled later on an existing bucket
#1. Create a bucket with Object Lock enabled
from datetime import datetime, timedelta  # used by RetainUntilDate in step 3
bucket_name = '<BUCKET-NAME>'
s3_client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
response = s3_client.put_bucket_versioning(
Bucket=bucket_name,
VersioningConfiguration={
'Status': 'Enabled'
}
)
#2. Enable Object Lock by default for objects in the bucket and configure the default retention period (optional).
# Here GOVERNANCE mode with a 30-year default is used; users with the s3:BypassGovernanceRetention permission can still delete objects (use 'COMPLIANCE' if no one should be able to).
response = s3_client.put_object_lock_configuration(
Bucket=bucket_name,
ObjectLockConfiguration={
'ObjectLockEnabled': 'Enabled',
'Rule': {
'DefaultRetention': {
'Mode': 'GOVERNANCE',
'Years': 30
}
}
},
)
#3. Upload a file and then apply Object Lock retention to it (not needed if a bucket-level default is already enabled).
# key_name is the object key; version_id of the uploaded version can be read, for example, from head_object().
s3_client.upload_file(
Filename='1.txt',
Bucket=bucket_name,
Key=key_name,
)
s3_client.put_object_retention(
Bucket=bucket_name,
Key=key_name,
VersionId=version_id,
Retention={
'Mode':'GOVERNANCE',
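# 10950 days ≈ 30 years, matching the bucket-level default configured in step 2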
'RetainUntilDate':datetime.now() + timedelta(days=10950)
},
)
#4. Test deleting the object in Governance mode
response = s3_client.delete_object(
Bucket=bucket_name,
Key=key_name,
VersionId=version_id,
BypassGovernanceRetention=True
)
#5. View the object's retention information
response = s3_client.get_object_retention(
Bucket=bucket_name,
Key=key_name,
VersionId=version_id,
)
#6. Place the object under legal hold
s3_client.put_object_legal_hold(
Bucket=bucket_name,
Key=key_name,
VersionId=version_id,
LegalHold={
'Status':'ON',
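# set 'Status': 'OFF' later to remove the legal hold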
},
)
#7. Get the legal hold information of the object
response = s3_client.get_object_legal_hold(
Bucket=bucket_name,
VersionId=version_id,
Key=key_name
)
print(response)
Working on it.
Working on it.
Working on it.
Working on it.
Working on it.
Working on it.
Working on it.
Working on it.
Working on it.
References
Python
https://boto.readthedocs.io/en/latest/ref/s3.html
https://boto3.amazonaws.com/v1/documentation/api/latest/index.html
Java https://aws.amazon.com/sdk-for-java/
C https://github.com/bji/libs3
C++ https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/welcome.html
C# https://aws.amazon.com/sdk-for-net/
Ruby https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3.html
PHP https://docs.aws.amazon.com/sdk-for-php/v3/developer-guide/welcome.html
Javascript https://docs.aws.amazon.com/sdk-for-javascript/v2/developer-guide/welcome.html
Golang https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/welcome.html