// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
	"context"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"reflect"
	"strings"
	"time"

	"cloud.google.com/go/compute/metadata"
	"cloud.google.com/go/internal/optional"
	"cloud.google.com/go/internal/trace"
	"cloud.google.com/go/storage/internal/apiv2/storagepb"
	"google.golang.org/api/googleapi"
	"google.golang.org/api/iamcredentials/v1"
	"google.golang.org/api/iterator"
	"google.golang.org/api/option"
	raw "google.golang.org/api/storage/v1"
	dpb "google.golang.org/genproto/googleapis/type/date"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)
// BucketHandle provides operations on a Google Cloud Storage bucket.
// Use Client.Bucket to get a handle.
type BucketHandle struct {
	c                     *Client
	name                  string
	acl                   ACLHandle
	defaultObjectACL      ACLHandle
	conds                 *BucketConditions
	userProject           string // project for Requester Pays buckets
	retry                 *retryConfig
	enableObjectRetention *bool
}
// Bucket returns a BucketHandle, which provides operations on the named bucket.
// This call does not perform any network operations.
//
// The supplied name must contain only lowercase letters, numbers, dashes,
// underscores, and dots. The full specification for valid bucket names can be
// found at:
//
//	https://cloud.google.com/storage/docs/bucket-naming
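//
// A typical call looks like the following sketch (the bucket name is
// illustrative, and client is assumed to be a *Client from NewClient):
//
//	bkt := client.Bucket("my-bucket")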
func (c *Client) Bucket(name string) *BucketHandle {
	retry := c.retry.clone()
	return &BucketHandle{
		c:    c,
		name: name,
		acl: ACLHandle{
			c:      c,
			bucket: name,
			retry:  retry,
		},
		defaultObjectACL: ACLHandle{
			c:         c,
			bucket:    name,
			isDefault: true,
			retry:     retry,
		},
		retry: retry,
	}
}
// Create creates the Bucket in the project.
// If attrs is nil the API defaults will be used.
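//
// A minimal sketch of creating a bucket with default attributes (project and
// bucket names are illustrative; client and ctx are assumed to exist):
//
//	bkt := client.Bucket("my-new-bucket")
//	if err := bkt.Create(ctx, "my-project-id", nil); err != nil {
//		// handle error
//	}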
func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) (err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
	defer func() { trace.EndSpan(ctx, err) }()

	o := makeStorageOpts(true, b.retry, b.userProject)
	if _, err := b.c.tc.CreateBucket(ctx, projectID, b.name, attrs, b.enableObjectRetention, o...); err != nil {
		return err
	}
	return nil
}
// Delete deletes the Bucket.
func (b *BucketHandle) Delete(ctx context.Context) (err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete")
	defer func() { trace.EndSpan(ctx, err) }()

	o := makeStorageOpts(true, b.retry, b.userProject)
	return b.c.tc.DeleteBucket(ctx, b.name, b.conds, o...)
}
// ACL returns an ACLHandle, which provides access to the bucket's access control list.
// This controls who can list, create or overwrite the objects in a bucket.
// This call does not perform any network operations.
func (b *BucketHandle) ACL() *ACLHandle {
	return &b.acl
}
// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs.
// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL.
// This call does not perform any network operations.
func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
	return &b.defaultObjectACL
}
// Object returns an ObjectHandle, which provides operations on the named object.
// This call does not perform any network operations such as fetching the object or verifying its existence.
// Use methods on ObjectHandle to perform network operations.
//
// name must consist entirely of valid UTF-8-encoded runes. The full specification
// for valid object names can be found at:
//
//	https://cloud.google.com/storage/docs/naming-objects
func (b *BucketHandle) Object(name string) *ObjectHandle {
	retry := b.retry.clone()
	return &ObjectHandle{
		c:      b.c,
		bucket: b.name,
		object: name,
		acl: ACLHandle{
			c:           b.c,
			bucket:      b.name,
			object:      name,
			userProject: b.userProject,
			retry:       retry,
		},
		gen:         -1,
		userProject: b.userProject,
		retry:       retry,
	}
}
// Attrs returns the metadata for the bucket.
func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs")
	defer func() { trace.EndSpan(ctx, err) }()

	o := makeStorageOpts(true, b.retry, b.userProject)
	return b.c.tc.GetBucket(ctx, b.name, b.conds, o...)
}
// Update updates a bucket's attributes.
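//
// A minimal sketch, assuming bkt is an existing *BucketHandle and the label
// key/value are illustrative (error handling elided):
//
//	uattrs := storage.BucketAttrsToUpdate{VersioningEnabled: true}
//	uattrs.SetLabel("team", "storage-examples")
//	attrs, err := bkt.Update(ctx, uattrs)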
func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (attrs *BucketAttrs, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Update")
	defer func() { trace.EndSpan(ctx, err) }()

	isIdempotent := b.conds != nil && b.conds.MetagenerationMatch != 0
	o := makeStorageOpts(isIdempotent, b.retry, b.userProject)
	return b.c.tc.UpdateBucket(ctx, b.name, &uattrs, b.conds, o...)
}
// SignedURL returns a URL for the specified object. Signed URLs allow anyone
// access to a restricted resource for a limited time without needing a Google
// account or signing in.
// For more information about signed URLs, see "[Overview of access control]."
//
// This method requires the Method and Expires fields in the specified
// SignedURLOptions to be non-nil. You may need to set the GoogleAccessID and
// PrivateKey fields in some cases. Read more on the [automatic detection of credentials]
// for this method.
//
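// A minimal usage sketch (bucket and object names are illustrative; client is
// assumed to exist, and error handling is elided):
//
//	url, err := client.Bucket("my-bucket").SignedURL("my-object", &storage.SignedURLOptions{
//		Method:  "GET",
//		Expires: time.Now().Add(15 * time.Minute),
//	})
//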
// [Overview of access control]: https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication
// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing
func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string, error) {
	// Make a copy of opts so we don't modify the pointer parameter.
	newopts := opts.clone()

	if newopts.Hostname == "" {
		// Extract the correct host from the readhost set on the client.
		newopts.Hostname = b.c.xmlHost
	}

	if opts.GoogleAccessID != "" && (opts.SignBytes != nil || len(opts.PrivateKey) > 0) {
		return SignedURL(b.name, object, newopts)
	}

	if newopts.GoogleAccessID == "" {
		id, err := b.detectDefaultGoogleAccessID()
		if err != nil {
			return "", err
		}
		newopts.GoogleAccessID = id
	}
	if newopts.SignBytes == nil && len(newopts.PrivateKey) == 0 {
		if b.c.creds != nil && len(b.c.creds.JSON) > 0 {
			var sa struct {
				PrivateKey string `json:"private_key"`
			}
			err := json.Unmarshal(b.c.creds.JSON, &sa)
			if err == nil && sa.PrivateKey != "" {
				newopts.PrivateKey = []byte(sa.PrivateKey)
			}
		}
		// Don't error out if we can't unmarshal the private key from the client;
		// fall back to the default sign function for the service account.
		if len(newopts.PrivateKey) == 0 {
			newopts.SignBytes = b.defaultSignBytesFunc(newopts.GoogleAccessID)
		}
	}
	return SignedURL(b.name, object, newopts)
}
// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts.
// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads.
//
// This method requires the Expires field in the specified PostPolicyV4Options
// to be non-nil. You may need to set the GoogleAccessID and PrivateKey fields
// in some cases. Read more on the [automatic detection of credentials] for this method.
//
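// A minimal sketch (bucket and object names are illustrative; client is
// assumed to exist, and error handling is elided). The returned value carries
// the URL to POST to and the form fields to include:
//
//	policy, err := client.Bucket("my-bucket").GenerateSignedPostPolicyV4("my-object", &storage.PostPolicyV4Options{
//		Expires: time.Now().Add(15 * time.Minute),
//	})
//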
// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing
func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) {
	// Make a copy of opts so we don't modify the pointer parameter.
	newopts := opts.clone()

	if newopts.Hostname == "" {
		// Extract the correct host from the readhost set on the client.
		newopts.Hostname = b.c.xmlHost
	}

	if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) {
		return GenerateSignedPostPolicyV4(b.name, object, newopts)
	}

	if newopts.GoogleAccessID == "" {
		id, err := b.detectDefaultGoogleAccessID()
		if err != nil {
			return nil, err
		}
		newopts.GoogleAccessID = id
	}
	if newopts.SignBytes == nil && newopts.SignRawBytes == nil && len(newopts.PrivateKey) == 0 {
		if b.c.creds != nil && len(b.c.creds.JSON) > 0 {
			var sa struct {
				PrivateKey string `json:"private_key"`
			}
			err := json.Unmarshal(b.c.creds.JSON, &sa)
			if err == nil && sa.PrivateKey != "" {
				newopts.PrivateKey = []byte(sa.PrivateKey)
			}
		}
		// Don't error out if we can't unmarshal the private key from the client;
		// fall back to the default sign function for the service account.
		if len(newopts.PrivateKey) == 0 {
			newopts.SignRawBytes = b.defaultSignBytesFunc(newopts.GoogleAccessID)
		}
	}
	return GenerateSignedPostPolicyV4(b.name, object, newopts)
}
func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) {
	returnErr := errors.New("no credentials found on client and not on GCE (Google Compute Engine)")

	if b.c.creds != nil && len(b.c.creds.JSON) > 0 {
		var sa struct {
			ClientEmail        string `json:"client_email"`
			SAImpersonationURL string `json:"service_account_impersonation_url"`
			CredType           string `json:"type"`
		}

		err := json.Unmarshal(b.c.creds.JSON, &sa)
		if err != nil {
			returnErr = err
		} else if sa.CredType == "impersonated_service_account" {
			start, end := strings.LastIndex(sa.SAImpersonationURL, "/"), strings.LastIndex(sa.SAImpersonationURL, ":")
			if end <= start {
				returnErr = errors.New("error parsing impersonated service account credentials")
			} else {
				return sa.SAImpersonationURL[start+1 : end], nil
			}
		} else if sa.CredType == "service_account" && sa.ClientEmail != "" {
			return sa.ClientEmail, nil
		} else {
			returnErr = errors.New("unable to parse credentials; only service_account and impersonated_service_account credentials are supported")
		}
	}

	// Don't error out if we can't unmarshal; fall back to the GCE check.
	if metadata.OnGCE() {
		email, err := metadata.Email("default")
		if err == nil && email != "" {
			return email, nil
		} else if err != nil {
			returnErr = err
		} else {
			returnErr = errors.New("empty email from GCE metadata service")
		}
	}
	return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %w. Please provide the GoogleAccessID or use a supported means for autodetecting it (see https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_[BucketHandle.SignedURL]_and_[BucketHandle.GenerateSignedPostPolicyV4])", returnErr)
}
func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte, error) {
	return func(in []byte) ([]byte, error) {
		ctx := context.Background()

		// It's ok to recreate this service per call since we pass in the http client,
		// circumventing the cost of recreating the auth/transport layer
		svc, err := iamcredentials.NewService(ctx, option.WithHTTPClient(b.c.hc))
		if err != nil {
			return nil, fmt.Errorf("unable to create iamcredentials client: %w", err)
		}

		resp, err := svc.Projects.ServiceAccounts.SignBlob(fmt.Sprintf("projects/-/serviceAccounts/%s", email), &iamcredentials.SignBlobRequest{
			Payload: base64.StdEncoding.EncodeToString(in),
		}).Do()
		if err != nil {
			return nil, fmt.Errorf("unable to sign bytes: %w", err)
		}

		out, err := base64.StdEncoding.DecodeString(resp.SignedBlob)
		if err != nil {
			return nil, fmt.Errorf("unable to base64 decode response: %w", err)
		}
		return out, nil
	}
}
// BucketAttrs represents the metadata for a Google Cloud Storage bucket.
// Read-only fields are ignored by BucketHandle.Create.
type BucketAttrs struct {
// Name is the name of the bucket.
// This field is read-only.
Name string
// ACL is the list of access control rules on the bucket.
	ACL []ACLRule
// BucketPolicyOnly is an alias for UniformBucketLevelAccess. Use of
// UniformBucketLevelAccess is recommended above the use of this field.
// Setting BucketPolicyOnly.Enabled OR UniformBucketLevelAccess.Enabled to
// true, will enable UniformBucketLevelAccess.
BucketPolicyOnly BucketPolicyOnly
// UniformBucketLevelAccess configures access checks to use only bucket-level IAM
// policies and ignore any ACL rules for the bucket.
// See https://cloud.google.com/storage/docs/uniform-bucket-level-access
// for more information.
UniformBucketLevelAccess UniformBucketLevelAccess
// PublicAccessPrevention is the setting for the bucket's
// PublicAccessPrevention policy, which can be used to prevent public access
// of data in the bucket. See
// https://cloud.google.com/storage/docs/public-access-prevention for more
// information.
PublicAccessPrevention PublicAccessPrevention
	// DefaultObjectACL is the list of access controls to
	// apply to new objects when no object ACL is provided.
	DefaultObjectACL []ACLRule
// DefaultEventBasedHold is the default value for event-based hold on
// newly created objects in this bucket. It defaults to false.
DefaultEventBasedHold bool
// If not empty, applies a predefined set of access controls. It should be set
// only when creating a bucket.
// It is always empty for BucketAttrs returned from the service.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
// for valid values.
PredefinedACL string
// If not empty, applies a predefined set of default object access controls.
// It should be set only when creating a bucket.
// It is always empty for BucketAttrs returned from the service.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
// for valid values.
PredefinedDefaultObjectACL string
// Location is the location of the bucket. It defaults to "US".
	// If specifying a dual-region, CustomPlacementConfig should be set in conjunction.
	Location string
	// The bucket's custom placement configuration that holds a list of
	// regional locations for custom dual regions.
	CustomPlacementConfig *CustomPlacementConfig
// MetaGeneration is the metadata generation of the bucket.
// This field is read-only.
MetaGeneration int64
// StorageClass is the default storage class of the bucket. This defines
// how objects in the bucket are stored and determines the SLA
// and the cost of storage. Typical values are "STANDARD", "NEARLINE",
// "COLDLINE" and "ARCHIVE". Defaults to "STANDARD".
// See https://cloud.google.com/storage/docs/storage-classes for all
// valid values.
StorageClass string
// Created is the creation time of the bucket.
// This field is read-only.
	Created time.Time
// VersioningEnabled reports whether this bucket has versioning enabled.
VersioningEnabled bool
// Labels are the bucket's labels.
	Labels map[string]string
// RequesterPays reports whether the bucket is a Requester Pays bucket.
// Clients performing operations on Requester Pays buckets must provide
// a user project (see BucketHandle.UserProject), which will be billed
// for the operations.
RequesterPays bool
// Lifecycle is the lifecycle configuration for objects in the bucket.
Lifecycle Lifecycle
// Retention policy enforces a minimum retention time for all objects
// contained in the bucket. A RetentionPolicy of nil implies the bucket
// has no minimum data retention.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
	RetentionPolicy *RetentionPolicy
	// The bucket's Cross-Origin Resource Sharing (CORS) configuration.
	CORS []CORS
	// The encryption configuration used by default for newly inserted objects.
	Encryption *BucketEncryption
	// The logging configuration.
	Logging *BucketLogging
	// The website configuration.
	Website *BucketWebsite
// Etag is the HTTP/1.1 Entity tag for the bucket.
// This field is read-only.
Etag string
// LocationType describes how data is stored and replicated.
// Typical values are "multi-region", "region" and "dual-region".
// This field is read-only.
LocationType string
// The project number of the project the bucket belongs to.
// This field is read-only.
ProjectNumber uint64
// RPO configures the Recovery Point Objective (RPO) policy of the bucket.
// Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket.
// See https://cloud.google.com/storage/docs/managing-turbo-replication for
// more information.
RPO RPO
	// Autoclass holds the bucket's autoclass configuration. If enabled,
	// allows for the automatic selection of the best storage class
	// based on object access patterns.
	Autoclass *Autoclass
// ObjectRetentionMode reports whether individual objects in the bucket can
// be configured with a retention policy. An empty value means that object
// retention is disabled.
// This field is read-only. Object retention can be enabled only by creating
// a bucket with SetObjectRetention set to true on the BucketHandle. It
// cannot be modified once the bucket is created.
// ObjectRetention cannot be configured or reported through the gRPC API.
ObjectRetentionMode string
}
// BucketPolicyOnly is an alias for UniformBucketLevelAccess.
// Use of UniformBucketLevelAccess is preferred above BucketPolicyOnly.
type BucketPolicyOnly struct {
// Enabled specifies whether access checks use only bucket-level IAM
// policies. Enabled may be disabled until the locked time.
Enabled bool
// LockedTime specifies the deadline for changing Enabled from true to
// false.
	LockedTime time.Time
}
// UniformBucketLevelAccess configures access checks to use only bucket-level IAM
// policies.
type UniformBucketLevelAccess struct {
// Enabled specifies whether access checks use only bucket-level IAM
// policies. Enabled may be disabled until the locked time.
Enabled bool
// LockedTime specifies the deadline for changing Enabled from true to
// false.
	LockedTime time.Time
}
// PublicAccessPrevention configures the Public Access Prevention feature, which
// can be used to disallow public access to any data in a bucket. See
// https://cloud.google.com/storage/docs/public-access-prevention for more
// information.
type PublicAccessPrevention int
const (
// PublicAccessPreventionUnknown is a zero value, used only if this field is
// not set in a call to GCS.
PublicAccessPreventionUnknown PublicAccessPrevention = iota
// PublicAccessPreventionUnspecified corresponds to a value of "unspecified".
// Deprecated: use PublicAccessPreventionInherited
PublicAccessPreventionUnspecified
// PublicAccessPreventionEnforced corresponds to a value of "enforced". This
// enforces Public Access Prevention on the bucket.
PublicAccessPreventionEnforced
// PublicAccessPreventionInherited corresponds to a value of "inherited"
// and is the default for buckets.
PublicAccessPreventionInherited
publicAccessPreventionUnknown string = ""
// TODO: remove unspecified when change is fully completed
publicAccessPreventionUnspecified = "unspecified"
publicAccessPreventionEnforced = "enforced"
publicAccessPreventionInherited = "inherited"
)
func (p PublicAccessPrevention) String() string {
	switch p {
	case PublicAccessPreventionInherited, PublicAccessPreventionUnspecified:
		return publicAccessPreventionInherited
	case PublicAccessPreventionEnforced:
		return publicAccessPreventionEnforced
	default:
		return publicAccessPreventionUnknown
	}
}
// Lifecycle is the lifecycle configuration for objects in the bucket.
type Lifecycle struct {
	Rules []LifecycleRule
}
// RetentionPolicy enforces a minimum retention time for all objects
// contained in the bucket.
//
// Any attempt to overwrite or delete objects younger than the retention
// period will result in an error. An unlocked retention policy can be
// modified or removed from the bucket via the Update method. A
// locked retention policy cannot be removed or shortened in duration
// for the lifetime of the bucket.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
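//
// A sketch of attaching a 30-day retention policy at bucket creation (names
// are illustrative; client and ctx are assumed to exist):
//
//	attrs := &storage.BucketAttrs{
//		RetentionPolicy: &storage.RetentionPolicy{RetentionPeriod: 30 * 24 * time.Hour},
//	}
//	err := client.Bucket("my-bucket").Create(ctx, "my-project-id", attrs)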
type RetentionPolicy struct {
// RetentionPeriod specifies the duration that objects need to be
// retained. Retention duration must be greater than zero and less than
// 100 years. Note that enforcement of retention periods less than a day
// is not guaranteed. Such periods should only be used for testing
// purposes.
	RetentionPeriod time.Duration
// EffectiveTime is the time from which the policy was enforced and
// effective. This field is read-only.
	EffectiveTime time.Time
// IsLocked describes whether the bucket is locked. Once locked, an
// object retention policy cannot be modified.
// This field is read-only.
IsLocked bool
}
const (
// RFC3339 timestamp with only the date segment, used for CreatedBefore,
// CustomTimeBefore, and NoncurrentTimeBefore in LifecycleRule.
2019-11-07 20:05:39 +01:00
rfc3339Date = "2006-01-02"
	// DeleteAction is a lifecycle action that deletes live and/or archived
// objects. Takes precedence over SetStorageClass actions.
DeleteAction = "Delete"
// SetStorageClassAction changes the storage class of live and/or archived
// objects.
SetStorageClassAction = "SetStorageClass"
// AbortIncompleteMPUAction is a lifecycle action that aborts an incomplete
// multipart upload when the multipart upload meets the conditions specified
// in the lifecycle rule. The AgeInDays condition is the only allowed
// condition for this action. AgeInDays is measured from the time the
// multipart upload was created.
AbortIncompleteMPUAction = "AbortIncompleteMultipartUpload"
)
// LifecycleRule is a lifecycle configuration rule.
//
// When all the configured conditions are met by an object in the bucket, the
// configured action will automatically be taken on that object.
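//
// For example, a sketch of a rule that deletes objects older than 30 days:
//
//	rule := storage.LifecycleRule{
//		Action:    storage.LifecycleAction{Type: storage.DeleteAction},
//		Condition: storage.LifecycleCondition{AgeInDays: 30},
//	}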
type LifecycleRule struct {
// Action is the action to take when all of the associated conditions are
// met.
Action LifecycleAction
// Condition is the set of conditions that must be met for the associated
// action to be taken.
Condition LifecycleCondition
}
// LifecycleAction is a lifecycle configuration action.
type LifecycleAction struct {
// Type is the type of action to take on matching objects.
//
// Acceptable values are storage.DeleteAction, storage.SetStorageClassAction,
// and storage.AbortIncompleteMPUAction.
Type string
// StorageClass is the storage class to set on matching objects if the Action
// is "SetStorageClass".
StorageClass string
}
// Liveness specifies whether the object is live or not.
type Liveness int
const (
// LiveAndArchived includes both live and archived objects.
LiveAndArchived Liveness = iota
// Live specifies that the object is still live.
Live
// Archived specifies that the object is archived.
Archived
)
// LifecycleCondition is a set of conditions used to match objects and take an
// action automatically.
//
// All configured conditions must be met for the associated action to be taken.
type LifecycleCondition struct {
	// AllObjects is used to select all objects in a bucket by
	// setting AgeInDays to 0.
	AllObjects bool

	// AgeInDays is the age of the object in days.
	// If you want to set AgeInDays to `0` use AllObjects set to `true`.
	AgeInDays int64

	// CreatedBefore is the time the object was created.
	//
	// This condition is satisfied when an object is created before midnight of
	// the specified date in UTC.
	CreatedBefore time.Time

	// CustomTimeBefore is the CustomTime metadata field of the object. This
	// condition is satisfied when an object's CustomTime timestamp is before
	// midnight of the specified date in UTC.
	//
	// This condition can only be satisfied if CustomTime has been set.
	CustomTimeBefore time.Time

	// DaysSinceCustomTime is the days elapsed since the CustomTime date of the
	// object. This condition can only be satisfied if CustomTime has been set.
	// Note: Using `0` as the value will be ignored by the library and not sent to the API.
	DaysSinceCustomTime int64

	// DaysSinceNoncurrentTime is the days elapsed since the noncurrent timestamp
	// of the object. This condition is relevant only for versioned objects.
	// Note: Using `0` as the value will be ignored by the library and not sent to the API.
	DaysSinceNoncurrentTime int64

	// Liveness specifies the object's liveness. Relevant only for versioned objects
	Liveness Liveness

	// MatchesPrefix is the condition matching an object if any of the
	// matches_prefix strings are an exact prefix of the object's name.
	MatchesPrefix []string

	// MatchesStorageClasses is the condition matching the object's storage
	// class.
	//
	// Values include "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE".
	MatchesStorageClasses []string

	// MatchesSuffix is the condition matching an object if any of the
	// matches_suffix strings are an exact suffix of the object's name.
	MatchesSuffix []string

	// NoncurrentTimeBefore is the noncurrent timestamp of the object. This
	// condition is satisfied when an object's noncurrent timestamp is before
	// midnight of the specified date in UTC.
	//
	// This condition is relevant only for versioned objects.
	NoncurrentTimeBefore time.Time

	// NumNewerVersions is the condition matching objects with a number of newer versions.
	//
	// If the value is N, this condition is satisfied when there are at least N
	// versions (including the live version) newer than this version of the
	// object.
	// Note: Using `0` as the value will be ignored by the library and not sent to the API.
	NumNewerVersions int64
}
// BucketLogging holds the bucket's logging configuration, which defines the
// destination bucket and optional name prefix for the current bucket's
// logs.
type BucketLogging struct {
// The destination bucket where the current bucket's logs
// should be placed.
LogBucket string
// A prefix for log object names.
LogObjectPrefix string
}
// BucketWebsite holds the bucket's website configuration, controlling how the
// service behaves when accessing bucket contents as a web site. See
// https://cloud.google.com/storage/docs/static-website for more information.
type BucketWebsite struct {
// If the requested object path is missing, the service will ensure the path has
// a trailing '/', append this suffix, and attempt to retrieve the resulting
// object. This allows the creation of index.html objects to represent directory
// pages.
MainPageSuffix string
// If the requested object path is missing, and any mainPageSuffix object is
// missing, if applicable, the service will return the named object from this
// bucket as the content for a 404 Not Found result.
NotFoundPage string
}
// CustomPlacementConfig holds the bucket's custom placement
// configuration for Custom Dual Regions. See
// https://cloud.google.com/storage/docs/locations#location-dr for more information.
type CustomPlacementConfig struct {
// The list of regional locations in which data is placed.
// Custom Dual Regions require exactly 2 regional locations.
	DataLocations []string
}
// Autoclass holds the bucket's autoclass configuration. If enabled,
// allows for the automatic selection of the best storage class
// based on object access patterns. See
// https://cloud.google.com/storage/docs/using-autoclass for more information.
type Autoclass struct {
// Enabled specifies whether the autoclass feature is enabled
// on the bucket.
Enabled bool
// ToggleTime is the time from which Autoclass was last toggled.
// If Autoclass is enabled when the bucket is created, the ToggleTime
// is set to the bucket creation time. This field is read-only.
	ToggleTime time.Time
// TerminalStorageClass: The storage class that objects in the bucket
// eventually transition to if they are not read for a certain length of
// time. Valid values are NEARLINE and ARCHIVE.
TerminalStorageClass string
// TerminalStorageClassUpdateTime represents the time of the most recent
// update to "TerminalStorageClass".
	TerminalStorageClassUpdateTime time.Time
}
func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
	if b == nil {
		return nil, nil
	}
	rp, err := toRetentionPolicy(b.RetentionPolicy)
	if err != nil {
		return nil, err
	}

	return &BucketAttrs{
		Name:                     b.Name,
		Location:                 b.Location,
		MetaGeneration:           b.Metageneration,
		DefaultEventBasedHold:    b.DefaultEventBasedHold,
		StorageClass:             b.StorageClass,
		Created:                  convertTime(b.TimeCreated),
		VersioningEnabled:        b.Versioning != nil && b.Versioning.Enabled,
		ACL:                      toBucketACLRules(b.Acl),
		DefaultObjectACL:         toObjectACLRules(b.DefaultObjectAcl),
		Labels:                   b.Labels,
		RequesterPays:            b.Billing != nil && b.Billing.RequesterPays,
		Lifecycle:                toLifecycle(b.Lifecycle),
		RetentionPolicy:          rp,
		ObjectRetentionMode:      toBucketObjectRetention(b.ObjectRetention),
		CORS:                     toCORS(b.Cors),
		Encryption:               toBucketEncryption(b.Encryption),
		Logging:                  toBucketLogging(b.Logging),
		Website:                  toBucketWebsite(b.Website),
		BucketPolicyOnly:         toBucketPolicyOnly(b.IamConfiguration),
		UniformBucketLevelAccess: toUniformBucketLevelAccess(b.IamConfiguration),
		PublicAccessPrevention:   toPublicAccessPrevention(b.IamConfiguration),
		Etag:                     b.Etag,
		LocationType:             b.LocationType,
		ProjectNumber:            b.ProjectNumber,
		RPO:                      toRPO(b),
		CustomPlacementConfig:    customPlacementFromRaw(b.CustomPlacementConfig),
		Autoclass:                toAutoclassFromRaw(b.Autoclass),
	}, nil
}
func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs {
	if b == nil {
		return nil
	}
	return &BucketAttrs{
		Name:                     parseBucketName(b.GetName()),
		Location:                 b.GetLocation(),
		MetaGeneration:           b.GetMetageneration(),
		DefaultEventBasedHold:    b.GetDefaultEventBasedHold(),
		StorageClass:             b.GetStorageClass(),
		Created:                  b.GetCreateTime().AsTime(),
		VersioningEnabled:        b.GetVersioning().GetEnabled(),
		ACL:                      toBucketACLRulesFromProto(b.GetAcl()),
		DefaultObjectACL:         toObjectACLRulesFromProto(b.GetDefaultObjectAcl()),
		Labels:                   b.GetLabels(),
		RequesterPays:            b.GetBilling().GetRequesterPays(),
		Lifecycle:                toLifecycleFromProto(b.GetLifecycle()),
		RetentionPolicy:          toRetentionPolicyFromProto(b.GetRetentionPolicy()),
		CORS:                     toCORSFromProto(b.GetCors()),
		Encryption:               toBucketEncryptionFromProto(b.GetEncryption()),
		Logging:                  toBucketLoggingFromProto(b.GetLogging()),
		Website:                  toBucketWebsiteFromProto(b.GetWebsite()),
		BucketPolicyOnly:         toBucketPolicyOnlyFromProto(b.GetIamConfig()),
		UniformBucketLevelAccess: toUniformBucketLevelAccessFromProto(b.GetIamConfig()),
		PublicAccessPrevention:   toPublicAccessPreventionFromProto(b.GetIamConfig()),
		LocationType:             b.GetLocationType(),
		RPO:                      toRPOFromProto(b),
		CustomPlacementConfig:    customPlacementFromProto(b.GetCustomPlacementConfig()),
		ProjectNumber:            parseProjectNumber(b.GetProject()), // this can return 0 if the project resource name is ID based
		Autoclass:                toAutoclassFromProto(b.GetAutoclass()),
	}
}
// toRawBucket copies the editable attribute from b to the raw library's Bucket type.
func (b *BucketAttrs) toRawBucket() *raw.Bucket {
	// Copy label map.
	var labels map[string]string
	if len(b.Labels) > 0 {
		labels = make(map[string]string, len(b.Labels))
		for k, v := range b.Labels {
			labels[k] = v
		}
	}
	// Ignore VersioningEnabled if it is false. This is OK because
	// we only call this method when creating a bucket, and by default
	// new buckets have versioning off.
	var v *raw.BucketVersioning
	if b.VersioningEnabled {
		v = &raw.BucketVersioning{Enabled: true}
	}
	var bb *raw.BucketBilling
	if b.RequesterPays {
		bb = &raw.BucketBilling{RequesterPays: true}
	}
	var bktIAM *raw.BucketIamConfiguration
	if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown {
		bktIAM = &raw.BucketIamConfiguration{}
		if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled {
			bktIAM.UniformBucketLevelAccess = &raw.BucketIamConfigurationUniformBucketLevelAccess{
				Enabled: true,
			}
		}
		if b.PublicAccessPrevention != PublicAccessPreventionUnknown {
			bktIAM.PublicAccessPrevention = b.PublicAccessPrevention.String()
		}
	}
	return &raw.Bucket{
		Name:                  b.Name,
		Location:              b.Location,
		StorageClass:          b.StorageClass,
		Acl:                   toRawBucketACL(b.ACL),
		DefaultObjectAcl:      toRawObjectACL(b.DefaultObjectACL),
		Versioning:            v,
		Labels:                labels,
		Billing:               bb,
		Lifecycle:             toRawLifecycle(b.Lifecycle),
		RetentionPolicy:       b.RetentionPolicy.toRawRetentionPolicy(),
		Cors:                  toRawCORS(b.CORS),
		Encryption:            b.Encryption.toRawBucketEncryption(),
		Logging:               b.Logging.toRawBucketLogging(),
		Website:               b.Website.toRawBucketWebsite(),
		IamConfiguration:      bktIAM,
		Rpo:                   b.RPO.String(),
		CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(),
		Autoclass:             b.Autoclass.toRawAutoclass(),
	}
}
func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket {
	if b == nil {
		return &storagepb.Bucket{}
	}

	// Copy label map.
	var labels map[string]string
	if len(b.Labels) > 0 {
		labels = make(map[string]string, len(b.Labels))
		for k, v := range b.Labels {
			labels[k] = v
		}
	}

	// Ignore VersioningEnabled if it is false. This is OK because
	// we only call this method when creating a bucket, and by default
	// new buckets have versioning off.
	var v *storagepb.Bucket_Versioning
	if b.VersioningEnabled {
		v = &storagepb.Bucket_Versioning{Enabled: true}
	}
	var bb *storagepb.Bucket_Billing
	if b.RequesterPays {
		bb = &storagepb.Bucket_Billing{RequesterPays: true}
	}
	var bktIAM *storagepb.Bucket_IamConfig
	if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown {
		bktIAM = &storagepb.Bucket_IamConfig{}
		if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled {
			bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{
				Enabled: true,
			}
		}
		if b.PublicAccessPrevention != PublicAccessPreventionUnknown {
			bktIAM.PublicAccessPrevention = b.PublicAccessPrevention.String()
		}
	}

	return &storagepb.Bucket{
		Name:                  b.Name,
		Location:              b.Location,
		StorageClass:          b.StorageClass,
		Acl:                   toProtoBucketACL(b.ACL),
		DefaultObjectAcl:      toProtoObjectACL(b.DefaultObjectACL),
		Versioning:            v,
		Labels:                labels,
		Billing:               bb,
		Lifecycle:             toProtoLifecycle(b.Lifecycle),
		RetentionPolicy:       b.RetentionPolicy.toProtoRetentionPolicy(),
		Cors:                  toProtoCORS(b.CORS),
		Encryption:            b.Encryption.toProtoBucketEncryption(),
		Logging:               b.Logging.toProtoBucketLogging(),
		Website:               b.Website.toProtoBucketWebsite(),
		IamConfig:             bktIAM,
		Rpo:                   b.RPO.String(),
		CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(),
		Autoclass:             b.Autoclass.toProtoAutoclass(),
	}
}
func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket {
	if ua == nil {
		return &storagepb.Bucket{}
	}

	var v *storagepb.Bucket_Versioning
	if ua.VersioningEnabled != nil {
		v = &storagepb.Bucket_Versioning{Enabled: optional.ToBool(ua.VersioningEnabled)}
	}
	var bb *storagepb.Bucket_Billing
	if ua.RequesterPays != nil {
		bb = &storagepb.Bucket_Billing{RequesterPays: optional.ToBool(ua.RequesterPays)}
	}

	var bktIAM *storagepb.Bucket_IamConfig
	if ua.UniformBucketLevelAccess != nil || ua.BucketPolicyOnly != nil || ua.PublicAccessPrevention != PublicAccessPreventionUnknown {
		bktIAM = &storagepb.Bucket_IamConfig{}

		if ua.BucketPolicyOnly != nil {
			bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{
				Enabled: optional.ToBool(ua.BucketPolicyOnly.Enabled),
			}
		}

		if ua.UniformBucketLevelAccess != nil {
			// UniformBucketLevelAccess takes precedence over BucketPolicyOnly,
			// so Enabled will be overridden here if both are set.
			bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{
				Enabled: optional.ToBool(ua.UniformBucketLevelAccess.Enabled),
			}
		}

		if ua.PublicAccessPrevention != PublicAccessPreventionUnknown {
			bktIAM.PublicAccessPrevention = ua.PublicAccessPrevention.String()
		}
	}

	var defaultHold bool
	if ua.DefaultEventBasedHold != nil {
		defaultHold = optional.ToBool(ua.DefaultEventBasedHold)
	}
	var lifecycle Lifecycle
	if ua.Lifecycle != nil {
		lifecycle = *ua.Lifecycle
	}
	var bktACL []*storagepb.BucketAccessControl
	if ua.acl != nil {
		bktACL = toProtoBucketACL(ua.acl)
	}
	if ua.PredefinedACL != "" {
		// Clear ACL or the call will fail.
		bktACL = nil
	}
	var bktDefaultObjectACL []*storagepb.ObjectAccessControl
	if ua.defaultObjectACL != nil {
		bktDefaultObjectACL = toProtoObjectACL(ua.defaultObjectACL)
	}
	if ua.PredefinedDefaultObjectACL != "" {
		// Clear ACLs or the call will fail.
		bktDefaultObjectACL = nil
	}

	return &storagepb.Bucket{
		StorageClass:          ua.StorageClass,
		Acl:                   bktACL,
		DefaultObjectAcl:      bktDefaultObjectACL,
		DefaultEventBasedHold: defaultHold,
		Versioning:            v,
		Billing:               bb,
		Lifecycle:             toProtoLifecycle(lifecycle),
		RetentionPolicy:       ua.RetentionPolicy.toProtoRetentionPolicy(),
		Cors:                  toProtoCORS(ua.CORS),
		Encryption:            ua.Encryption.toProtoBucketEncryption(),
		Logging:               ua.Logging.toProtoBucketLogging(),
		Website:               ua.Website.toProtoBucketWebsite(),
		IamConfig:             bktIAM,
		Rpo:                   ua.RPO.String(),
		Autoclass:             ua.Autoclass.toProtoAutoclass(),
		Labels:                ua.setLabels,
	}
}
// CORS is the bucket's Cross-Origin Resource Sharing (CORS) configuration.
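//
// A sketch of a permissive configuration (all values are illustrative only):
//
//	corsConfig := []storage.CORS{{
//		MaxAge:          time.Hour,
//		Methods:         []string{"GET", "HEAD"},
//		Origins:         []string{"*"},
//		ResponseHeaders: []string{"Content-Type"},
//	}}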
type CORS struct {
	// MaxAge is the value to return in the Access-Control-Max-Age
	// header used in preflight responses.
	MaxAge time.Duration

	// Methods is the list of HTTP methods on which to include CORS response
	// headers (GET, OPTIONS, POST, etc.). Note: "*" is permitted in the list
	// of methods, and means "any method".
	Methods []string

	// Origins is the list of Origins eligible to receive CORS response
	// headers. Note: "*" is permitted in the list of origins, and means
	// "any Origin".
	Origins []string

	// ResponseHeaders is the list of HTTP headers other than the simple
	// response headers to give permission for the user-agent to share
	// across domains.
	ResponseHeaders []string
}
// BucketEncryption is a bucket's encryption configuration.
type BucketEncryption struct {
// A Cloud KMS key name, in the form
// projects/P/locations/L/keyRings/R/cryptoKeys/K, that will be used to encrypt
// objects inserted into this bucket, if no encryption method is specified.
// The key's location must be the same as the bucket's.
DefaultKMSKeyName string
}
// BucketAttrsToUpdate define the attributes to update during an Update call.
type BucketAttrsToUpdate struct {
// If set, updates whether the bucket uses versioning.
	VersioningEnabled optional.Bool
// If set, updates whether the bucket is a Requester Pays bucket.
	RequesterPays optional.Bool
// DefaultEventBasedHold is the default value for event-based hold on
// newly created objects in this bucket.
	DefaultEventBasedHold optional.Bool
// BucketPolicyOnly is an alias for UniformBucketLevelAccess. Use of
// UniformBucketLevelAccess is recommended above the use of this field.
// Setting BucketPolicyOnly.Enabled OR UniformBucketLevelAccess.Enabled to
// true, will enable UniformBucketLevelAccess. If both BucketPolicyOnly and
// UniformBucketLevelAccess are set, the value of UniformBucketLevelAccess
// will take precedence.
	BucketPolicyOnly *BucketPolicyOnly
// UniformBucketLevelAccess configures access checks to use only bucket-level IAM
// policies and ignore any ACL rules for the bucket.
// See https://cloud.google.com/storage/docs/uniform-bucket-level-access
// for more information.
	UniformBucketLevelAccess *UniformBucketLevelAccess
// PublicAccessPrevention is the setting for the bucket's
// PublicAccessPrevention policy, which can be used to prevent public access
// of data in the bucket. See
// https://cloud.google.com/storage/docs/public-access-prevention for more
// information.
PublicAccessPrevention PublicAccessPrevention
// StorageClass is the default storage class of the bucket. This defines
// how objects in the bucket are stored and determines the SLA
// and the cost of storage. Typical values are "STANDARD", "NEARLINE",
// "COLDLINE" and "ARCHIVE". Defaults to "STANDARD".
// See https://cloud.google.com/storage/docs/storage-classes for all
// valid values.
StorageClass string
// If set, updates the retention policy of the bucket. Using
// RetentionPolicy.RetentionPeriod = 0 will delete the existing policy.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
	RetentionPolicy *RetentionPolicy
	// If set, replaces the CORS configuration with a new configuration.
	// An empty (rather than nil) slice causes all CORS policies to be removed.
	CORS []CORS
	// If set, replaces the encryption configuration of the bucket. Using
	// BucketEncryption.DefaultKMSKeyName = "" will delete the existing
	// configuration.
	Encryption *BucketEncryption
	// If set, replaces the lifecycle configuration of the bucket.
	Lifecycle *Lifecycle
	// If set, replaces the logging configuration of the bucket.
	Logging *BucketLogging
	// If set, replaces the website configuration of the bucket.
	Website *BucketWebsite
// If not empty, applies a predefined set of access controls.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch.
PredefinedACL string
// If not empty, applies a predefined set of default object access controls.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch.
PredefinedDefaultObjectACL string
// RPO configures the Recovery Point Objective (RPO) policy of the bucket.
// Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket.
// See https://cloud.google.com/storage/docs/managing-turbo-replication for
// more information.
RPO RPO
	// If set, updates the autoclass configuration of the bucket.
	// See https://cloud.google.com/storage/docs/using-autoclass for more information.
	Autoclass *Autoclass
// acl is the list of access control rules on the bucket.
// It is unexported and only used internally by the gRPC client.
// Library users should use ACLHandle methods directly.
	acl []ACLRule
// defaultObjectACL is the list of access controls to
// apply to new objects when no object ACL is provided.
// It is unexported and only used internally by the gRPC client.
// Library users should use ACLHandle methods directly.
	defaultObjectACL []ACLRule

	setLabels    map[string]string
	deleteLabels map[string]bool
}
// SetLabel causes a label to be added or modified when ua is used
// in a call to Bucket.Update.
func (ua *BucketAttrsToUpdate) SetLabel(name, value string) {
	if ua.setLabels == nil {
		ua.setLabels = map[string]string{}
	}
	ua.setLabels[name] = value
}
// DeleteLabel causes a label to be deleted when ua is used in a
// call to Bucket.Update.
func (ua *BucketAttrsToUpdate) DeleteLabel(name string) {
	if ua.deleteLabels == nil {
		ua.deleteLabels = map[string]bool{}
	}
	ua.deleteLabels[name] = true
}
func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
	rb := &raw.Bucket{}
	if ua.CORS != nil {
		rb.Cors = toRawCORS(ua.CORS)
		rb.ForceSendFields = append(rb.ForceSendFields, "Cors")
	}
	if ua.DefaultEventBasedHold != nil {
		rb.DefaultEventBasedHold = optional.ToBool(ua.DefaultEventBasedHold)
		rb.ForceSendFields = append(rb.ForceSendFields, "DefaultEventBasedHold")
	}
	if ua.RetentionPolicy != nil {
		if ua.RetentionPolicy.RetentionPeriod == 0 {
			rb.NullFields = append(rb.NullFields, "RetentionPolicy")
			rb.RetentionPolicy = nil
		} else {
			rb.RetentionPolicy = ua.RetentionPolicy.toRawRetentionPolicy()
		}
	}
	if ua.VersioningEnabled != nil {
		rb.Versioning = &raw.BucketVersioning{
			Enabled:         optional.ToBool(ua.VersioningEnabled),
			ForceSendFields: []string{"Enabled"},
		}
	}
	if ua.RequesterPays != nil {
		rb.Billing = &raw.BucketBilling{
			RequesterPays:   optional.ToBool(ua.RequesterPays),
			ForceSendFields: []string{"RequesterPays"},
		}
	}
	if ua.BucketPolicyOnly != nil {
		rb.IamConfiguration = &raw.BucketIamConfiguration{
			UniformBucketLevelAccess: &raw.BucketIamConfigurationUniformBucketLevelAccess{
				Enabled:         ua.BucketPolicyOnly.Enabled,
				ForceSendFields: []string{"Enabled"},
			},
		}
	}
	if ua.UniformBucketLevelAccess != nil {
		rb.IamConfiguration = &raw.BucketIamConfiguration{
			UniformBucketLevelAccess: &raw.BucketIamConfigurationUniformBucketLevelAccess{
				Enabled:         ua.UniformBucketLevelAccess.Enabled,
				ForceSendFields: []string{"Enabled"},
			},
		}
	}
	if ua.PublicAccessPrevention != PublicAccessPreventionUnknown {
		if rb.IamConfiguration == nil {
			rb.IamConfiguration = &raw.BucketIamConfiguration{}
		}
		rb.IamConfiguration.PublicAccessPrevention = ua.PublicAccessPrevention.String()
	}
	if ua.Encryption != nil {
		if ua.Encryption.DefaultKMSKeyName == "" {
			rb.NullFields = append(rb.NullFields, "Encryption")
			rb.Encryption = nil
		} else {
			rb.Encryption = ua.Encryption.toRawBucketEncryption()
		}
	}
	if ua.Lifecycle != nil {
		rb.Lifecycle = toRawLifecycle(*ua.Lifecycle)
		rb.ForceSendFields = append(rb.ForceSendFields, "Lifecycle")
	}
	if ua.Logging != nil {
		if *ua.Logging == (BucketLogging{}) {
			rb.NullFields = append(rb.NullFields, "Logging")
			rb.Logging = nil
		} else {
			rb.Logging = ua.Logging.toRawBucketLogging()
		}
	}
	if ua.Website != nil {
		if *ua.Website == (BucketWebsite{}) {
			rb.NullFields = append(rb.NullFields, "Website")
			rb.Website = nil
		} else {
			rb.Website = ua.Website.toRawBucketWebsite()
		}
	}
	if ua.Autoclass != nil {
		rb.Autoclass = &raw.BucketAutoclass{
			Enabled:              ua.Autoclass.Enabled,
			TerminalStorageClass: ua.Autoclass.TerminalStorageClass,
			ForceSendFields:      []string{"Enabled"},
		}
		rb.ForceSendFields = append(rb.ForceSendFields, "Autoclass")
	}
	if ua.PredefinedACL != "" {
		// Clear ACL or the call will fail.
		rb.Acl = nil
		rb.ForceSendFields = append(rb.ForceSendFields, "Acl")
	}
	if ua.PredefinedDefaultObjectACL != "" {
		// Clear ACLs or the call will fail.
		rb.DefaultObjectAcl = nil
		rb.ForceSendFields = append(rb.ForceSendFields, "DefaultObjectAcl")
	}

	rb.StorageClass = ua.StorageClass
	rb.Rpo = ua.RPO.String()

	if ua.setLabels != nil || ua.deleteLabels != nil {
		rb.Labels = map[string]string{}
		for k, v := range ua.setLabels {
			rb.Labels[k] = v
		}
		if len(rb.Labels) == 0 && len(ua.deleteLabels) > 0 {
			rb.ForceSendFields = append(rb.ForceSendFields, "Labels")
		}
		for l := range ua.deleteLabels {
			rb.NullFields = append(rb.NullFields, "Labels."+l)
		}
	}
	return rb
}
// If returns a new BucketHandle that applies a set of preconditions.
// Preconditions already set on the BucketHandle are ignored. The supplied
// BucketConditions must have exactly one field set to a non-zero value;
// otherwise an error will be returned from any operation on the BucketHandle.
// Operations on the new handle will return an error if the preconditions are not
// satisfied. The only valid preconditions for buckets are MetagenerationMatch
// and MetagenerationNotMatch.
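//
// A sketch of a guarded update, assuming bkt is an existing *BucketHandle and
// ctx is a context.Context (error handling elided):
//
//	attrs, err := bkt.Attrs(ctx)
//	// ...
//	guarded := bkt.If(storage.BucketConditions{MetagenerationMatch: attrs.MetaGeneration})
//	attrs, err = guarded.Update(ctx, storage.BucketAttrsToUpdate{RequesterPays: true})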
func (b *BucketHandle) If(conds BucketConditions) *BucketHandle {
	b2 := *b
	b2.conds = &conds
	return &b2
}
// BucketConditions constrain bucket methods to act on specific metagenerations.
//
// The zero value is an empty set of constraints.
type BucketConditions struct {
// MetagenerationMatch specifies that the bucket must have the given
// metageneration for the operation to occur.
// If MetagenerationMatch is zero, it has no effect.
MetagenerationMatch int64
// MetagenerationNotMatch specifies that the bucket must not have the given
// metageneration for the operation to occur.
// If MetagenerationNotMatch is zero, it has no effect.
MetagenerationNotMatch int64
}
func (c *BucketConditions) validate(method string) error {
	if *c == (BucketConditions{}) {
		return fmt.Errorf("storage: %s: empty conditions", method)
	}
	if c.MetagenerationMatch != 0 && c.MetagenerationNotMatch != 0 {
		return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method)
	}
	return nil
}
// UserProject returns a new BucketHandle that passes the project ID as the user
// project for all subsequent calls. Calls with a user project will be billed to that
// project rather than to the bucket's owning project.
//
// A user project is required for all operations on Requester Pays buckets.
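//
// For example (bucket and project IDs are illustrative; client and ctx are
// assumed to exist):
//
//	attrs, err := client.Bucket("requester-pays-bucket").UserProject("my-billing-project").Attrs(ctx)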
func (b *BucketHandle) UserProject(projectID string) *BucketHandle {
	b2 := *b
	b2.userProject = projectID
	b2.acl.userProject = projectID
	b2.defaultObjectACL.userProject = projectID
	return &b2
}
// LockRetentionPolicy locks a bucket's retention policy until a previously-configured
// RetentionPeriod past the EffectiveTime. Note that if RetentionPeriod is set to less
// than a day, the retention policy is treated as a development configuration and locking
// will have no effect. The BucketHandle must have a metageneration condition that
// matches the bucket's metageneration. See BucketHandle.If.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
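//
// A sketch of locking a policy, assuming bkt is an existing *BucketHandle and
// ctx is a context.Context (error handling elided):
//
//	attrs, err := bkt.Attrs(ctx)
//	// ...
//	err = bkt.If(storage.BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).LockRetentionPolicy(ctx)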
func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error {
	o := makeStorageOpts(true, b.retry, b.userProject)
	return b.c.tc.LockBucketRetentionPolicy(ctx, b.name, b.conds, o...)
}
// SetObjectRetention returns a new BucketHandle that will enable object retention
// on bucket creation. To enable object retention, you must use the returned
// handle to create the bucket. This has no effect on an already existing bucket.
// ObjectRetention is not enabled by default.
// ObjectRetention cannot be configured through the gRPC API.
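//
// For example, to create a bucket with object retention enabled (an
// illustrative sketch; "my-bucket" and "my-project" are hypothetical, and
// client/ctx are assumed to exist):
//
//	b := client.Bucket("my-bucket").SetObjectRetention(true)
//	if err := b.Create(ctx, "my-project", nil); err != nil {
//		// TODO: handle error.
//	}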
func ( b * BucketHandle ) SetObjectRetention ( enable bool ) * BucketHandle {
b2 := * b
b2 . enableObjectRetention = & enable
return & b2
}
// applyBucketConds modifies the provided call using the conditions in conds.
// call is something that quacks like a *raw.WhateverCall.
func applyBucketConds ( method string , conds * BucketConditions , call interface { } ) error {
if conds == nil {
return nil
}
if err := conds . validate ( method ) ; err != nil {
return err
}
cval := reflect . ValueOf ( call )
switch {
case conds . MetagenerationMatch != 0 :
if ! setIfMetagenerationMatch ( cval , conds . MetagenerationMatch ) {
return fmt . Errorf ( "storage: %s: ifMetagenerationMatch not supported" , method )
}
case conds . MetagenerationNotMatch != 0 :
if ! setIfMetagenerationNotMatch ( cval , conds . MetagenerationNotMatch ) {
return fmt . Errorf ( "storage: %s: ifMetagenerationNotMatch not supported" , method )
}
}
return nil
}
// applyBucketCondsProto modifies the provided request message using the conditions
// in conds. msg is a protobuf Message that has fields if_metageneration_match
// and if_metageneration_not_match.
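//
// setConditionProtoField is assumed to set the named field via protoreflect,
// roughly along these lines (an illustrative sketch, not the actual helper;
// protoreflect is google.golang.org/protobuf/reflect/protoreflect):
//
//	fd := rmsg.Descriptor().Fields().ByName("if_metageneration_match")
//	if fd != nil {
//		rmsg.Set(fd, protoreflect.ValueOfInt64(v))
//	}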
func applyBucketCondsProto ( method string , conds * BucketConditions , msg proto . Message ) error {
rmsg := msg . ProtoReflect ( )
if conds == nil {
return nil
}
if err := conds . validate ( method ) ; err != nil {
return err
}
switch {
case conds . MetagenerationMatch != 0 :
if ! setConditionProtoField ( rmsg , "if_metageneration_match" , conds . MetagenerationMatch ) {
return fmt . Errorf ( "storage: %s: ifMetagenerationMatch not supported" , method )
}
case conds . MetagenerationNotMatch != 0 :
if ! setConditionProtoField ( rmsg , "if_metageneration_not_match" , conds . MetagenerationNotMatch ) {
return fmt . Errorf ( "storage: %s: ifMetagenerationNotMatch not supported" , method )
}
}
return nil
}
func ( rp * RetentionPolicy ) toRawRetentionPolicy ( ) * raw . BucketRetentionPolicy {
if rp == nil {
return nil
}
return & raw . BucketRetentionPolicy {
RetentionPeriod : int64 ( rp . RetentionPeriod / time . Second ) ,
}
}
func ( rp * RetentionPolicy ) toProtoRetentionPolicy ( ) * storagepb . Bucket_RetentionPolicy {
if rp == nil {
return nil
}
// RetentionPeriod must be greater than 0, so if it is 0, the user left it
// unset, and so we should not send it in the request i.e. nil is sent.
var dur * durationpb . Duration
if rp . RetentionPeriod != 0 {
dur = durationpb . New ( rp . RetentionPeriod )
}
return & storagepb . Bucket_RetentionPolicy {
RetentionDuration : dur ,
}
}
func toRetentionPolicy ( rp * raw . BucketRetentionPolicy ) ( * RetentionPolicy , error ) {
if rp == nil || rp . EffectiveTime == "" {
return nil , nil
}
t , err := time . Parse ( time . RFC3339 , rp . EffectiveTime )
if err != nil {
return nil , err
}
return & RetentionPolicy {
RetentionPeriod : time . Duration ( rp . RetentionPeriod ) * time . Second ,
EffectiveTime : t ,
IsLocked : rp . IsLocked ,
} , nil
}
func toRetentionPolicyFromProto ( rp * storagepb . Bucket_RetentionPolicy ) * RetentionPolicy {
if rp == nil || rp . GetEffectiveTime ( ) . AsTime ( ) . Unix ( ) == 0 {
return nil
}
return & RetentionPolicy {
RetentionPeriod : rp . GetRetentionDuration ( ) . AsDuration ( ) ,
EffectiveTime : rp . GetEffectiveTime ( ) . AsTime ( ) ,
IsLocked : rp . GetIsLocked ( ) ,
}
}
func toBucketObjectRetention ( or * raw . BucketObjectRetention ) string {
if or == nil {
return ""
}
return or . Mode
}
func toRawCORS ( c [ ] CORS ) [ ] * raw . BucketCors {
var out [ ] * raw . BucketCors
for _ , v := range c {
out = append ( out , & raw . BucketCors {
MaxAgeSeconds : int64 ( v . MaxAge / time . Second ) ,
Method : v . Methods ,
Origin : v . Origins ,
ResponseHeader : v . ResponseHeaders ,
} )
}
return out
}
func toProtoCORS ( c [ ] CORS ) [ ] * storagepb . Bucket_Cors {
var out [ ] * storagepb . Bucket_Cors
for _ , v := range c {
out = append ( out , & storagepb . Bucket_Cors {
MaxAgeSeconds : int32 ( v . MaxAge / time . Second ) ,
Method : v . Methods ,
Origin : v . Origins ,
ResponseHeader : v . ResponseHeaders ,
} )
}
return out
}
func toCORS ( rc [ ] * raw . BucketCors ) [ ] CORS {
var out [ ] CORS
for _ , v := range rc {
out = append ( out , CORS {
MaxAge : time . Duration ( v . MaxAgeSeconds ) * time . Second ,
Methods : v . Method ,
Origins : v . Origin ,
ResponseHeaders : v . ResponseHeader ,
} )
}
return out
}
func toCORSFromProto ( rc [ ] * storagepb . Bucket_Cors ) [ ] CORS {
var out [ ] CORS
for _ , v := range rc {
out = append ( out , CORS {
MaxAge : time . Duration ( v . GetMaxAgeSeconds ( ) ) * time . Second ,
Methods : v . GetMethod ( ) ,
Origins : v . GetOrigin ( ) ,
ResponseHeaders : v . GetResponseHeader ( ) ,
} )
}
return out
}
func toRawLifecycle ( l Lifecycle ) * raw . BucketLifecycle {
var rl raw . BucketLifecycle
if len ( l . Rules ) == 0 {
rl . ForceSendFields = [ ] string { "Rule" }
}
for _ , r := range l . Rules {
rr := & raw . BucketLifecycleRule {
Action : & raw . BucketLifecycleRuleAction {
Type : r . Action . Type ,
StorageClass : r . Action . StorageClass ,
} ,
Condition : & raw . BucketLifecycleRuleCondition {
DaysSinceCustomTime : r . Condition . DaysSinceCustomTime ,
DaysSinceNoncurrentTime : r . Condition . DaysSinceNoncurrentTime ,
MatchesPrefix : r . Condition . MatchesPrefix ,
MatchesStorageClass : r . Condition . MatchesStorageClasses ,
MatchesSuffix : r . Condition . MatchesSuffix ,
NumNewerVersions : r . Condition . NumNewerVersions ,
} ,
}
// AllObjects takes precedence when both AllObjects and AgeInDays are set.
// Rationale: If you've opted into using AllObjects, it makes sense that you
// understand the implications of how this option works with AgeInDays.
if r . Condition . AllObjects {
rr . Condition . Age = googleapi . Int64 ( 0 )
rr . Condition . ForceSendFields = [ ] string { "Age" }
} else if r . Condition . AgeInDays > 0 {
rr . Condition . Age = googleapi . Int64 ( r . Condition . AgeInDays )
}
switch r . Condition . Liveness {
case LiveAndArchived :
rr . Condition . IsLive = nil
case Live :
rr . Condition . IsLive = googleapi . Bool ( true )
case Archived :
rr . Condition . IsLive = googleapi . Bool ( false )
}
if ! r . Condition . CreatedBefore . IsZero ( ) {
rr . Condition . CreatedBefore = r . Condition . CreatedBefore . Format ( rfc3339Date )
}
if ! r . Condition . CustomTimeBefore . IsZero ( ) {
rr . Condition . CustomTimeBefore = r . Condition . CustomTimeBefore . Format ( rfc3339Date )
}
if ! r . Condition . NoncurrentTimeBefore . IsZero ( ) {
rr . Condition . NoncurrentTimeBefore = r . Condition . NoncurrentTimeBefore . Format ( rfc3339Date )
}
rl . Rule = append ( rl . Rule , rr )
}
return & rl
}
func toProtoLifecycle ( l Lifecycle ) * storagepb . Bucket_Lifecycle {
var rl storagepb . Bucket_Lifecycle
for _ , r := range l . Rules {
rr := & storagepb . Bucket_Lifecycle_Rule {
Action : & storagepb . Bucket_Lifecycle_Rule_Action {
Type : r . Action . Type ,
StorageClass : r . Action . StorageClass ,
} ,
Condition : & storagepb . Bucket_Lifecycle_Rule_Condition {
// Note: The Apiary types use int64 (even though the Discovery
// doc states "format: int32"), so the client types used int64,
// but the proto uses int32 so we have a potentially lossy
// conversion.
DaysSinceCustomTime : proto . Int32 ( int32 ( r . Condition . DaysSinceCustomTime ) ) ,
DaysSinceNoncurrentTime : proto . Int32 ( int32 ( r . Condition . DaysSinceNoncurrentTime ) ) ,
MatchesPrefix : r . Condition . MatchesPrefix ,
MatchesStorageClass : r . Condition . MatchesStorageClasses ,
MatchesSuffix : r . Condition . MatchesSuffix ,
NumNewerVersions : proto . Int32 ( int32 ( r . Condition . NumNewerVersions ) ) ,
} ,
}
// Only set AgeDays in the proto if it is non-zero, or if the user has set
// Condition.AllObjects.
if r . Condition . AgeInDays != 0 {
rr . Condition . AgeDays = proto . Int32 ( int32 ( r . Condition . AgeInDays ) )
}
if r . Condition . AllObjects {
rr . Condition . AgeDays = proto . Int32 ( 0 )
}
switch r . Condition . Liveness {
case LiveAndArchived :
rr . Condition . IsLive = nil
case Live :
rr . Condition . IsLive = proto . Bool ( true )
case Archived :
rr . Condition . IsLive = proto . Bool ( false )
}
if ! r . Condition . CreatedBefore . IsZero ( ) {
rr . Condition . CreatedBefore = timeToProtoDate ( r . Condition . CreatedBefore )
}
if ! r . Condition . CustomTimeBefore . IsZero ( ) {
rr . Condition . CustomTimeBefore = timeToProtoDate ( r . Condition . CustomTimeBefore )
}
if ! r . Condition . NoncurrentTimeBefore . IsZero ( ) {
rr . Condition . NoncurrentTimeBefore = timeToProtoDate ( r . Condition . NoncurrentTimeBefore )
}
rl . Rule = append ( rl . Rule , rr )
}
return & rl
}
func toLifecycle ( rl * raw . BucketLifecycle ) Lifecycle {
var l Lifecycle
if rl == nil {
return l
}
for _ , rr := range rl . Rule {
r := LifecycleRule {
Action : LifecycleAction {
Type : rr . Action . Type ,
StorageClass : rr . Action . StorageClass ,
} ,
Condition : LifecycleCondition {
DaysSinceCustomTime : rr . Condition . DaysSinceCustomTime ,
DaysSinceNoncurrentTime : rr . Condition . DaysSinceNoncurrentTime ,
MatchesPrefix : rr . Condition . MatchesPrefix ,
MatchesStorageClasses : rr . Condition . MatchesStorageClass ,
MatchesSuffix : rr . Condition . MatchesSuffix ,
NumNewerVersions : rr . Condition . NumNewerVersions ,
} ,
}
if rr . Condition . Age != nil {
r . Condition . AgeInDays = * rr . Condition . Age
if * rr . Condition . Age == 0 {
r . Condition . AllObjects = true
}
}
if rr . Condition . IsLive == nil {
r . Condition . Liveness = LiveAndArchived
} else if * rr . Condition . IsLive {
r . Condition . Liveness = Live
} else {
r . Condition . Liveness = Archived
}
if rr . Condition . CreatedBefore != "" {
r . Condition . CreatedBefore , _ = time . Parse ( rfc3339Date , rr . Condition . CreatedBefore )
}
if rr . Condition . CustomTimeBefore != "" {
r . Condition . CustomTimeBefore , _ = time . Parse ( rfc3339Date , rr . Condition . CustomTimeBefore )
}
if rr . Condition . NoncurrentTimeBefore != "" {
r . Condition . NoncurrentTimeBefore , _ = time . Parse ( rfc3339Date , rr . Condition . NoncurrentTimeBefore )
}
l . Rules = append ( l . Rules , r )
}
return l
}
func toLifecycleFromProto ( rl * storagepb . Bucket_Lifecycle ) Lifecycle {
var l Lifecycle
if rl == nil {
return l
}
for _ , rr := range rl . GetRule ( ) {
r := LifecycleRule {
Action : LifecycleAction {
Type : rr . GetAction ( ) . GetType ( ) ,
StorageClass : rr . GetAction ( ) . GetStorageClass ( ) ,
} ,
Condition : LifecycleCondition {
AgeInDays : int64 ( rr . GetCondition ( ) . GetAgeDays ( ) ) ,
DaysSinceCustomTime : int64 ( rr . GetCondition ( ) . GetDaysSinceCustomTime ( ) ) ,
DaysSinceNoncurrentTime : int64 ( rr . GetCondition ( ) . GetDaysSinceNoncurrentTime ( ) ) ,
MatchesPrefix : rr . GetCondition ( ) . GetMatchesPrefix ( ) ,
MatchesStorageClasses : rr . GetCondition ( ) . GetMatchesStorageClass ( ) ,
MatchesSuffix : rr . GetCondition ( ) . GetMatchesSuffix ( ) ,
NumNewerVersions : int64 ( rr . GetCondition ( ) . GetNumNewerVersions ( ) ) ,
} ,
}
// Only set Condition.AllObjects if AgeDays is zero, not if it is nil.
if rr . GetCondition ( ) . AgeDays != nil && rr . GetCondition ( ) . GetAgeDays ( ) == 0 {
r . Condition . AllObjects = true
}
if rr . GetCondition ( ) . IsLive == nil {
r . Condition . Liveness = LiveAndArchived
} else if rr . GetCondition ( ) . GetIsLive ( ) {
r . Condition . Liveness = Live
} else {
r . Condition . Liveness = Archived
}
if rr . GetCondition ( ) . GetCreatedBefore ( ) != nil {
r . Condition . CreatedBefore = protoDateToUTCTime ( rr . GetCondition ( ) . GetCreatedBefore ( ) )
}
if rr . GetCondition ( ) . GetCustomTimeBefore ( ) != nil {
r . Condition . CustomTimeBefore = protoDateToUTCTime ( rr . GetCondition ( ) . GetCustomTimeBefore ( ) )
}
if rr . GetCondition ( ) . GetNoncurrentTimeBefore ( ) != nil {
r . Condition . NoncurrentTimeBefore = protoDateToUTCTime ( rr . GetCondition ( ) . GetNoncurrentTimeBefore ( ) )
}
l . Rules = append ( l . Rules , r )
}
return l
}
func ( e * BucketEncryption ) toRawBucketEncryption ( ) * raw . BucketEncryption {
if e == nil {
return nil
}
return & raw . BucketEncryption {
DefaultKmsKeyName : e . DefaultKMSKeyName ,
}
}
func ( e * BucketEncryption ) toProtoBucketEncryption ( ) * storagepb . Bucket_Encryption {
if e == nil {
return nil
}
return & storagepb . Bucket_Encryption {
DefaultKmsKey : e . DefaultKMSKeyName ,
}
}
func toBucketEncryption ( e * raw . BucketEncryption ) * BucketEncryption {
if e == nil {
return nil
}
return & BucketEncryption { DefaultKMSKeyName : e . DefaultKmsKeyName }
}
func toBucketEncryptionFromProto ( e * storagepb . Bucket_Encryption ) * BucketEncryption {
if e == nil {
return nil
}
return & BucketEncryption { DefaultKMSKeyName : e . GetDefaultKmsKey ( ) }
}
func ( b * BucketLogging ) toRawBucketLogging ( ) * raw . BucketLogging {
if b == nil {
return nil
}
return & raw . BucketLogging {
LogBucket : b . LogBucket ,
LogObjectPrefix : b . LogObjectPrefix ,
}
}
func ( b * BucketLogging ) toProtoBucketLogging ( ) * storagepb . Bucket_Logging {
if b == nil {
return nil
}
return & storagepb . Bucket_Logging {
LogBucket : bucketResourceName ( globalProjectAlias , b . LogBucket ) ,
LogObjectPrefix : b . LogObjectPrefix ,
}
}
func toBucketLogging ( b * raw . BucketLogging ) * BucketLogging {
if b == nil {
return nil
}
return & BucketLogging {
LogBucket : b . LogBucket ,
LogObjectPrefix : b . LogObjectPrefix ,
}
}
func toBucketLoggingFromProto ( b * storagepb . Bucket_Logging ) * BucketLogging {
if b == nil {
return nil
}
lb := parseBucketName ( b . GetLogBucket ( ) )
return & BucketLogging {
LogBucket : lb ,
LogObjectPrefix : b . GetLogObjectPrefix ( ) ,
}
}
func ( w * BucketWebsite ) toRawBucketWebsite ( ) * raw . BucketWebsite {
if w == nil {
return nil
}
return & raw . BucketWebsite {
MainPageSuffix : w . MainPageSuffix ,
NotFoundPage : w . NotFoundPage ,
}
}
func ( w * BucketWebsite ) toProtoBucketWebsite ( ) * storagepb . Bucket_Website {
if w == nil {
return nil
}
return & storagepb . Bucket_Website {
MainPageSuffix : w . MainPageSuffix ,
NotFoundPage : w . NotFoundPage ,
}
}
func toBucketWebsite ( w * raw . BucketWebsite ) * BucketWebsite {
if w == nil {
return nil
}
return & BucketWebsite {
MainPageSuffix : w . MainPageSuffix ,
NotFoundPage : w . NotFoundPage ,
}
}
func toBucketWebsiteFromProto ( w * storagepb . Bucket_Website ) * BucketWebsite {
if w == nil {
return nil
}
return & BucketWebsite {
MainPageSuffix : w . GetMainPageSuffix ( ) ,
NotFoundPage : w . GetNotFoundPage ( ) ,
}
}
func toBucketPolicyOnly ( b * raw . BucketIamConfiguration ) BucketPolicyOnly {
if b == nil || b . BucketPolicyOnly == nil || ! b . BucketPolicyOnly . Enabled {
return BucketPolicyOnly { }
}
lt , err := time . Parse ( time . RFC3339 , b . BucketPolicyOnly . LockedTime )
if err != nil {
return BucketPolicyOnly {
Enabled : true ,
}
}
return BucketPolicyOnly {
Enabled : true ,
LockedTime : lt ,
}
}
func toBucketPolicyOnlyFromProto ( b * storagepb . Bucket_IamConfig ) BucketPolicyOnly {
if b == nil || ! b . GetUniformBucketLevelAccess ( ) . GetEnabled ( ) {
return BucketPolicyOnly { }
}
return BucketPolicyOnly {
Enabled : true ,
LockedTime : b . GetUniformBucketLevelAccess ( ) . GetLockTime ( ) . AsTime ( ) ,
}
}
func toUniformBucketLevelAccess ( b * raw . BucketIamConfiguration ) UniformBucketLevelAccess {
if b == nil || b . UniformBucketLevelAccess == nil || ! b . UniformBucketLevelAccess . Enabled {
return UniformBucketLevelAccess { }
}
lt , err := time . Parse ( time . RFC3339 , b . UniformBucketLevelAccess . LockedTime )
if err != nil {
return UniformBucketLevelAccess {
Enabled : true ,
}
}
return UniformBucketLevelAccess {
Enabled : true ,
LockedTime : lt ,
}
}
func toUniformBucketLevelAccessFromProto ( b * storagepb . Bucket_IamConfig ) UniformBucketLevelAccess {
if b == nil || ! b . GetUniformBucketLevelAccess ( ) . GetEnabled ( ) {
return UniformBucketLevelAccess { }
}
return UniformBucketLevelAccess {
Enabled : true ,
LockedTime : b . GetUniformBucketLevelAccess ( ) . GetLockTime ( ) . AsTime ( ) ,
}
}
func toPublicAccessPrevention ( b * raw . BucketIamConfiguration ) PublicAccessPrevention {
if b == nil {
return PublicAccessPreventionUnknown
}
switch b . PublicAccessPrevention {
case publicAccessPreventionInherited , publicAccessPreventionUnspecified :
return PublicAccessPreventionInherited
case publicAccessPreventionEnforced :
return PublicAccessPreventionEnforced
default :
return PublicAccessPreventionUnknown
}
}
func toPublicAccessPreventionFromProto ( b * storagepb . Bucket_IamConfig ) PublicAccessPrevention {
if b == nil {
return PublicAccessPreventionUnknown
}
switch b . GetPublicAccessPrevention ( ) {
case publicAccessPreventionInherited , publicAccessPreventionUnspecified :
return PublicAccessPreventionInherited
case publicAccessPreventionEnforced :
return PublicAccessPreventionEnforced
default :
return PublicAccessPreventionUnknown
}
}
func toRPO ( b * raw . Bucket ) RPO {
if b == nil {
return RPOUnknown
}
switch b . Rpo {
case rpoDefault :
return RPODefault
case rpoAsyncTurbo :
return RPOAsyncTurbo
default :
return RPOUnknown
}
}
func toRPOFromProto ( b * storagepb . Bucket ) RPO {
if b == nil {
return RPOUnknown
}
switch b . GetRpo ( ) {
case rpoDefault :
return RPODefault
case rpoAsyncTurbo :
return RPOAsyncTurbo
default :
return RPOUnknown
}
}
func customPlacementFromRaw ( c * raw . BucketCustomPlacementConfig ) * CustomPlacementConfig {
if c == nil {
return nil
}
return & CustomPlacementConfig { DataLocations : c . DataLocations }
}
func ( c * CustomPlacementConfig ) toRawCustomPlacement ( ) * raw . BucketCustomPlacementConfig {
if c == nil {
return nil
}
return & raw . BucketCustomPlacementConfig {
DataLocations : c . DataLocations ,
}
}
func ( c * CustomPlacementConfig ) toProtoCustomPlacement ( ) * storagepb . Bucket_CustomPlacementConfig {
if c == nil {
return nil
}
return & storagepb . Bucket_CustomPlacementConfig {
DataLocations : c . DataLocations ,
}
}
func customPlacementFromProto ( c * storagepb . Bucket_CustomPlacementConfig ) * CustomPlacementConfig {
if c == nil {
return nil
}
return & CustomPlacementConfig { DataLocations : c . GetDataLocations ( ) }
}
func ( a * Autoclass ) toRawAutoclass ( ) * raw . BucketAutoclass {
if a == nil {
return nil
}
// Excluding read only fields ToggleTime and TerminalStorageClassUpdateTime.
return & raw . BucketAutoclass {
Enabled : a . Enabled ,
TerminalStorageClass : a . TerminalStorageClass ,
}
}
func ( a * Autoclass ) toProtoAutoclass ( ) * storagepb . Bucket_Autoclass {
if a == nil {
return nil
}
// Excluding read only fields ToggleTime and TerminalStorageClassUpdateTime.
ba := & storagepb . Bucket_Autoclass {
Enabled : a . Enabled ,
}
if a . TerminalStorageClass != "" {
ba . TerminalStorageClass = & a . TerminalStorageClass
}
return ba
}
func toAutoclassFromRaw ( a * raw . BucketAutoclass ) * Autoclass {
if a == nil || a . ToggleTime == "" {
return nil
}
ac := & Autoclass {
Enabled : a . Enabled ,
TerminalStorageClass : a . TerminalStorageClass ,
}
// Return ToggleTime and TSCUpdateTime only if parsed with valid values.
t , err := time . Parse ( time . RFC3339 , a . ToggleTime )
if err == nil {
ac . ToggleTime = t
}
ut , err := time . Parse ( time . RFC3339 , a . TerminalStorageClassUpdateTime )
if err == nil {
ac . TerminalStorageClassUpdateTime = ut
}
return ac
}
func toAutoclassFromProto ( a * storagepb . Bucket_Autoclass ) * Autoclass {
if a == nil || a . GetToggleTime ( ) . AsTime ( ) . Unix ( ) == 0 {
return nil
}
return & Autoclass {
Enabled : a . GetEnabled ( ) ,
ToggleTime : a . GetToggleTime ( ) . AsTime ( ) ,
TerminalStorageClass : a . GetTerminalStorageClass ( ) ,
TerminalStorageClassUpdateTime : a . GetTerminalStorageClassUpdateTime ( ) . AsTime ( ) ,
}
}
// Objects returns an iterator over the objects in the bucket that match the
// Query q. If q is nil, no filtering is done. Objects will be iterated over
// lexicographically by name.
//
// Note: The returned iterator is not safe for concurrent operations without explicit synchronization.
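//
// For example, to list all objects under a prefix (an illustrative sketch;
// "my-bucket" and the prefix are hypothetical, and client/ctx are assumed to
// exist):
//
//	it := client.Bucket("my-bucket").Objects(ctx, &storage.Query{Prefix: "logs/"})
//	for {
//		objAttrs, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			// TODO: handle error.
//		}
//		_ = objAttrs.Name // use the object's attributes
//	}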
func ( b * BucketHandle ) Objects ( ctx context . Context , q * Query ) * ObjectIterator {
o := makeStorageOpts ( true , b . retry , b . userProject )
return b . c . tc . ListObjects ( ctx , b . name , q , o ... )
}
// Retryer returns a bucket handle that is configured with custom retry
// behavior as specified by the options that are passed to it. All operations
// on the new handle will use the customized retry configuration.
// Retry options set on an object handle will take precedence over options set on
// the bucket handle.
// These retry options will merge with the client's retry configuration (if set)
// for the returned handle. Options passed into this method will take precedence
// over retry options on the client. Note that you must explicitly pass in each
// option you want to override.
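//
// For example (an illustrative sketch; the backoff values are arbitrary and
// gax refers to github.com/googleapis/gax-go/v2):
//
//	b := client.Bucket("my-bucket").Retryer(
//		storage.WithBackoff(gax.Backoff{Initial: 2 * time.Second, Max: 30 * time.Second}),
//		storage.WithPolicy(storage.RetryAlways),
//	)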
func ( b * BucketHandle ) Retryer ( opts ... RetryOption ) * BucketHandle {
b2 := * b
var retry * retryConfig
if b . retry != nil {
// merge the options with the existing retry
retry = b . retry
} else {
retry = & retryConfig { }
}
for _ , opt := range opts {
opt . apply ( retry )
}
b2 . retry = retry
b2 . acl . retry = retry
b2 . defaultObjectACL . retry = retry
return & b2
}
// An ObjectIterator is an iterator over ObjectAttrs.
//
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
type ObjectIterator struct {
ctx context . Context
query Query
pageInfo * iterator . PageInfo
nextFunc func ( ) error
items [ ] * ObjectAttrs
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
//
// Note: This method is not safe for concurrent operations without explicit synchronization.
func ( it * ObjectIterator ) PageInfo ( ) * iterator . PageInfo { return it . pageInfo }
// Next returns the next result. Its second return value is iterator.Done if
// there are no more results. Once Next returns iterator.Done, all subsequent
// calls will return iterator.Done.
//
// In addition, if Next returns an error other than iterator.Done, all
// subsequent calls will return the same error. To continue iteration, a new
// `ObjectIterator` must be created. Since objects are ordered lexicographically
// by name, `Query.StartOffset` can be used to create a new iterator which will
// start at the desired place. See
// https://pkg.go.dev/cloud.google.com/go/storage?tab=doc#hdr-Listing_objects.
//
// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will
// have a non-empty Prefix field, and a zero value for all other fields. These
// represent prefixes.
//
// Note: This method is not safe for concurrent operations without explicit synchronization.
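//
// For example, to resume listing after an error, record the name of the last
// object returned and start a new iterator from that point (an illustrative
// sketch; bkt, ctx, and lastName are assumed to exist):
//
//	it := bkt.Objects(ctx, &storage.Query{StartOffset: lastName})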
func ( it * ObjectIterator ) Next ( ) ( * ObjectAttrs , error ) {
if err := it . nextFunc ( ) ; err != nil {
return nil , err
}
item := it . items [ 0 ]
it . items = it . items [ 1 : ]
return item , nil
}
// Buckets returns an iterator over the buckets in the project. You may
// optionally set the iterator's Prefix field to restrict the list to buckets
// whose names begin with the prefix. By default, all buckets in the project
// are returned.
//
// Note: The returned iterator is not safe for concurrent operations without explicit synchronization.
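//
// For example (an illustrative sketch; the project ID and prefix are
// hypothetical, and client/ctx are assumed to exist):
//
//	it := client.Buckets(ctx, "my-project-id")
//	it.Prefix = "logs-"
//	for {
//		battrs, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			// TODO: handle error.
//		}
//		_ = battrs.Name // use the bucket's attributes
//	}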
func ( c * Client ) Buckets ( ctx context . Context , projectID string ) * BucketIterator {
o := makeStorageOpts ( true , c . retry , "" )
return c . tc . ListBuckets ( ctx , projectID , o ... )
}
// A BucketIterator is an iterator over BucketAttrs.
//
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
type BucketIterator struct {
// Prefix restricts the iterator to buckets whose names begin with it.
Prefix string
ctx context . Context
projectID string
buckets [ ] * BucketAttrs
pageInfo * iterator . PageInfo
nextFunc func ( ) error
}
// Next returns the next result. Its second return value is iterator.Done if
// there are no more results. Once Next returns iterator.Done, all subsequent
// calls will return iterator.Done.
//
// Note: This method is not safe for concurrent operations without explicit synchronization.
func ( it * BucketIterator ) Next ( ) ( * BucketAttrs , error ) {
if err := it . nextFunc ( ) ; err != nil {
return nil , err
}
b := it . buckets [ 0 ]
it . buckets = it . buckets [ 1 : ]
return b , nil
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
//
// Note: This method is not safe for concurrent operations without explicit synchronization.
func ( it * BucketIterator ) PageInfo ( ) * iterator . PageInfo { return it . pageInfo }
// RPO (Recovery Point Objective) configures the turbo replication feature. See
// https://cloud.google.com/storage/docs/managing-turbo-replication for more information.
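//
// For example, to enable turbo replication when creating a dual-region bucket
// (an illustrative sketch; the bucket, project, and location are hypothetical,
// and client/ctx are assumed to exist):
//
//	attrs := &storage.BucketAttrs{Location: "NAM4", RPO: storage.RPOAsyncTurbo}
//	if err := client.Bucket("my-bucket").Create(ctx, "my-project", attrs); err != nil {
//		// TODO: handle error.
//	}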
type RPO int
const (
// RPOUnknown is a zero value. It may be returned from bucket.Attrs() if RPO
// is not present in the bucket metadata, that is, the bucket is not dual-region.
// This value is also used if the RPO field is not set in a call to GCS.
RPOUnknown RPO = iota
// RPODefault represents default replication. It is used to reset RPO on an
// existing bucket that has this field set to RPOAsyncTurbo. Otherwise it
// is equivalent to RPOUnknown, and is always ignored. This value is valid
// for dual- or multi-region buckets.
RPODefault
// RPOAsyncTurbo represents turbo replication and is used to enable Turbo
// Replication on a bucket. This value is only valid for dual-region buckets.
RPOAsyncTurbo
rpoUnknown string = ""
rpoDefault = "DEFAULT"
rpoAsyncTurbo = "ASYNC_TURBO"
)
func ( rpo RPO ) String ( ) string {
switch rpo {
case RPODefault :
return rpoDefault
case RPOAsyncTurbo :
return rpoAsyncTurbo
default :
return rpoUnknown
}
}
// protoDateToUTCTime returns a new Time based on the google.type.Date, in UTC.
//
// Hours, minutes, seconds, and nanoseconds are set to 0.
func protoDateToUTCTime ( d * dpb . Date ) time . Time {
return protoDateToTime ( d , time . UTC )
}
// protoDateToTime returns a new Time based on the google.type.Date and provided
// *time.Location.
//
// Hours, minutes, seconds, and nanoseconds are set to 0.
func protoDateToTime ( d * dpb . Date , l * time . Location ) time . Time {
return time . Date ( int ( d . GetYear ( ) ) , time . Month ( d . GetMonth ( ) ) , int ( d . GetDay ( ) ) , 0 , 0 , 0 , 0 , l )
}
// timeToProtoDate returns a new google.type.Date based on the provided time.Time.
// The location is ignored, as is anything more precise than the day.
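//
// For example (an illustrative sketch):
//
//	d := timeToProtoDate(time.Date(2024, time.March, 1, 15, 4, 5, 0, time.UTC))
//	// d is &dpb.Date{Year: 2024, Month: 3, Day: 1}; the time of day is dropped.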
func timeToProtoDate ( t time . Time ) * dpb . Date {
return & dpb . Date {
Year : int32 ( t . Year ( ) ) ,
Month : int32 ( t . Month ( ) ) ,
Day : int32 ( t . Day ( ) ) ,
}
}