# fog_client.rb (forked from cloudfoundry/cloud_controller_ng)

require 'fileutils'
require 'find'
require 'mime-types'
require 'steno'
require 'active_support/core_ext/object/try' # provides Object#try, used below
require 'cloud_controller/blobstore/fog/providers'
require 'cloud_controller/blobstore/base_client'
require 'cloud_controller/blobstore/fog/directory'
require 'cloud_controller/blobstore/fog/fog_blob'
require 'cloud_controller/blobstore/fog/idempotent_directory'
require 'cloud_controller/blobstore/fog/cdn'
require 'cloud_controller/blobstore/errors'

module CloudController
  module Blobstore
    class FogClient < BaseClient
      attr_reader :root_dir

      DEFAULT_BATCH_SIZE = 1000

      def initialize(connection_config:,
                     directory_key:,
                     cdn: nil,
                     root_dir: nil,
                     min_size: nil,
                     max_size: nil,
                     storage_options: nil)
        @root_dir = root_dir
        @connection_config = connection_config
        @directory_key = directory_key
        @cdn = cdn
        @min_size = min_size || 0
        @max_size = max_size
        @storage_options = storage_options
      end

      def local?
        @connection_config[:provider].downcase == 'local'
      end

      def exists?(key)
        !file(key).nil?
      end

      # Streams the blob identified by source_key to destination_path,
      # reading through the CDN when one is configured.
      def download_from_blobstore(source_key, destination_path, mode: nil)
        FileUtils.mkdir_p(File.dirname(destination_path))
        File.open(destination_path, 'wb') do |file|
          (@cdn || files).get(partitioned_key(source_key)) do |*chunk|
            file.write(chunk[0])
          end
          file.chmod(mode) if mode
        end
      end

      # Uploads the file at source_path under destination_key, skipping
      # files that fall outside the configured size limits.
      def cp_to_blobstore(source_path, destination_key)
        start = Time.now.utc
        logger.info('blobstore.cp-start', destination_key: destination_key, source_path: source_path, bucket: @directory_key)
        size = -1
        log_entry = 'blobstore.cp-skip'

        File.open(source_path) do |file|
          size = file.size
          next unless within_limits?(size)

          mime_type = MIME::Types.of(source_path).first.try(:content_type)

          options = {
            key: partitioned_key(destination_key),
            body: file,
            content_type: mime_type || 'application/zip',
            public: local?
          }.merge(formatted_storage_options)
          files.create(options)
          log_entry = 'blobstore.cp-finish'
        end

        duration = Time.now.utc - start
        logger.info(log_entry,
                    destination_key: destination_key,
                    duration_seconds: duration,
                    size: size,
                   )
      end

      def cp_file_between_keys(source_key, destination_key)
        source_file = file(source_key)
        raise FileNotFound if source_file.nil?

        source_file.copy(@directory_key, partitioned_key(destination_key), formatted_storage_options)
      end

      def delete_all(page_size=DEFAULT_BATCH_SIZE)
        logger.info("Attempting to delete all files in #{@directory_key}/#{@root_dir} blobstore")
        delete_files(files_for(@root_dir), page_size)
      end

      def delete_all_in_path(path)
        logger.info("Attempting to delete all files in blobstore #{@directory_key} under path #{@directory_key}/#{partitioned_key(path)}")
        delete_files(files_for(partitioned_key(path)), DEFAULT_BATCH_SIZE)
      end

      def delete(key)
        blob_file = file(key)
        delete_file(blob_file) if blob_file
      end

      def delete_blob(blob)
        delete_file(blob.file) if blob.file
      end

      def blob(key)
        f = file(key)
        FogBlob.new(f, @cdn) if f
      end

      # The local filesystem provider cannot filter by prefix server-side,
      # so the prefix is joined onto the directory key instead.
      def files_for(prefix, _ignored_directory_prefixes=[])
        if connection.is_a? Fog::Storage::Local::Real
          directory = connection.directories.get(File.join(dir.key, prefix || ''))
          directory ? directory.files : []
        else
          connection.directories.get(dir.key, prefix: prefix).files
        end
      end

      private

      def files
        dir.files
      end

      # Rewrites the :encryption storage option into the header fog expects
      # for S3 server-side encryption; all other options pass through
      # unchanged.
      def formatted_storage_options
        return {} unless @storage_options && @storage_options[:encryption]

        opts = @storage_options.dup
        encrypt_opt = opts.delete(:encryption)
        opts['x-amz-server-side-encryption'] = encrypt_opt
        opts
      end

      def delete_file(file)
        file.destroy
      end

      def delete_files(files_to_delete, page_size)
        if connection.respond_to?(:delete_multiple_objects)
          # AWS needs the file key to work; other providers with multiple delete
          # are currently not supported. When support is added this code may
          # need an update.
          each_slice(files_to_delete, page_size) do |file_group|
            connection.delete_multiple_objects(@directory_key, file_group.map(&:key))
          end
        else
          files_to_delete.each { |f| delete_file(f) }
        end
      end
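
      # Worked example of the batching above (hypothetical numbers): with a
      # page size of 1,000 and 2,500 files to delete, each_slice yields
      # batches of 1,000, 1,000, and 500 keys, producing three
      # delete_multiple_objects calls on an S3-compatible connection.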

      def each_slice(files, batch_size)
        batch = []
        files.each do |f|
          batch << f
          if batch.length == batch_size
            yield(batch)
            batch = []
          end
        end

        yield(batch) unless batch.empty?
      end

      def file(key)
        files.head(partitioned_key(key))
      end

      def dir
        @dir ||= directory.get_or_create
      end

      def directory
        @directory ||= IdempotentDirectory.new(Directory.new(connection, @directory_key))
      end

      # Lazily builds and memoizes the Fog storage connection; the
      # blobstore timeout is applied to both read and write operations.
      def connection
        options = @connection_config
        blobstore_timeout = options.delete(:blobstore_timeout)
        options = options.merge(endpoint: '') if local?
        connection_options = options[:connection_options] || {}
        connection_options = connection_options.merge(read_timeout: blobstore_timeout, write_timeout: blobstore_timeout)
        options = options.merge(connection_options: connection_options)
        @connection ||= Fog::Storage.new(options)
      end

      def logger
        @logger ||= Steno.logger('cc.blobstore')
      end
    end
  end
end
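
# Usage sketch (hypothetical values; connection_config is handed straight to
# Fog::Storage.new, so the required keys depend on the chosen provider):
#
#   client = CloudController::Blobstore::FogClient.new(
#     connection_config: {
#       provider: 'AWS',
#       aws_access_key_id: 'ACCESS_KEY',
#       aws_secret_access_key: 'SECRET_KEY',
#     },
#     directory_key: 'cc-packages',
#   )
#
#   client.cp_to_blobstore('/tmp/app.zip', 'package-guid')
#   client.exists?('package-guid') # => true
#   client.download_from_blobstore('package-guid', '/tmp/copy.zip')
#   client.delete('package-guid')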