@@ -3,7 +3,7 @@ package backup;

 import "kvrpcpb.proto";
 import "errorpb.proto";
-
+import "encryptionpb.proto";
 import "gogoproto/gogo.proto";
 import "rustproto.proto";

@@ -19,11 +19,19 @@ message BackupMeta {
   // ID and version of the backed-up cluster.
   uint64 cluster_id = 1;
   string cluster_version = 2;
+  // Save the version of BR running backup jobs.
+  string br_version = 11;
+  // The backupmeta scheme version.
+  int32 version = 12;

   // path field is no longer used.
   reserved 3; reserved "path";
   // A set of files that compose a backup.
+  // Note: `files` is deprecated, as it bloats backupmeta. It is kept for
+  // compatibility, so new BR can restore older backups.
   repeated File files = 4;
+  // An index to the files that contain data.
+  MetaFile file_index = 13;

   // A pair of timestamps specifies the time range of a backup.
   // For a full backup, the start_version equals the end_version,
@@ -33,19 +41,34 @@ message BackupMeta {
   uint64 start_version = 5;
   uint64 end_version = 6;

-  // Additional metadata describes database and table info.
+  // Table metadata describes database and table info.
+  // Note: `schemas` is deprecated, as it bloats backupmeta. It is kept for
+  // compatibility, so new BR can restore older backups.
   repeated Schema schemas = 7;
+  // An index to the files that contain Schemas.
+  MetaFile schema_index = 14;

-  // If in raw kv mode, `start_versions`, `end_versions` and `schemas` will be ignored, and the
-  // backup data's range is represented by raw_ranges.
+  // If in raw kv mode, `start_versions`, `end_versions` and `schemas` will be
+  // ignored, and the backup data's range is represented by raw_ranges.
   bool is_raw_kv = 8;
+  // Note: `raw_ranges` is deprecated, as it bloats backupmeta. It is kept for
+  // compatibility, so new BR can restore older backups.
   repeated RawRange raw_ranges = 9;
+  // An index to the files that contain RawRanges.
+  MetaFile raw_range_index = 15;

-  // In incremental backup, DDLs which are completed in (lastBackupTS, backupTS] will be stored here.
+  // In incremental backup, DDLs which are completed in
+  // (lastBackupTS, backupTS] will be stored here.
+  // Note: `ddls` is deprecated, as it bloats backupmeta. It is kept for
+  // compatibility, so new BR can restore older backups.
   bytes ddls = 10;
+  // An index to the files that contain DDLs.
+  MetaFile ddl_indexes = 16;
+  // The backup result written into the `backupmeta` file.
+  string backup_result = 17;

-  // Save the version of BR running backup jobs.
-  string br_version = 11;
+  // The API version implies the encoding of the key and value.
+  kvrpcpb.APIVersion api_version = 18;
 }

 message File {
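
The deprecation notes above imply a compatibility rule: a new reader prefers the MetaFile indexes and only falls back to the deprecated inline fields when restoring an older backup. A minimal Go sketch of that rule, assuming kvproto's generated package `github.com/pingcap/kvproto/pkg/backup` (aliased `backuppb` here) and the `collectDataFiles` walker sketched after the next hunk; `fetch` is a hypothetical storage reader:

```go
// dataFilesOf resolves the data files of a backup, old layout or new.
func dataFilesOf(meta *backuppb.BackupMeta, fetch func(string) ([]byte, error)) ([]*backuppb.File, error) {
	if meta.FileIndex != nil {
		// New layout: walk the multi-level index (see below).
		return collectDataFiles(meta.FileIndex, fetch)
	}
	// Old layout: the files are inlined in backupmeta itself.
	return meta.Files, nil
}
```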
@@ -64,6 +87,23 @@ message File {
   string cf = 10;

   uint64 size = 11;
+  // cipher_iv is used for the AES cipher.
+  bytes cipher_iv = 12;
+}
+
+// MetaFile describes a multi-level index of data used in backup.
+message MetaFile {
+  // A set of files that contains a MetaFile.
+  // It is used as a multi-level index.
+  repeated File meta_files = 1;
+  // A set of files that contains user data.
+  repeated File data_files = 2;
+  // A set of Schemas.
+  repeated Schema schemas = 3;
+  // A set of RawRanges.
+  repeated RawRange raw_ranges = 4;
+  // A set of DDLs.
+  repeated bytes ddls = 5;
 }

 message Schema {
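
Because the index is multi-level, each entry in `meta_files` names a file whose content is itself a marshaled MetaFile. A sketch of a depth-first walker, assuming the generated Go package and gogo's proto runtime; the `fetch` callback is an illustrative storage reader, not an API from kvproto:

```go
package example

import (
	"github.com/gogo/protobuf/proto"

	backuppb "github.com/pingcap/kvproto/pkg/backup"
)

// collectDataFiles walks meta_files recursively, accumulating every
// data file referenced at any level of the index.
func collectDataFiles(idx *backuppb.MetaFile, fetch func(name string) ([]byte, error)) ([]*backuppb.File, error) {
	if idx == nil {
		return nil, nil
	}
	files := append([]*backuppb.File{}, idx.DataFiles...)
	for _, child := range idx.MetaFiles {
		// Each entry names a file whose content is a marshaled
		// MetaFile: one level further down the index.
		raw, err := fetch(child.Name)
		if err != nil {
			return nil, err
		}
		sub := new(backuppb.MetaFile)
		if err := proto.Unmarshal(raw, sub); err != nil {
			return nil, err
		}
		subFiles, err := collectDataFiles(sub, fetch)
		if err != nil {
			return nil, err
		}
		files = append(files, subFiles...)
	}
	return files, nil
}
```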
@@ -109,6 +149,11 @@ enum CompressionType {
   ZSTD = 3;
 }

+message CipherInfo {
+  encryptionpb.EncryptionMethod cipher_type = 1;
+  bytes cipher_key = 2;
+}
+
 message BackupRequest {
   uint64 cluster_id = 1;

@@ -135,6 +180,8 @@ message BackupRequest {
   CompressionType compression_type = 12;
   // sst compression level, some algorithms support negative compression levels
   int32 compression_level = 13;
+  // The cipher_info is used to encrypt the SST files.
+  CipherInfo cipher_info = 14;
 }

 message StorageBackend {
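
Taken together, the request-level CipherInfo carries the algorithm and key for the whole backup, while each encrypted File records its own IV in `cipher_iv`. A hedged sketch of that split, assuming one of the CTR-mode values of `encryptionpb.EncryptionMethod`; the helper is illustrative, not BR's actual implementation:

```go
package example

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
)

// encryptSST encrypts plaintext SST bytes with AES-CTR and returns
// the ciphertext plus the fresh random IV to store in File.cipher_iv.
func encryptSST(cipherKey, plaintext []byte) (ciphertext, iv []byte, err error) {
	block, err := aes.NewCipher(cipherKey) // key length selects AES-128/192/256
	if err != nil {
		return nil, nil, err
	}
	iv = make([]byte, block.BlockSize())
	if _, err := rand.Read(iv); err != nil {
		return nil, nil, err
	}
	ciphertext = make([]byte, len(plaintext))
	cipher.NewCTR(block, iv).XORKeyStream(ciphertext, plaintext)
	return ciphertext, iv, nil
}
```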
@@ -144,6 +191,8 @@ message StorageBackend {
     S3 s3 = 3;
     GCS gcs = 4;
     CloudDynamic cloud_dynamic = 5;
+    HDFS hdfs = 6;
+    AzureBlobStorage azure_blob_storage = 7;
   }
 }

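
The two new variants extend StorageBackend's `backend` oneof. A small sketch of consumer-side dispatch, assuming the usual gogo-generated wrapper type names (`StorageBackend_Hdfs`, `StorageBackend_AzureBlobStorage`) for the new cases:

```go
// describeBackend branches on the oneof, including the new variants.
func describeBackend(sb *backuppb.StorageBackend) string {
	switch b := sb.Backend.(type) {
	case *backuppb.StorageBackend_Hdfs:
		return "hdfs remote: " + b.Hdfs.Remote
	case *backuppb.StorageBackend_AzureBlobStorage:
		return "azure container: " + b.AzureBlobStorage.Bucket
	default:
		return "another backend"
	}
}
```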
@@ -188,6 +237,29 @@ message GCS {
   string credentials_blob = 6;
 }

+// AzureBlobStorage storage backend saves files into Azure Blob Storage.
+message AzureBlobStorage {
+  string endpoint = 1;
+  // Alias: container.
+  string bucket = 2;
+  // Notice: prefix starts without `/`, otherwise the first directory's name is empty.
+  string prefix = 3;
+  // Alias: access_tier.
+  // See https://docs.microsoft.com/en-us/azure/storage/blobs/access-tiers-overview
+  string storage_class = 4;
+
+  // If empty, try to read account_name from the node's environment variable $AZURE_STORAGE_ACCOUNT.
+  string account_name = 5;
+  // Use the shared key to access the Azure blob.
+  // If the node's environment variables ($AZURE_CLIENT_ID, $AZURE_TENANT_ID, $AZURE_CLIENT_SECRET) exist,
+  // prefer to use a token to access the Azure blob.
+  //
+  // See https://docs.microsoft.com/en-us/azure/storage/common/identity-library-acquire-token?toc=/azure/storage/blobs/toc.json
+  //
+  // Otherwise, if empty, try to read the shared key from the node's environment variable $AZURE_STORAGE_KEY.
+  string shared_key = 6;
+}
+
 message Bucket {
   string endpoint = 1;
   string region = 3;
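
The field comments above encode a credential-resolution order. A minimal sketch of that order (helper name illustrative; the AAD-token branch is elided, and it shares the `backuppb` import assumed earlier):

```go
import "os"

// azureCredentials resolves account name and shared key, falling back
// to the environment variables named in the field comments.
func azureCredentials(abs *backuppb.AzureBlobStorage) (account, key string) {
	account, key = abs.AccountName, abs.SharedKey
	if account == "" {
		account = os.Getenv("AZURE_STORAGE_ACCOUNT")
	}
	// When $AZURE_CLIENT_ID, $AZURE_TENANT_ID and $AZURE_CLIENT_SECRET
	// are all set, a token should be preferred over any shared key;
	// that check is omitted here.
	if key == "" {
		key = os.Getenv("AZURE_STORAGE_KEY")
	}
	return account, key
}
```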
@@ -199,10 +271,16 @@ message Bucket {
 // CloudDynamic allows testing new cloud providers and new fields without changing protobuf definitions
 message CloudDynamic {
   Bucket bucket = 1;
-  string provider_name = 2; // s3 and gcs are supported
+  string provider_name = 2; // s3, gcs and azureBlobStorage are supported
   map<string, string> attrs = 3;
 }

+// HDFS storage backend saves files into HDFS-compatible storages.
+message HDFS {
+  // A URL: hdfs:///some/path or hdfs://host:port/some/path
+  string remote = 1;
+}
+
 message BackupResponse {
   Error error = 1;
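
Constructing the new HDFS variant is then just the oneof wrapper around a remote URL in one of the two forms the comment lists (wrapper name assumed from the usual generation; the URL value is illustrative):

```go
sb := &backuppb.StorageBackend{
	Backend: &backuppb.StorageBackend_Hdfs{
		Hdfs: &backuppb.HDFS{Remote: "hdfs://namenode:8020/backup/2021-06"},
	},
}
```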