11import * as ociBucket from "oci-objectstorage" ;
22import * as ociAuth from "oci-common" ;
3- import { createReadStream , createWriteStream , promises as fs } from "node:fs" ;
3+ import { createReadStream , promises as fs } from "node:fs" ;
4+ import { extendsFS } from "@sirherobrine23/extends" ;
45import { finished } from "node:stream/promises" ;
5- import extendsFS from "@sirherobrine23/extends " ;
6+ import { http } from "@sirherobrine23/http " ;
67import chokidar from "chokidar" ;
78import stream from "node:stream" ;
89import path from "node:path" ;
9- import { http } from "@sirherobrine23/http" ;
1010
// Rewrites underscore-separated SDK region keys (e.g. "us_ashburn_1") into
// their dashed region-id form ("us-ashburn-1") at the type level.
type RegionPretty<S extends string> = S extends `${infer T}_${infer U}` ? `${T}-${RegionPretty<U>}` : S
// Union of every valid Oracle Cloud region id, derived from the SDK's Region
// class by excluding its static helper members and lowercasing the keys.
export type oracleRegions = RegionPretty<Lowercase<Exclude<Exclude<keyof typeof ociAuth.Region, typeof ociAuth.Region>, "values" | "enableInstanceMetadata" | "register" | "fromRegionId" | "getRegionIdFromShortCode" | "hasUsedConfigFile" | "prototype" | "REGION_STRING" | "REGION_ID_STRING" | "REGION_ID">>>;
@@ -129,31 +129,94 @@ export async function oracleBucket(config: oracleOptions) {
129129 if ( ! ( typeof config . namespace === "string" && ! ! ( config . namespace = config . namespace . trim ( ) ) ) ) config . namespace = ( await client . getNamespace ( { } ) ) . value ;
130130 await client . getBucket ( { bucketName : config . name , namespaceName : config . namespace } ) ;
131131
132+
132133 const partialFunctions = {
133134 /**
135+ * Upload file to Oracle cloud bucket.
134136 *
135137 * @param fileName - File location.
136138 * @param storageTier - Optional storage tier, default from seted in the Bucket.
137139 * @returns - Writable stream to Write file (is a PassThrough but for writing only).
138140 */
139- uploadFile ( fileName : string , storageTier ?: "Archive" | "InfrequentAccess" | "Standard" ) : stream . Writable {
140- const strm = new stream . PassThrough ( ) ;
141- client . putObject ( {
142- namespaceName : config . namespace ,
143- bucketName : config . name ,
144- objectName : fileName ,
145- putObjectBody : stream . Readable . from ( strm ) ,
146- storageTier : storageTier === "Archive" ? ociBucket . models . StorageTier . Archive : storageTier === "InfrequentAccess" ? ociBucket . models . StorageTier . InfrequentAccess : storageTier === "Standard" ? ociBucket . models . StorageTier . Standard : undefined ,
147- } ) . then ( ( ) => { } , err => strm . emit ( "error" , err ) ) ;
148- return strm ;
141+ uploadFile ( fileName : string , storageTier ?: "Archive" | "InfrequentAccess" | "Standard" ) {
142+ let uploadId : string , uploadPartNum : string [ ] = [ ] ;
143+ return new stream . Writable ( {
144+ autoDestroy : true ,
145+ emitClose : true ,
146+ async write ( chunk : Buffer , encoding , callback ) {
147+ try {
148+ if ( ! ( Buffer . isBuffer ( chunk ) ) ) chunk = Buffer . from ( chunk , encoding ) ;
149+ if ( ! uploadId ) {
150+ const { multipartUpload } = await client . createMultipartUpload ( {
151+ namespaceName : config . namespace ,
152+ bucketName : config . name ,
153+ createMultipartUploadDetails : {
154+ object : fileName ,
155+ storageTier : storageTier === "Archive" ? ociBucket . models . StorageTier . Archive : storageTier === "InfrequentAccess" ? ociBucket . models . StorageTier . InfrequentAccess : storageTier === "Standard" ? ociBucket . models . StorageTier . Standard : undefined ,
156+ }
157+ } ) ;
158+ uploadId = multipartUpload . uploadId ;
159+ }
160+ const res = await client . uploadPart ( {
161+ namespaceName : config . namespace ,
162+ bucketName : config . name ,
163+ objectName : fileName ,
164+ uploadId,
165+ uploadPartBody : chunk ,
166+ uploadPartNum : uploadPartNum . length + 1 ,
167+ } ) ;
168+ uploadPartNum . push ( res . eTag ) ;
169+ callback ( ) ;
170+ } catch ( e ) {
171+ callback ( e ) ;
172+ }
173+ } ,
174+ async destroy ( error , callback ) {
175+ if ( ! uploadId ) return callback ( error ) ;
176+ try {
177+ if ( error ) {
178+ await client . abortMultipartUpload ( {
179+ namespaceName : config . namespace ,
180+ bucketName : config . name ,
181+ objectName : fileName ,
182+ uploadId
183+ } ) ;
184+ callback ( error ) ;
185+ } else {
186+ await client . commitMultipartUpload ( {
187+ namespaceName : config . namespace ,
188+ bucketName : config . name ,
189+ objectName : fileName ,
190+ uploadId,
191+ commitMultipartUploadDetails : {
192+ partsToCommit : uploadPartNum . map ( ( etag , index ) => ( { etag, partNum : index + 1 } ) )
193+ }
194+ } ) ;
195+ callback ( null ) ;
196+ }
197+ } catch ( e ) {
198+ e [ "originalError" ] = error ;
199+ callback ( e ) ;
200+ }
201+ } ,
202+ } ) ;
149203 } ,
150- async deleteFile ( pathLocation : string ) {
204+ /**
205+ * Delete file in the Bucket
206+ * @param pathLocation - File location/name.
207+ */
208+ async deleteObject ( pathLocation : string ) {
151209 await client . deleteObject ( {
152210 namespaceName : config . namespace ,
153211 bucketName : config . name ,
154212 objectName : pathLocation
155213 } ) ;
156214 } ,
215+ /**
216+ * List files and folder in the Bucket
217+ * @param folder - Folder name
218+ * @returns
219+ */
157220 async listFiles ( folder ?: string ) {
158221 const objects : oracleFileListObject [ ] = [ ] ;
159222 let start : any ;
@@ -182,38 +245,16 @@ export async function oracleBucket(config: oracleOptions) {
182245
183246 return objects ;
184247 } ,
248+ /**
249+ * Get file from bucket.
250+ * @param pathLocation - File path
251+ * @returns return file stream
252+ */
185253 async getFileStream ( pathLocation : string ) {
186254 const { value } = await client . getObject ( { namespaceName : config . namespace , bucketName : config . name , objectName : pathLocation } ) ;
187255 if ( ! value ) throw new Error ( "No file found" ) ;
188256 else if ( value instanceof stream . Readable ) return value ;
189257 else return stream . Readable . fromWeb ( value as any ) ;
190- } ,
191- async watch ( folderPath : string , options ?: { downloadFist ?: boolean , remoteFolder ?: string } ) {
192- if ( ! options ) options = { } ;
193- if ( ! folderPath ) throw new TypeError ( "Folder path is required" ) ;
194- else if ( ! ( await extendsFS . exists ( folderPath ) ) ) throw new Error ( "Folder path is not exists" ) ;
195- else if ( ! ( await extendsFS . isDirectory ( folderPath ) ) ) throw new Error ( "Folder path is not a directory" ) ;
196- if ( options . downloadFist ) {
197- let { remoteFolder = "" } = options ;
198- const filesList = ( await partialFunctions ! . listFiles ( remoteFolder ) ) . map ( d => d . name ) ;
199- const localList = ( await extendsFS . readdir ( folderPath ) ) . map ( file => path . posix . resolve ( "/" , path . relative ( folderPath , file ) ) ) ;
200- for ( const local of localList ) if ( ! filesList . includes ( local ) ) await fs . unlink ( path . posix . resolve ( folderPath , local ) ) ;
201- for await ( const remote of filesList ) await new Promise ( async ( done , reject ) => ( await partialFunctions ! . getFileStream ( remote ) ) . pipe ( createWriteStream ( path . posix . resolve ( folderPath , remote ) ) ) . on ( "error" , reject ) . once ( "done" , done ) ) ;
202- }
203-
204- return chokidar . watch ( folderPath , {
205- ignoreInitial : true ,
206- atomic : true ,
207- } ) . on ( "add" , async ( filePath ) => {
208- await finished ( createReadStream ( filePath ) . pipe ( partialFunctions . uploadFile ( path . posix . resolve ( "/" , path . relative ( folderPath , filePath ) ) ) ) )
209- } ) . on ( "change" , async ( filePath ) => {
210- await finished ( createReadStream ( filePath ) . pipe ( partialFunctions . uploadFile ( path . posix . resolve ( "/" , path . relative ( folderPath , filePath ) ) ) ) )
211- } ) . on ( "unlink" , async ( filePath ) => {
212- await partialFunctions ! . deleteFile ( path . posix . resolve ( "/" , path . relative ( folderPath , filePath ) ) ) ;
213- } ) . on ( "unlinkDir" , async ( filePath ) => {
214- const filesList = ( await partialFunctions ! . listFiles ( path . posix . resolve ( "/" , path . relative ( folderPath , filePath ) ) ) ) . map ( d => d . name ) ;
215- for ( const remote of filesList ) await partialFunctions ! . deleteFile ( remote ) ;
216- } ) ;
217258 }
218259 } ;
219260 return partialFunctions ;
@@ -229,6 +270,8 @@ export async function oracleBucket(config: oracleOptions) {
229270 */
230271export function oracleBucketPreAuth ( region : oracleRegions , namespace : string , name : string , preAuthKey : string ) {
231272 getRegion ( region ) ; // Check valid region
273+ if ( ! preAuthKey ) throw new Error ( "Pre auth key is required" ) ;
274+ const bucketPath = path . posix . join ( "/p" , preAuthKey , "n" , namespace , "b" , name ) ;
232275 const funs = {
233276 /**
234277 * Get file from Bucket
@@ -237,9 +280,11 @@ export function oracleBucketPreAuth(region: oracleRegions, namespace: string, na
237280 * @returns
238281 */
239282 getFile ( filename : string ) {
240- return http . streamRoot ( new URL ( path . posix . join ( "/p" , preAuthKey , "n" , namespace , "b" , name , "o" , encodeURIComponent ( filename ) ) , `https://objectstorage.${ region } .oraclecloud.com` ) , {
241- disableHTTP2 : true
242- } , true ) ;
283+ if ( typeof filename !== "string" ) throw new Error ( "Requrie file name!" ) ;
284+ filename = filename . trim ( ) . slice ( - 1024 ) . split ( path . win32 . sep ) . join ( path . posix . sep ) ;
285+ if ( filename . startsWith ( path . posix . sep ) ) filename = filename . slice ( 1 ) ;
286+ filename = encodeURIComponent ( filename ) ;
287+ return http . streamRoot ( new URL ( path . posix . join ( bucketPath , "o" , filename ) , `https://objectstorage.${ region } .oraclecloud.com` ) , { disableHTTP2 : true } , true ) ;
243288 } ,
244289 /**
245290 * Upload file to bucket
@@ -248,21 +293,67 @@ export function oracleBucketPreAuth(region: oracleRegions, namespace: string, na
248293 * @param storageTier - Another tier to storage file
249294 * @returns Stream to write file
250295 */
251- uploadFile ( filename : string , storageTier ?: oracleFileListObject [ "storageTier" ] ) : stream . Writable {
252- return new class writeFile extends stream . PassThrough {
253- constructor ( ) {
254- super ( ) ;
255- http . bufferRequest ( new URL ( path . posix . join ( "/p" , preAuthKey , "n" , namespace , "b" , name , "o" , encodeURIComponent ( filename ) ) , `https://objectstorage.${ region } .oraclecloud.com` ) , {
256- method : "PUT" ,
257- body : stream . Readable . from ( this ) ,
258- disableHTTP2 : true ,
259- headers : {
260- ...( ! ! storageTier ? { "storage-tier" : storageTier } : { } ) ,
261- "Content-Type" : "application/octet-stream" ,
296+ uploadFile ( filename : string , storageTier ?: oracleFileListObject [ "storageTier" ] ) {
297+ if ( typeof filename !== "string" ) throw new Error ( "Requrie file name!" ) ;
298+ filename = filename . trim ( ) . slice ( - 1024 ) . split ( path . win32 . sep ) . join ( path . posix . sep ) ;
299+ if ( filename . startsWith ( path . posix . sep ) ) filename = filename . slice ( 1 ) ;
300+ filename = encodeURIComponent ( filename ) ;
301+ let uploaduuid : string , uploadPartNum = 1 ;
302+ return new stream . Writable ( {
303+ autoDestroy : true ,
304+ emitClose : true ,
305+ async write ( chunk : Buffer , encoding , callback ) {
306+ try {
307+ if ( ! ( Buffer . isBuffer ( chunk ) ) ) chunk = Buffer . from ( chunk , encoding ) ;
308+ if ( ! uploaduuid ) {
309+ const { body } = await http . jsonRequest ( new URL ( path . posix . join ( bucketPath , "o" , filename ) , `https://objectstorage.${ region } .oraclecloud.com` ) , {
310+ disableHTTP2 : true ,
311+ method : "PUT" ,
312+ headers : {
313+ "opc-multipart" : "true" ,
314+ ...( ! ! storageTier ? { "storage-tier" : storageTier } : { } ) ,
315+ } ,
316+ body : Buffer . from ( [ ] )
317+ } ) ;
318+ uploaduuid = body . uploadId ;
262319 }
263- } ) . catch ( err => this . emit ( "error" , err ) ) ;
264- }
265- }
320+ await http . bufferRequest ( new URL ( path . posix . join ( bucketPath , "u" , filename , "id" , uploaduuid , uploadPartNum . toString ( ) ) , `https://objectstorage.${ region } .oraclecloud.com` ) , {
321+ disableHTTP2 : true ,
322+ method : "PUT" ,
323+ headers : {
324+ "Content-Lenght" : chunk . byteLength . toString ( ) ,
325+ } ,
326+ body : chunk
327+ } ) ;
328+ uploadPartNum ++ ;
329+ this . emit ( "progress" , uploadPartNum , chunk . byteLength ) ;
330+ callback ( ) ;
331+ } catch ( err ) {
332+ callback ( err ) ;
333+ }
334+ } ,
335+ async destroy ( error , callback ) {
336+ try {
337+ if ( uploaduuid ) {
338+ if ( error ) {
339+ await http . bufferRequest ( new URL ( path . posix . join ( bucketPath , "u" , filename , "id" , uploaduuid , "/" ) , `https://objectstorage.${ region } .oraclecloud.com` ) , {
340+ disableHTTP2 : true ,
341+ method : "DELETE"
342+ } ) ;
343+ } else {
344+ await http . bufferRequest ( new URL ( path . posix . join ( bucketPath , "u" , filename , "id" , uploaduuid , "/" ) , `https://objectstorage.${ region } .oraclecloud.com` ) , {
345+ disableHTTP2 : true ,
346+ method : "POST" ,
347+ body : Buffer . from ( [ ] )
348+ } ) ;
349+ }
350+ }
351+ callback ( error ) ;
352+ } catch ( err ) {
353+ callback ( err ) ;
354+ }
355+ } ,
356+ } ) ;
266357 } ,
267358 /**
268359 * List files in Bucket
@@ -272,7 +363,7 @@ export function oracleBucketPreAuth(region: oracleRegions, namespace: string, na
272363 const data : oracleFileListObject [ ] = [ ] ;
273364 let startAfter : string ;
274365 while ( true ) {
275- const response = await http . jsonRequest < { nextStartWith ?: string , objects : ociBucket . models . ObjectSummary [ ] } > ( new URL ( path . posix . join ( "/p" , preAuthKey , "n" , namespace , "b" , name , "o" ) , `https://objectstorage.${ region } .oraclecloud.com` ) , {
366+ const response = await http . jsonRequest < { nextStartWith ?: string , objects : ociBucket . models . ObjectSummary [ ] } > ( new URL ( path . posix . join ( bucketPath , "o" ) , `https://objectstorage.${ region } .oraclecloud.com` ) , {
276367 method : "GET" ,
277368 query : {
278369 limit : 1000 ,
@@ -299,4 +390,50 @@ export function oracleBucketPreAuth(region: oracleRegions, namespace: string, na
299390 }
300391 } ;
301392 return funs ;
393+ }
394+
export interface watchConfig {
  /** Skip the initial local-to-remote reconciliation pass. */
  skipSyncFiles?: boolean;
  /** Prefix inside the bucket under which local files are mirrored. */
  remoteFolder?: string;
  /** List the objects currently stored under the given remote folder. */
  listFiles(folderPath?: string): Promise<oracleFileListObject[]>;
  /** Open a writable stream that stores `filename` in the bucket. */
  uploadFile(filename: string): stream.Writable;
  /** Remove a single object; optional (treated as a no-op when absent). */
  deleteObject?(path: string): Promise<void>;
}
402+
403+ /**
404+ * Sync files and folder from local to bucket
405+ * @param folderPath - Local folder path
406+ * @param config - Settings to sync files and more
407+ */
408+ export async function watch ( folderPath : string , config : watchConfig ) {
409+ if ( ! folderPath ) throw new TypeError ( "Folder path is required" ) ;
410+ if ( ! config ) throw new Error ( "Require configs!" ) ;
411+ else if ( ! ( await extendsFS . exists ( folderPath ) ) ) throw new Error ( "Folder path is not exists" ) ;
412+ else if ( ! ( await extendsFS . isDirectory ( folderPath ) ) ) throw new Error ( "Folder path is not a directory" ) ;
413+ config . remoteFolder ||= "" ;
414+ config . deleteObject ||= ( ) => Promise . resolve ( ) ;
415+ if ( config . skipSyncFiles !== true ) {
416+ const remoteFiles = await config . listFiles ( config . remoteFolder ) ;
417+ await extendsFS . readdirV2 ( folderPath , true , ( ) => true , async ( relativePath , fullPath , stats ) => {
418+ if ( stats . isDirectory ( ) ) return ;
419+ const find = remoteFiles . find ( item => item . name === path . posix . resolve ( "/" , config . remoteFolder || "" , relativePath ) . slice ( 1 ) ) ;
420+ if ( ! find ) return finished ( createReadStream ( fullPath ) . pipe ( config . uploadFile ( path . posix . resolve ( "/" , config . remoteFolder || "" , relativePath ) ) ) , { error : true } ) ;
421+ else if ( stats . size !== find . size ) return finished ( createReadStream ( fullPath ) . pipe ( config . uploadFile ( path . posix . resolve ( "/" , config . remoteFolder || "" , relativePath ) ) ) , { error : true } ) ;
422+ } ) ;
423+ }
424+
425+ async function syncFile ( local : string ) {
426+ if ( ( await fs . lstat ( local ) ) . isDirectory ( ) ) return ;
427+ return finished ( createReadStream ( local ) . pipe ( config . uploadFile ( path . posix . resolve ( "/" , config . remoteFolder || "" , path . relative ( folderPath , local ) ) ) ) )
428+ }
429+ async function deleteFF ( local : string ) {
430+ return config . deleteObject ( path . posix . resolve ( "/" , config . remoteFolder || "" , path . relative ( folderPath , local ) ) ) ;
431+ }
432+
433+ const watch = chokidar . watch ( folderPath , { ignoreInitial : true , atomic : true } ) ;
434+ watch . on ( "add" , async ( filePath ) => syncFile ( filePath ) . catch ( err => watch . emit ( "error" , err ) ) ) ;
435+ watch . on ( "change" , async ( filePath ) => syncFile ( filePath ) . catch ( err => watch . emit ( "error" , err ) ) ) ;
436+ watch . on ( "unlink" , async ( filePath ) => deleteFF ( filePath ) . catch ( err => watch . emit ( "error" , err ) ) ) ;
437+ watch . on ( "unlinkDir" , async ( filePath ) => deleteFF ( filePath ) . catch ( err => watch . emit ( "error" , err ) ) ) ;
438+ return watch ;
302439}
0 commit comments