@@ -54,7 +54,7 @@ if (imageID === "") {
   process.exit(1);
 }
 
-const tarFile = imageID + ".tar";
+const tarFile = imageID.trim() + ".tar";
 const imagePath = ".output-image";
 if (!(await file(tarFile).exists())) {
   const output = await $`docker save ${image} --output ${tarFile}`;
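The `.trim()` matters because the image ID is captured from shell output, which arrives with a trailing newline; untrimmed, that newline ends up embedded in the tar filename. A minimal sketch of the failure mode, assuming `imageID` is captured with Bun's `$` shell (the docker command and image name here are placeholders):

```ts
import { $ } from "bun";

// Shell output keeps its trailing newline.
const imageID = await $`docker images --no-trunc --quiet my-image:latest`.text();
console.log(JSON.stringify(imageID + ".tar"));        // "sha256:abc...\n.tar" <- broken filename
console.log(JSON.stringify(imageID.trim() + ".tar")); // "sha256:abc....tar"
```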
@@ -119,7 +119,7 @@ await mkdir(cacheFolder, { recursive: true });
 const [manifest] = manifests;
 const tasks = [];
 
-console.log("compressing ...");
+console.log("Compressing ...");
 // Iterate through every layer, read it and compress to a file
 for (const layer of manifest.Layers) {
   tasks.push(
@@ -142,18 +142,17 @@ for (const layer of manifest.Layers) {
       }
 
       const inprogressPath = path.join(cacheFolder, layerName + "-in-progress");
-
       await rm(inprogressPath, { recursive: true });
-      const layerCacheGzip = file(inprogressPath, {});
-
+      const layerCacheGzip = file(inprogressPath);
       const cacheWriter = layerCacheGzip.writer();
       const hasher = new Bun.CryptoHasher("sha256");
+
       const gzipStream = zlib.createGzip({ level: 9 });
       gzipStream.pipe(
         new stream.Writable({
-          write(value, _, callback) {
-            hasher.update(value);
-            cacheWriter.write(value);
+          write(value: Buffer, _, callback) {
+            cacheWriter.write(value.buffer);
+            hasher.update(value.buffer);
             callback();
           },
         }),
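The rewritten `write` callback tees every compressed chunk to both the cache file and the digest. One subtlety worth flagging: `value.buffer` is the chunk's underlying `ArrayBuffer`, and for pooled Node Buffers that backing store can be larger than the view itself. A defensive variant of the same tee pattern, sketched under the assumption that slicing to the exact byte range is acceptable (the file name is a placeholder):

```ts
import zlib from "node:zlib";
import stream from "node:stream";

const hasher = new Bun.CryptoHasher("sha256");
const sink = Bun.file("layer-in-progress").writer();

const gzip = zlib.createGzip({ level: 9 });
gzip.pipe(
  new stream.Writable({
    write(chunk: Buffer, _encoding, callback) {
      // Slice to the view's exact byte range so a pooled backing buffer
      // never leaks extra bytes into the file or the digest.
      const exact = chunk.buffer.slice(chunk.byteOffset, chunk.byteOffset + chunk.byteLength);
      sink.write(exact);
      hasher.update(exact);
      callback();
    },
  }),
);
```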
@@ -165,7 +164,7 @@ for (const layer of manifest.Layers) {
         new WritableStream({
           write(value) {
             return new Promise((res, rej) => {
-              gzipStream.write(value, "binary", (err) => {
+              gzipStream.write(value, (err) => {
                 if (err) {
                   rej(err);
                   return;
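Dropping the `"binary"` argument is safe because the encoding parameter of `Writable.write` only applies when the chunk is a string; for the byte chunks flowing through the `WritableStream` above it was ignored. An illustrative comparison:

```ts
import zlib from "node:zlib";

const gzipStream = zlib.createGzip();
const bytes = new Uint8Array([1, 2, 3]);

gzipStream.write(bytes, (err) => {});            // byte chunk: no encoding argument needed
gzipStream.write("text", "binary", (err) => {}); // string chunk: this is where encoding applies
```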
@@ -175,12 +174,21 @@ for (const layer of manifest.Layers) {
             });
           },
           close() {
-            gzipStream.end();
+            return new Promise((res) => {
+              gzipStream.end(() => {
+                res();
+              });
+            });
           },
         }),
       );
 
-      await cacheWriter.flush();
+      await new Promise((res) =>
+        gzipStream.flush(() => {
+          res(true);
+        }),
+      );
+
       await cacheWriter.end();
       const digest = hasher.digest("hex");
       await rename(inprogressPath, path.join(cacheFolder, digest));
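Returning a promise from `close()` and awaiting `gzipStream.flush()` ensures every compressed byte has reached the cache writer and the hasher before the digest is computed and the file is renamed into place. The same "wait until the stream has drained" idea can also be expressed with Node's promise helpers; a sketch that is equivalent in spirit to the explicit callbacks in the diff:

```ts
import zlib from "node:zlib";
import { finished } from "node:stream/promises";

const gzip = zlib.createGzip({ level: 9 });
gzip.pipe(process.stdout); // stand-in destination for the cache writer

gzip.end("last chunk");
await finished(gzip); // resolves once the stream has emitted all compressed output
```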
@@ -258,8 +266,15 @@ async function pushLayer(layerDigest: string, readableStream: ReadableStream, to
     throw new Error("Docker-Upload-UUID not defined in headers");
   }
 
+  function parseLocation(location: string) {
+    if (location.startsWith("/")) {
+      return `${proto}://${imageHost}${location}`;
+    }
+
+    return location;
+  }
+
   let location = createUploadResponse.headers.get("location") ?? `/v2${imageRepositoryPath}/blobs/uploads/${uploadId}`;
-  const putChunkUploadURL = `${proto}://${imageHost}${location}`;
   const maxToWrite = Math.min(maxChunkLength, totalLayerSize);
   let end = Math.min(maxChunkLength, totalLayerSize);
   let written = 0;
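`parseLocation` exists because the `Location` header a registry returns may be either an absolute URL or a path relative to the registry host, and the old code assumed it was always relative. Hypothetical usage, assuming `proto` is `"https"` and `imageHost` is `"registry.example.com"`:

```ts
parseLocation("/v2/app/blobs/uploads/some-uuid");
// -> "https://registry.example.com/v2/app/blobs/uploads/some-uuid"

parseLocation("https://upload.example.net/v2/app/blobs/uploads/some-uuid");
// -> returned unchanged
```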
@@ -268,10 +283,10 @@ async function pushLayer(layerDigest: string, readableStream: ReadableStream, to
   while (totalLayerSizeLeft > 0) {
     const range = `0-${Math.min(end, totalLayerSize) - 1}`;
     const current = new ReadableLimiter(reader as ReadableStreamDefaultReader, maxToWrite, previousReadable);
-
+    const patchChunkUploadURL = parseLocation(location);
     // we have to do fetchNode because Bun doesn't allow setting custom Content-Length.
     // https://github.com/oven-sh/bun/issues/10507
-    const putChunkResult = await fetchNode(putChunkUploadURL, {
+    const patchChunkResult = await fetchNode(patchChunkUploadURL, {
       method: "PATCH",
       body: current,
       headers: new Headers({
@@ -280,13 +295,13 @@ async function pushLayer(layerDigest: string, readableStream: ReadableStream, to
         "content-length": `${Math.min(totalLayerSizeLeft, maxToWrite)}`,
       }),
     });
-    if (!putChunkResult.ok) {
+    if (!patchChunkResult.ok) {
       throw new Error(
-        `uploading chunk ${putChunkUploadURL} returned ${putChunkResult.status}: ${await putChunkResult.text()}`,
+        `uploading chunk ${patchChunkUploadURL} returned ${patchChunkResult.status}: ${await patchChunkResult.text()}`,
       );
     }
 
-    const rangeResponse = putChunkResult.headers.get("range");
+    const rangeResponse = patchChunkResult.headers.get("range");
     if (rangeResponse !== range) {
       throw new Error(`unexpected Range header ${rangeResponse}, expected ${range}`);
     }
@@ -295,14 +310,13 @@ async function pushLayer(layerDigest: string, readableStream: ReadableStream, to
     totalLayerSizeLeft -= previousReadable.written;
     written += previousReadable.written;
     end += previousReadable.written;
-    location = putChunkResult.headers.get("location") ?? location;
+    location = patchChunkResult.headers.get("location") ?? location;
     if (totalLayerSizeLeft != 0) console.log(layerDigest + ":", totalLayerSizeLeft, "upload bytes left.");
   }
 
   const range = `0-${written - 1}`;
-  const uploadURL = new URL(`${proto}://${imageHost}${location}`);
+  const uploadURL = new URL(parseLocation(location));
   uploadURL.searchParams.append("digest", layerDigest);
-
   const response = await fetch(uploadURL.toString(), {
     method: "PUT",
     headers: new Headers({
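Taken together, the renames and the `parseLocation` calls make the loop read as what it is: the chunked blob upload from the OCI distribution spec, where each chunk goes up with `PATCH` against the current upload location and the final commit is a `PUT` with a `digest` query parameter. A compressed sketch of that flow, with placeholder names and with the `Content-Range`/`Content-Length` bookkeeping from the real code omitted:

```ts
async function uploadBlobSketch(
  chunks: AsyncIterable<Uint8Array>,
  startLocation: string,
  digest: string, // e.g. "sha256:..."
) {
  let location = startLocation;
  for await (const chunk of chunks) {
    const res = await fetch(location, { method: "PATCH", body: chunk });
    if (!res.ok) throw new Error(`chunk upload failed: ${res.status}`);
    // The registry may hand back a fresh upload URL after every chunk.
    location = res.headers.get("location") ?? location;
  }
  const commitURL = new URL(location);
  commitURL.searchParams.append("digest", digest);
  const commit = await fetch(commitURL, { method: "PUT" });
  if (!commit.ok) throw new Error(`commit failed: ${commit.status}`);
}
```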