@@ -1287,12 +1287,12 @@ calling core wasm code receives the `STREAM_READ` progress event (in which case
1287
1287
` RevokeBuffer ` is called). This reduces the number of task-switches required
1288
1288
by the spec, particularly when streaming between two components.
1289
1289
1290
- The ` ReadableStreamGuestImpl ` class implements ` ReadableStream ` for streams
1291
- created by wasm (via ` stream.new ` ) and tracks the common state shared by both
1292
- the readable and writable ends of streams (defined below). Introducing the
1293
- class in chunks, starting with the fields and initialization:
1290
+ The ` SharedStreamImpl ` class implements ` ReadableStream ` for streams created by
1291
+ wasm (via ` stream.new ` ) and tracks the common state shared by both the readable
1292
+ and writable ends of streams (defined below). Introducing the class in chunks,
1293
+ starting with the fields and initialization:
1294
1294
``` python
1295
- class ReadableStreamGuestImpl (ReadableStream ):
1295
+ class SharedStreamImpl (ReadableStream ):
1296
1296
closed_: bool
1297
1297
pending_inst: Optional[ComponentInstance]
1298
1298
pending_buffer: Optional[Buffer]
@@ -1420,30 +1420,30 @@ entirely symmetric, with the only difference being whether the polymorphic
1420
1420
whether there is an asynchronous read or write in progress and is maintained by
1421
1421
the definitions of ` stream.{read,write} ` below. Importantly, ` copying ` and the
1422
1422
inherited fields of ` Waitable ` are per-* end* , not per-* stream* (unlike the
1423
- fields of ` ReadableStreamGuestImpl ` shown above, which are per-stream and
1424
- shared by both ends via their common ` stream ` field).
1423
+ fields of ` SharedStreamImpl ` shown above, which are per-stream and shared by
1424
+ both ends via their ` shared ` field).
1425
1425
``` python
1426
1426
class StreamEnd (Waitable ):
1427
- stream : ReadableStream
1427
+ shared : ReadableStream
1428
1428
copying: bool
1429
1429
1430
- def __init__ (self , stream ):
1430
+ def __init__ (self , shared ):
1431
1431
Waitable.__init__ (self )
1432
- self .stream = stream
1432
+ self .shared = shared
1433
1433
self .copying = False
1434
1434
1435
1435
def drop (self ):
1436
1436
trap_if(self .copying)
1437
- self .stream .close()
1437
+ self .shared .close()
1438
1438
Waitable.drop(self )
1439
1439
1440
1440
class ReadableStreamEnd (StreamEnd ):
1441
1441
def copy (self , inst , dst , on_partial_copy , on_copy_done ):
1442
- return self .stream .read(inst, dst, on_partial_copy, on_copy_done)
1442
+ return self .shared .read(inst, dst, on_partial_copy, on_copy_done)
1443
1443
1444
1444
class WritableStreamEnd (StreamEnd ):
1445
1445
def copy (self , inst , src , on_partial_copy , on_copy_done ):
1446
- return self .stream .write(inst, src, on_partial_copy, on_copy_done)
1446
+ return self .shared .write(inst, src, on_partial_copy, on_copy_done)
1447
1447
```
1448
1448
Dropping a stream end while an asynchronous read or write is in progress traps
1449
1449
since the async read or write cannot be cancelled without blocking and ` drop `
@@ -1453,8 +1453,8 @@ finish before closing.
1453
1453
1454
1454
The ` {Readable,Writable}StreamEnd.copy ` method is called polymorphically by the
1455
1455
shared definition of ` stream.{read,write} ` below. While the static type of
1456
- ` StreamEnd.stream ` is ` ReadableStream ` , a ` WritableStreamEnd ` always points to
1457
- a ` ReadableStreamGuestImpl ` object which is why ` WritableStreamEnd.copy ` can
1456
+ ` StreamEnd.shared ` is ` ReadableStream ` , a ` WritableStreamEnd ` always points to
1457
+ a ` SharedStreamImpl ` object which is why ` WritableStreamEnd.copy ` can
1458
1458
unconditionally call ` stream.write ` .
1459
1459
1460
1460
@@ -1470,20 +1470,20 @@ class FutureEnd(StreamEnd):
1470
1470
assert (buffer.remain() == 1 )
1471
1471
def on_copy_done_wrapper (why ):
1472
1472
if buffer.remain() == 0 :
1473
- self .stream .close()
1473
+ self .shared .close()
1474
1474
on_copy_done(why)
1475
1475
ret = copy_op(inst, buffer, on_partial_copy = None , on_copy_done = on_copy_done_wrapper)
1476
1476
if ret == ' done' and buffer.remain() == 0 :
1477
- self .stream .close()
1477
+ self .shared .close()
1478
1478
return ret
1479
1479
1480
1480
class ReadableFutureEnd (FutureEnd ):
1481
1481
def copy (self , inst , dst , on_partial_copy , on_copy_done ):
1482
- return self .close_after_copy(self .stream .read, inst, dst, on_copy_done)
1482
+ return self .close_after_copy(self .shared .read, inst, dst, on_copy_done)
1483
1483
1484
1484
class WritableFutureEnd (FutureEnd ):
1485
1485
def copy (self , inst , src , on_partial_copy , on_copy_done ):
1486
- return self .close_after_copy(self .stream .write, inst, src, on_copy_done)
1486
+ return self .close_after_copy(self .shared .write, inst, src, on_copy_done)
1487
1487
def drop (self ):
1488
1488
FutureEnd.drop(self )
1489
1489
```
@@ -1971,9 +1971,9 @@ def lift_async_value(ReadableEndT, cx, i, t):
1971
1971
assert (not contains_borrow(t))
1972
1972
e = cx.inst.table.remove(i)
1973
1973
trap_if(not isinstance (e, ReadableEndT))
1974
- trap_if(e.stream .t != t)
1974
+ trap_if(e.shared .t != t)
1975
1975
trap_if(e.copying)
1976
- return e.stream
1976
+ return e.shared
1977
1977
```
1978
1978
Lifting transfers ownership of the readable end and traps if a read was in
1979
1979
progress (which would now be dangling).
@@ -3689,20 +3689,20 @@ readable end is subsequently transferred to another component (or the host) via
3689
3689
``` python
3690
3690
async def canon_stream_new (stream_t , task ):
3691
3691
trap_if(not task.inst.may_leave)
3692
- stream = ReadableStreamGuestImpl (stream_t.t)
3693
- ri = task.inst.table.add(ReadableStreamEnd(stream ))
3694
- wi = task.inst.table.add(WritableStreamEnd(stream ))
3692
+ shared = SharedStreamImpl (stream_t.t)
3693
+ ri = task.inst.table.add(ReadableStreamEnd(shared ))
3694
+ wi = task.inst.table.add(WritableStreamEnd(shared ))
3695
3695
return [ ri | (wi << 32 ) ]
3696
3696
3697
3697
async def canon_future_new (future_t , task ):
3698
3698
trap_if(not task.inst.may_leave)
3699
- future = ReadableStreamGuestImpl (future_t.t)
3700
- ri = task.inst.table.add(ReadableFutureEnd(future ))
3701
- wi = task.inst.table.add(WritableFutureEnd(future ))
3699
+ shared = SharedStreamImpl (future_t.t)
3700
+ ri = task.inst.table.add(ReadableFutureEnd(shared ))
3701
+ wi = task.inst.table.add(WritableFutureEnd(shared ))
3702
3702
return [ ri | (wi << 32 ) ]
3703
3703
```
3704
3704
Because futures are just streams with extra limitations, here we see that a
3705
- ` WritableFutureEnd ` shares the same ` ReadableStreamGuestImpl ` type as
3705
+ ` WritableFutureEnd ` shares the same ` SharedStreamImpl ` type as
3706
3706
` WritableStreamEnd ` ; the extra limitations are added by ` WritableFutureEnd ` and
3707
3707
the future built-ins below.
3708
3708
@@ -3756,80 +3756,87 @@ async def canon_future_write(future_t, opts, task, i, ptr):
3756
3756
future_t, opts, task, i, ptr, 1 )
3757
3757
```
3758
3758
3759
- Introducing the ` copy ` function in chunks, ` copy ` first checks that the element
3760
- at index ` i ` is of the right type and that there is not already a copy in
3761
- progress. (In the future, this restriction could be relaxed, allowing a finite
3762
- number of pipelined reads or writes.) Then a readable or writable buffer is
3763
- created which (in ` Buffer ` 's constructor) eagerly checks the alignment and
3764
- bounds of (` i ` , ` n ` ).
3759
+ Introducing the ` copy ` function in chunks, ` copy ` first checks that the
3760
+ element at index ` i ` is of the right type and that there is not already a
3761
+ copy in progress. (In the future, this restriction could be relaxed, allowing
3762
+ a finite number of pipelined reads or writes.)
3765
3763
``` python
3766
3764
async def copy (EndT , BufferT , event_code , stream_or_future_t , opts , task , i , ptr , n ):
3767
3765
trap_if(not task.inst.may_leave)
3768
3766
e = task.inst.table.get(i)
3769
3767
trap_if(not isinstance (e, EndT))
3770
- trap_if(e.stream .t != stream_or_future_t.t)
3768
+ trap_if(e.shared .t != stream_or_future_t.t)
3771
3769
trap_if(e.copying)
3770
+ ```
3771
+ Then a readable or writable buffer is created which (in ` Buffer ` 's
3772
+ constructor) eagerly checks the alignment and bounds of (` i ` , ` n ` ).
3773
+ (In the future, the restriction on futures/streams containing ` borrow ` s could
3774
+ be relaxed by maintaining sufficient bookkeeping state to ensure that
3775
+ borrowed handles * or streams/futures of borrowed handles* could not outlive
3776
+ their originating call.)
3777
+ ``` python
3772
3778
assert (not contains_borrow(stream_or_future_t))
3773
3779
cx = LiftLowerContext(opts, task.inst, borrow_scope = None )
3774
3780
buffer = BufferT(stream_or_future_t.t, cx, ptr, n)
3775
3781
```
3776
-
3777
- Next, in the synchronous case, ` Task.wait_on ` is used to synchronously and
3778
- uninterruptibly wait for the ` on_* ` callbacks to indicate that the copy has made
3779
- progress. In the case of ` on_partial_copy ` , this code carefully delays the call
3780
- to ` revoke_buffer ` until right before control flow is returned back to the
3781
- calling core wasm code. This enables another task to potentially complete
3782
- multiple partial copies before having to context-switch back .
3782
+ Next, the ` copy ` method of ` {Readable,Writable}{Stream,Future}End ` is called
3783
+ to attempt to perform the actual ` read ` or ` write ` . The ` on_partial_copy `
3784
+ callback passed to ` copy ` is called zero or more times each time values are
3785
+ copied to/from ` buffer ` without filling it up. Afterwards, the ` on_copy_done `
3786
+ callback passed to ` copy ` is called at most once when: the ` buffer ` is full,
3787
+ the other end closed, or this end cancelled the copy via
3788
+ ` {stream,future}.cancel-{read,write} ` .
3783
3789
``` python
3784
- if opts.sync:
3785
- final_revoke_buffer = None
3786
- def on_partial_copy (revoke_buffer , why = ' completed' ):
3787
- assert (why == ' completed' )
3788
- nonlocal final_revoke_buffer
3789
- final_revoke_buffer = revoke_buffer
3790
- if not async_copy.done():
3791
- async_copy.set_result(None )
3792
- on_copy_done = partial(on_partial_copy, lambda :())
3793
- if e.copy(task.inst, buffer, on_partial_copy, on_copy_done) != ' done' :
3794
- async_copy = asyncio.Future()
3795
- await task.wait_on(async_copy, sync = True )
3796
- final_revoke_buffer()
3797
- ```
3798
- (When non-cooperative threads are added, the assertion that synchronous copies
3799
- can only be ` completed ` , and not ` cancelled ` , will no longer hold.)
3800
-
3801
- In the asynchronous case, the ` on_* ` callbacks set a pending event on the
3802
- ` Waitable ` which will be delivered to core wasm when core wasm calls
3803
- ` task.{wait,poll} ` or, if using ` callback ` , returns to the event loop.
3804
- Symmetric to the synchronous case, this code carefully delays calling
3805
- ` revoke_buffer ` until the copy event is actually delivered to core wasm,
3806
- allowing multiple partial copies to complete in the interim, reducing overall
3807
- context-switching overhead.
3790
+ def copy_event (why , revoke_buffer ):
3791
+ revoke_buffer()
3792
+ e.copying = False
3793
+ return (event_code, i, pack_copy_result(task, e, buffer, why))
3794
+
3795
+ def on_partial_copy (revoke_buffer ):
3796
+ e.set_event(partial(copy_event, ' completed' , revoke_buffer))
3797
+
3798
+ def on_copy_done (why ):
3799
+ e.set_event(partial(copy_event, why, revoke_buffer = lambda :()))
3800
+
3801
+ if e.copy(task.inst, buffer, on_partial_copy, on_copy_done) == ' done' :
3802
+ return [pack_copy_result(task, e, buffer, ' completed' )]
3803
+ ```
3804
+ If the stream/future is already closed or at least 1 element could be
3805
+ immediately copied, ` copy ` returns ` 'done' ` and ` {stream,future}.{read,write} `
3806
+ synchronously returns how much was copied and how the operation ended to the
3807
+ caller. Otherwise, the built-in blocks:
3808
3808
``` python
3809
3809
else :
3810
- def copy_event (why , revoke_buffer ):
3811
- revoke_buffer()
3812
- e.copying = False
3813
- return (event_code, i, pack_copy_result(task, e, buffer, why))
3814
- def on_partial_copy (revoke_buffer ):
3815
- e.set_event(partial(copy_event, ' completed' , revoke_buffer))
3816
- def on_copy_done (why ):
3817
- e.set_event(partial(copy_event, why, revoke_buffer = lambda :()))
3818
- if e.copy(task.inst, buffer, on_partial_copy, on_copy_done) != ' done' :
3810
+ if opts.sync:
3811
+ await task.wait_on(e.wait_for_pending_event(), sync = True )
3812
+ code,index,payload = e.get_event()
3813
+ assert (code == event_code and index == i)
3814
+ return [payload]
3815
+ else :
3819
3816
e.copying = True
3820
3817
return [BLOCKED ]
3821
- return [pack_copy_result(task, e, buffer, ' completed' )]
3822
3818
```
3823
- However the copy completes, the results are reported to the caller via
3824
- ` pack_copy_result ` :
3819
+ In the synchronous case, the caller synchronously waits for progress
3820
+ (blocking all execution in the calling component instance, but allowing other
3821
+ tasks in other component instances to make progress). Note that ` get_event() `
3822
+ necessarily calls a ` copy_event ` closure created by either ` on_partial_copy `
3823
+ or ` on_copy_done ` . In the asynchronous case, the built-in immediately returns
3824
+ the ` BLOCKED ` code and the caller must asynchronously wait for progress using
3825
+ ` waitable-set.{wait,poll} ` or, if using a ` callback ` , by returning to the event
3826
+ loop. Setting ` copying ` prevents any more reads/writes from starting and also
3827
+ prevents the stream/future from being closed.
3828
+
3829
+ Regardless of whether the ` {stream,future}.{read,write} ` completes
3830
+ synchronously or asynchronously, the results passed to core wasm are
3831
+ bit-packed into a single ` i32 ` according to the following scheme:
3825
3832
``` python
3826
3833
BLOCKED = 0x ffff_ffff
3827
3834
COMPLETED = 0x 0
3828
3835
CLOSED = 0x 1
3829
3836
CANCELLED = 0x 2
3830
3837
3831
3838
def pack_copy_result (task , e , buffer , why ):
3832
- if e.stream .closed():
3839
+ if e.shared .closed():
3833
3840
result = CLOSED
3834
3841
elif why == ' cancelled' :
3835
3842
result = CANCELLED
@@ -3882,10 +3889,10 @@ async def cancel_copy(EndT, event_code, stream_or_future_t, sync, task, i):
3882
3889
trap_if(not task.inst.may_leave)
3883
3890
e = task.inst.table.get(i)
3884
3891
trap_if(not isinstance (e, EndT))
3885
- trap_if(e.stream .t != stream_or_future_t.t)
3892
+ trap_if(e.shared .t != stream_or_future_t.t)
3886
3893
trap_if(not e.copying)
3887
3894
if not e.has_pending_event():
3888
- e.stream .cancel()
3895
+ e.shared .cancel()
3889
3896
if not e.has_pending_event():
3890
3897
if sync:
3891
3898
await task.wait_on(e.wait_for_pending_event(), sync = True )
@@ -3896,10 +3903,10 @@ async def cancel_copy(EndT, event_code, stream_or_future_t, sync, task, i):
3896
3903
return [payload]
3897
3904
```
3898
3905
The * first* check for ` e.has_pending_event() ` catches the case where the copy has
3899
- already racily finished, in which case we must * not* call ` stream. cancel()` .
3900
- Calling ` stream. cancel()` may, but is not required to, recursively call one of
3901
- the ` on_* ` callbacks (passed by ` canon_{stream,future}_{read,write} ` above)
3902
- which will set a pending event that is caught by the * second* check for
3906
+ already racily finished, in which case we must * not* call ` cancel() ` . Calling
3907
+ ` cancel() ` may, but is not required to, recursively call one of the ` on_* `
3908
+ callbacks (passed by ` canon_{stream,future}_{read,write} ` above) which will set
3909
+ a pending event that is caught by the * second* check for
3903
3910
` e.has_pending_event() ` .
3904
3911
3905
3912
If the copy hasn't been cancelled, the synchronous case uses ` Task.wait_on ` to
@@ -3951,7 +3958,7 @@ async def close(EndT, stream_or_future_t, task, hi):
3951
3958
trap_if(not task.inst.may_leave)
3952
3959
e = task.inst.table.remove(hi)
3953
3960
trap_if(not isinstance (e, EndT))
3954
- trap_if(e.stream .t != stream_or_future_t.t)
3961
+ trap_if(e.shared .t != stream_or_future_t.t)
3955
3962
e.drop()
3956
3963
return []
3957
3964
```
0 commit comments