diff --git a/.github/workflows/verifast-negative.yml b/.github/workflows/verifast-negative.yml index c9ccf44cf0b7a..dd2d8f5d82c75 100644 --- a/.github/workflows/verifast-negative.yml +++ b/.github/workflows/verifast-negative.yml @@ -30,16 +30,16 @@ jobs: - name: Install VeriFast run: | cd ~ - curl -OL https://github.com/verifast/verifast/releases/download/25.02/verifast-25.02-linux.tar.gz - # https://github.com/verifast/verifast/attestations/4911733 - echo '5d5c87d11b3d735f44c3f0ca52aebc89e3c4d1119d98ef25188d07cb57ad65e8 verifast-25.02-linux.tar.gz' | shasum -a 256 -c - tar xf verifast-25.02-linux.tar.gz + curl -OL https://github.com/verifast/verifast/releases/download/25.06/verifast-25.06-linux.tar.gz + # https://github.com/verifast/verifast/attestations/7473259 + echo '7081408d99853620a79fbfc3767f367d6f6ccfdaf26a63b1f30a382489aacb5a verifast-25.06-linux.tar.gz' | shasum -a 256 -c + tar xf verifast-25.06-linux.tar.gz - name: Install the Rust toolchain used by VeriFast run: rustup toolchain install nightly-2024-11-23 - name: Run VeriFast Verification run: | - export PATH=~/verifast-25.02/bin:$PATH + export PATH=~/verifast-25.06/bin:$PATH cd verifast-proofs bash check-verifast-proofs-negative.sh diff --git a/.github/workflows/verifast.yml b/.github/workflows/verifast.yml index 728623ace2a66..824afadde3d13 100644 --- a/.github/workflows/verifast.yml +++ b/.github/workflows/verifast.yml @@ -27,17 +27,17 @@ jobs: - name: Install VeriFast run: | cd ~ - curl -OL https://github.com/verifast/verifast/releases/download/25.02/verifast-25.02-linux.tar.gz - # https://github.com/verifast/verifast/attestations/4911733 - echo '5d5c87d11b3d735f44c3f0ca52aebc89e3c4d1119d98ef25188d07cb57ad65e8 verifast-25.02-linux.tar.gz' | shasum -a 256 -c - tar xf verifast-25.02-linux.tar.gz + curl -OL https://github.com/verifast/verifast/releases/download/25.06/verifast-25.06-linux.tar.gz + # https://github.com/verifast/verifast/attestations/7473259 + echo 
'7081408d99853620a79fbfc3767f367d6f6ccfdaf26a63b1f30a382489aacb5a verifast-25.06-linux.tar.gz' | shasum -a 256 -c + tar xf verifast-25.06-linux.tar.gz - name: Install the Rust toolchain used by VeriFast run: rustup toolchain install nightly-2024-11-23 - name: Run VeriFast Verification run: | - export PATH=~/verifast-25.02/bin:$PATH + export PATH=~/verifast-25.06/bin:$PATH cd verifast-proofs bash check-verifast-proofs.sh diff --git a/verifast-proofs/alloc/collections/linked_list.rs/README.md b/verifast-proofs/alloc/collections/linked_list.rs/README.md new file mode 100644 index 0000000000000..124ba706d9968 --- /dev/null +++ b/verifast-proofs/alloc/collections/linked_list.rs/README.md @@ -0,0 +1,584 @@ +# Partial verification of LinkedList with VeriFast + +With certain caveats (see [Caveats](#caveats) below), this proof proves +[soundness](https://doc.rust-lang.org/nomicon/working-with-unsafe.html) of the +`Node` associated functions `new` and `into_element`, the `LinkedList` associated functions +`pop_front_node`, `new`, `new_in`, `iter`, `cursor_front_mut`, +`cursor_back_mut`, `len`, `clear`, `push_front`, `split_off`, and `extract_if`, +the `Iter` associated function `next`, the `CursorMut` associated functions `move_next`, `move_prev`, +`current`, and `remove_current`, and the `ExtractIf` associated function `next`. +Furthermore, since the `LinkedList` associated functions `contains`, `remove`, `retain`, `retain_mut`, and `drop` do not use `unsafe` blocks, do not mutate private fields, and call only the former functions, this proof implies their soundness as well. + +## The proof + +We here give a tour of the proof. 
+ +### Predicate `Nodes` + +Central to the proof is predicate `Nodes`, defined as follows: + +``` +pred Nodes(alloc_id: any, n: Option>>, prev: Option>>, last: Option>>, next: Option>>; nodes: list>>) = + if n == next { + nodes == [] &*& last == prev + } else { + n == Option::Some(?n_) &*& + alloc_block_in(alloc_id, NonNull_ptr(n_) as *u8, Layout::new_::>()) &*& struct_Node_padding(NonNull_ptr(n_)) &*& + (*NonNull_ptr(n_)).prev |-> prev &*& + (*NonNull_ptr(n_)).next |-> ?next0 &*& + pointer_within_limits(&(*NonNull_ptr(n_)).element) == true &*& + Nodes(alloc_id, next0, n, last, next, ?nodes0) &*& + nodes == cons(n_, nodes0) + }; +``` + +It is expressed in [separation logic](https://en.wikipedia.org/wiki/Separation_logic), a logic for reasoning about ownership. It asserts ownership of the linked list nodes between `n` (inclusive) and `next` (exclusive). Furthermore, it asserts that the nodes have been allocated with the allocator identified by `alloc_id`, that the first node (if any) points to predecessor node `prev`, that the last node (if any) is `last`, and that the entire sequence of nodes is `nodes` (a [mathematical list](https://github.com/verifast/verifast/blob/37bb28bc5edb0e08e5ee8b63a9afd57e7c3b7ffd/bin/rust/list.rsspec#L16)). + +If `n` is not equal to `next`, it asserts that `n` is not `None` and binds logical variable `n_` to the `NonNull` value wrapped by the `Option`. (The question mark indicates a binding occurrence of a logical variable.) Furthermore, it asserts ownership of the `alloc_block_in` chunk for `n_`, which proves the node was allocated using allocator `alloc_id`. The points-to assertion `(*NonNull_ptr(n_)).prev |-> prev` asserts ownership of `n_`'s `prev` field and asserts that its value equals the value of logical variable `prev`. The predicate calls itself recursively to assert ownership of the remaining nodes. 
Since the various parts are separated by a *separating conjunction* `&*&`, the predicate asserts that the remainder of the sequence of nodes does not overlap with the first node, which implies that the sequence of nodes is not cyclic. + +### Lemma `Nodes_last_lemma` + +The proof includes the lemma `Nodes_last_lemma`, defined as follows: + +``` +lem Nodes_last_lemma(n: Option>>) + req Nodes::(?alloc_id, n, ?prev, ?last, ?next, ?nodes); + ens Nodes::(alloc_id, n, prev, last, next, nodes) &*& + match last { + Option::None => n == next && prev == last && nodes == [], + Option::Some(last_) => + match prev { + Option::None => n != next && length(nodes) > 0 && nth(length(nodes) - 1, nodes) == last_, + Option::Some(prev_) => nth(length(nodes), cons(prev_, nodes)) == last_ + } + }; +{ + open Nodes(alloc_id, n, prev, last, next, nodes); + if n == next { + } else { + assert Nodes(_, ?next0, _, _, _, _); + Nodes_last_lemma(next0); + } + close Nodes(alloc_id, n, prev, last, next, nodes); +} +``` + +This is a VeriFast lemma. It looks like a regular Rust function, except that it uses keyword `lem` instead of `fn` and it is inside a VeriFast annotation (a Rust comment that starts with `/*@` and ends with `@*/`). VeriFast checks that lemmas do not have side-effects and that they terminate. A lemma that passes these checks constitutes a proof that its precondition implies its postcondition. Lemma `Nodes_last_lemma` proves, among other things, that if predicate `Nodes(alloc_id, n, prev, last, next, nodes)` holds and `last == None`, then `n == next` and `prev == last` and `nodes == []`. It proves this as follows: +- First, it unfolds the definition of the `Nodes` predicate, using an `open` ghost command. +- Then, it performs a case analysis on whether `n == next`. If not: + - It first uses an `assert` ghost statement to match against the recursive `Nodes` chunk produced by the `open` statement and bind the value for the `n` parameter to logical variable `next0`. 
+ - Then it calls itself recursively. VeriFast can tell that this recursion terminates because the `Nodes` chunk required by the recursive call's precondition was produced by unfolding the `Nodes` chunk required by the caller. Since all `Nodes` chunks have been constructed by folding the definition (using `close` ghost statements) a finite number of times, they can be unfolded only a finite number of times. (In mathematics, this is called "induction on the derivation of an inductively defined predicate".) +- Then, it folds up the original `Nodes` chunk again, using a `close` ghost statement. + +### Function `unlink_node` + +Here's the proof of `LinkedList` associated function `unlink_node`: + +```rust +unsafe fn unlink_node(&mut self, mut node: NonNull>) +/*@ +req (*self).head |-> ?head &*& (*self).tail |-> ?tail &*& + Nodes::(?alloc_id, head, None, ?prev_, Some(node), ?nodes1) &*& + alloc_block_in(alloc_id, NonNull_ptr(node) as *u8, Layout::new_::>()) &*& + (*NonNull_ptr(node)).next |-> ?next_ &*& + (*NonNull_ptr(node)).prev |-> prev_ &*& + struct_Node_padding(NonNull_ptr(node)) &*& + Nodes::(alloc_id, next_, Some(node), tail, None, ?nodes2) &*& + (*self).len |-> length(nodes1) + 1 + length(nodes2); +@*/ +/*@ +ens (*self).head |-> ?head1 &*& (*self).tail |-> ?tail1 &*& + Nodes::(alloc_id, head1, None, prev_, next_, nodes1) &*& + alloc_block_in(alloc_id, NonNull_ptr(node) as *u8, Layout::new_::>()) &*& + (*NonNull_ptr(node)).next |-> next_ &*& + (*NonNull_ptr(node)).prev |-> prev_ &*& + struct_Node_padding(NonNull_ptr(node)) &*& + Nodes::(alloc_id, next_, prev_, tail1, None, nodes2) &*& + (*self).len |-> length(nodes1) + length(nodes2); +@*/ +{ + let node = unsafe { node.as_mut() }; // this one is ours now, we can create an &mut. + + // Not creating new mutable (unique!) references overlapping `element`. 
+ match node.prev { + Some(prev) => unsafe { + //@ Nodes_last_lemma(head); + //@ Nodes_split_off_last(head); + //@ assert Nodes(_, head, None, ?pprev, prev_, ?nodes10); + (*prev.as_ptr()).next = node.next; + //@ open Nodes(alloc_id, next_, Some(node), tail, None, nodes2); + //@ close Nodes(alloc_id, next_, Some(node), tail, None, nodes2); + //@ close Nodes::(alloc_id, next_, prev_, prev_, next_, []); + //@ close Nodes::(alloc_id, prev_, pprev, prev_, next_, [prev]); + //@ Nodes_append_(head); + }, + // this node is the head node + None => { + //@ Nodes_last_lemma(head); + //@ open Nodes(alloc_id, head, _, _, _, nodes1); + //@ close Nodes(alloc_id, next_, None, None, next_, []); + self.head = node.next + } + }; + + match node.next { + Some(next) => unsafe { + //@ open Nodes(alloc_id, next_, Some(node), tail, None, nodes2); + (*next.as_ptr()).prev = node.prev; + //@ close Nodes(alloc_id, next_, prev_, tail, None, nodes2); + }, + // this node is the tail node + None => { + //@ open Nodes(alloc_id, next_, Some(node), _, _, nodes2); + //@ close Nodes(alloc_id, next_, prev_, prev_, next_, []); + self.tail = node.prev; + + } + }; + + self.len -= 1; +} +``` + +Since this function is marked as `unsafe`, VeriFast does not attempt to prove its soundness; it only proves that the code complies with the specification provided in the `req` and `ens` clause, i.e. that if started in a state that satisfies the precondition given in the `req` clause, it will not perform Undefined Behavior and any state in which it returns will satisfy the postcondition given in the `ens` clause. This specification is sufficient to prove soundness of functions `CursorMut::remove_current` and `ExtractIf::next`, which call `unlink_node`. 
+ +The precondition asserts that `node` is part of the well-formed linked list between `self.head` and `self.tail`; the postcondition asserts that there is again a well-formed linked list between `self.head` and `self.tail`, but that it no longer contains `node`, and that ownership of the node is now separate from that linked list, so that the node can be deallocated safely. + +The proof invokes `Nodes_last_lemma`, as well as two other lemmas, `Nodes_split_off_last` and `Nodes_append_`. + +### Predicate `>.own` + +Function `LinkedList::new` returns ownership of a `LinkedList` value to the caller. Before we can prove this function's soundness, we must first define what it means to own a `LinkedList` value in separation logic terms. This definition is as follows: + +``` +pred_ctor elem_fbc(t: thread_id_t)(node: NonNull>) = (*NonNull_ptr(node)).element |-> ?elem &*& .own(t, elem); + +pred >.own(t, ll) = + Allocator(t, ll.alloc, ?alloc_id) &*& + Nodes(alloc_id, ll.head, None, ll.tail, None, ?nodes) &*& + ll.len == length(nodes) &*& + foreach(nodes, elem_fbc::(t)); +``` + +This definition says that ownership of a `LinkedList` value `ll` by a thread `t` (which is relevant if type T is not `Send`) consists of ownership of the allocator `ll.alloc`, ownership of the linked list of nodes between `ll.head` and `ll.tail` whose length is given by `ll.len`, and ownership of the elements stored in the nodes. To express the latter, it uses the *predicate constructor* `elem_fbc` and the higher-order predicate [`foreach`](https://github.com/verifast/verifast/blob/37bb28bc5edb0e08e5ee8b63a9afd57e7c3b7ffd/bin/rust/list.rsspec#L174). 
+ +### `.own` predicates: proof obligations + +Since `LinkedList` is Send whenever `T` and `A` are Send, VeriFast checks that the proof proves the following lemma: + +``` +lem LinkedList_send(t1: thread_id_t) + req type_interp::() &*& type_interp::() &*& is_Send(typeid(T)) == true &*& is_Send(typeid(A)) == true &*& LinkedList_own::(?t0, ?v); + ens type_interp::() &*& type_interp::() &*& LinkedList_own::(t1, v); +``` + +In words, this lemma states that if T and A are Send, and `>.own(t0, v)` holds, then one can derive `>.own(t1, v)` for any thread `t1`. `type_interp::()` expresses that the proof obligations for type T have already been checked, so the proof can call lemma [`Send::send`](https://github.com/verifast/verifast/blob/37bb28bc5edb0e08e5ee8b63a9afd57e7c3b7ffd/bin/rust/rust_belt/general.rsspec#L213): + +``` +lem LinkedList_elems_send(t0: thread_id_t, t1: thread_id_t) + req foreach(?nodes, elem_fbc::(t0)) &*& type_interp::() &*& is_Send(typeid(T)) == true; + ens foreach(nodes, elem_fbc::(t1)) &*& type_interp::(); +{ + open foreach(nodes, elem_fbc::(t0)); + match nodes { + nil => {} + cons(n, nodes0) => { + open elem_fbc::(t0)(n); + Send::send(t0, t1, (*NonNull_ptr(n)).element); + close elem_fbc::(t1)(n); + LinkedList_elems_send::(t0, t1); + } + } + close foreach(nodes, elem_fbc::(t1)); +} + +lem LinkedList_send(t1: thread_id_t) + req type_interp::() &*& type_interp::() &*& is_Send(typeid(T)) == true &*& is_Send(typeid(A)) == true &*& LinkedList_own::(?t0, ?v); + ens type_interp::() &*& type_interp::() &*& LinkedList_own::(t1, v); +{ + open >.own(t0, v); + assert Allocator(t0, ?alloc, _); + std::alloc::Allocator_send(t1, alloc); + LinkedList_elems_send::(t0, t1); + close >.own(t1, v); +} +``` + +Furthermore, if a struct S has fields that might need to be dropped, and the struct does not implement Drop, then VeriFast checks that the proof defines a lemma `S_drop` that proves that `.own` implies ownership of the +field values that might need to be dropped. 
This is the case for Node, so the proof defines the following lemma: + +``` +lem Node_drop() + req Node_own::(?_t, ?_v); + ens .own(_t, _v.element); +{ + open >.own(_t, _v); +} +``` + +### Function `LinkedList::new` + +The proof of `LinkedList::new` is straightforward: + +```rust +pub const fn new() -> Self +//@ req thread_token(?t); +//@ ens thread_token(t) &*& >.own(t, result); +//@ on_unwind_ens thread_token(t); +{ + let r = LinkedList { head: None, tail: None, len: 0, alloc: Global, marker: PhantomData }; + //@ close foreach(nil, elem_fbc::(t)); + //@ std::alloc::produce_Allocator_Global(t); + //@ close >.own(t, r); + r +} +``` + +Since this function is not marked as `unsafe`, VeriFast checks that its specification implies semantic well-typedness. Every non-`unsafe` function receives ownership of a `thread_token` for the current thread (which allows it to access non-thread-safe resources owned by the thread). It must also return this token at the end of the call. Furthermore, `new`'s postcondition asserts ownership of the `LinkedList` value returned by the function. + +The proof calls lemma [`std::alloc::produce_Allocator_Global`](https://github.com/verifast/verifast/blob/37bb28bc5edb0e08e5ee8b63a9afd57e7c3b7ffd/bin/rust/std/lib.rsspec#L576) to obtain (shared) ownership of the global allocator. 
+ +### Function `LinkedList::new_in` + +In contrast, function `LinkedList::new_in` requires the caller to provide ownership of the allocator: + +```rust +pub const fn new_in(alloc: A) -> Self +//@ req thread_token(?t) &*& .own(t, alloc); +//@ ens thread_token(t) &*& >.own(t, result); +//@ on_unwind_ens thread_token(t); +{ + let r = LinkedList { head: None, tail: None, len: 0, alloc, marker: PhantomData }; + //@ std::alloc::open_Allocator_own::(alloc); + //@ close foreach(nil, elem_fbc::(t)); + //@ close >.own(t, r); + r +} +``` + +The proof calls lemma [`std::alloc::open_Allocator_own`](https://github.com/verifast/verifast/blob/37bb28bc5edb0e08e5ee8b63a9afd57e7c3b7ffd/bin/rust/std/lib.rsspec#L467) to turn the `.own` chunk into an `Allocator` chunk that specifies the A value's allocator identifier. (We cannot simply use the A value as the allocator identifier since different values may represent the same allocator. For example, if a variable `x` contains an Allocator value, then `x` and `&x` are different values that represent the same allocator.) 
+ +### Function `LinkedList::clear` + +For function `LinkedList::clear`, the specification is fairly straightforward, but the proof is a bit involved: + +```rust +pub fn clear(&mut self) +//@ req thread_token(?t) &*& *self |-> ?self0 &*& >.own(t, self0); +//@ ens thread_token(t) &*& *self |-> ?self1 &*& >.own(t, self1); +//@ on_unwind_ens thread_token(t) &*& *self |-> ?self1 &*& >.own(t, self1); +{ + //@ open_points_to(self); + //@ open >.own(t, self0); + //@ let alloc_ref = precreate_ref(&(*self).alloc); + // We need to drop the nodes while keeping self.alloc + // We can do this by moving (head, tail, len) into a new list that borrows self.alloc + //@ let k = begin_lifetime(); + { + //@ let_lft 'a = k; + //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref); + let ll = LinkedList { + head: self.head.take(), + tail: self.tail.take(), + len: mem_take_usize__VeriFast_wrapper(&mut self.len), + alloc: &self.alloc, + marker: PhantomData, + }; + //@ close >.own(t, ll); + drop/*@::> @*/(ll); + } + //@ end_lifetime(k); + //@ std::alloc::end_ref_Allocator_at_lifetime::(); + //@ close_points_to(self); + //@ close foreach(nil, elem_fbc::(t)); + //@ close >.own(t, *self); +} +``` + +The `open_points_to` ghost command simply turns the points-to chunk for `self` into a separate chunk for each field of `self` (plus a padding chunk). + +Most of the complexity of this proof is due to Rust's rule that mutating memory pointed to by an active shared reference is Undefined Behavior. The code creates a shared reference to `self.alloc`, so VeriFast needs to ensure that field is not mutated while the shared reference is used. It does so by, first of all, interpreting the creation of a shared reference of a place P (e.g. field `self.alloc`) as an operation that yields a place P' that is *different* from P: even though it has the same address, it has different [*provenance*](https://doc.rust-lang.org/std/ptr/index.html#provenance). 
As a result, points-to chunks for P do not provide access to P'. Secondly, VeriFast requires that, *before* a shared reference to a place P is created, some *fraction* of the ownership of P is transferred from P to the new place P'. (To read from a place, fractional ownership suffices; to write to it, full ownership is required.) To enable this, VeriFast requires the user to *pre-create* the shared reference, using lemma [`precreate_ref`](https://github.com/verifast/verifast/blob/37bb28bc5edb0e08e5ee8b63a9afd57e7c3b7ffd/bin/rust/aliasing.rsspec#L67). This lemma produces permission to *initialize* the new reference, which means transferring fractional ownership of the original place to it. + +Fractional ownership of `self.alloc` is passed to `drop`, but `drop` does not return it in its postcondition. To recover full ownership of `self.alloc`, we need to exploit the fact that the shared reference to `self.alloc` is active only for a limited *lifetime*. To do so, we create a lifetime `k` for the shared reference using lemma [`begin_lifetime`](https://github.com/verifast/verifast/blob/37bb28bc5edb0e08e5ee8b63a9afd57e7c3b7ffd/bin/rust/rust_belt/lifetime_logic.rsspec#L67), and then introduce a type-system-level lifetime variable `'a` that denotes it using `let_lft`; this lifetime variable is in scope only until the end of the block in which it is introduced. Afterwards, we can end the lifetime using `end_lifetime`. + +Once the lifetime variable is in scope, we can initialize the shared reference using lemma [`std::alloc::init_ref_Allocator_at_lifetime`](https://github.com/verifast/verifast/blob/37bb28bc5edb0e08e5ee8b63a9afd57e7c3b7ffd/bin/rust/std/lib.rsspec#L517). After the lifetime has ended, we can recover full ownership of `self.alloc` using lemma [`std::alloc::end_ref_Allocator_at_lifetime`](https://github.com/verifast/verifast/blob/37bb28bc5edb0e08e5ee8b63a9afd57e7c3b7ffd/bin/rust/std/lib.rsspec#L522). 
+ +### Function `LinkedList::pop_front_node` + +Now, let's look at the specification for function `LinkedList::pop_front_node`: + +```rust +fn pop_front_node<'a>(&'a mut self) -> Option, &'a A>> +//@ req thread_token(?t) &*& [?qa]lifetime_token('a) &*& full_borrow('a, >.full_borrow_content(t, self)); +//@ ens thread_token(t) &*& [qa]lifetime_token('a) &*& , &'a A>>>.own(t, result); +``` + +`[?qa]lifetime_token('a)` asserts ownership of a *fraction* `qa` (a real number greater than zero and not greater than one) of the *lifetime token* for lifetime `'a`. +Every non-`unsafe` function receives such a lifetime token fraction for each of its lifetime parameters. Since ending a lifetime consumes the full lifetime token, a fractional lifetime token proves that the lifetime is alive, without necessarily giving the owner permission to end the lifetime. + +Function `pop_front_node` takes a mutable reference to a place storing a `LinkedList` value, just like function `clear` does (see above). The precondition for function `clear` asserts ownership of the place the reference points to, as well ownership of the value stored by the place, as does its postcondition. That is, the function receives ownership of the place and the value from the caller, and returns it back to the caller when it returns. This, however, does not work for function `pop_front_node`: it must return ownership of a Box that holds a reference to the LinkedList's allocator. It cannot also *separately* return full ownership of the LinkedList value, which asserts full ownership of its allocator. +In fact, after a client calls this function, the Rust type system will prevent them from using their LinkedList value until the lifetime `'a` has ended. + +To reflect this in separation logic, `pop_front_node` *does not* receive ownership of the place or the LinkedList value. Instead, it receives only a *full borrow* at lifetime `'a` of these resources. 
`full_borrow(k, p)` asserts that the resources asserted by predicate value `p` have been borrowed under lifetime `k`, and asserts the exclusive permission to temporarily obtain ownership of these resources, provided that `k` has not yet ended. `.full_borrow_content(t, l)` is an expression built into VeriFast that denotes a predicate value that asserts resources `*l |-> ?v &*& .own(t, v)`, i.e. ownership of the place at `l` and the value stored at that place (accessible to thread `t`). + +The specification that VeriFast generates to express the soundness of a non-`unsafe` function that takes a mutable reference depends on whether the lifetime of the mutable reference also appears elsewhere in the function's type or not. If it does not, the specification is like that of function `clear` above; otherwise, it is like that of function `pop_front_node`. + +Now, let's look at the proof: + +```rust +fn pop_front_node<'a>(&'a mut self) -> Option, &'a A>> +//@ req thread_token(?t) &*& [?qa]lifetime_token('a) &*& full_borrow('a, >.full_borrow_content(t, self)); +//@ ens thread_token(t) &*& [qa]lifetime_token('a) &*& , &'a A>>>.own(t, result); +{ + // This method takes care not to create mutable references to whole nodes, + // to maintain validity of aliasing pointers into `element`. 
+ //@ let klong = open_full_borrow_strong('a, >.full_borrow_content(t, self), qa); + //@ open LinkedList_full_borrow_content::(t, self)(); + //@ open >.own(t, *self); + //@ assert Nodes(?alloc_id, _, _, _, _, _); + let r; + { + /*@ + pred fbc1() = + (*self).head |-> ?head_ &*& + (*self).tail |-> ?tail_ &*& + Nodes(alloc_id, head_, None, tail_, None, ?nodes) &*& + (*self).len |-> length(nodes) &*& + (*self).marker |-> ?_ &*& + struct_LinkedList_padding(self) &*& + foreach(nodes, elem_fbc::(t)); + @*/ + //@ close fbc1(); + //@ std::alloc::close_Allocator_full_borrow_content_::(t, &(*self).alloc); + //@ close sep(fbc1, std::alloc::Allocator_full_borrow_content_::(t, &(*self).alloc, alloc_id))(); + /*@ + { + pred Ctx() = true; + produce_lem_ptr_chunk full_borrow_convert_strong(Ctx, sep(fbc1, std::alloc::Allocator_full_borrow_content_::(t, &(*self).alloc, alloc_id)), klong, >.full_borrow_content(t, self))() { + open Ctx(); + open sep(fbc1, std::alloc::Allocator_full_borrow_content_::(t, &(*self).alloc, alloc_id))(); + open fbc1(); + std::alloc::open_Allocator_full_borrow_content_::(t, &(*self).alloc, alloc_id); + close >.own(t, *self); + close >.full_borrow_content(t, self)(); + } { + close Ctx(); + close_full_borrow_strong(klong, >.full_borrow_content(t, self), sep(fbc1, std::alloc::Allocator_full_borrow_content_::(t, &(*self).alloc, alloc_id))); + full_borrow_mono(klong, 'a, sep(fbc1, std::alloc::Allocator_full_borrow_content_::(t, &(*self).alloc, alloc_id))); + full_borrow_split('a, fbc1, std::alloc::Allocator_full_borrow_content_::(t, &(*self).alloc, alloc_id)); + } + } + @*/ + //@ open_full_borrow(qa/2, 'a, fbc1); + //@ open fbc1(); + let head = self.head; + let head_ref = &mut self.head; + let tail_ref = &mut self.tail; + let len_ref = &mut self.len; + //@ std::alloc::share_Allocator_full_borrow_content_('a, t, &(*self).alloc, alloc_id); + //@ let alloc_ref1 = precreate_ref(&(*self).alloc); + //@ std::alloc::init_ref_Allocator_share('a, t, alloc_ref1); + //@ 
open_frac_borrow('a, ref_initialized_(alloc_ref1), qa/2); + //@ open [?f]ref_initialized_::(alloc_ref1)(); + let alloc_ref = &self.alloc; + + r = match head { + None => { + //@ close [f]ref_initialized_::(alloc_ref)(); + //@ close_frac_borrow(f, ref_initialized_(alloc_ref)); + + //@ close fbc1(); + //@ close_full_borrow(fbc1); + //@ close std::option::Option_own::, &'a A>>(t, Option::None); + //@ leak full_borrow(_, _); + None + } + Some(node) => unsafe { + //@ std::alloc::close_Allocator_ref::<'a, A>(t, alloc_ref1); + + //@ open Nodes(alloc_id, ?head1, None, ?tail1, None, ?nodes1); + //@ open foreach(nodes1, elem_fbc::(t)); + //@ open elem_fbc::(t)(node); + let node = Box::from_raw_in(node.as_ptr(), &*alloc_ref); + //@ close [f]ref_initialized_::(alloc_ref)(); + //@ close_frac_borrow(f, ref_initialized_(alloc_ref)); + //@ std::boxed::Box_separate_contents(&node_1); + *head_ref = node.next; + //@ std::boxed::Box_unseparate_contents(&node_1); + + //@ open Nodes(_, ?next, _, ?tail, _, _); + match *head_ref { + None => { + *tail_ref = None; + //@ close Nodes(alloc_id, next, None, *tail_ref, None, _); + } + // Not creating new mutable (unique!) references overlapping `element`. + Some(head) => { + (*head.as_ptr()).prev = None; + //@ close Nodes(alloc_id, next, None, (*self).tail, None, _); + } + } + + *len_ref -= 1; + //@ close fbc1(); + //@ close_full_borrow(fbc1); + //@ leak full_borrow(_, _); + //@ let b = node_1; + //@ assert std::boxed::Box_in(t, b, alloc_id, ?v_node); + //@ close >.own(t, v_node); + //@ std::boxed::Box_in_to_own::, &'a A>(node_1); + //@ close std::option::Option_own::, &'a A>>(t, Option::Some(node_1)); + Some(node) + } + }; + } + r +} +``` + +To create the box using [`Box::from_raw_in`](https://github.com/verifast/verifast/blob/37bb28bc5edb0e08e5ee8b63a9afd57e7c3b7ffd/bin/rust/std/lib.rsspec#L662), we need an +`Allocator` chunk for the shared reference to `self.alloc`. 
We obtain it using lemma [`std::alloc::close_Allocator_ref`]
The proof does not use the simpler lemma [`open_full_borrow`](https://github.com/verifast/verifast/blob/37bb28bc5edb0e08e5ee8b63a9afd57e7c3b7ffd/bin/rust/rust_belt/lifetime_logic.rsspec#L131), because after opening `full_borrow(k, p)`, the latter requires the original predicate value `p` to be re-established when closing the full borrow back up. Notice that both lemmas for opening a full borrow force the caller to close it back up before the caller returns by capturing a fraction of the lifetime token. + +### Predicate `>.share` + +Function `LinkedList::len` takes a shared reference to a LinkedList object. It should therefore receive *shared ownership* of the object. The precise meaning of "shared ownership of an object of type T at location `l` accessible to thread `t`", written as `.share(k, t, l)`, depends on the type. For LinkedList, the proof defines it as follows: + +``` +pred Nodes1(alloc_id: any, n: Option>>, prev: Option>>, last: Option>>, next: Option>>, nodes: list>>; prevs: list>>>, nexts: list>>>) = + match nodes { + nil => + n == next &*& last == prev &*& prevs == nil &*& nexts == nil, + cons(n_, nodes0) => + n == Option::Some(n_) &*& + alloc_block_in(alloc_id, NonNull_ptr(n_) as *u8, Layout::new_::>()) &*& struct_Node_padding(NonNull_ptr(n_)) &*& + (*NonNull_ptr(n_)).prev |-> prev &*& + (*NonNull_ptr(n_)).next |-> ?next0 &*& + pointer_within_limits(&(*NonNull_ptr(n_)).element) == true &*& + Nodes1(alloc_id, next0, n, last, next, nodes0, ?prevs0, ?nexts0) &*& + prevs == cons(prev, prevs0) &*& + nexts == cons(next0, nexts0) + }; + +pred_ctor LinkedList_frac_borrow_content(alloc_id: any, l: *LinkedList, head: Option>>, tail: Option>>, nodes: list>>, prevs: list>>>, nexts: list>>>)(;) = + (*l).head |-> head &*& (*l).tail |-> tail &*& Nodes1(alloc_id, head, None, tail, None, nodes, prevs, nexts) &*& + (*l).len |-> length(nodes) &*& struct_LinkedList_padding(l); + +inductive LinkedList_share_info = LinkedList_share_info(alloc_id: any, head: Option>>, tail: 
Option>>, nodes: list>>, prevs: list>>>, nexts: list>>>); + +pred_ctor elem_share(k: lifetime_t, t: thread_id_t)(node: NonNull>) = [_](.share(k, t, &(*NonNull_ptr(node)).element)); + +pred >.share(k, t, l) = + exists(LinkedList_share_info(?alloc_id, ?head, ?tail, ?nodes, ?prevs, ?nexts)) &*& + [_]frac_borrow(k, LinkedList_frac_borrow_content::(alloc_id, l, head, tail, nodes, prevs, nexts)) &*& + [_](.share(k, t, &(*l).alloc)) &*& + [_]foreach(nodes, elem_share::(k, t)); +``` + +`>.share` asserts a *dummy fraction* (an arbitrary fraction) of a *fractured borrow* at lifetime `k` of a predicate value `LinkedList_frac_borrow_content::(alloc_id, l, head, tail, nodes, prevs, nexts)` asserting ownership of the fields of `l` as well as the nodes between `(*l).head` and `(*l).tail`, as well as shared ownership of the allocator +and the elements. A fractured borrow differs in two ways from a full borrow: firstly, unlike a full borrow, fractional ownership of a fractured borrow suffices for opening it; secondly, opening a fractured borrow produces only a fraction of its contents. + +Predicate `Nodes1` is equivalent to predicate `Nodes`, except that `Nodes1`'s `nodes` parameter is an *input parameter*, whereas `Nodes`'s `nodes` parameter is an *output parameter*. This allows `Nodes1` to case-split on `nodes` instead of on whether `n == next`. In turn, this allows us to prove lemma `Nodes1_append`: + +``` +lem Nodes1_append(head: Option>>) + req [?f]Nodes1::(?alloc_id, head, ?prev, ?n1, ?n2, ?nodes1, ?prevs1, ?nexts1) &*& [f]Nodes1::(alloc_id, n2, n1, ?tail, ?next, ?nodes2, ?prevs2, ?nexts2); + ens [f]Nodes1::(alloc_id, head, prev, tail, next, append(nodes1, nodes2), append(prevs1, prevs2), append(nexts1, nexts2)); +``` + +A corresponding lemma for `Nodes` (operating on *fractions* of `Nodes` chunks) does *not* hold, because the two segments being appended might overlap. 
+ +### `>.share` proof obligations + +If a proof defines `.share`, it must also define a lemma `T_share_full` that proves that `[_].share(k, t, l)` can be derived from `full_borrow(k, .full_borrow_content(t, l))`: + +``` +lem LinkedList_share_full(k: lifetime_t, t: thread_id_t, l: *LinkedList) + req type_interp::() &*& type_interp::() &*& atomic_mask(MaskTop) &*& full_borrow(k, LinkedList_full_borrow_content::(t, l)) &*& [?q]lifetime_token(k); + ens type_interp::() &*& type_interp::() &*& atomic_mask(MaskTop) &*& [_]LinkedList_share::(k, t, l) &*& [q]lifetime_token(k); +``` + +Furthermore, it must define a lemma `T_share_mono` that proves that from `[_].share(k, t, l)` one can derive `[_].share(k1, t, l)` where lifetime `k1` is included in `k`: + +``` +lem LinkedList_share_mono(k: lifetime_t, k1: lifetime_t, t: thread_id_t, l: *LinkedList) + req type_interp::() &*& type_interp::() &*& lifetime_inclusion(k1, k) == true &*& [_]LinkedList_share::(k, t, l); + ens type_interp::() &*& type_interp::() &*& [_]LinkedList_share::(k1, t, l); +``` + +Furthermore, it must prove a lemma `init_ref_T` that proves that if someone creates a shared reference to a T, they can initialize it: + +``` +lem init_ref_LinkedList(p: *LinkedList) + req type_interp::() &*& type_interp::() &*& atomic_mask(Nlft) &*& ref_init_perm(p, ?x) &*& [_]LinkedList_share::(?k, ?t, x) &*& [?q]lifetime_token(k); + ens type_interp::() &*& type_interp::() &*& atomic_mask(Nlft) &*& [q]lifetime_token(k) &*& [_]LinkedList_share::(k, t, p) &*& [_]frac_borrow(k, ref_initialized_(p)); +``` + +Finally, if T is Sync, it must prove a lemma `T_sync` that derives `[_].share(k, t1, l)` from `[_].share(k, t0, l)`, for any threads `t0` and `t1`. 
Since `LinkedList` is +sync whenever T and A are Sync, the proof defines the following lemma: + +``` +lem LinkedList_sync(t1: thread_id_t) + req type_interp::() &*& type_interp::() &*& is_Sync(typeid(T)) == true &*& is_Sync(typeid(A)) == true &*& [_]LinkedList_share::(?k, ?t0, ?l); + ens type_interp::() &*& type_interp::() &*& [_]LinkedList_share::(k, t1, l); +``` + +### Function `LinkedList::len` + +The proof of function `LinkedList::len` is as follows: + +```rust +pub fn len(&self) -> usize +//@ req [?f](*self).len |-> ?len; +//@ ens [f](*self).len |-> len &*& result == len; +//@ on_unwind_ens false; +/*@ +safety_proof { + assert [?q]lifetime_token(?k); + open >.share(k, _t, self); + assert [_]exists(LinkedList_share_info(?alloc_id, ?head, ?tail, ?nodes, ?prevs, ?nexts)); + open_frac_borrow(k, LinkedList_frac_borrow_content::(alloc_id, self, head, tail, nodes, prevs, nexts), q); + open [?f]LinkedList_frac_borrow_content::(alloc_id, self, head, tail, nodes, prevs, nexts)(); + call(); + close [f]LinkedList_frac_borrow_content::(alloc_id, self, head, tail, nodes, prevs, nexts)(); + close_frac_borrow(f, LinkedList_frac_borrow_content::(alloc_id, self, head, tail, nodes, prevs, nexts)); +} +@*/ +{ + self.len +} +``` + +Function `len` is not marked as `unsafe`, so VeriFast checks that its specification implies its soundness. However, in this case it cannot prove this automatically. Therefore, a `safety_proof` clause is provided that proves this property. VeriFast verifies the body of the `safety_proof` clause as if it was the body of the following lemma: +``` +lem len_safe(self: &LinkedList) -> usize + req thread_token(?_t) &*& [?_q]lifetime_token(?_k) &*& [_]>.share(_k, _t, self); + ens thread_token(_t) &*& [_q]lifetime_token(_k); +``` +The body of the `safety_proof` clause must call the `len` function exactly once using special syntax `call();`. It simply opens the fractured borrow before the `call()` and +closes it back up afterwards. 
+ +## Caveats + +First of all, this proof was performed with the following VeriFast command-line flags: +- `-skip_specless_fns`: VeriFast ignores the functions that do not have a `req` or `ens` clause. +- `-ignore_unwind_paths`: This proof ignores code that is reachable only when unwinding. +- `-allow_assume`: This proof uses a number of `assume` ghost statements and `assume_correct` clauses. These must be carefully audited. + +Secondly, since VeriFast uses the `rustc` frontend, which assumes a particular target architecture, VeriFast's results hold only for the used Rust toolchain's target architecture. When VeriFast reports "0 errors found" for a Rust program, it always reports the targeted architecture as well (e.g. `0 errors found (2149 statements verified) (target: x86_64-unknown-linux-gnu (LP64))`). + +Thirdly, VeriFast has a number of [known unsoundnesses](https://github.com/verifast/verifast/issues?q=is%3Aissue+is%3Aopen+label%3Aunsoundness) (reasons why VeriFast might in some cases incorrectly accept a program), including the following: +- VeriFast does not yet fully verify compliance with Rust's [pointer aliasing rules](https://doc.rust-lang.org/reference/behavior-considered-undefined.html). +- VeriFast does not yet properly verify compliance of custom type interpretations with Rust's [variance](https://doc.rust-lang.org/reference/subtyping.html#variance) rules. +- The current standard library specifications do not [prevent an allocated memory block from outliving its allocator](https://github.com/verifast/verifast/issues/829). This is sound only if the global allocator is used. + +Fourthly, unlike foundational tools such as [RefinedRust](https://plv.mpi-sws.org/refinedrust/), VeriFast has not itself been verified, so there are undoubtedly also unknown unsoundnesses. 
diff --git a/verifast-proofs/alloc/collections/linked_list.rs/original/lib.rs b/verifast-proofs/alloc/collections/linked_list.rs/original/lib.rs index 4525d53f6de8e..49ca45e8ace76 100644 --- a/verifast-proofs/alloc/collections/linked_list.rs/original/lib.rs +++ b/verifast-proofs/alloc/collections/linked_list.rs/original/lib.rs @@ -1,4 +1,4 @@ -// verifast_options{skip_specless_fns} +// verifast_options{skip_specless_fns ignore_unwind_paths} #![no_std] #![allow(internal_features)] @@ -12,6 +12,7 @@ #![feature(exact_size_is_empty)] #![feature(hasher_prefixfree_extras)] #![feature(box_into_inner)] +#![feature(try_trait_v2)] #![stable(feature = "rust1", since = "1.0.0")] diff --git a/verifast-proofs/alloc/collections/linked_list.rs/verified/lib.rs b/verifast-proofs/alloc/collections/linked_list.rs/verified/lib.rs index 4525d53f6de8e..49ca45e8ace76 100644 --- a/verifast-proofs/alloc/collections/linked_list.rs/verified/lib.rs +++ b/verifast-proofs/alloc/collections/linked_list.rs/verified/lib.rs @@ -1,4 +1,4 @@ -// verifast_options{skip_specless_fns} +// verifast_options{skip_specless_fns ignore_unwind_paths} #![no_std] #![allow(internal_features)] @@ -12,6 +12,7 @@ #![feature(exact_size_is_empty)] #![feature(hasher_prefixfree_extras)] #![feature(box_into_inner)] +#![feature(try_trait_v2)] #![stable(feature = "rust1", since = "1.0.0")] diff --git a/verifast-proofs/alloc/collections/linked_list.rs/verified/linked_list.rs b/verifast-proofs/alloc/collections/linked_list.rs/verified/linked_list.rs index a6aa7d5ec4a00..c6c5e3c718710 100644 --- a/verifast-proofs/alloc/collections/linked_list.rs/verified/linked_list.rs +++ b/verifast-proofs/alloc/collections/linked_list.rs/verified/linked_list.rs @@ -26,6 +26,74 @@ use crate::boxed::Box; //@ use std::alloc::{alloc_block_in, Layout, Global, Allocator}; //@ use std::option::{Option, Option::None, Option::Some}; //@ use std::ptr::{NonNull, NonNull_ptr}; +//@ use std::boxed::Box_in; + +/*@ + +lem foreach_unappend(xs1: list, xs2: 
list, p: pred(a)) + req foreach(append(xs1, xs2), p); + ens foreach(xs1, p) &*& foreach(xs2, p); +{ + match xs1 { + nil => {} + cons(x, xs10) => { + open foreach(_, _); + foreach_unappend(xs10, xs2, p); + } + } + close foreach(xs1, p); +} + + +pred foreachp2(xs: list, p: pred(a; b); ys: list) = + match xs { + nil => ys == nil, + cons(x, xs0) => p(x, ?y) &*& foreachp2(xs0, p, ?ys0) &*& ys == cons(y, ys0) + }; + +lem_auto foreachp2_inv() + req foreachp2::(?xs, ?p, ?ys); + ens foreachp2::(xs, p, ys) &*& length(ys) == length(xs); +{ + open foreachp2(xs, p, ys); + match xs { + nil => {} + cons(x, xs0) => { + foreachp2_inv(); + } + } + close foreachp2(xs, p, ys); +} + +lem foreachp2_append(xs1: list, xs2: list, p: pred(a; b)) + req foreachp2(xs1, p, ?ys1) &*& foreachp2(xs2, p, ?ys2); + ens foreachp2(append(xs1, xs2), p, append(ys1, ys2)); +{ + open foreachp2(xs1, p, ys1); + match xs1 { + nil => {} + cons(x, xs10) => { + foreachp2_append(xs10, xs2, p); + close foreachp2(append(xs1, xs2), p, append(ys1, ys2)); + } + } +} + +lem foreachp2_unappend(xs1: list, xs2: list, p: pred(a; b)) + req foreachp2(append(xs1, xs2), p, ?ys); + ens foreachp2(xs1, p, take(length(xs1), ys)) &*& foreachp2(xs2, p, drop(length(xs1), ys)); +{ + match xs1 { + nil => {} + cons(x, xs10) => { + open foreachp2(_, _, _); + foreachp2_unappend(xs10, xs2, p); + } + } + close foreachp2(xs1, p, take(length(xs1), ys)); +} + +@*/ #[cfg(test)] mod tests; @@ -70,6 +138,34 @@ struct Node { /*@ +pred >.own(t, node) = .own(t, node.element); + +lem Node_drop() + req Node_own::(?_t, ?_v); + ens .own(_t, _v.element); +{ + open >.own(_t, _v); +} + +lem Node_own_mono() + req type_interp::() &*& type_interp::() &*& Node_own::(?t, ?v) &*& is_subtype_of::() == true; + ens type_interp::() &*& type_interp::() &*& Node_own::(t, Node:: { next: upcast(v.next), prev: upcast(v.prev), element: upcast(v.element) }); +{ + open >.own(t, v); + own_mono::(t, v.element); + Node_upcast::(v); + close >.own(t, upcast(v)); +} + +lem 
Node_send(t1: thread_id_t) + req type_interp::() &*& is_Send(typeid(T)) == true &*& Node_own::(?t0, ?v); + ens type_interp::() &*& Node_own::(t1, v); +{ + open >.own(t0, v); + Send::send::(t0, t1, v.element); + close >.own(t1, v); +} + pred Nodes(alloc_id: any, n: Option>>, prev: Option>>, last: Option>>, next: Option>>; nodes: list>>) = if n == next { nodes == [] &*& last == prev @@ -182,6 +278,486 @@ lem Nodes_append_(n: Option>>) } } +lem Nodes_append__(n: Option>>) + req Nodes::(?alloc_id, n, ?prev, ?n1, ?n2, ?nodes1) &*& Nodes::(alloc_id, n2, n1, ?n3, Option::Some(?n4), ?nodes2) &*& (*NonNull_ptr(n4)).prev |-> n3; + ens Nodes::(alloc_id, n, prev, n3, Some(n4), append(nodes1, nodes2)) &*& (*NonNull_ptr(n4)).prev |-> n3; +{ + open Nodes::(alloc_id, n, prev, n1, n2, nodes1); + if n == n2 { + } else { + assert n == Option::Some(?n_); + Nodes_append__((*NonNull_ptr(n_)).next); + close Nodes::(alloc_id, n, prev, n3, Some(n4), append(nodes1, nodes2)); + } +} + +pred_ctor elem_fbc(t: thread_id_t)(node: NonNull>) = (*NonNull_ptr(node)).element |-> ?elem &*& .own(t, elem); + +pred >.own(t, ll) = + Allocator(t, ll.alloc, ?alloc_id) &*& + Nodes(alloc_id, ll.head, None, ll.tail, None, ?nodes) &*& + ll.len == length(nodes) &*& + foreach(nodes, elem_fbc::(t)); + +lem Nodes_upcast() + req Nodes::(?alloc_id, ?head, ?prev, ?tail, ?next, ?nodes) &*& is_subtype_of::() == true; + ens Nodes::(alloc_id, upcast(head), upcast(prev), upcast(tail), upcast(next), upcast(nodes)); +{ + assume(false); +} + +lem LinkedList_own_mono() + req type_interp::() &*& type_interp::() &*& type_interp::() &*& type_interp::() &*& LinkedList_own::(?t, ?v) &*& is_subtype_of::() == true &*& is_subtype_of::() == true; + ens type_interp::() &*& type_interp::() &*& type_interp::() &*& type_interp::() &*& LinkedList_own::(t, LinkedList:: { head: upcast(v.head), tail: upcast(v.tail), len: upcast(v.len), alloc: upcast(v.alloc), marker: upcast(v.marker) }); +{ + open >.own(t, v); + LinkedList_upcast::(v); + 
assert Allocator(t, ?alloc, _); + std::alloc::Allocator_upcast::(alloc); + + assume(false); + + //close >.own(t, upcast(v)); +} + +lem LinkedList_elems_send(t0: thread_id_t, t1: thread_id_t) + req foreach(?nodes, elem_fbc::(t0)) &*& type_interp::() &*& is_Send(typeid(T)) == true; + ens foreach(nodes, elem_fbc::(t1)) &*& type_interp::(); +{ + open foreach(nodes, elem_fbc::(t0)); + match nodes { + nil => {} + cons(n, nodes0) => { + open elem_fbc::(t0)(n); + Send::send(t0, t1, (*NonNull_ptr(n)).element); + close elem_fbc::(t1)(n); + LinkedList_elems_send::(t0, t1); + } + } + close foreach(nodes, elem_fbc::(t1)); +} + +lem LinkedList_send(t1: thread_id_t) + req type_interp::() &*& type_interp::() &*& is_Send(typeid(T)) == true &*& is_Send(typeid(A)) == true &*& LinkedList_own::(?t0, ?v); + ens type_interp::() &*& type_interp::() &*& LinkedList_own::(t1, v); +{ + open >.own(t0, v); + assert Allocator(t0, ?alloc, _); + std::alloc::Allocator_send(t1, alloc); + LinkedList_elems_send::(t0, t1); + close >.own(t1, v); +} + +pred Nodes1(alloc_id: any, n: Option>>, prev: Option>>, last: Option>>, next: Option>>, nodes: list>>; prevs: list>>>, nexts: list>>>) = + match nodes { + nil => + n == next &*& last == prev &*& prevs == nil &*& nexts == nil, + cons(n_, nodes0) => + n == Option::Some(n_) &*& + alloc_block_in(alloc_id, NonNull_ptr(n_) as *u8, Layout::new_::>()) &*& struct_Node_padding(NonNull_ptr(n_)) &*& + (*NonNull_ptr(n_)).prev |-> prev &*& + (*NonNull_ptr(n_)).next |-> ?next0 &*& + pointer_within_limits(&(*NonNull_ptr(n_)).element) == true &*& + Nodes1(alloc_id, next0, n, last, next, nodes0, ?prevs0, ?nexts0) &*& + prevs == cons(prev, prevs0) &*& + nexts == cons(next0, nexts0) + }; + +lem_auto Nodes1_inv() + req [?f]Nodes1::(?alloc_id, ?head, ?prev, ?last, ?next, ?nodes, ?prevs, ?nexts); + ens [f]Nodes1::(alloc_id, head, prev, last, next, nodes, prevs, nexts) &*& length(prevs) == length(nodes) &*& length(nexts) == length(nodes); +{ + open [f]Nodes1::(alloc_id, head, 
prev, last, next, nodes, prevs, nexts); + match nodes { + nil => {} + cons(n, nodes0) => { + Nodes1_inv::(); + } + } + close [f]Nodes1::(alloc_id, head, prev, last, next, nodes, prevs, nexts); +} + +lem Nodes1_append(head: Option>>) + req [?f]Nodes1::(?alloc_id, head, ?prev, ?n1, ?n2, ?nodes1, ?prevs1, ?nexts1) &*& [f]Nodes1::(alloc_id, n2, n1, ?tail, ?next, ?nodes2, ?prevs2, ?nexts2); + ens [f]Nodes1::(alloc_id, head, prev, tail, next, append(nodes1, nodes2), append(prevs1, prevs2), append(nexts1, nexts2)); +{ + open Nodes1::(alloc_id, head, prev, n1, n2, nodes1, prevs1, nexts1); + match nodes1 { + nil => {} + cons(n, nodes10) => { + Nodes1_append::((*NonNull_ptr(n)).next); + close [f]Nodes1::(alloc_id, head, prev, tail, next, append(nodes1, nodes2), append(prevs1, prevs2), append(nexts1, nexts2)); + } + } +} + +lem Nodes1_split( + nodes1: list>>, + nodes2: list>>, + prevs1: list>>>, + prevs2: list>>>, + nexts1: list>>>, + nexts2: list>>>) + req [?f]Nodes1::(?alloc_id, ?head, ?prev, ?tail, ?next, append(nodes1, nodes2), append(prevs1, prevs2), append(nexts1, nexts2)) &*& length(prevs1) == length(nodes1) &*& length(nexts1) == length(nodes1); + ens [f]Nodes1::(alloc_id, head, prev, ?n2, ?n1, nodes1, prevs1, nexts1) &*& [f]Nodes1::(alloc_id, n1, n2, tail, next, nodes2, prevs2, nexts2); +{ + match nodes1 { + nil => { + match prevs1 { nil => {} cons(h, t) => {} } + match nexts1 { nil => {} cons(h, t) => {} } + open [f]Nodes1::(alloc_id, head, prev, tail, next, append(nodes1, nodes2), append(prevs1, prevs2), append(nexts1, nexts2)); + close [f]Nodes1::(alloc_id, head, prev, tail, next, nodes2, prevs2, nexts2); + close [f]Nodes1::(alloc_id, head, prev, prev, head, nil, nil, nil); + } + cons(n, nodes10) => { + match prevs1 { nil => {} cons(h, t) => {} } + match nexts1 { nil => {} cons(h, t) => {} } + open [f]Nodes1::(alloc_id, head, prev, tail, next, append(nodes1, nodes2), append(prevs1, prevs2), append(nexts1, nexts2)); + Nodes1_split(nodes10, nodes2, tail(prevs1), 
prevs2, tail(nexts1), nexts2); + assert [f]Nodes1::(alloc_id, _, _, ?n2, ?n1, nodes10, tail(prevs1), tail(nexts1)); + close [f]Nodes1::(alloc_id, head, prev, n2, n1, nodes1, prevs1, nexts1); + } + } +} + +lem Nodes_to_Nodes1() + req Nodes::(?alloc_id, ?n, ?prev, ?last, ?next, ?nodes); + ens Nodes1::(alloc_id, n, prev, last, next, nodes, _, _); +{ + open Nodes::(alloc_id, n, prev, last, next, nodes); + if n == next { + } else { + Nodes_to_Nodes1::(); + } + close Nodes1::(alloc_id, n, prev, last, next, nodes, _, _); +} + +lem Nodes1_to_Nodes() + req Nodes1::(?alloc_id, ?n, ?prev, ?last, None, ?nodes, _, _); + ens Nodes::(alloc_id, n, prev, last, None, nodes); +{ + open Nodes1::(alloc_id, n, prev, last, None, nodes, _, _); + match nodes { + nil => {} + cons(n_, nodes0) => { + Nodes1_to_Nodes::(); + } + } + close Nodes::(alloc_id, n, prev, last, None, nodes); +} + +pred_ctor LinkedList_frac_borrow_content(alloc_id: any, l: *LinkedList, head: Option>>, tail: Option>>, nodes: list>>, prevs: list>>>, nexts: list>>>)(;) = + (*l).head |-> head &*& (*l).tail |-> tail &*& Nodes1(alloc_id, head, None, tail, None, nodes, prevs, nexts) &*& + (*l).len |-> length(nodes) &*& struct_LinkedList_padding(l); + +inductive LinkedList_share_info = LinkedList_share_info(alloc_id: any, head: Option>>, tail: Option>>, nodes: list>>, prevs: list>>>, nexts: list>>>); + +pred_ctor elem_share(k: lifetime_t, t: thread_id_t)(node: NonNull>) = [_](.share(k, t, &(*NonNull_ptr(node)).element)); + +pred >.share(k, t, l) = + exists(LinkedList_share_info(?alloc_id, ?head, ?tail, ?nodes, ?prevs, ?nexts)) &*& + [_]frac_borrow(k, LinkedList_frac_borrow_content::(alloc_id, l, head, tail, nodes, prevs, nexts)) &*& + [_](.share(k, t, &(*l).alloc)) &*& + [_]foreach(nodes, elem_share::(k, t)); + +fix elem_fbcs(t: thread_id_t, nodes: list>>) -> pred() { + match nodes { + nil => True, + cons(n, nodes0) => sep(.full_borrow_content(t, &(*NonNull_ptr(n)).element), elem_fbcs(t, nodes0)) + } +} + +lem 
LinkedList_share_full(k: lifetime_t, t: thread_id_t, l: *LinkedList) + req type_interp::() &*& type_interp::() &*& atomic_mask(MaskTop) &*& full_borrow(k, LinkedList_full_borrow_content::(t, l)) &*& [?q]lifetime_token(k); + ens type_interp::() &*& type_interp::() &*& atomic_mask(MaskTop) &*& [_]LinkedList_share::(k, t, l) &*& [q]lifetime_token(k); +{ + let klong = open_full_borrow_strong_m(k, LinkedList_full_borrow_content::(t, l), q); + open LinkedList_full_borrow_content::(t, l)(); + open >.own(t, ?self_); + assert Nodes(?alloc_id, ?head, None, ?tail, None, ?nodes); + Nodes_to_Nodes1::(); + assert Nodes1(alloc_id, head, None, tail, None, nodes, ?prevs, ?nexts); + close LinkedList_frac_borrow_content::(alloc_id, l, head, tail, nodes, prevs, nexts)(); + { + lem iter(nodes_left: list>>) + req foreach(nodes_left, elem_fbc::(t)); + ens elem_fbcs::(t, nodes_left)(); + { + open foreach(nodes_left, elem_fbc::(t)); + match nodes_left { + nil => { + close True(); + } + cons(n, nodes_left0) => { + iter(nodes_left0); + open elem_fbc::(t)(n); + close_full_borrow_content::(t, &(*NonNull_ptr(n)).element); + close sep(.full_borrow_content(t, &(*NonNull_ptr(n)).element), elem_fbcs(t, nodes_left0))(); + assert sep(.full_borrow_content(t, &(*NonNull_ptr(n)).element), elem_fbcs(t, nodes_left0)) == elem_fbcs::(t, nodes_left); + } + } + } + iter(nodes); + } + close sep(LinkedList_frac_borrow_content::(alloc_id, l, head, tail, nodes, prevs, nexts), elem_fbcs::(t, nodes))(); + std::alloc::close_Allocator_full_borrow_content_::(t, &(*l).alloc); + { + pred Ctx() = (*l).marker |-> ?_; + close sep(std::alloc::Allocator_full_borrow_content_::(t, &(*l).alloc, alloc_id), sep(LinkedList_frac_borrow_content::(alloc_id, l, head, tail, nodes, prevs, nexts), elem_fbcs::(t, nodes)))(); + produce_lem_ptr_chunk full_borrow_convert_strong(Ctx, sep(std::alloc::Allocator_full_borrow_content_::(t, &(*l).alloc, alloc_id), sep(LinkedList_frac_borrow_content::(alloc_id, l, head, tail, nodes, prevs, nexts), 
elem_fbcs::(t, nodes))), klong, LinkedList_full_borrow_content::(t, l))() { + open Ctx(); + open sep(std::alloc::Allocator_full_borrow_content_::(t, &(*l).alloc, alloc_id), sep(LinkedList_frac_borrow_content::(alloc_id, l, head, tail, nodes, prevs, nexts), elem_fbcs::(t, nodes)))(); + open sep(LinkedList_frac_borrow_content::(alloc_id, l, head, tail, nodes, prevs, nexts), elem_fbcs::(t, nodes))(); + open LinkedList_frac_borrow_content::(alloc_id, l, head, tail, nodes, prevs, nexts)(); + { + lem iter(nodes_left: list>>) + req elem_fbcs::(t, nodes_left)(); + ens foreach(nodes_left, elem_fbc::(t)); + { + match nodes_left { + nil => { open True(); } + cons(n, nodes_left0) => { + open sep(.full_borrow_content(t, &(*NonNull_ptr(n)).element), elem_fbcs::(t, nodes_left0))(); + iter(nodes_left0); + open_full_borrow_content::(t, &(*NonNull_ptr(n)).element); + close elem_fbc::(t)(n); + } + } + close foreach(nodes_left, elem_fbc::(t)); + } + iter(nodes); + } + Nodes1_to_Nodes::(); + std::alloc::open_Allocator_full_borrow_content_::(t, &(*l).alloc, alloc_id); + + close >.own(t, *l); + close LinkedList_full_borrow_content::(t, l)(); + } { + close Ctx(); + close_full_borrow_strong_m(klong, LinkedList_full_borrow_content::(t, l), sep(std::alloc::Allocator_full_borrow_content_::(t, &(*l).alloc, alloc_id), sep(LinkedList_frac_borrow_content::(alloc_id, l, head, tail, nodes, prevs, nexts), elem_fbcs::(t, nodes)))); + } + } + full_borrow_mono(klong, k, sep(std::alloc::Allocator_full_borrow_content_::(t, &(*l).alloc, alloc_id), sep(LinkedList_frac_borrow_content::(alloc_id, l, head, tail, nodes, prevs, nexts), elem_fbcs::(t, nodes)))); + full_borrow_split_m(k, std::alloc::Allocator_full_borrow_content_::(t, &(*l).alloc, alloc_id), sep(LinkedList_frac_borrow_content::(alloc_id, l, head, tail, nodes, prevs, nexts), elem_fbcs::(t, nodes))); + full_borrow_split_m(k, LinkedList_frac_borrow_content::(alloc_id, l, head, tail, nodes, prevs, nexts), elem_fbcs::(t, nodes)); + 
full_borrow_into_frac_m(k, LinkedList_frac_borrow_content::(alloc_id, l, head, tail, nodes, prevs, nexts)); + { + lem iter(nodes_left: list>>) + req type_interp::() &*& atomic_mask(MaskTop) &*& [q]lifetime_token(k) &*& full_borrow(k, elem_fbcs::(t, nodes_left)); + ens type_interp::() &*& atomic_mask(MaskTop) &*& [q]lifetime_token(k) &*& foreach(nodes_left, elem_share::(k, t)); + { + match nodes_left { + nil => { + leak full_borrow(_, _); + close foreach(nodes_left, elem_share::(k, t)); + } + cons(n, nodes_left0) => { + full_borrow_split_m(k, .full_borrow_content(t, &(*NonNull_ptr(n)).element), elem_fbcs::(t, nodes_left0)); + share_full_borrow_m::(k, t, &(*NonNull_ptr(n)).element); + iter(nodes_left0); + close elem_share::(k, t)(n); + close foreach(nodes_left, elem_share::(k, t)); + } + } + } + iter(nodes); + } + close exists(LinkedList_share_info(alloc_id, head, tail, nodes, prevs, nexts)); + std::alloc::share_Allocator_full_borrow_content_m::(k, t, &(*l).alloc, alloc_id); + std::alloc::close_Allocator_share(k, t, &(*l).alloc); + leak foreach(nodes, _); + close >.share()(k, t, l); + leak >.share(k, t, l); +} + +lem LinkedList_share_mono(k: lifetime_t, k1: lifetime_t, t: thread_id_t, l: *LinkedList) + req type_interp::() &*& type_interp::() &*& lifetime_inclusion(k1, k) == true &*& [_]LinkedList_share::(k, t, l); + ens type_interp::() &*& type_interp::() &*& [_]LinkedList_share::(k1, t, l); +{ + open >.share(k, t, l); + assert [_]exists(LinkedList_share_info(?alloc_id, ?head, ?tail, ?nodes, ?prevs, ?nexts)); + close exists(LinkedList_share_info(alloc_id, head, tail, nodes, prevs, nexts)); + frac_borrow_mono(k, k1, LinkedList_frac_borrow_content::(alloc_id, l, head, tail, nodes, prevs, nexts)); + { + lem iter() + req [_]foreach(?nodes1, elem_share::(k, t)) &*& type_interp::(); + ens foreach(nodes1, elem_share::(k1, t)) &*& type_interp::(); + { + open foreach(nodes1, elem_share::(k, t)); + match nodes1 { + nil => { + open foreach(nil, _); + } + cons(n, nodes0) => { + 
open elem_share::(k, t)(n); + share_mono::(k, k1, t, &(*NonNull_ptr(n)).element); + close elem_share::(k1, t)(n); + iter(); + } + } + close foreach(nodes1, elem_share::(k1, t)); + } + iter(); + } + share_mono::(k, k1, t, &(*l).alloc); + leak foreach(nodes, _); + close >.share(k1, t, l); + leak >.share(k1, t, l); +} + +pred ref_end_token_Option_NonNull(p: *Option>, x: *Option>, f: real, v: Option>) = + [f/2](*x |-> v) &*& ref_end_token(p, x, f/2) &*& (if v != Option::None { ref_end_token(std::option::Option_Some_0_ptr(p), std::option::Option_Some_0_ptr(x), f/2) } else { true }); + +lem init_ref_Option_NonNull(p: *Option>) + req ref_init_perm(p, ?x) &*& [?f](*x |-> ?v); + ens [f/2](*p |-> v) &*& ref_initialized(p) &*& ref_end_token_Option_NonNull(p, x, f, v); +{ + match v { + Option::None => { + std::option::init_ref_Option_None(p, 1/2); + } + Option::Some(v0) => { + std::option::open_points_to_Option_Some(x); + std::option::open_ref_init_perm_Option_Some(p); + std::ptr::init_ref_NonNull(std::option::Option_Some_0_ptr(p), 1/2); + std::option::init_ref_Option_Some(p, 1/2); + std::option::close_points_to_Option_Some(p); + std::option::close_points_to_Option_Some(x); + } + } + close ref_end_token_Option_NonNull(p, x, f, v); +} + +lem end_ref_Option_NonNull(p: *Option>) + req ref_initialized(p) &*& ref_end_token_Option_NonNull(p, ?x, ?f, ?v) &*& [f/2](*p |-> v); + ens [f](*x |-> v); +{ + open ref_end_token_Option_NonNull(p, x, f, v); + match v { + Option::None => { + std::option::end_ref_Option_None(p); + } + Option::Some(v0) => { + std::option::open_points_to_Option_Some(p); + std::option::end_ref_Option_Some(p); + std::ptr::end_ref_NonNull(std::option::Option_Some_0_ptr(p)); + std::option::close_points_to_Option_Some(x); + } + } +} + +lem init_ref_LinkedList(p: *LinkedList) + req type_interp::() &*& type_interp::() &*& atomic_mask(Nlft) &*& ref_init_perm(p, ?x) &*& [_]LinkedList_share::(?k, ?t, x) &*& [?q]lifetime_token(k); + ens type_interp::() &*& type_interp::() &*& 
atomic_mask(Nlft) &*& [q]lifetime_token(k) &*& [_]LinkedList_share::(k, t, p) &*& [_]frac_borrow(k, ref_initialized_(p)); +{ + open >.share(k, t, x); + assert [_]exists(LinkedList_share_info(?alloc_id, ?head, ?tail, ?nodes, ?prevs, ?nexts)); + close exists(LinkedList_share_info(alloc_id, head, tail, nodes, prevs, nexts)); + open_ref_init_perm_LinkedList(p); + leak ref_init_perm(&(*p).marker, _); + + { + pred R(;) = ref_initialized(&(*p).head) &*& ref_initialized(&(*p).tail) &*& ref_initialized(&(*p).len) &*& ref_padding_initialized(p); + { + let klong = open_frac_borrow_strong_m(k, LinkedList_frac_borrow_content::(alloc_id, x, head, tail, nodes, prevs, nexts), q); + open [?f]LinkedList_frac_borrow_content::(alloc_id, x, head, tail, nodes, prevs, nexts)(); + init_ref_Option_NonNull(&(*p).head); + init_ref_Option_NonNull(&(*p).tail); + init_ref_padding_LinkedList(p, 1/2); + std::num::init_ref_usize(&(*p).len, 1/2); + close [f/2]LinkedList_frac_borrow_content::(alloc_id, p, head, tail, nodes, prevs, nexts)(); + close scaledp(f/2, LinkedList_frac_borrow_content::(alloc_id, p, head, tail, nodes, prevs, nexts))(); + close R(); + { + pred Ctx() = + ref_end_token_Option_NonNull(&(*p).head, &(*x).head, f, head) &*& + ref_end_token_Option_NonNull(&(*p).tail, &(*x).tail, f, tail) &*& + [f/2]Nodes1(alloc_id, head, None, tail, None, nodes, prevs, nexts) &*& + [f/2](*x).len |-> length(nodes) &*& ref_end_token(&(*p).len, &(*x).len, f/2) &*& + [f/2]struct_LinkedList_padding(x) &*& ref_padding_end_token(p, x, f/2); + close Ctx(); + close sep(scaledp(f/2, LinkedList_frac_borrow_content::(alloc_id, p, head, tail, nodes, prevs, nexts)), R)(); + produce_lem_ptr_chunk frac_borrow_convert_strong(Ctx, sep(scaledp(f/2, LinkedList_frac_borrow_content::(alloc_id, p, head, tail, nodes, prevs, nexts)), R), klong, f, LinkedList_frac_borrow_content::(alloc_id, x, head, tail, nodes, prevs, nexts))() { + open Ctx(); + open sep(scaledp(f/2, LinkedList_frac_borrow_content::(alloc_id, p, head, tail, 
nodes, prevs, nexts)), R)(); + open scaledp(f/2, LinkedList_frac_borrow_content::(alloc_id, p, head, tail, nodes, prevs, nexts))(); + open [f/2]LinkedList_frac_borrow_content::(alloc_id, p, head, tail, nodes, prevs, nexts)(); + open R(); + end_ref_Option_NonNull(&(*p).head); + end_ref_Option_NonNull(&(*p).tail); + std::num::end_ref_usize(&(*p).len); + end_ref_padding_LinkedList(p); + close [f]LinkedList_frac_borrow_content::(alloc_id, x, head, tail, nodes, prevs, nexts)(); + } { + close_frac_borrow_strong_m(); + full_borrow_mono(klong, k, sep(scaledp(f/2, LinkedList_frac_borrow_content::(alloc_id, p, head, tail, nodes, prevs, nexts)), R)); + full_borrow_split_m(k, scaledp(f/2, LinkedList_frac_borrow_content::(alloc_id, p, head, tail, nodes, prevs, nexts)), R); + full_borrow_into_frac_m(k, scaledp(f/2, LinkedList_frac_borrow_content::(alloc_id, p, head, tail, nodes, prevs, nexts))); + frac_borrow_implies_scaled(k, f/2, LinkedList_frac_borrow_content::(alloc_id, p, head, tail, nodes, prevs, nexts)); + full_borrow_into_frac_m(k, R); + } + } + } + + init_ref_share_m::(k, t, &(*p).alloc); + close >.share(k, t, p); + leak >.share(k, t, p); + + frac_borrow_sep(k, ref_initialized_(&(*p).alloc), R); + produce_lem_ptr_chunk implies_frac(sep_(ref_initialized_(&(*p).alloc), R), ref_initialized_(p))() { + open [?f]sep_(ref_initialized_(&(*p).alloc), R)(); + open [f]ref_initialized_::(&(*p).alloc)(); + open [f]R(); + std::marker::close_ref_initialized_PhantomData(&(*p).marker, f); + close_ref_initialized_LinkedList(p); + close [f]ref_initialized_::>(p)(); + } { + produce_lem_ptr_chunk implies_frac(ref_initialized_(p), sep_(ref_initialized_(&(*p).alloc), R))() { + open [?f]ref_initialized_::>(p)(); + open_ref_initialized_LinkedList(p); + close [f]ref_initialized_::(&(*p).alloc)(); + close [f]R(); + close [f]sep_(ref_initialized_(&(*p).alloc), R)(); + leak [f]ref_initialized(&(*p).marker); + } { + frac_borrow_implies(k, sep_(ref_initialized_(&(*p).alloc), R), ref_initialized_(p)); 
+ } + } + } +} + +lem LinkedList_sync(t1: thread_id_t) + req type_interp::() &*& type_interp::() &*& is_Sync(typeid(T)) == true &*& is_Sync(typeid(A)) == true &*& [_]LinkedList_share::(?k, ?t0, ?l); + ens type_interp::() &*& type_interp::() &*& [_]LinkedList_share::(k, t1, l); +{ + open >.share(k, t0, l); + assert [_]exists(LinkedList_share_info(?alloc_id, ?head, ?tail, ?nodes_, ?prevs, ?nexts)); + close exists(LinkedList_share_info(alloc_id, head, tail, nodes_, prevs, nexts)); + { + lem iter() + req [_]foreach(?nodes, elem_share::(k, t0)) &*& type_interp::(); + ens foreach(nodes, elem_share::(k, t1)) &*& type_interp::(); + { + open foreach(nodes, elem_share::(k, t0)); + match nodes { + nil => {} + cons(n, nodes0) => { + open elem_share::(k, t0)(n); + Sync::sync::(k, t0, t1, &(*NonNull_ptr(n)).element); + close elem_share::(k, t1)(n); + iter(); + } + } + close foreach(nodes, elem_share::(k, t1)); + } + iter(); + } + Sync::sync::(k, t0, t1, &(*l).alloc); + leak foreach(_, _); + close >.share(k, t1, l); + leak >.share(k, t1, l); +} + @*/ /// An iterator over the elements of a `LinkedList`. 
@@ -197,6 +773,90 @@ pub struct Iter<'a, T: 'a> { marker: PhantomData<&'a Node>, } +/*@ + +inductive Iter_info = Iter_info( + alloc_id: any, + head0: Option>>, + prev: Option>>, + next: Option>>, + tail0: Option>>, + nodes_before: list>>, + nodes: list>>, + nodes_after: list>>, + prevs_before: list>>>, + prevs: list>>>, + prevs_after: list>>>, + nexts_before: list>>>, + nexts: list>>>, + nexts_after: list>>>); + +pred_ctor Iter_frac_borrow_content( + alloc_id: any, + head0: Option>>, + head: Option>>, + prev: Option>>, + tail: Option>>, + next: Option>>, + tail0: Option>>, + nodes_before: list>>, + nodes: list>>, + nodes_after: list>>, + prevs_before: list>>>, + prevs: list>>>, + prevs_after: list>>>, + nexts_before: list>>>, + nexts: list>>>, + nexts_after: list>>> + )(;) = + Nodes1(alloc_id, head0, None, prev, head, nodes_before, prevs_before, nexts_before) &*& + Nodes1(alloc_id, head, prev, tail, next, nodes, prevs, nexts) &*& + Nodes1(alloc_id, next, tail, tail0, None, nodes_after, prevs_after, nexts_after); + +pred<'a, T> >.own(t, iter) = + exists(Iter_info(?alloc_id, ?head0, ?prev, ?next, ?tail0, ?nodes_before, ?nodes, ?nodes_after, ?prevs_before, ?prevs, ?prevs_after, ?nexts_before, ?nexts, ?nexts_after)) &*& + [_]frac_borrow('a, Iter_frac_borrow_content::(alloc_id, head0, iter.head, prev, iter.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, nexts_before, nexts, nexts_after)) &*& + iter.len == length(nodes) &*& + [_]foreach(nodes, elem_share::('a, t)); + +lem Iter_own_mono<'a0, 'a1, T0, T1>() + req type_interp::() &*& type_interp::() &*& Iter_own::<'a0, T0>(?t, ?v) &*& lifetime_inclusion('a1, 'a0) == true &*& is_subtype_of::() == true; + ens type_interp::() &*& type_interp::() &*& Iter_own::<'a1, T1>(t, Iter::<'a1, T1> { head: upcast(v.head), tail: upcast(v.tail), len: upcast(v.len), marker: upcast(v.marker) }); +{ + assume(false); +} + +lem Iter_send<'a, T>(t1: thread_id_t) + req type_interp::() &*& Iter_own::<'a, 
T>(?t0, ?v) &*& is_Sync(typeid(T)) == true; + ens type_interp::() &*& Iter_own::<'a, T>(t1, v); +{ + open >.own(t0, v); + let k = 'a; + { + lem iter() + req [_]foreach(?nodes, elem_share::(k, t0)) &*& type_interp::(); + ens foreach(nodes, elem_share::(k, t1)) &*& type_interp::(); + { + open foreach(nodes, elem_share::(k, t0)); + match nodes { + nil => {} + cons(n, nodes0) => { + open elem_share::(k, t0)(n); + Sync::sync::(k, t0, t1, &(*NonNull_ptr(n)).element); + close elem_share::(k, t1)(n); + iter(); + } + } + close foreach(nodes, elem_share::(k, t1)); + } + iter(); + } + leak foreach(_, _); + close >.own(t1, v); +} + +@*/ + #[stable(feature = "collection_debug", since = "1.17.0")] impl fmt::Debug for Iter<'_, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -273,12 +933,27 @@ impl fmt::Debug for IntoIter { } impl Node { - fn new(element: T) -> Self { + fn new(element: T) -> Self + //@ req thread_token(?t); + //@ ens thread_token(t) &*& result == Node:: { next: None, prev: None, element }; + //@ safety_proof { let r = call(); close Node_own::(_, r); } + { Node { next: None, prev: None, element } } - fn into_element(self: Box) -> T { - self.element + fn into_element(self: Box) -> T + //@ req thread_token(?t) &*& Box_in::, A>(t, self, ?alloc_id, ?node); + //@ ens thread_token(t) &*& result == node.element; + //@ on_unwind_ens thread_token(t); + /*@ + safety_proof { + std::boxed::own_to_Box_in(self); + call(); + open Node_own::(_, _); + } + @*/ + { + Box::into_inner(self).element } } @@ -290,43 +965,157 @@ impl LinkedList { /// `node` must point to a valid node that was boxed and leaked using the list's allocator. /// This method takes ownership of the node, so the pointer should not be used again. 
#[inline] - unsafe fn push_front_node(&mut self, node: NonNull>) { + unsafe fn push_front_node(&mut self, node: NonNull>) + /*@ + req thread_token(?t) &*& *self |-> ?self0 &*& Allocator(t, self0.alloc, ?alloc_id) &*& Nodes(alloc_id, self0.head, None, self0.tail, None, ?nodes) &*& + length(nodes) == self0.len &*& foreach(nodes, elem_fbc::(t)) &*& + *NonNull_ptr(node) |-> ?n &*& .own(t, n.element) &*& alloc_block_in(alloc_id, NonNull_ptr(node) as *u8, Layout::new_::>()); + @*/ + //@ ens thread_token(t) &*& *self |-> ?self1 &*& >.own(t, self1); + { // This method takes care not to create mutable references to whole nodes, // to maintain validity of aliasing pointers into `element`. unsafe { + //@ open_points_to(self); (*node.as_ptr()).next = self.head; (*node.as_ptr()).prev = None; let node = Some(node); + //@ open Nodes(_, _, _, _, _, _); match self.head { - None => self.tail = node, + None => { + //@ close Nodes::(alloc_id, None, None, None, None, nil); + self.tail = node + } // Not creating new mutable (unique!) references overlapping `element`. - Some(head) => (*head.as_ptr()).prev = node, + Some(head) => { + (*head.as_ptr()).prev = node; + //@ close Nodes(alloc_id, self0.head, node_1, self0.tail, None, nodes); + } } self.head = node; + //@ assume(self0.len < usize::MAX); self.len += 1; + //@ close_points_to(self); + //@ assert *self |-> ?self1; + //@ points_to_limits(&(*NonNull_ptr(node)).element); + //@ close Nodes(alloc_id, node_1, None, self1.tail, None, cons(node, nodes)); + //@ close elem_fbc::(t)(node); + //@ close foreach(cons(node, nodes), elem_fbc::(t)); + //@ close >.own(t, self1); } } /// Removes and returns the node at the front of the list. 
#[inline] - fn pop_front_node(&mut self) -> Option, &A>> { + fn pop_front_node<'a>(&'a mut self) -> Option, &'a A>> + //@ req thread_token(?t) &*& [?qa]lifetime_token('a) &*& full_borrow('a, >.full_borrow_content(t, self)); + //@ ens thread_token(t) &*& [qa]lifetime_token('a) &*& , &'a A>>>.own(t, result); + { // This method takes care not to create mutable references to whole nodes, // to maintain validity of aliasing pointers into `element`. - self.head.map(|node| unsafe { - let node = Box::from_raw_in(node.as_ptr(), &self.alloc); - self.head = node.next; - - match self.head { - None => self.tail = None, - // Not creating new mutable (unique!) references overlapping `element`. - Some(head) => (*head.as_ptr()).prev = None, + //@ let klong = open_full_borrow_strong('a, >.full_borrow_content(t, self), qa); + //@ open LinkedList_full_borrow_content::(t, self)(); + //@ open >.own(t, *self); + //@ assert Nodes(?alloc_id, _, _, _, _, _); + let r; + { + /*@ + pred fbc1() = + (*self).head |-> ?head_ &*& + (*self).tail |-> ?tail_ &*& + Nodes(alloc_id, head_, None, tail_, None, ?nodes) &*& + (*self).len |-> length(nodes) &*& + (*self).marker |-> ?_ &*& + struct_LinkedList_padding(self) &*& + foreach(nodes, elem_fbc::(t)); + @*/ + //@ close fbc1(); + //@ std::alloc::close_Allocator_full_borrow_content_::(t, &(*self).alloc); + //@ close sep(fbc1, std::alloc::Allocator_full_borrow_content_::(t, &(*self).alloc, alloc_id))(); + /*@ + { + pred Ctx() = true; + produce_lem_ptr_chunk full_borrow_convert_strong(Ctx, sep(fbc1, std::alloc::Allocator_full_borrow_content_::(t, &(*self).alloc, alloc_id)), klong, >.full_borrow_content(t, self))() { + open Ctx(); + open sep(fbc1, std::alloc::Allocator_full_borrow_content_::(t, &(*self).alloc, alloc_id))(); + open fbc1(); + std::alloc::open_Allocator_full_borrow_content_::(t, &(*self).alloc, alloc_id); + close >.own(t, *self); + close >.full_borrow_content(t, self)(); + } { + close Ctx(); + close_full_borrow_strong(klong, 
>.full_borrow_content(t, self), sep(fbc1, std::alloc::Allocator_full_borrow_content_::(t, &(*self).alloc, alloc_id))); + full_borrow_mono(klong, 'a, sep(fbc1, std::alloc::Allocator_full_borrow_content_::(t, &(*self).alloc, alloc_id))); + full_borrow_split('a, fbc1, std::alloc::Allocator_full_borrow_content_::(t, &(*self).alloc, alloc_id)); + } } + @*/ + //@ open_full_borrow(qa/2, 'a, fbc1); + //@ open fbc1(); + let head = self.head; + let head_ref = &mut self.head; + let tail_ref = &mut self.tail; + let len_ref = &mut self.len; + //@ std::alloc::share_Allocator_full_borrow_content_('a, t, &(*self).alloc, alloc_id); + //@ let alloc_ref1 = precreate_ref(&(*self).alloc); + //@ std::alloc::init_ref_Allocator_share('a, t, alloc_ref1); + //@ open_frac_borrow('a, ref_initialized_(alloc_ref1), qa/2); + //@ open [?f]ref_initialized_::(alloc_ref1)(); + let alloc_ref = &self.alloc; + + r = match head { + None => { + //@ close [f]ref_initialized_::(alloc_ref)(); + //@ close_frac_borrow(f, ref_initialized_(alloc_ref)); + + //@ close fbc1(); + //@ close_full_borrow(fbc1); + //@ close std::option::Option_own::, &'a A>>(t, Option::None); + //@ leak full_borrow(_, _); + None + } + Some(node) => unsafe { + //@ std::alloc::close_Allocator_ref::<'a, A>(t, alloc_ref1); + + //@ open Nodes(alloc_id, ?head1, None, ?tail1, None, ?nodes1); + //@ open foreach(nodes1, elem_fbc::(t)); + //@ open elem_fbc::(t)(node); + let node = Box::from_raw_in(node.as_ptr(), &*alloc_ref); + //@ close [f]ref_initialized_::(alloc_ref)(); + //@ close_frac_borrow(f, ref_initialized_(alloc_ref)); + //@ std::boxed::Box_separate_contents(&node_1); + *head_ref = node.next; + //@ std::boxed::Box_unseparate_contents(&node_1); + + //@ open Nodes(_, ?next, _, ?tail, _, _); + match *head_ref { + None => { + *tail_ref = None; + //@ close Nodes(alloc_id, next, None, *tail_ref, None, _); + } + // Not creating new mutable (unique!) references overlapping `element`. 
+ Some(head) => { + (*head.as_ptr()).prev = None; + //@ close Nodes(alloc_id, next, None, (*self).tail, None, _); + } + } - self.len -= 1; - node - }) + *len_ref -= 1; + //@ close fbc1(); + //@ close_full_borrow(fbc1); + //@ leak full_borrow(_, _); + //@ let b = node_1; + //@ assert std::boxed::Box_in(t, b, alloc_id, ?v_node); + //@ close >.own(t, v_node); + //@ std::boxed::Box_in_to_own::, &'a A>(node_1); + //@ close std::option::Option_own::, &'a A>>(t, Option::Some(node_1)); + Some(node) + } + }; + } + r } /// Adds the given node to the back of the list. @@ -549,43 +1338,134 @@ impl LinkedList { ) -> Self where A: Clone, + /*@ + req thread_token(?t) &*& + (*self).alloc |-> ?alloc0 &*& Allocator::(t, alloc0, ?alloc_id) &*& + (*self).head |-> ?head0 &*& + (*self).tail |-> ?tail0 &*& + struct_LinkedList_padding(self) &*& + Nodes::(alloc_id, head0, None, split_node, ?next0, ?nodes1) &*& + Nodes::(alloc_id, next0, split_node, tail0, None, ?nodes2) &*& + foreach(nodes1, elem_fbc::(t)) &*& + foreach(nodes2, elem_fbc::(t)) &*& + (*self).len |-> length(nodes1) + length(nodes2) &*& + length(nodes1) == at; + @*/ + /*@ + ens thread_token(t) &*& + *self |-> ?self1 &*& >.own(t, self1) &*& + >.own(t, result); + @*/ { // The split node is the new tail node of the first part and owns // the head of the second part. 
if let Some(mut split_node) = split_node { + //@ Nodes_last_lemma(head0); + //@ Nodes_split_off_last(head0); + //@ assert Nodes(alloc_id, head0, None, ?prev0, split_node, ?nodes10); let second_part_head; let second_part_tail; unsafe { second_part_head = split_node.as_mut().next.take(); } + //@ open Nodes(_, next0, split_node, tail0, None, nodes2); if let Some(mut head) = second_part_head { unsafe { head.as_mut().prev = None; } second_part_tail = self.tail; + //@ close Nodes::(alloc_id, next0, None, tail0, None, nodes2); } else { + //@ close Nodes::(alloc_id, next0, None, None, None, nodes2); second_part_tail = None; } - let second_part = LinkedList { - head: second_part_head, - tail: second_part_tail, - len: self.len - at, - alloc: self.alloc.clone(), - marker: PhantomData, - }; + //@ let alloc_ref = precreate_ref(&(*self).alloc); + //@ let k = begin_lifetime(); + let second_part; + { + //@ let_lft 'a = k; + //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref); + second_part = LinkedList { + head: second_part_head, + tail: second_part_tail, + len: self.len - at, + alloc: Allocator_clone__VeriFast_wrapper(&self.alloc), + marker: PhantomData, + }; + } + //@ end_lifetime(k); + //@ std::alloc::end_ref_Allocator_at_lifetime::(); // Fix the tail ptr of the first part self.tail = Some(split_node); self.len = at; - + //@ close Nodes::(alloc_id, None, split_node, split_node, None, []); + //@ close Nodes::(alloc_id, split_node, prev0, split_node, None, [split_node_1]); + //@ Nodes_append(head0); + //@ close_points_to(self); + //@ close >.own(t, *self); + //@ assert Nodes(_, _, _, _, _, nodes2); + //@ assert length(nodes2) == second_part.len; + //@ close >.own(t, second_part); second_part } else { - mem::replace(self, LinkedList::new_in(self.alloc.clone())) + //@ let alloc_ref = precreate_ref(&(*self).alloc); + //@ let k = begin_lifetime(); + let self_ref = &mut *(self as *mut LinkedList); + let alloc; + { + //@ let_lft 'a = k; + //@ 
std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref); + alloc = Allocator_clone__VeriFast_wrapper(&self.alloc); + } + //@ end_lifetime(k); + //@ std::alloc::end_ref_Allocator_at_lifetime::(); + + //@ std::alloc::Allocator_to_own::(alloc); + //@ close_points_to(self); + //@ Nodes_append(head0); + //@ foreach_append(nodes1, nodes2); + //@ close >.own(t, *self); + let r = LinkedList::new_in(alloc); + mem::replace(self_ref, r) } } } +unsafe fn Box_into_inner_with_ref_Allocator__VeriFast_wrapper<'a, T, A: Allocator>(b: Box) -> T +//@ req thread_token(?t) &*& Box_in::(t, b, ?alloc_id, ?value); +//@ ens thread_token(t) &*& result == value; +//@ on_unwind_ens thread_token(t); +//@ assume_correct +{ + *b +} + +unsafe fn Allocator_clone__VeriFast_wrapper<'a, A: Allocator + Clone>(alloc: &'a A) -> A +//@ req thread_token(?t) &*& Allocator::<&'a A>(t, alloc, ?alloc_id); +//@ ens thread_token(t) &*& Allocator::(t, result, alloc_id); +//@ assume_correct +{ + alloc.clone() +} + +fn mem_take_usize__VeriFast_wrapper(dest: &mut usize) -> usize +//@ req *dest |-> ?v; +//@ ens *dest |-> 0 &*& result == v; +//@ assume_correct +{ + mem::take(dest) +} + +unsafe fn NonNull_from_ref_mut__VeriFast_wrapper(value: &mut T) -> NonNull +//@ req true; +//@ ens NonNull_ptr(result) == value; +//@ assume_correct +{ + NonNull::from(value) +} + #[stable(feature = "rust1", since = "1.0.0")] impl Default for LinkedList { /// Creates an empty `LinkedList`. 
@@ -609,8 +1489,16 @@ impl LinkedList { #[rustc_const_stable(feature = "const_linked_list_new", since = "1.39.0")] #[stable(feature = "rust1", since = "1.0.0")] #[must_use] - pub const fn new() -> Self { - LinkedList { head: None, tail: None, len: 0, alloc: Global, marker: PhantomData } + pub const fn new() -> Self + //@ req thread_token(?t); + //@ ens thread_token(t) &*& >.own(t, result); + //@ on_unwind_ens thread_token(t); + { + let r = LinkedList { head: None, tail: None, len: 0, alloc: Global, marker: PhantomData }; + //@ close foreach(nil, elem_fbc::(t)); + //@ std::alloc::produce_Allocator_Global(t); + //@ close >.own(t, r); + r } /// Moves all elements from `other` to the end of the list. @@ -678,8 +1566,16 @@ impl LinkedList { /// ``` #[inline] #[unstable(feature = "allocator_api", issue = "32838")] - pub const fn new_in(alloc: A) -> Self { - LinkedList { head: None, tail: None, len: 0, alloc, marker: PhantomData } + pub const fn new_in(alloc: A) -> Self + //@ req thread_token(?t) &*& .own(t, alloc); + //@ ens thread_token(t) &*& >.own(t, result); + //@ on_unwind_ens thread_token(t); + { + let r = LinkedList { head: None, tail: None, len: 0, alloc, marker: PhantomData }; + //@ std::alloc::open_Allocator_own::(alloc); + //@ close foreach(nil, elem_fbc::(t)); + //@ close >.own(t, r); + r } /// Provides a forward iterator. 
/// @@ -702,8 +1598,42 @@ impl LinkedList { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn iter(&self) -> Iter<'_, T> { - Iter { head: self.head, tail: self.tail, len: self.len, marker: PhantomData } + pub fn iter<'a>(&'a self) -> Iter<'a, T> + //@ req thread_token(?t) &*& [?q]lifetime_token('a) &*& [_](>.share('a, t, self)); + //@ ens thread_token(t) &*& [q]lifetime_token('a) &*& >.own(t, result); + //@ on_unwind_ens thread_token(t) &*& [q]lifetime_token('a); + { + //@ open >.share('a, t, self); + //@ assert [_]exists(LinkedList_share_info(?alloc_id, ?head, ?tail, ?nodes, ?prevs, ?nexts)); + //@ open_frac_borrow('a, LinkedList_frac_borrow_content::(alloc_id, self, head, tail, nodes, prevs, nexts), q); + //@ open [?qll]LinkedList_frac_borrow_content::(alloc_id, self, head, tail, nodes, prevs, nexts)(); + let r = Iter { head: self.head, tail: self.tail, len: self.len, marker: PhantomData }; + //@ close [qll]LinkedList_frac_borrow_content::(alloc_id, self, head, tail, nodes, prevs, nexts)(); + //@ close_frac_borrow(qll, LinkedList_frac_borrow_content::(alloc_id, self, head, tail, nodes, prevs, nexts)); + //@ close exists(Iter_info(alloc_id, head, None, None, tail, nil, nodes, nil, nil, prevs, nil, nil, nexts, nil)); + /*@ + { + pred R(;) = (*self).head |-> head &*& (*self).tail |-> tail &*& (*self).len |-> length(nodes) &*& struct_LinkedList_padding(self); + produce_lem_ptr_chunk implies_frac(LinkedList_frac_borrow_content::(alloc_id, self, head, tail, nodes, prevs, nexts), sep_(R, Iter_frac_borrow_content(alloc_id, head, head, None, tail, None, tail, nil, nodes, nil, nil, prevs, nil, nil, nexts, nil)))() { + open [?f]LinkedList_frac_borrow_content::(alloc_id, self, head, tail, nodes, prevs, nexts)(); + close [f]R(); + close [f]Iter_frac_borrow_content::(alloc_id, head, head, None, tail, None, tail, nil, nodes, nil, nil, prevs, nil, nil, nexts, nil)(); + close [f]sep_(R, Iter_frac_borrow_content::(alloc_id, head, head, None, tail, None, 
tail, nil, nodes, nil, nil, prevs, nil, nil, nexts, nil))(); + } { + produce_lem_ptr_chunk implies_frac(sep_(R, Iter_frac_borrow_content(alloc_id, head, head, None, tail, None, tail, nil, nodes, nil, nil, prevs, nil, nil, nexts, nil)), LinkedList_frac_borrow_content::(alloc_id, self, head, tail, nodes, prevs, nexts))() { + open [?f]sep_(R, Iter_frac_borrow_content::(alloc_id, head, head, None, tail, None, tail, nil, nodes, nil, nil, prevs, nil, nil, nexts, nil))(); + open [f]R(); + open [f]Iter_frac_borrow_content::(alloc_id, head, head, None, tail, None, tail, nil, nodes, nil, nil, prevs, nil, nil, nexts, nil)(); + close [f]LinkedList_frac_borrow_content::(alloc_id, self, head, tail, nodes, prevs, nexts)(); + } { + frac_borrow_implies('a, LinkedList_frac_borrow_content::(alloc_id, self, head, tail, nodes, prevs, nexts), sep_(R, Iter_frac_borrow_content(alloc_id, head, head, None, tail, None, tail, nil, nodes, nil, nil, prevs, nil, nil, nexts, nil))); + } + } + frac_borrow_split('a, R, Iter_frac_borrow_content(alloc_id, head, head, None, tail, None, tail, nil, nodes, nil, nil, prevs, nil, nil, nexts, nil)); + } + @*/ + //@ close >.own(t, r); + r } /// Provides a forward iterator with mutable references. 
@@ -731,7 +1661,11 @@ impl LinkedList { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn iter_mut(&mut self) -> IterMut<'_, T> { + pub fn iter_mut(&mut self) -> IterMut<'_, T> + //@ req *self |-> ?ll; + //@ ens *self |-> ll &*& result.head == ll.head &*& result.tail == ll.tail &*& result.len == ll.len; + //@ safety_proof { assume(false); } + { IterMut { head: self.head, tail: self.tail, len: self.len, marker: PhantomData } } @@ -751,8 +1685,58 @@ impl LinkedList { #[inline] #[must_use] #[unstable(feature = "linked_list_cursors", issue = "58533")] - pub fn cursor_front_mut(&mut self) -> CursorMut<'_, T, A> { - CursorMut { index: 0, current: self.head, list: self } + pub fn cursor_front_mut<'a>(&'a mut self) -> CursorMut<'a, T, A> + //@ req thread_token(?t) &*& [?q]lifetime_token('a) &*& full_borrow('a, >.full_borrow_content(t, self)); + //@ ens thread_token(t) &*& [q]lifetime_token('a) &*& >.own(t, result); + //@ on_unwind_ens thread_token(t) &*& [q]lifetime_token('a); + { + //@ let t1 = if is_Send(typeid(T)) && is_Send(typeid(A)) { default_tid } else { t }; + //@ let klong = open_full_borrow_strong('a, >.full_borrow_content(t, self), q); + //@ open >.full_borrow_content(t, self)(); + //@ let current = (*self).head; + let r = CursorMut { index: 0, current: self.head, list: self }; + //@ open >.own(t, *self); + //@ assert (*self).alloc |-> ?alloc &*& Allocator::(t, alloc, ?alloc_id); + //@ close Nodes(alloc_id, current, None, None, current, nil); + //@ close foreach(nil, elem_fbc::(t1)); + //@ let ghost_cell_id = create_ghost_cell::>>>>(pair(0, current)); + //@ produce_type_interp::(); + //@ produce_type_interp::(); + /*@ + if t1 != t { + std::alloc::Allocator_send(t1, alloc); + LinkedList_elems_send::(t, t1); + } + @*/ + //@ close CursorMut_fbc::(t1, ghost_cell_id, self)(); + /*@ + { + pred Ctx() = type_interp::() &*& type_interp::(); + produce_lem_ptr_chunk full_borrow_convert_strong(Ctx, CursorMut_fbc::(t1, ghost_cell_id, self), klong, 
>.full_borrow_content(t, self))() { + open Ctx(); + open CursorMut_fbc::(t1, ghost_cell_id, self)(); + assert [1/2]ghost_cell(ghost_cell_id, pair(?index1, ?current1)); + let head = (*self).head; + assert Nodes(_, head, None, ?before_current, current1, ?nodes1) &*& Nodes(_, current1, before_current, ?tail, None, ?nodes2); + Nodes_append::((*self).head); + foreach_append(nodes1, nodes2); + if t1 != t { + assert Allocator(_, ?alloc1, _); + std::alloc::Allocator_send(t, alloc1); + LinkedList_elems_send::(t1, t); + } + close >.own(t, *self); + close >.full_borrow_content(t, self)(); + leak [1/2]ghost_cell(_, _) &*& type_interp::() &*& type_interp::(); + } { + close Ctx(); + close_full_borrow_strong(klong, >.full_borrow_content(t, self), CursorMut_fbc::(t1, ghost_cell_id, self)); + full_borrow_mono(klong, 'a, CursorMut_fbc::(t1, ghost_cell_id, self)); + } + } + @*/ + //@ close >.own(t, r); + r } /// Provides a cursor at the back element. @@ -771,8 +1755,71 @@ impl LinkedList { #[inline] #[must_use] #[unstable(feature = "linked_list_cursors", issue = "58533")] - pub fn cursor_back_mut(&mut self) -> CursorMut<'_, T, A> { - CursorMut { index: self.len.checked_sub(1).unwrap_or(0), current: self.tail, list: self } + pub fn cursor_back_mut<'a>(&'a mut self) -> CursorMut<'a, T, A> + //@ req thread_token(?t) &*& [?q]lifetime_token('a) &*& full_borrow('a, >.full_borrow_content(t, self)); + //@ ens thread_token(t) &*& [q]lifetime_token('a) &*& >.own(t, result); + //@ on_unwind_ens thread_token(t) &*& [q]lifetime_token('a); + { + //@ let t1 = if is_Send(typeid(T)) && is_Send(typeid(A)) { default_tid } else { t }; + //@ let klong = open_full_borrow_strong('a, >.full_borrow_content(t, self), q); + //@ open >.full_borrow_content(t, self)(); + let r = CursorMut { index: self.len.checked_sub(1).unwrap_or(0), current: self.tail, list: self }; + //@ open >.own(t, *self); + //@ assert (*self).alloc |-> ?alloc &*& Allocator::(t, alloc, ?alloc_id) &*& (*self).head |-> ?head_; + //@ 
Nodes_last_lemma(head_); + //@ produce_type_interp::(); + //@ produce_type_interp::(); + /*@ + if t1 != t { + std::alloc::Allocator_send::(t1, alloc); + LinkedList_elems_send::(t, t1); + } + @*/ + /*@ + match r.current { + Option::None => { + close Nodes::(alloc_id, None, None, None, None, []); + close foreach([], elem_fbc::(t1)); + } + Option::Some(tail_) => { + Nodes_split_off_last(head_); + assert Nodes(alloc_id, head_, _, _, _, ?nodes0); + close Nodes::(alloc_id, None, r.current, r.current, None, []); + close Nodes::(alloc_id, r.current, _, r.current, None, [tail_]); + foreach_unappend(nodes0, [tail_], elem_fbc::(t1)); + } + } + @*/ + //@ let ghost_cell_id = create_ghost_cell::>>>>(pair(r.index, r.current)); + //@ close CursorMut_fbc::(t1, ghost_cell_id, self)(); + /*@ + { + pred Ctx() = type_interp::() &*& type_interp::(); + produce_lem_ptr_chunk full_borrow_convert_strong(Ctx, CursorMut_fbc::(t1, ghost_cell_id, self), klong, >.full_borrow_content(t, self))() { + open Ctx(); + open CursorMut_fbc::(t1, ghost_cell_id, self)(); + assert [1/2]ghost_cell(ghost_cell_id, pair(?index1, ?current1)); + let head = (*self).head; + assert Nodes(_, head, None, ?before_current, current1, ?nodes1) &*& Nodes(_, current1, before_current, ?tail, None, ?nodes2); + Nodes_append::((*self).head); + foreach_append(nodes1, nodes2); + if t1 != t { + assert Allocator(_, ?alloc1, _); + std::alloc::Allocator_send(t, alloc1); + LinkedList_elems_send::(t1, t); + } + close >.own(t, *self); + close >.full_borrow_content(t, self)(); + leak [1/2]ghost_cell(_, _) &*& type_interp::() &*& type_interp::(); + } { + close Ctx(); + close_full_borrow_strong(klong, >.full_borrow_content(t, self), CursorMut_fbc::(t1, ghost_cell_id, self)); + full_borrow_mono(klong, 'a, CursorMut_fbc::(t1, ghost_cell_id, self)); + } + } + @*/ + //@ close >.own(t, r); + r } /// Returns `true` if the `LinkedList` is empty. 
@@ -821,7 +1868,23 @@ impl LinkedList { #[must_use] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_confusables("length", "size")] - pub fn len(&self) -> usize { + pub fn len(&self) -> usize + //@ req [?f](*self).len |-> ?len; + //@ ens [f](*self).len |-> len &*& result == len; + //@ on_unwind_ens false; + /*@ + safety_proof { + assert [?q]lifetime_token(?k); + open >.share(k, _t, self); + assert [_]exists(LinkedList_share_info(?alloc_id, ?head, ?tail, ?nodes, ?prevs, ?nexts)); + open_frac_borrow(k, LinkedList_frac_borrow_content::(alloc_id, self, head, tail, nodes, prevs, nexts), q); + open [?f]LinkedList_frac_borrow_content::(alloc_id, self, head, tail, nodes, prevs, nexts)(); + call(); + close [f]LinkedList_frac_borrow_content::(alloc_id, self, head, tail, nodes, prevs, nexts)(); + close_frac_borrow(f, LinkedList_frac_borrow_content::(alloc_id, self, head, tail, nodes, prevs, nexts)); + } + @*/ + { self.len } @@ -847,16 +1910,35 @@ impl LinkedList { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn clear(&mut self) { + pub fn clear(&mut self) + //@ req thread_token(?t) &*& *self |-> ?self0 &*& >.own(t, self0); + //@ ens thread_token(t) &*& *self |-> ?self1 &*& >.own(t, self1); + //@ on_unwind_ens thread_token(t) &*& *self |-> ?self1 &*& >.own(t, self1); + { + //@ open_points_to(self); + //@ open >.own(t, self0); + //@ let alloc_ref = precreate_ref(&(*self).alloc); // We need to drop the nodes while keeping self.alloc // We can do this by moving (head, tail, len) into a new list that borrows self.alloc - drop(LinkedList { - head: self.head.take(), - tail: self.tail.take(), - len: mem::take(&mut self.len), - alloc: &self.alloc, - marker: PhantomData, - }); + //@ let k = begin_lifetime(); + { + //@ let_lft 'a = k; + //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref); + let ll = LinkedList { + head: self.head.take(), + tail: self.tail.take(), + len: mem_take_usize__VeriFast_wrapper(&mut self.len), + alloc: &self.alloc, + 
marker: PhantomData, + }; + //@ close >.own(t, ll); + drop/*@::> @*/(ll); + } + //@ end_lifetime(k); + //@ std::alloc::end_ref_Allocator_at_lifetime::(); + //@ close_points_to(self); + //@ close foreach(nil, elem_fbc::(t)); + //@ close >.own(t, *self); } /// Returns `true` if the `LinkedList` contains an element equal to the @@ -1008,11 +2090,31 @@ impl LinkedList { /// assert_eq!(dl.front().unwrap(), &1); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn push_front(&mut self, elt: T) { - let node = Box::new_in(Node::new(elt), &self.alloc); - let node_ptr = NonNull::from(Box::leak(node)); - // SAFETY: node_ptr is a unique pointer to a node we boxed with self.alloc and leaked + pub fn push_front(&mut self, elt: T) + //@ req thread_token(?t) &*& *self |-> ?ll0 &*& >.own(t, ll0) &*& .own(t, elt); + //@ ens thread_token(t) &*& *self |-> ?ll1 &*& >.own(t, ll1); + //@ on_unwind_ens thread_token(t) &*& *self |-> ?ll1 &*& >.own(t, ll1); + { unsafe { + //@ open_points_to(self); + //@ open >.own(t, ll0); + //@ let alloc_ref = precreate_ref(&(*self).alloc); + let node0 = Node::new(elt); + //@ let k = begin_lifetime(); + let node_ptr; + { + //@ let_lft 'a = k; + //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref); + //@ close drop_perm::>(false, True, t, node0); + let node = Box::new_in(node0, &self.alloc); + //@ open drop_perm::>(false, True, t, node0); + node_ptr = NonNull_from_ref_mut__VeriFast_wrapper(Box::leak(node)); + } + //@ end_lifetime(k); + //@ std::alloc::end_ref_Allocator_at_lifetime::(); + + //@ close_points_to(self); + // SAFETY: node_ptr is a unique pointer to a node we boxed with self.alloc and leaked self.push_front_node(node_ptr); } } @@ -1116,33 +2218,162 @@ impl LinkedList { pub fn split_off(&mut self, at: usize) -> LinkedList where A: Clone, + //@ req thread_token(?t) &*& *self |-> ?self0 &*& >.own(t, self0); + //@ ens thread_token(t) &*& *self |-> ?self1 &*& >.own(t, self1) &*& >.own(t, result); + //@ on_unwind_ens 
thread_token(t) &*& *self |-> ?self1 &*& >.own(t, self1); { - let len = self.len(); + //@ open >.own(t, self0); + //@ open_points_to(self); + //@ let self_ref = precreate_ref(self); + //@ open_ref_init_perm_LinkedList(self_ref); + //@ init_ref_Option_NonNull(&(*self_ref).head); + //@ init_ref_Option_NonNull(&(*self_ref).tail); + //@ std::num::init_ref_usize(&(*self_ref).len, 1/2); + //@ init_ref_padding_LinkedList(self_ref, 1/2); + //@ let k1 = begin_lifetime(); + //@ std::marker::close_ref_initialized_PhantomData(&(*self_ref).marker, 1); + let len; + { + //@ let_lft 'a = k1; + //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(&(*self_ref).alloc); + //@ close_ref_initialized_LinkedList(self_ref); + len = self.len(); + } + //@ end_lifetime(k1); + //@ open_ref_initialized_LinkedList(self_ref); + //@ end_ref_Option_NonNull(&(*self_ref).head); + //@ end_ref_Option_NonNull(&(*self_ref).tail); + //@ std::alloc::end_ref_Allocator_at_lifetime::(); + //@ std::num::end_ref_usize(&(*self_ref).len); + //@ end_ref_padding_LinkedList(self_ref); + //@ leak ref_init_perm(&(*self_ref).marker, _) &*& ref_initialized(&(*self_ref).marker); + + //@ assert (*self).head |-> ?head &*& (*self).tail |-> ?tail &*& Nodes(?alloc_id, _, _, _, _, _); assert!(at <= len, "Cannot split off at a nonexistent index"); if at == 0 { - return mem::replace(self, Self::new_in(self.alloc.clone())); + //@ let alloc_ref = precreate_ref(&(*self).alloc); + let alloc1; + let self_ref1 = &mut *self as *mut LinkedList; + //@ let k = begin_lifetime(); + unsafe { + //@ let_lft 'a = k; + //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref); + alloc1 = Allocator_clone__VeriFast_wrapper(&self.alloc); + } + //@ end_lifetime(k); + //@ std::alloc::end_ref_Allocator_at_lifetime::(); + + //@ std::alloc::Allocator_to_own::(alloc1); + //@ close_points_to(self); + //@ assert *self |-> ?self1; + //@ close >.own(t, self1); + return mem::replace(unsafe { &mut *self_ref1 }, Self::new_in(alloc1)); } else if at 
== len { - return Self::new_in(self.alloc.clone()); + //@ let alloc_ref = precreate_ref(&(*self).alloc); + //@ let k = begin_lifetime(); + let alloc2; + unsafe { + //@ let_lft 'a = k; + //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref); + alloc2 = Allocator_clone__VeriFast_wrapper(&self.alloc); + } + //@ end_lifetime(k); + //@ std::alloc::end_ref_Allocator_at_lifetime::(); + + //@ std::alloc::Allocator_to_own::(alloc2); + //@ close_points_to(self); + //@ assert *self |-> ?self1; + //@ close >.own(t, self1); + return Self::new_in(alloc2); } + //@ assert Nodes(alloc_id, head, None, tail, None, ?nodes); + // Below, we iterate towards the `i-1`th node, either from the start or the end, // depending on which would be faster. let split_node = if at - 1 <= len - 1 - (at - 1) { - let mut iter = self.iter_mut(); + //@ close_points_to(self); + let mut iter1 = self.iter_mut(); + //@ open_points_to(self); + //@ close_points_to(&iter1); // instead of skipping using .skip() (which creates a new struct), // we skip manually so we can access the head field without // depending on implementation details of Skip - for _ in 0..at - 1 { - iter.next(); + let r1 = 0..at - 1; + let mut r1_iter = r1.into_iter(); + //@ close Nodes(alloc_id, head, None, None, head, []); + loop { // for _ in 0..at - 1 { + /*@ + inv r1_iter |-> ?r1_iter_ &*& r1_iter_.end == at - 1 &*& + iter1 |-> ?iter1_ &*& iter1_.tail == tail &*& iter1_.len == self0.len - r1_iter_.start &*& + Nodes(alloc_id, head, None, ?prev, iter1_.head, ?nodes1) &*& Nodes(alloc_id, iter1_.head, prev, tail, None, ?nodes2) &*& + length(nodes1) == r1_iter_.start &*& append(nodes1, nodes2) == nodes &*& r1_iter_.start < at &*& at < length(nodes); + @*/ + let r1_iter_ref = &mut r1_iter; + match r1_iter_ref.next() { + None => { + break; + } + Some(_) => { + iter1.next(); + //@ assert iter1_.head == Option::Some(?iter_); + //@ open Nodes(alloc_id, ?next, iter1_.head, tail, None, tail(nodes2)); + //@ close Nodes(alloc_id, next, 
iter1_.head, tail, None, tail(nodes2)); + //@ close Nodes(alloc_id, next, Some(iter_), Some(iter_), next, []); + //@ close Nodes(alloc_id, Some(iter_), prev, Some(iter_), next, [iter_]); + //@ Nodes_append_(head); + //@ append_assoc(nodes1, [iter_], tail(nodes2)); + } + } } - iter.head + //@ open Nodes(alloc_id, iter1_.head, prev, tail, None, nodes2); + //@ assert iter1_.head == Option::Some(?iter_); + //@ open Nodes(alloc_id, ?next, iter1_.head, tail, None, tail(nodes2)); + //@ close Nodes(alloc_id, next, iter1_.head, tail, None, tail(nodes2)); + //@ close Nodes::(alloc_id, next, iter1_.head, iter1_.head, next, []); + //@ close Nodes::(alloc_id, iter1_.head, prev, iter1_.head, next, [iter_]); + //@ Nodes_append_(head); + //@ append_assoc(nodes1, [iter_], tail(nodes2)); + //@ foreach_unappend(append(nodes1, [iter_]), tail(nodes2), elem_fbc::(t)); + let r = iter1.head; + //@ open_points_to(&iter1); + r } else { // better off starting from the end - let mut iter = self.iter_mut(); - for _ in 0..len - 1 - (at - 1) { - iter.next_back(); + //@ close_points_to(self); + let mut iter2 = self.iter_mut(); + //@ open_points_to(self); + //@ close_points_to(&iter2); + + let r2 = 0..len - 1 - (at - 1); + let mut r2_iter = r2.into_iter(); + //@ close Nodes(alloc_id, None, self0.tail, self0.tail, None, []); + loop { // for _ in 0..len - 1 - (at - 1) { + /*@ + inv r2_iter |-> ?r2_iter_ &*& r2_iter_.end == len - 1 - (at - 1) &*& + iter2 |-> ?iter2_ &*& iter2_.head == head &*& iter2_.len == len - r2_iter_.start &*& + Nodes(alloc_id, head, None, iter2_.tail, ?next, ?nodes1) &*& Nodes(alloc_id, next, iter2_.tail, tail, None, ?nodes2) &*& + length(nodes2) == r2_iter_.start &*& append(nodes1, nodes2) == nodes &*& r2_iter_.start <= len - at; + @*/ + let r2_iter_ref = &mut r2_iter; + match r2_iter_ref.next() { + None => { + break; + } + Some(_) => { + //@ let old_iter = iter2_.tail; + iter2.next_back(); + //@ assert iter2_.tail == Option::Some(?iter_); + //@ assert Nodes(_, head, _, ?last, 
old_iter, ?nodes11); + //@ close Nodes(alloc_id, old_iter, last, tail, None, _); + //@ append_assoc(nodes11, [iter_], nodes2); + } + } } - iter.tail + //@ foreach_unappend(nodes1, nodes2, elem_fbc::(t)); + let r = iter2.tail; + //@ open_points_to(&iter2); + r }; unsafe { self.split_off_after_node(split_node, at) } } @@ -1317,15 +2548,53 @@ impl LinkedList { /// assert_eq!(odds.into_iter().collect::>(), vec![1, 3, 5, 9, 11, 13, 15]); /// ``` #[stable(feature = "extract_if", since = "1.87.0")] - pub fn extract_if(&mut self, filter: F) -> ExtractIf<'_, T, F, A> + pub fn extract_if<'a, F>(&'a mut self, filter: F) -> ExtractIf<'a, T, F, A> where F: FnMut(&mut T) -> bool, + //@ req thread_token(?t) &*& [?q]lifetime_token('a) &*& full_borrow('a, >.full_borrow_content(t, self)) &*& .own(t, filter); + //@ ens thread_token(t) &*& [q]lifetime_token('a) &*& >.own(t, result); + //@ on_unwind_ens thread_token(t) &*& [q]lifetime_token('a); { + //@ let klong = open_full_borrow_strong('a, >.full_borrow_content(t, self), q); + //@ open >.full_borrow_content(t, self)(); + //@ open >.own(t, ?self0); + //@ open_points_to(self); + //@ assert Nodes(?alloc_id, _, _, _, _, _); + // avoid borrow issues. 
let it = self.head; let old_len = self.len; - ExtractIf { list: self, it, pred: filter, idx: 0, old_len } + //@ let ghost_cell_id = create_ghost_cell::>>, usize>>(pair(it, 0)); + + /*@ + { + pred Ctx() = struct_LinkedList_padding(self); + produce_lem_ptr_chunk full_borrow_convert_strong(Ctx, ExtractIf_fbc(t, self, ghost_cell_id, self0.len), klong, >.full_borrow_content(t, self))() { + open Ctx(); + open ExtractIf_fbc::(t, self, ghost_cell_id, self0.len)(); + let head1 = (*self).head; + assert Nodes(_, head1, None, _, _, ?nodes1) &*& Nodes(_, _, _, _, _, ?nodes2); + Nodes_append((*self).head); + foreach_append(nodes1, nodes2); + close >.own(t, *self); + close_points_to(self); + close >.full_borrow_content(t, self)(); + leak [1/2]ghost_cell(_, _); + } { + close Ctx(); + close Nodes::(alloc_id, it, None, None, it, []); + close foreach([], elem_fbc::(t)); + close ExtractIf_fbc::(t, self, ghost_cell_id, self0.len)(); + close_full_borrow_strong(klong, >.full_borrow_content(t, self), ExtractIf_fbc(t, self, ghost_cell_id, self0.len)); + full_borrow_mono(klong, 'a, ExtractIf_fbc(t, self, ghost_cell_id, self0.len)); + } + } + @*/ + + let r = ExtractIf { list: self, it, pred: filter, idx: 0, old_len }; + //@ close >.own(t, r); + r } } @@ -1354,17 +2623,136 @@ impl<'a, T> Iterator for Iter<'a, T> { type Item = &'a T; #[inline] - fn next(&mut self) -> Option<&'a T> { + fn next(&mut self) -> Option<&'a T> + //@ req thread_token(?t) &*& [?q]lifetime_token('a) &*& *self |-> ?self0 &*& >.own(t, self0); + //@ ens thread_token(t) &*& [q]lifetime_token('a) &*& *self |-> ?self1 &*& >.own(t, self1) &*& >.own(t, result); + //@ on_unwind_ens thread_token(t) &*& [q]lifetime_token('a) &*& *self |-> ?self1 &*& >.own(t, self1); + { if self.len == 0 { + //@ close std::option::Option_own::<&'a T>(t, Option::None); None } else { - self.head.map(|node| unsafe { - // Need an unbound lifetime to get 'a - let node = &*node.as_ptr(); - self.len -= 1; - self.head = node.next; - &node.element - }) + 
//@ open >.own(t, self0); + //@ open_points_to(self); + //@ close_points_to(self); + let head = self.head; + let head_ref = &mut self.head; + let len_ref = &mut self.len; + match head { //.map(|node| unsafe { + None => { + //@ close >.own(t, self0); + //@ close std::option::Option_own::<&'a T>(t, Option::None); + None + } + Some(node) => unsafe { + //@ open exists(Iter_info(?alloc_id, ?head0, ?prev, ?next, ?tail0, ?nodes_before, ?nodes, ?nodes_after, ?prevs_before, ?prevs, ?prevs_after, ?nexts_before, ?nexts, ?nexts_after)); + //@ open_frac_borrow('a, Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, nexts_before, nexts, nexts_after), q); + //@ open [?f0]Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, nexts_before, nexts, nexts_after)(); + //@ open Nodes1::(alloc_id, self0.head, prev, self0.tail, next, nodes, prevs, nexts); + //@ close [f0]Nodes1::(alloc_id, self0.head, prev, self0.tail, next, nodes, prevs, nexts); + //@ close [f0]Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, nexts_before, nexts, nexts_after)(); + //@ close_frac_borrow(f0, Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, nexts_before, nexts, nexts_after)); + //@ let node_ref = precreate_ref(NonNull_ptr(node)); + //@ open_ref_init_perm_Node(node_ref); + //@ produce_type_interp::(); + //@ open foreach(_, _); + //@ open elem_share::('a, t)(node); + //@ init_ref_share::('a, t, &(*node_ref).element); + //@ frac_borrow_sep('a, Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, 
nexts_before, nexts, nexts_after), ref_initialized_(&(*node_ref).element)); + //@ let k1 = open_frac_borrow_strong('a, sep_(Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, nexts_before, nexts, nexts_after), ref_initialized_(&(*node_ref).element)), q/2); + //@ open [?f]sep_(Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, nexts_before, nexts, nexts_after), ref_initialized_(&(*node_ref).element))(); + //@ open [f]Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, nexts_before, nexts, nexts_after)(); + //@ open [f]ref_initialized_::(&(*node_ref).element)(); + //@ open Nodes1::(alloc_id, self0.head, prev, self0.tail, next, nodes, prevs, nexts); + // Need an unbound lifetime to get 'a + //@ init_ref_Option_NonNull(&(*node_ref).prev); + //@ init_ref_Option_NonNull(&(*node_ref).next); + //@ init_ref_padding_Node(node_ref, 1/2); + //@ close [1 - f]ref_padding_initialized_::>(node_ref)(); // We want to close only fraction f of ref_initialized(node_ref) + //@ close_ref_initialized_Node(node_ref); + //@ open [1 - f]ref_padding_initialized_::>(node_ref)(); + //@ let node0 = node; + //@ note(pointer_within_limits(&(*ref_origin(NonNull_ptr(node0))).element)); + let node = &*node.as_ptr(); + let len = *len_ref; + //@ produce_limits(len); + *len_ref = len - 1; + //@ let nodeNext = (*node_ref).next; + *head_ref = (*node).next; + //@ let self1 = *self; + /*@ + { + pred Ctx() = true; + pred Q() = + [f]Nodes1(alloc_id, head0, None, prev, self0.head, nodes_before, prevs_before, nexts_before) &*& + [f]alloc_block_in(alloc_id, NonNull_ptr(node0) as *u8, Layout::new_::>()) &*& + [f/2]struct_Node_padding(node_ref) &*& + [f/2]struct_Node_padding(NonNull_ptr(node0)) &*& + 
[f/2](*node_ref).prev |-> prev &*& ref_end_token_Option_NonNull(&(*node_ref).prev, &(*NonNull_ptr(node0)).prev, f, prev) &*& + [f/2](*node_ref).next |-> nodeNext &*& ref_end_token_Option_NonNull(&(*node_ref).next, &(*NonNull_ptr(node0)).next, f, nodeNext) &*& + [f]ref_initialized(node_ref) &*& + [1-f]ref_initialized(&(*node_ref).next) &*& + [1-f]ref_initialized(&(*node_ref).prev) &*& + ref_padding_end_token(node_ref, NonNull_ptr(node0), f/2) &*& + [1-f]ref_padding_initialized::>(node_ref) &*& + [f]Nodes1(alloc_id, nodeNext, self0.head, self0.tail, next, tail(nodes), tail(prevs), tail(nexts)) &*& + [f]Nodes1(alloc_id, next, self0.tail, tail0, None, nodes_after, prevs_after, nexts_after); + close Ctx(); + close Q(); + produce_lem_ptr_chunk frac_borrow_convert_strong(Ctx, Q, k1, f, sep_(Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, nexts_before, nexts, nexts_after), ref_initialized_(&(*node_ref).element)))() { + open Ctx(); + open Q(); + open_ref_initialized_Node(node_ref); + end_ref_padding_Node(node_ref); + end_ref_Option_NonNull(&(*node_ref).prev); + end_ref_Option_NonNull(&(*node_ref).next); + close [f]Nodes1(alloc_id, self0.head, prev, self0.tail, next, nodes, prevs, nexts); + close [f]Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, nexts_before, nexts, nexts_after)(); + close [f]ref_initialized_::(&(*node_ref).element)(); + close [f]sep_(Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, nexts_before, nexts, nexts_after), ref_initialized_(&(*node_ref).element))(); + } { + close_frac_borrow_strong(k1, sep_(Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, 
prevs_after, nexts_before, nexts, nexts_after), ref_initialized_(&(*node_ref).element)), Q); + leak full_borrow(k1, Q); + } + } + @*/ + /*@ + produce_lem_ptr_chunk implies_frac(Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, nexts_before, nexts, nexts_after), Iter_frac_borrow_content::(alloc_id, head0, self1.head, self0.head, self1.tail, next, tail0, append(nodes_before, [node]), tail(nodes), nodes_after, append(prevs_before, [prev]), tail(prevs), prevs_after, append(nexts_before, [self1.head]), tail(nexts), nexts_after))() { + open [?f1]Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, nexts_before, nexts, nexts_after)(); + open Nodes1::(alloc_id, self0.head, prev, self0.tail, next, nodes, prevs, nexts); + close [f1]Nodes1::(alloc_id, self1.head, self0.head, self0.head, self1.head, [], [], []); + assert self0.head == Option::Some(node) &*& [f1](*NonNull_ptr(node)).next |-> self1.head; + close [f1]Nodes1::(alloc_id, self0.head, prev, self0.head, self1.head, [node], [prev], [self1.head]); + Nodes1_append::(head0); + close [f1]Iter_frac_borrow_content::(alloc_id, head0, self1.head, self0.head, self1.tail, next, tail0, append(nodes_before, [node]), tail(nodes), nodes_after, append(prevs_before, [prev]), tail(prevs), prevs_after, append(nexts_before, [self1.head]), tail(nexts), nexts_after)(); + } { + produce_lem_ptr_chunk implies_frac(Iter_frac_borrow_content::(alloc_id, head0, self1.head, self0.head, self1.tail, next, tail0, append(nodes_before, [node]), tail(nodes), nodes_after, append(prevs_before, [prev]), tail(prevs), prevs_after, append(nexts_before, [self1.head]), tail(nexts), nexts_after), Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, 
nexts_before, nexts, nexts_after))() { + open [?f1]Iter_frac_borrow_content::(alloc_id, head0, self1.head, self0.head, self1.tail, next, tail0, append(nodes_before, [node]), tail(nodes), nodes_after, append(prevs_before, [prev]), tail(prevs), prevs_after, append(nexts_before, [self1.head]), tail(nexts), nexts_after)(); + Nodes1_split::(nodes_before, [node], prevs_before, [prev], nexts_before, [self1.head]); + open Nodes1::(alloc_id, _, _, _, _, [node], [prev], [self1.head]); + open Nodes1::(alloc_id, self1.head, _, _, _, [], [], []); + close [f1]Nodes1::(alloc_id, self0.head, prev, self0.tail, next, nodes, prevs, nexts); + close [f1]Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, nexts_before, nexts, nexts_after)(); + } { + frac_borrow_implies('a, Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, nexts_before, nexts, nexts_after), Iter_frac_borrow_content::(alloc_id, head0, self1.head, self0.head, self1.tail, next, tail0, append(nodes_before, [node]), tail(nodes), nodes_after, append(prevs_before, [prev]), tail(prevs), prevs_after, append(nexts_before, [self1.head]), tail(nexts), nexts_after)); + } + } + @*/ + //@ close exists(Iter_info(alloc_id, head0, self0.head, next, tail0, append(nodes_before, [node]), tail(nodes), nodes_after, append(prevs_before, [prev]), tail(prevs), prevs_after, append(nexts_before, [self1.head]), tail(nexts), nexts_after)); + //@ close >.own(t, *self); + //@ let elem_ref = precreate_ref(&(*node_1).element); + //@ init_ref_share::('a, t, elem_ref); + //@ leak type_interp::(); + //@ close_ref_own::<'a, T>(elem_ref); + //@ close >.own(t, Option::Some(elem_ref)); + //@ open_frac_borrow('a, ref_initialized_(elem_ref), q); + //@ open [?fr]ref_initialized_::(elem_ref)(); + let r = &(*node).element; + //@ close 
[fr]ref_initialized_::(elem_ref)(); + //@ close_frac_borrow(fr, ref_initialized_(elem_ref)); + Some(r) + } + } } } @@ -1422,17 +2810,47 @@ impl<'a, T> Iterator for IterMut<'a, T> { type Item = &'a mut T; #[inline] - fn next(&mut self) -> Option<&'a mut T> { + fn next(&mut self) -> Option<&'a mut T> + //@ req *self |-> ?self0 &*& Nodes(?alloc_id, self0.head, ?prev, self0.tail, ?next, ?nodes) &*& self0.len == length(nodes); + /*@ + ens if self0.len == 0 { + Nodes(alloc_id, self0.head, prev, self0.tail, next, nodes) &*& + *self |-> self0 &*& result == Option::None + } else { + self0.head == Option::Some(?head) &*& + alloc_block_in(alloc_id, NonNull_ptr(head) as *u8, Layout::new_::>()) &*& struct_Node_padding(NonNull_ptr(head)) &*& + (*NonNull_ptr(head)).prev |-> prev &*& + (*NonNull_ptr(head)).next |-> ?next0 &*& + pointer_within_limits(&(*NonNull_ptr(head)).element) == true &*& + Nodes(alloc_id, next0, Option::Some(head), self0.tail, next, ?nodes0) &*& + nodes == cons(head, nodes0) &*& + *self |-> ?self1 &*& self1.head == next0 &*& self1.tail == self0.tail &*& self1.len == self0.len - 1 &*& + result == Option::Some(&(*NonNull_ptr(head)).element) + }; + @*/ + //@ safety_proof { assume(false); } + { if self.len == 0 { None } else { - self.head.map(|node| unsafe { - // Need an unbound lifetime to get 'a - let node = &mut *node.as_ptr(); - self.len -= 1; - self.head = node.next; - &mut node.element - }) + //@ open Nodes(_, _, _, _, _, _); + let head = self.head; + //@ open_points_to(self); + let head_ref = &mut self.head; + let len_ref = &mut self.len; + match head { + None => None, //~allow_dead_code + Some(node) => unsafe { + // Need an unbound lifetime to get 'a + let node = &mut *node.as_ptr(); + let len = *len_ref; + //@ produce_limits(len); + *len_ref = len - 1; + *head_ref = node.next; + //@ close_points_to(self); + Some(&mut node.element) + } + } } } @@ -1450,17 +2868,48 @@ impl<'a, T> Iterator for IterMut<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] 
impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { #[inline] - fn next_back(&mut self) -> Option<&'a mut T> { + fn next_back(&mut self) -> Option<&'a mut T> + //@ req *self |-> ?self0 &*& Nodes(?alloc_id, self0.head, ?prev, self0.tail, ?next, ?nodes) &*& self0.len == length(nodes); + /*@ + ens if self0.len == 0 { + Nodes(alloc_id, self0.head, prev, self0.tail, next, nodes) &*& + *self |-> self0 &*& result == Option::None + } else { + self0.tail == Option::Some(?tail) &*& + alloc_block_in(alloc_id, NonNull_ptr(tail) as *u8, Layout::new_::>()) &*& struct_Node_padding(NonNull_ptr(tail)) &*& + (*NonNull_ptr(tail)).prev |-> ?prev0 &*& + (*NonNull_ptr(tail)).next |-> next &*& + pointer_within_limits(&(*NonNull_ptr(tail)).element) == true &*& + Nodes(alloc_id, self0.head, prev, prev0, self0.tail, ?nodes0) &*& + nodes == append(nodes0, [tail]) &*& + *self |-> ?self1 &*& self1.head == self0.head &*& self1.tail == prev0 &*& self1.len == self0.len - 1 + }; + @*/ + //@ safety_proof { assume(false); } + { if self.len == 0 { None } else { - self.tail.map(|node| unsafe { - // Need an unbound lifetime to get 'a - let node = &mut *node.as_ptr(); - self.len -= 1; - self.tail = node.prev; - &mut node.element - }) + //@ Nodes_last_lemma(self0.head); + //@ if self0.head == next { open Nodes(_, _, _, _, _, _); assert false; } + //@ Nodes_split_off_last(self0.head); + //@ open_points_to(self); + let tail = self.tail; + let tail_ref = &mut self.tail; + let len_ref = &mut self.len; + match tail { + None => None, //~allow_dead_code + Some(node) => unsafe { + // Need an unbound lifetime to get 'a + let node = &mut *node.as_ptr(); + let len = *len_ref; + //@ produce_limits(len); + *len_ref = len - 1; + *tail_ref = node.prev; + //@ close_points_to(self); + Some(&mut node.element) + } + } } } } @@ -1534,6 +2983,45 @@ pub struct CursorMut< list: &'a mut LinkedList, } +/*@ + +pred_ctor CursorMut_fbc(t: thread_id_t, ghost_cell_id: i32, list: *LinkedList)() = + 
[1/2]ghost_cell::>>>>(ghost_cell_id, pair(?index, ?current)) &*& + (*list).alloc |-> ?alloc &*& + Allocator::(t, alloc, ?alloc_id) &*& + (*list).head |-> ?head &*& + (*list).tail |-> ?tail &*& + Nodes::(alloc_id, head, None, ?before_current, current, ?nodes1) &*& + Nodes::(alloc_id, current, before_current, tail, None, ?nodes2) &*& + foreach(nodes1, elem_fbc::(t)) &*& + foreach(nodes2, elem_fbc::(t)) &*& + (*list).len |-> length(nodes1) + length(nodes2) &*& + index == length(nodes1) &*& + (*list).marker |-> ?_ &*& + struct_LinkedList_padding(list); + +pred<'a, T, A> >.own(t, cursor) = + [1/2]ghost_cell::>>>>(?ghost_cell_id, pair(cursor.index, cursor.current)) &*& + full_borrow('a, CursorMut_fbc::(if is_Send(typeid(T)) && is_Send(typeid(A)) { default_tid } else { t }, ghost_cell_id, cursor.list)); + +lem CursorMut_own_mono<'a0, 'a1, T, A>() + req type_interp::() &*& type_interp::() &*& CursorMut_own::<'a0, T, A>(?t, ?v) &*& lifetime_inclusion('a1, 'a0) == true; + ens type_interp::() &*& type_interp::() &*& CursorMut_own::<'a1, T, A>(t, CursorMut::<'a1, T, A> { index: upcast(v.index), current: upcast(v.current), list: v.list as *_ }); +{ + assume(false); +} + +lem CursorMut_send<'a, T, A>(t1: thread_id_t) + req type_interp::() &*& type_interp::() &*& is_Send(typeid(T)) == true &*& is_Send(typeid(A)) == true &*& CursorMut_own::<'a, T, A>(?t0, ?v); + ens type_interp::() &*& type_interp::() &*& CursorMut_own::<'a, T, A>(t1, v); +{ + open >.own(t0, v); + close >.own(t1, v); +} + +@*/ + + #[unstable(feature = "linked_list_cursors", issue = "58533")] impl fmt::Debug for CursorMut<'_, T, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -1686,20 +3174,56 @@ impl<'a, T, A: Allocator> CursorMut<'a, T, A> { /// the first element of the `LinkedList`. If it is pointing to the last /// element of the `LinkedList` then this will move it to the "ghost" non-element. 
#[unstable(feature = "linked_list_cursors", issue = "58533")] - pub fn move_next(&mut self) { + pub fn move_next(&mut self) + //@ req thread_token(?t) &*& [?q]lifetime_token('a) &*& *self |-> ?self0 &*& >.own(t, self0); + //@ ens thread_token(t) &*& [q]lifetime_token('a) &*& *self |-> ?self1 &*& >.own(t, self1); + //@ on_unwind_ens thread_token(t) &*& [q]lifetime_token('a) &*& *self |-> ?self1 &*& >.own(t, self1); + { + //@ open_points_to(self); + //@ open >.own(t, self0); + //@ let t1 = if is_Send(typeid(T)) && is_Send(typeid(A)) { default_tid } else { t }; + //@ assert [1/2]ghost_cell(?ghost_cell_id, _); + //@ open_full_borrow(q, 'a, CursorMut_fbc::(t1, ghost_cell_id, self0.list)); + //@ open CursorMut_fbc::(t1, ghost_cell_id, self0.list)(); + //@ let head = (*self0.list).head; + //@ let tail = (*self0.list).tail; match self.current.take() { // We had no current element; the cursor was sitting at the start position // Next element should be the head of the list None => { + //@ close CursorMut_current(self, _); self.current = self.list.head; self.index = 0; + //@ open Nodes(?alloc_id, None, ?before_current, tail, None, ?nodes2); + //@ close Nodes(alloc_id, head, None, None, head, nil); } // We had a previous element, so let's go to its next Some(current) => unsafe { + //@ open Nodes(?alloc_id, self0.current, ?before_current, tail, None, ?nodes2); + //@ close CursorMut_current(self, _); + //@ let current_ref = precreate_ref(¤t); + //@ std::ptr::init_ref_NonNull(current_ref, 1/2); self.current = current.as_ref().next; + //@ std::ptr::end_ref_NonNull(current_ref); self.index += 1; + //@ let self1_= *self; + //@ assert Nodes(alloc_id, head, None, _, _, ?nodes1); + //@ open Nodes(alloc_id, self1_.current, self0.current, tail, None, tail(nodes2)); + //@ close Nodes(alloc_id, self1_.current, self0.current, tail, None, tail(nodes2)); + //@ close Nodes(alloc_id, self1_.current, self0.current, self0.current, self1_.current, nil); + //@ close Nodes(alloc_id, self0.current, 
before_current, self0.current, self1_.current, [current]); + //@ Nodes_append_(head); + //@ open foreach(nodes2, _); + //@ close foreach([], elem_fbc::(t1)); + //@ close foreach([current], elem_fbc::(t1)); + //@ foreach_append(nodes1, [current]); }, - } + }; + //@ let self1 = *self; + //@ ghost_cell_mutate(ghost_cell_id, pair(self1.index, self1.current)); + //@ close CursorMut_fbc::(t1, ghost_cell_id, self1.list)(); + //@ close_full_borrow(CursorMut_fbc::(t1, ghost_cell_id, self1.list)); + //@ close >.own(t, self1); } /// Moves the cursor to the previous element of the `LinkedList`. @@ -1708,19 +3232,146 @@ impl<'a, T, A: Allocator> CursorMut<'a, T, A> { /// the last element of the `LinkedList`. If it is pointing to the first /// element of the `LinkedList` then this will move it to the "ghost" non-element. #[unstable(feature = "linked_list_cursors", issue = "58533")] - pub fn move_prev(&mut self) { + pub fn move_prev(&mut self) + //@ req thread_token(?t) &*& [?q]lifetime_token('a) &*& *self |-> ?self0 &*& >.own(t, self0); + //@ ens thread_token(t) &*& [q]lifetime_token('a) &*& *self |-> ?self1 &*& >.own(t, self1); + //@ on_unwind_ens thread_token(t) &*& [q]lifetime_token('a) &*& *self |-> ?self1 &*& >.own(t, self1); + { + //@ let t1 = if is_Send(typeid(T)) && is_Send(typeid(A)) { default_tid } else { t }; + //@ open_points_to(self); + //@ open >.own(t, self0); + //@ assert [1/2]ghost_cell(?ghost_cell_id, _); + //@ open_full_borrow(q, 'a, CursorMut_fbc::(t1, ghost_cell_id, self0.list)); + //@ open CursorMut_fbc::(t1, ghost_cell_id, self0.list)(); + //@ let head = (*self0.list).head; + //@ let tail = (*self0.list).tail; + //@ open Nodes(?alloc_id, self0.current, ?before_current, tail, None, ?nodes2); match self.current.take() { // No current. We're at the start of the list. Yield None and jump to the end. 
None => { + //@ close CursorMut_current(self, _); self.current = self.list.tail; - self.index = self.list.len().checked_sub(1).unwrap_or(0); + //@ let list_ref = precreate_ref(self0.list); + //@ open_ref_init_perm_LinkedList(list_ref); + //@ init_ref_Option_NonNull(&(*list_ref).head); + //@ init_ref_Option_NonNull(&(*list_ref).tail); + //@ std::num::init_ref_usize(&(*list_ref).len, 1/2); + //@ init_ref_padding_LinkedList(list_ref, 1/2); + //@ let k1 = begin_lifetime(); + //@ std::marker::close_ref_initialized_PhantomData(&(*list_ref).marker, 1); + let len; + { + //@ let_lft 'b = k1; + //@ std::alloc::init_ref_Allocator_at_lifetime::<'b, A>(&(*list_ref).alloc); + //@ close_ref_initialized_LinkedList(list_ref); + len = self.list.len(); + } + //@ end_lifetime(k1); + //@ open_ref_initialized_LinkedList(list_ref); + //@ end_ref_Option_NonNull(&(*list_ref).head); + //@ end_ref_Option_NonNull(&(*list_ref).tail); + //@ std::alloc::end_ref_Allocator_at_lifetime::(); + //@ std::num::end_ref_usize(&(*list_ref).len); + //@ end_ref_padding_LinkedList(list_ref); + //@ leak ref_init_perm(&(*list_ref).marker, _) &*& ref_initialized(&(*list_ref).marker); + + self.index = len.checked_sub(1).unwrap_or(0); + + //@ assert nodes2 == []; + //@ open foreach([], elem_fbc::(t1)); + /*@ + match tail { + Option::None => { + Nodes_last_lemma(head); + close Nodes(alloc_id, self0.current, tail, tail, None, nodes2); + close foreach([], elem_fbc::(t1)); + assert len == 0; + } + Option::Some(tail_) => { + Nodes_last_lemma(head); + Nodes_split_off_last(head); + assert Nodes(alloc_id, head, None, ?before_tail, tail, ?nodes10); + close Nodes::(alloc_id, None, tail, tail, None, []); + close Nodes::(alloc_id, tail, _, tail, None, [tail_]); + foreach_unappend(nodes10, [tail_], elem_fbc::(t1)); + } + } + @*/ } // Have a prev. Yield it and go to the previous element. 
Some(current) => unsafe { + //@ close CursorMut_current(self, _); + //@ let current_ref = precreate_ref(¤t); + //@ std::ptr::init_ref_NonNull(current_ref, 1/2); self.current = current.as_ref().prev; - self.index = self.index.checked_sub(1).unwrap_or_else(|| self.list.len()); + //@ std::ptr::end_ref_NonNull(current_ref); + + let index = self.index.checked_sub(1); + + let list; + + //@ let list_ref = precreate_ref(self0.list); + //@ open_ref_init_perm_LinkedList(list_ref); + //@ init_ref_Option_NonNull(&(*list_ref).head); + //@ init_ref_Option_NonNull(&(*list_ref).tail); + //@ std::num::init_ref_usize(&(*list_ref).len, 1/2); + //@ init_ref_padding_LinkedList(list_ref, 1/2); + //@ let k1 = begin_lifetime(); + //@ std::marker::close_ref_initialized_PhantomData(&(*list_ref).marker, 1); + { + //@ let_lft 'b = k1; + //@ std::alloc::init_ref_Allocator_at_lifetime::<'b, A>(&(*list_ref).alloc); + //@ close_ref_initialized_LinkedList(list_ref); + list = &*self.list; + + //@ close ref_initialized_::>(list_ref)(); + //@ borrow(k1, ref_initialized_::>(list_ref)); + //@ full_borrow_into_frac(k1, ref_initialized_::>(list_ref)); + + let index2; + match index { + None => { + //@ open Nodes(_, head, None, _, _, ?nodes1); + //@ assert nodes1 == []; + + let len = list.len(); + + index2 = len; + + //@ close Nodes(alloc_id, self0.current, before_current, tail, None, nodes2); + //@ close Nodes::(alloc_id, None, tail, tail, None, []); + } + Some(index1) => { + //@ close Nodes(alloc_id, self0.current, before_current, tail, None, nodes2); + //@ Nodes_last_lemma(head); + //@ Nodes_split_off_last(head); + //@ assert before_current == Option::Some(?current1); + //@ assert Nodes(_, head, None, ?last, before_current, ?nodes10); + //@ close Nodes::(alloc_id, before_current, last, tail, None, cons(current1, nodes2)); + //@ foreach_unappend(nodes10, [current1], elem_fbc::(t1)); + //@ foreach_append([current1], nodes2); + index2 = index1; + } + }; + self.index = index2; + } + //@ end_lifetime(k1); + 
//@ borrow_end(k1, ref_initialized_(list_ref)); + //@ open ref_initialized_::>(list_ref)(); + //@ open_ref_initialized_LinkedList(list_ref); + //@ end_ref_Option_NonNull(&(*list_ref).head); + //@ end_ref_Option_NonNull(&(*list_ref).tail); + //@ std::alloc::end_ref_Allocator_at_lifetime::(); + //@ std::num::end_ref_usize(&(*list_ref).len); + //@ end_ref_padding_LinkedList(list_ref); + //@ leak ref_init_perm(&(*list_ref).marker, _) &*& ref_initialized(&(*list_ref).marker); }, } + //@ let self1 = *self; + //@ ghost_cell_mutate(ghost_cell_id, pair(self1.index, self1.current)); + //@ close CursorMut_fbc::(t1, ghost_cell_id, self1.list)(); + //@ close_full_borrow(CursorMut_fbc::(t1, ghost_cell_id, self1.list)); + //@ close >.own(t, self1); } /// Returns a reference to the element that the cursor is currently @@ -1730,8 +3381,126 @@ impl<'a, T, A: Allocator> CursorMut<'a, T, A> { /// "ghost" non-element. #[must_use] #[unstable(feature = "linked_list_cursors", issue = "58533")] - pub fn current(&mut self) -> Option<&mut T> { - unsafe { self.current.map(|current| &mut (*current.as_ptr()).element) } + pub fn current<'b>(&'b mut self) -> Option<&'b mut T> + //@ req thread_token(?t) &*& [?qa]lifetime_token('a) &*& [?qb]lifetime_token('b) &*& full_borrow('b, >.full_borrow_content(t, self)) &*& lifetime_inclusion('b, 'a) == true; + //@ ens thread_token(t) &*& [qa]lifetime_token('a) &*& [qb]lifetime_token('b) &*& >.own(t, result); + //@ on_unwind_ens thread_token(t) &*& [qa]lifetime_token('a) &*& [qb]lifetime_token('b); + { + unsafe { + //@ let t1 = if is_Send(typeid(T)) && is_Send(typeid(A)) { default_tid } else { t }; + //@ let klongb = open_full_borrow_strong('b, >.full_borrow_content(t, self), qb); + //@ open >.full_borrow_content(t, self)(); + let current0 = self.current; + match current0 { //self.current.map(|current| &mut (*current.as_ptr()).element) + None => { + /*@ + { + pred Ctx() = true; + produce_lem_ptr_chunk full_borrow_convert_strong(Ctx, >.full_borrow_content(t, 
self), klongb, >.full_borrow_content(t, self))() { + open Ctx(); + } { + close Ctx(); + close >.full_borrow_content(t, self)(); + close_full_borrow_strong(klongb, >.full_borrow_content(t, self), >.full_borrow_content(t, self)); + leak full_borrow(_, _); + } + } + @*/ + //@ close >.own(t, None); + None + } + Some(current) => { + //@ open >.own(t, ?self0); + //@ let list = self0.list; + //@ assert [1/2]ghost_cell(?ghost_cell_id, pair(?index, ?current_)); + /*@ + { + pred Ctx1() = + *self |-> self0 &*& + [1/2]ghost_cell(ghost_cell_id, pair(index, current_)); + { + pred Ctx() = true; + produce_lem_ptr_chunk full_borrow_convert_strong(Ctx, sep(Ctx1, full_borrow_('a, CursorMut_fbc::(t1, ghost_cell_id, list))), klongb, >.full_borrow_content(t, self))() { + open Ctx(); + open sep(Ctx1, full_borrow_('a, CursorMut_fbc::(t1, ghost_cell_id, list)))(); + open Ctx1(); + close >.own(t, self0); + close >.full_borrow_content(t, self)(); + } { + close Ctx(); + close Ctx1(); + close sep(Ctx1, full_borrow_('a, CursorMut_fbc::(t1, ghost_cell_id, list)))(); + close_full_borrow_strong(klongb, >.full_borrow_content(t, self), sep(Ctx1, full_borrow_('a, CursorMut_fbc::(t1, ghost_cell_id, list)))); + } + full_borrow_mono(klongb, 'b, sep(Ctx1, full_borrow_('a, CursorMut_fbc::(t1, ghost_cell_id, list)))); + full_borrow_split('b, Ctx1, full_borrow_('a, CursorMut_fbc::(t1, ghost_cell_id, list))); + full_borrow_unnest('b, 'a, CursorMut_fbc::(t1, ghost_cell_id, list)); + lifetime_inclusion_refl('b); + lifetime_inclusion_glb('b, 'b, 'a); + full_borrow_mono(lifetime_intersection('b, 'a), 'b, CursorMut_fbc::(t1, ghost_cell_id, list)); + } + open_full_borrow(qb/2, 'b, Ctx1); + open Ctx1(); + let klongb2 = open_full_borrow_strong('b, CursorMut_fbc::(t1, ghost_cell_id, list), qb/2); + open CursorMut_fbc::(t1, ghost_cell_id, list)(); + close Ctx1(); + close_full_borrow(Ctx1); + leak full_borrow('b, Ctx1); + open Nodes::(?alloc_id, Some(current), ?before_current, ?tail, None, ?nodes2); + close 
Nodes::(alloc_id, Some(current), before_current, tail, None, nodes2); + open foreach(nodes2, elem_fbc::(t1)); + open elem_fbc::(t1)(current); + produce_type_interp::(); + if t1 != t { + Send::send(t1, t, (*NonNull_ptr(current)).element); + } + close_full_borrow_content::(t, &(*NonNull_ptr(current)).element); + assert + (*list).alloc |-> ?alloc &*& + Allocator::(t1, alloc, alloc_id) &*& + (*list).head |-> ?head &*& + (*list).tail |-> tail &*& + Nodes::(alloc_id, head, None, before_current, current_, ?nodes1); + { + pred Ctx() = + type_interp::() &*& + [1/2]ghost_cell::>>>>(ghost_cell_id, pair(index, current_)) &*& + (*list).alloc |-> alloc &*& + Allocator::(t1, alloc, alloc_id) &*& + (*list).head |-> head &*& + (*list).tail |-> tail &*& + Nodes::(alloc_id, head, None, before_current, current_, nodes1) &*& + Nodes::(alloc_id, current_, before_current, tail, None, nodes2) &*& + foreach(nodes1, elem_fbc::(t1)) &*& + foreach(tail(nodes2), elem_fbc::(t1)) &*& + (*list).len |-> length(nodes1) + length(nodes2) &*& + index == length(nodes1) &*& + (*list).marker |-> ?_ &*& + struct_LinkedList_padding(list); + produce_lem_ptr_chunk full_borrow_convert_strong(Ctx, .full_borrow_content(t, &(*NonNull_ptr(current)).element), klongb2, CursorMut_fbc::(t1, ghost_cell_id, self0.list))() { + open Ctx(); + open_full_borrow_content::(t, &(*NonNull_ptr(current)).element); + if t1 != t { + Send::send(t, t1, (*NonNull_ptr(current)).element); + } + leak type_interp::(); + close elem_fbc::(t1)(current); + close foreach(nodes2, elem_fbc::(t1)); + close CursorMut_fbc::(t1, ghost_cell_id, self0.list)(); + } { + close Ctx(); + close_full_borrow_strong(klongb2, CursorMut_fbc::(t1, ghost_cell_id, self0.list), .full_borrow_content(t, &(*NonNull_ptr(current)).element)); + full_borrow_mono(klongb2, 'b, .full_borrow_content(t, &(*NonNull_ptr(current)).element)); + } + } + } + @*/ + //@ close_ref_mut_own::<'b, T>(t, &(*NonNull_ptr(current)).element); + //@ close >.own(t, 
Some(&(*NonNull_ptr(current)).element)); + Some(&mut (*current.as_ptr()).element) + } + } + } } /// Returns a reference to the next element. @@ -1883,13 +3652,76 @@ impl<'a, T, A: Allocator> CursorMut<'a, T, A> { /// If the cursor is currently pointing to the "ghost" non-element then no element /// is removed and `None` is returned. #[unstable(feature = "linked_list_cursors", issue = "58533")] - pub fn remove_current(&mut self) -> Option { - let unlinked_node = self.current?; - unsafe { - self.current = unlinked_node.as_ref().next; - self.list.unlink_node(unlinked_node); - let unlinked_node = Box::from_raw_in(unlinked_node.as_ptr(), &self.list.alloc); - Some(unlinked_node.element) + pub fn remove_current(&mut self) -> Option + //@ req thread_token(?t) &*& [?q]lifetime_token('a) &*& *self |-> ?self0 &*& >.own(t, self0); + //@ ens thread_token(t) &*& [q]lifetime_token('a) &*& *self |-> ?self1 &*& >.own(t, self1) &*& >.own(t, result); + //@ on_unwind_ens thread_token(t) &*& [q]lifetime_token('a) &*& *self |-> ?self1 &*& >.own(t, self1); + { + //let unlinked_node = self.current?; + match core::ops::Try::branch(self.current) { + core::ops::ControlFlow::Break(residual) => { + //@ close >.own(t, None); + core::ops::FromResidual::from_residual(residual) + } + core::ops::ControlFlow::Continue(unlinked_node) => unsafe { + //@ open >.own(t, self0); + //@ assert [1/2]ghost_cell(?ghost_cell_id, _); + //@ let t1 = if is_Send(typeid(T)) && is_Send(typeid(A)) { default_tid } else { t }; + //@ open_full_borrow(q, 'a, CursorMut_fbc::(t1, ghost_cell_id, self0.list)); + //@ open CursorMut_fbc::(t1, ghost_cell_id, self0.list)(); + //@ open Nodes::(?alloc_id, self0.current, ?before_current, ?tail, None, ?nodes2); + + //@ let unlinked_node_ref = precreate_ref(&unlinked_node); + //@ std::ptr::init_ref_NonNull(unlinked_node_ref, 1/2); + self.current = unlinked_node.as_ref().next; + //@ std::ptr::end_ref_NonNull(unlinked_node_ref); + + //@ let current1 = (*self).current; + //@ open 
foreach(nodes2, elem_fbc::(t1)); + //@ open elem_fbc::(t1)(unlinked_node); + self.list.unlink_node(unlinked_node); + /*@ + if t1 != t { + std::alloc::Allocator_send(t, (*self0.list).alloc); + } + @*/ + //@ std::alloc::close_Allocator_full_borrow_content_(t, &(*self0.list).alloc); + //@ let k = begin_lifetime(); + //@ borrow(k, std::alloc::Allocator_full_borrow_content_::(t, &(*self0.list).alloc, alloc_id)); + //@ std::alloc::share_Allocator_full_borrow_content_(k, t, &(*self0.list).alloc, alloc_id); + //@ let alloc_ref = precreate_ref(&(*self0.list).alloc); + //@ std::alloc::init_ref_Allocator_share(k, t, alloc_ref); + //@ open_frac_borrow(k, ref_initialized_(alloc_ref), 1/4); + //@ open [?f]ref_initialized_::(alloc_ref)(); + let r; + { + //@ let_lft 'b = k; + //@ std::alloc::close_Allocator_ref::<'b, A>(t, alloc_ref); + let unlinked_node = Box::from_raw_in/*@::, &'b A>@*/(unlinked_node.as_ptr(), &self.list.alloc); + r = Some(Box_into_inner_with_ref_Allocator__VeriFast_wrapper(unlinked_node).element); // Some(unlinked_node_.element) + } + //@ close [f]ref_initialized_::(alloc_ref)(); + //@ close_frac_borrow(f, ref_initialized_(alloc_ref)); + //@ end_lifetime(k); + //@ borrow_end(k, std::alloc::Allocator_full_borrow_content_::(t, &(*self0.list).alloc, alloc_id)); + //@ std::alloc::open_Allocator_full_borrow_content_(t, &(*self0.list).alloc, alloc_id); + + //@ ghost_cell_mutate(ghost_cell_id, pair(self0.index, current1)); + /*@ + if t1 != t { + std::alloc::Allocator_send(t1, (*self0.list).alloc); + assert r == Option::Some(?e); + produce_type_interp::(); + Send::send(t1, t, e); + leak type_interp::(); + } + @*/ + //@ close CursorMut_fbc::(t1, ghost_cell_id, self0.list)(); + //@ close_full_borrow(CursorMut_fbc::(t1, ghost_cell_id, self0.list)); + //@ close >.own(t, *self); + //@ close >.own(t, r); + r + } } } @@ -2111,6 +3943,53 @@ pub struct ExtractIf< old_len: usize, } +/*@ + +pred_ctor ExtractIf_fbc(t: thread_id_t, list: *LinkedList, ghost_cell_id: i32, old_len: 
usize)() = + [1/2]ghost_cell::>>, usize>>(ghost_cell_id, pair(?it, ?idx)) &*& + (*list).alloc |-> ?alloc &*& Allocator::(t, alloc, ?alloc_id) &*& + (*list).head |-> ?head &*& + (*list).tail |-> ?tail &*& + Nodes::(alloc_id, head, None, ?prev, it, ?nodes1) &*& + Nodes::(alloc_id, it, prev, tail, None, ?nodes2) &*& + foreach(nodes1, elem_fbc::(t)) &*& + foreach(nodes2, elem_fbc::(t)) &*& + (*list).len |-> length(append(nodes1, nodes2)) &*& + old_len <= usize::MAX &*& + old_len - idx == length(nodes2); + +pred<'a, T, F, A> >.own(t, ex) = + .own(t, ex.`pred`) &*& 0 <= ex.idx &*& + [1/2]ghost_cell::>>, usize>>(?ghost_cell_id, pair(ex.it, ex.idx)) &*& + full_borrow('a, ExtractIf_fbc::(t, ex.list, ghost_cell_id, ex.old_len)); + +lem ExtractIf_drop<'a, T, F, A>() + req ExtractIf_own::<'a, T, F, A>(?_t, ?_v); + ens .own(_t, _v.`pred`); +{ + open >.own(_t, _v); + leak full_borrow(_, _); + leak [1/2]ghost_cell(_, _); +} + +lem ExtractIf_own_mono<'a0, 'a1, T, F0, F1, A>() + req type_interp::() &*& type_interp::() &*& type_interp::() &*& type_interp::() &*& ExtractIf_own::<'a0, T, F0, A>(?t, ?v) &*& lifetime_inclusion('a1, 'a0) == true &*& is_subtype_of::() == true; + ens type_interp::() &*& type_interp::() &*& type_interp::() &*& type_interp::() &*& ExtractIf_own::<'a1, T, F1, A>(t, ExtractIf::<'a1, T, F1, A> { list: v.list as *_, it: upcast(v.it), `pred`: upcast(v.`pred`), idx: upcast(v.idx), old_len: upcast(v.old_len) }); +{ + assume(false); +} + +@*/ + +fn call_pred__VeriFast_wrapper bool>(f: &mut F, element: &mut T) -> bool +//@ req thread_token(?t) &*& *f |-> ?f0 &*& .own(t, f0) &*& *element |-> ?element0 &*& .own(t, element0); +//@ ens thread_token(t) &*& *f |-> ?f1 &*& .own(t, f1) &*& *element |-> ?element1 &*& .own(t, element1); +//@ on_unwind_ens thread_token(t) &*& *f |-> ?f1 &*& .own(t, f1) &*& *element |-> ?element1 &*& .own(t, element1); +//@ assume_correct +{ + f(element) +} + #[stable(feature = "extract_if", since = "1.87.0")] impl Iterator for ExtractIf<'_, T, 
F, A> where @@ -2118,20 +3997,80 @@ where { type Item = T; - fn next(&mut self) -> Option { - while let Some(mut node) = self.it { - unsafe { - self.it = node.as_ref().next; - self.idx += 1; - - if (self.pred)(&mut node.as_mut().element) { - // `unlink_node` is okay with aliasing `element` references. - self.list.unlink_node(node); - return Some(Box::from_raw_in(node.as_ptr(), &self.list.alloc).element); + fn next(&mut self) -> Option + //@ req thread_token(?t) &*& [?q]lifetime_token('_0) &*& *self |-> ?self0 &*& >.own(t, self0); + //@ ens thread_token(t) &*& [q]lifetime_token('_0) &*& *self |-> ?self1 &*& >.own(t, self1) &*& >.own(t, result); + //@ on_unwind_ens thread_token(t) &*& [q]lifetime_token('_0) &*& *self |-> ?self1 &*& >.own(t, self1); + { + loop { //while let Some(mut node) = self.it { + //@ inv thread_token(t) &*& [q]lifetime_token('_0) &*& *self |-> ?self1 &*& >.own(t, self1) &*& node |-> _; + //@ open_points_to(self); + if let Some(mut node) = self.it { + unsafe { + //@ open >.own(t, self1); + //@ assert [1/2]ghost_cell(?ghost_cell_id, _); + //@ open_full_borrow(q, '_0, ExtractIf_fbc(t, self1.list, ghost_cell_id, self1.old_len)); + //@ open ExtractIf_fbc::(t, self1.list, ghost_cell_id, self1.old_len)(); + //@ assert Allocator::(t, ?alloc, _); + //@ open Nodes(?alloc_id, self1.it, ?prev, ?tail, None, ?nodes2); + + //@ let node_ref = precreate_ref(&node); + //@ std::ptr::init_ref_NonNull(node_ref, 1/2); + self.it = node.as_ref().next; + //@ std::ptr::end_ref_NonNull(node_ref); + + self.idx += 1; + //@ ghost_cell_mutate(ghost_cell_id, pair((*self).it, (*self).idx)); + + //@ open foreach(nodes2, elem_fbc::(t)); + //@ open elem_fbc::(t)(node); + + if call_pred__VeriFast_wrapper(&mut self.pred, &mut node.as_mut().element) { + // `unlink_node` is okay with aliasing `element` references. 
+ self.list.unlink_node(node); + + //@ std::alloc::close_Allocator_full_borrow_content_(t, &(*(*self).list).alloc); + //@ let k = begin_lifetime(); + //@ borrow(k, std::alloc::Allocator_full_borrow_content_::(t, &(*(*self).list).alloc, alloc_id)); + //@ std::alloc::share_Allocator_full_borrow_content_(k, t, &(*(*self).list).alloc, alloc_id); + //@ let alloc_ref = precreate_ref(&(*(*self).list).alloc); + //@ std::alloc::init_ref_Allocator_share(k, t, alloc_ref); + //@ open_frac_borrow(k, ref_initialized_(alloc_ref), 1/4); + //@ open [?f]ref_initialized_::(alloc_ref)(); + let r; + { + //@ let_lft 'b = k; + //@ std::alloc::close_Allocator_ref::<'b, A>(t, alloc_ref); + r = Some(Box_into_inner_with_ref_Allocator__VeriFast_wrapper(Box::from_raw_in/*@::, &'b A>@*/(node.as_ptr(), &self.list.alloc)).element); + } + //@ close [f]ref_initialized_::(alloc_ref)(); + //@ close_frac_borrow(f, ref_initialized_(alloc_ref)); + //@ end_lifetime(k); + //@ borrow_end(k, std::alloc::Allocator_full_borrow_content_::(t, &(*(*self).list).alloc, alloc_id)); + //@ std::alloc::open_Allocator_full_borrow_content_(t, &(*(*self).list).alloc, alloc_id); + + //@ close ExtractIf_fbc::(t, self1.list, ghost_cell_id, self1.old_len)(); + //@ close_full_borrow(ExtractIf_fbc(t, self1.list, ghost_cell_id, self1.old_len)); + //@ close >.own(t, *self); + //@ close >.own(t, r); + return r + } + + //@ assert Nodes(_, ?head, None, _, self1.it, ?nodes1); + //@ Nodes_append_one_(head); + //@ close foreach([], elem_fbc::(t)); + //@ close elem_fbc::(t)(node); + //@ close foreach([node], elem_fbc::(t)); + //@ foreach_append(nodes1, [node]); + //@ close ExtractIf_fbc::(t, self1.list, ghost_cell_id, self1.old_len)(); + //@ close_full_borrow(ExtractIf_fbc(t, self1.list, ghost_cell_id, self1.old_len)); + //@ close >.own(t, *self); } + } else { + break; } } - + //@ close >.own(t, None); None } diff --git a/verifast-proofs/check-verifast-proofs.sh b/verifast-proofs/check-verifast-proofs.sh index 
50332c2a3b179..9fafa70176bd0 100644 --- a/verifast-proofs/check-verifast-proofs.sh +++ b/verifast-proofs/check-verifast-proofs.sh @@ -3,7 +3,7 @@ set -e -x cd alloc cd collections cd linked_list.rs - verifast -rustc_args "--edition 2021 --cfg test" -skip_specless_fns verified/lib.rs + verifast -rustc_args "--edition 2021 --cfg test" -skip_specless_fns -ignore_unwind_paths -allow_assume verified/lib.rs refinement-checker --rustc-args "--edition 2021 --cfg test" original/lib.rs verified/lib.rs > /dev/null if ! diff original/linked_list.rs ../../../../library/alloc/src/collections/linked_list.rs; then echo "::error title=Please run verifast-proofs/patch-verifast-proofs.sh::Some VeriFast proofs are out of date; please chdir to verifast-proofs and run patch-verifast-proofs.sh to update them."