Handle comments inside &&&-bulleted blocks better #82

Merged · 3 commits · Jul 19, 2024
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,7 @@
# Unreleased

* Handle comments inside `&&&`/`|||`-bulleted blocks better

# v0.3.8

* Support dividing statement lists into clauses/stanzas
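Every hunk that follows shows the same fix: previously, an inline comment on a bullet caused a blank line to be emitted after it, splitting the `&&&`/`|||` chain apart; with this change the chain stays contiguous. A distilled before/after sketch of the new behavior, taken verbatim from the first ironfleet.rs hunk below:

    // Formerly emitted: the inline comment forced a blank line into the chain.
    &&& netc.ok() // port of env.ok.ok()

    &&& host_state.invariants(&netc.my_end_point())

    // Now emitted: the comment stays attached to its bullet and the chain is unbroken.
    &&& netc.ok() // port of env.ok.ok()
    &&& host_state.invariants(&netc.my_end_point())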
18 changes: 1 addition & 17 deletions examples/ironfleet.rs
@@ -3253,7 +3253,6 @@ impl HostState {
None => true,
Some(host_state) => {
&&& netc.ok() // port of env.ok.ok()

&&& host_state.invariants(&netc.my_end_point())
&&& crate::host_protocol_t::init(
host_state@,
@@ -3584,7 +3583,6 @@ impl HostState {
&&& self.abstractable()
&&& self.delegation_map.valid()
// TODO why no valid_key?

&&& (forall|k| [email protected]().contains(k) ==> #[trigger] valid_value(self.h@[k]))
&&& self.sd.valid()
&&& match &self.received_packet {
@@ -3609,7 +3607,6 @@ impl HostState {
&&& [email protected]()
&&& self.num_delegations
< self.constants.params.max_delegations // why did we move this here?

&&& self.constants.params@ == AbstractParameters::static_params()
&&& self.resend_count < 100000000
}
@@ -4088,13 +4085,11 @@ impl HostState {
== netc.ok()
// Because all `net_events` are sends, the condition "even if ok is false, if we sent at least one
// packet..." is implied by "even if ok is false, if `net_events` has length > 0...".

&&& (ok || event_results.sends.len() > 0) ==> netc.history() == old_netc_history
+ event_results.ios
// There's supposed to be a distinction between the ios that we intended to do and the
// event_seq that we actually did. (See EventResult definition.) But in the interest of
// mimicking Dafny Ironfleet, we make no such distinction.

&&& event_results.ios == event_results.event_seq()
&&& event_results.well_typed_events()
&&& ok ==> {
@@ -4196,7 +4191,6 @@ impl HostState {
)
// The Dafny Ironfleet "common preconditions" take an explicit cpacket, but we need to talk
// about

&&& self.host_state_common_postconditions(*old(self), cpacket, sent_packets@)
}),
{
@@ -5681,7 +5675,6 @@ pub open spec(checked) fn receive_packet(
||| {
&&& pre.received_packet is None // No packet currently waiting to be processed (buffered in my state)
// Record incoming packet in my state and possibly ack it

&&& SingleDelivery::receive(pre.sd, post.sd, pkt, ack, out)
&&& if SingleDelivery::new_single_message(pre.sd, pkt) {
post.received_packet == Some(pkt) // Enqueue this packet for processing
@@ -5695,8 +5688,7 @@
..post
} // Nothing else changes

}
||| {
}||| {
// internal buffer full or okay to ignore packets; drop this message and wait for it to be retransmitted.
&&& pre.received_packet is Some || okay_to_ignore_packets()
&&& post == pre
@@ -9754,11 +9746,8 @@ pub fn receive_with_demarshal(netc: &mut NetClient, local_addr: &EndPoint) -> (r
&&& (rr.is_Packet() ==> {
&&& [email protected]_Receive()
&&& true // NetPacketIsAbstractable is true

&&& rr.get_Packet_cpacket().abstractable() // can parse u8s up to NetEvent.

&&& true // EndPointIsValidPublicKey

&&& !(rr.get_Packet_cpacket()@.msg is InvalidMessage) ==> {
&&& rr.get_Packet_cpacket()@ == abstractify_net_packet_to_sht_packet(
[email protected]_Receive_r(),
@@ -9834,9 +9823,7 @@ fn take_buf(buf: &mut Vec<u8>) {
/// valid()
pub open spec fn outbound_packet_is_valid(cpacket: &CPacket) -> bool {
&&& cpacket.abstractable() // CPacketIsAbstractable

&&& cpacket.msg.is_marshalable() // CSingleMessageMarshallable

&&& (
!cpacket.msg.is_InvalidMessage()) // (out.msg.CSingleMessage? || out.msg.CAck?)

@@ -9845,7 +9832,6 @@ pub open spec fn outbound_packet_is_valid(cpacket: &CPacket) -> bool {
pub open spec fn send_log_entry_reflects_packet(event: NetEvent, cpacket: &CPacket) -> bool {
&&& event.is_Send()
&&& true // NetPacketIsAbstractable == EndPointIsAbstractable == true

&&& cpacket.abstractable()
&&& cpacket@ == abstractify_net_packet_to_sht_packet(event.get_Send_s())
}
@@ -10296,7 +10282,6 @@ pub enum ReceiveImplResult {
pub open spec fn valid_ack(ack: CPacket, original: CPacket) -> bool {
&&& ack.abstractable()
&&& outbound_packet_is_valid(&ack) // how does this relate to abstractable?

&&& ack.src@ == original.dst@
&&& ack.dst@ == original.src@
}
@@ -11455,7 +11440,6 @@ impl<MT> SingleDelivery<MT> {
/// Protocol/SHT/SingleDelivery.i.dfy ShouldAckSingleMessage
pub open spec(checked) fn should_ack_single_message(self, pkt: Packet) -> bool {
&&& pkt.msg is Message // Don't want to ack acks

&&& {
let last_seqno = tombstone_table_lookup(pkt.src, self.receive_state);
pkt.msg.get_Message_seqno() <= last_seqno
30 changes: 0 additions & 30 deletions examples/nr.rs
@@ -512,20 +512,17 @@ spec fn state_refinement_relation_basic<DT: Dispatch>(
&&& (0 <= s.version && s.version
<= s.log.len())
// the state corresponds to the state computed at the given version

&&& t.state == s.nrstate_at_version(
s.version,
)
// the request ids of the readonly/update requests and responses must be unique

&&& s.readonly_reqs.dom().disjoint(s.update_reqs.dom())
&&& s.readonly_reqs.dom().disjoint(s.update_resps.dom())
&&& s.update_reqs.dom().disjoint(s.update_resps.dom())
&&& t.reqs.dom().disjoint(
t.resps.dom(),
)
// requests are complete: if a request is present in the AState then it must be present in the SState

&&& (forall|rid|
(#[trigger] s.readonly_reqs.contains_key(rid) || #[trigger] s.update_reqs.contains_key(rid)
|| #[trigger] s.update_resps.contains_key(rid)) <==> (#[trigger] t.reqs.contains_key(
@@ -534,7 +531,6 @@ spec fn state_refinement_relation_basic<DT: Dispatch>(
rid,
)))
// requests/responses in the right maps

&&& (forall|rid| #[trigger]
t.reqs.contains_key(rid) && #[trigger] t.reqs[rid].is_Read()
==> s.readonly_reqs.contains_key(rid))
@@ -547,40 +543,34 @@ spec fn state_refinement_relation_basic<DT: Dispatch>(
rid,
))
// for all log entries > version, there must be a response with the given version

&&& (forall|v: LogIdx|
s.version <= v && v < s.log.len() ==> update_response_with_version(
s.update_resps,
v,
))
// for any two update responses, if the request id differs, the version in the log must also differ

&&& (forall|rid1, rid2| #[trigger]
s.update_resps.contains_key(rid1) && #[trigger] s.update_resps.contains_key(rid2) && rid1
!= rid2 ==> s.update_resps[rid1]
!= s.update_resps[rid2])
// for all update responses, the version must be within the log

&&& (forall|rid| #[trigger]
s.update_resps.contains_key(rid) ==> s.update_resps[rid].0
< s.log.len())
// for all update requests, they must be part of the requests and the operation must match

&&& (forall|rid| #[trigger]
s.update_reqs.contains_key(rid) ==> t.reqs.contains_key(rid) && t.reqs[rid]
== InputOperation::<DT>::Write(
s.update_reqs[rid],
))
// forall update responses larger than the current version, they must be in the requests,
// the update operation must match

&&& (forall|rid| #[trigger]
s.update_resps.contains_key(rid) && s.update_resps[rid].0 >= s.version ==> {
&&& t.reqs.contains_key(rid)
&&& t.reqs[rid] == InputOperation::<DT>::Write(s.log[s.update_resps[rid].0 as int])
})
// for all update responses smaller than the version, they must be valid

&&& (forall|rid| #[trigger]
s.update_resps.contains_key(rid) && s.update_resps[rid].0 < s.version
==> update_response_is_valid(s, t, r_points, rid))
@@ -3116,13 +3106,11 @@ pub open spec fn LogRangeMatchesQueue<DT: Dispatch>(
&&& (logIndexLower == logIndexUpper ==> queueIndex
== queue.len())
// otherwise, we check the log

&&& (logIndexLower < logIndexUpper ==> {
&&& log.contains_key(
logIndexLower,
)
// local case: the entry has been written by the local node

&&& (log.index(logIndexLower).node_id == nodeId ==> {
// there must be an entry in the queue that matches the log entry
&&& queueIndex < queue.len()
@@ -3140,7 +3128,6 @@ pub open spec fn LogRangeMatchesQueue<DT: Dispatch>(
)
})
// remote case: the entry has been written by a remote node, there is nothing to match, recurse

&&& (log.index(logIndexLower).node_id != nodeId ==> LogRangeMatchesQueue(
queue,
log,
@@ -3169,21 +3156,18 @@ pub open spec fn LogRangeMatchesQueue2<DT: Dispatch>(
&&& (logIndexLower == logIndexUpper ==> queueIndex
== queue.len())
// otherwise, we check the log

&&& (logIndexLower < logIndexUpper ==> {
&&& log.contains_key(
logIndexLower,
)
// local case: the entry has been written by the local node

&&& (log.index(logIndexLower).node_id == nodeId ==> {
// there must be an entry in the queue that matches the log entry
&&& queueIndex
< queue.len()
// &&& updates.contains_key(queue.index(queueIndex as int))
// &&& updates.index(queue.index(queueIndex as int)).is_Placed()
// &&& updates.index(queue.index(queueIndex as int)).get_Placed_idx() == logIndexLower

&&& LogRangeMatchesQueue2(
queue,
log,
@@ -3195,7 +3179,6 @@ pub open spec fn LogRangeMatchesQueue2<DT: Dispatch>(
)
})
// remote case: the entry has been written by a remote node, there is nothing to match, recurse

&&& (log.index(logIndexLower).node_id != nodeId ==> LogRangeMatchesQueue2(
queue,
log,
@@ -7410,7 +7393,6 @@ impl<DT: Dispatch> NrLog<DT> {
//
// The following will result in a resource limit exceeded
//

&&& log_entries[i]@.value.op
== operations[i as int]
//
@@ -8205,7 +8187,6 @@ impl<DT: Dispatch> NrLogAppendExecDataGhost<DT> {
&&& self.combiner@@.value.is_Placed() ==> self.pre_exec(responses)
&&& self.cb_combiner@@.value
== pre.cb_combiner@@.value // other fields in common_pred

&&& self.request_ids == pre.request_ids
}

@@ -9925,14 +9906,12 @@ impl<DT: Dispatch> ThreadToken<DT> {
&&& (self.tid as nat)
< MAX_THREADS_PER_REPLICA
// &&& self.fc_client@@.instance == fc_inst

&&& self.batch_perm@@.value.is_None()
&&& self.fc_client@@.key == self.tid as nat
}

pub open spec fn wf(&self, replica: &Replica<DT>) -> bool {
&&& self.wf2(replica.spec_id() + 1) // +1 here because ids got < replicas

&&& self.rid@ == replica.spec_id()
&&& self.fc_client@@.instance == replica.flat_combiner_instance
&&& self.batch_perm@@.pcell == replica.contexts[self.thread_id_spec() as int].batch.0.id()
@@ -10695,11 +10674,9 @@ impl<DT: Dispatch + Sync> crate::NR<DT> for NodeReplicated<DT> {
&&& self.cyclic_buffer_instance@
== self.log.cyclic_buffer_instance@
// the number of replicas should be as configured

&&& self.replicas.len()
<= MAX_REPLICAS
// the replicas should be well-formed and the instances match

&&& (forall|i|
0 <= i < self.replicas.len() ==> {
&&& (#[trigger] self.replicas[i]).wf()
@@ -11465,7 +11442,6 @@ pub open spec fn is_readonly_ticket<DT: Dispatch>(
&&& [email protected]_Init() && [email protected]_Init_op()
== op
// requires ticket.loc == TicketStubSingletonLoc.loc()

&&& [email protected] == log
}

@@ -11480,7 +11456,6 @@ pub open spec fn is_readonly_stub<DT: Dispatch>(
&&& [email protected]
== log
// ensures ssm.IsStub(rid, output, stub.val) -> (exists ctail, op, nodeid :: stub == ReadOp(rid, ReadonlyDone(op, output, nodeid, ctail)))

&&& [email protected] == rid
&&& [email protected]_Done()
&&& [email protected]_Done_ret() == result
@@ -11496,7 +11471,6 @@ pub open spec fn is_update_ticket<DT: Dispatch>(
&&& [email protected]_Init() && [email protected]_Init_op()
== op
// requires ticket.loc == TicketStubSingletonLoc.loc()

&&& [email protected] == log
}

@@ -11511,7 +11485,6 @@ pub open spec fn is_update_stub<DT: Dispatch>(
&&& [email protected]
== log
// ensures ssm.IsStub(rid, output, stub.val) -> (exists log_idx :: stub == UpdateOp(rid, UpdateDone(output, log_idx)))

&&& [email protected] == rid
&&& [email protected]_Done()
&&& [email protected]_Done_ret() == result
@@ -11937,19 +11910,16 @@ pub open spec fn behavior_equiv<DT: Dispatch>(
||| (a.is_Inited()
&& b.is_Inited())
// || (a.Stepped? && a.op.InternalOp? && equiv(a.tail, b))

||| (a.is_Stepped() && a.get_Stepped_1().is_Internal() && behavior_equiv(
*a.get_Stepped_2(),
b,
))
// || (b.Stepped? && b.op.InternalOp? && equiv(a, b.tail))

||| (b.is_Stepped() && b.get_Stepped_1().is_Internal() && behavior_equiv(
a,
*b.get_Stepped_2(),
))
// || (a.Stepped? && b.Stepped? && a.op == b.op && equiv(a.tail, b.tail))

||| (a.is_Stepped() && b.is_Stepped() && a.get_Stepped_1() == b.get_Stepped_1()
&& behavior_equiv(*a.get_Stepped_2(), *b.get_Stepped_2()))
}