From 6030448391b064ec79b4ecd071472ed3787a222f Mon Sep 17 00:00:00 2001 From: Shilpi Goel Date: Mon, 26 Aug 2024 00:10:51 -0500 Subject: [PATCH 01/18] Add another proof method to obtain partial correctness, and use it to prove Abs --- Correctness/ArmSpec.lean | 4 +- Correctness/Correctness.lean | 248 ++++++++++++++++-- Proofs/Experiments/{ => Abs}/Abs.lean | 0 Proofs/Experiments/{ => Abs}/AbsVCG.lean | 0 Proofs/Experiments/Abs/AbsVCGTandem.lean | 309 +++++++++++++++++++++++ Proofs/Proofs.lean | 5 +- 6 files changed, 542 insertions(+), 24 deletions(-) rename Proofs/Experiments/{ => Abs}/Abs.lean (100%) rename Proofs/Experiments/{ => Abs}/AbsVCG.lean (100%) create mode 100644 Proofs/Experiments/Abs/AbsVCGTandem.lean diff --git a/Correctness/ArmSpec.lean b/Correctness/ArmSpec.lean index 76e54fa9..fb7b9576 100644 --- a/Correctness/ArmSpec.lean +++ b/Correctness/ArmSpec.lean @@ -3,8 +3,8 @@ Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. Released under Apache 2.0 license as described in the file LICENSE. Author(s): Shilpi Goel -Specializing the Correctness module for use with our specification of the Arm -ISA. +Specializing the Correctness module for use with our specification of +the Arm ISA. -/ import Arm.Exec diff --git a/Correctness/Correctness.lean b/Correctness/Correctness.lean index 7b6ef2c4..1de2100f 100644 --- a/Correctness/Correctness.lean +++ b/Correctness/Correctness.lean @@ -21,6 +21,8 @@ open Spec' (cut assert) open Iterate (iterate iterate_eq) open Classical +---------------------------------------------------------------------- + theorem not_forall_eq_exists_not (p : α → Prop) : (¬ ∀ x, p x) = ∃ x, ¬ p x := by apply propext constructor @@ -35,30 +37,52 @@ theorem not_forall_eq_exists_not (p : α → Prop) : (¬ ∀ x, p x) = ∃ x, ¬ have := h₂ a contradiction -/- Result from page 2: I1, I2, I3 implies Partial Correctness. -/ -theorem by_inv [Sys σ] [Spec σ] (inv : σ → Prop) - (ini : ∀ s0, pre s0 → inv s0) -- I1 - (step : ∀ s, inv s → ¬ exit s → inv (next s)) -- I2 - (stop : ∀ s0 sf, inv sf → exit sf → post s0 sf) -- I3 +---------------------------------------------------------------------- + +/-- +Prove partial correctness using stepwise invariants. + +We use `s0`, `si`, and `sf` to refer to initial, intermediate, and +final (exit) states respectively. + +This is the result from page 2 of the paper. Note that the `inv` +function must specify an invariant for _each_ machine instruction, +which makes this proof method tedious to use for larger programs. + +Also see `partial_correctness_from_verification_conditions` and +`partial_correctness_from_assertions`. 
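+
+A schematic use, for a hypothetical system `σ` with `[Sys σ]` and `[Spec σ]`
+instances and a stepwise invariant `myInv` (all of the names below are
+illustrative only, not part of this development):
+
+```
+theorem my_partial_correctness : PartialCorrectness σ :=
+  partial_correctness_by_stepwise_invariants myInv
+    (ini  := fun s0 h_pre => sorry)               -- I1: `myInv` holds initially
+    (step := fun s0 si h_inv h_not_exit => sorry) -- I2: `myInv` is preserved by `next`
+    (stop := fun s0 sf h_inv h_exit => sorry)     -- I3: `myInv` at an exit implies `post`
+```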
+-/ +theorem partial_correctness_by_stepwise_invariants [Sys σ] [Spec σ] (inv : σ → σ → Prop) + (ini : ∀ s0, pre s0 → inv s0 s0) -- I1 + (step : ∀ s0 si, inv s0 si → ¬ exit si → inv s0 (next si)) -- I2 + (stop : ∀ s0 sf, inv s0 sf → exit sf → post s0 sf) -- I3 : PartialCorrectness σ := - fun s n hp he => - let rec find (i : Nat) (h : inv (run s i)) (hle : i ≤ n) - : ∃ m : Nat, m ≤ n ∧ exit (run s m) ∧ post s (run s m) := + fun s0 n hp he => + let rec find (i : Nat) (h : inv s0 (run s0 i)) (hle : i ≤ n) + : ∃ m : Nat, m ≤ n ∧ exit (run s0 m) ∧ post s0 (run s0 m) := if hn : i < n then - if he : exit (run s i) then + if he : exit (run s0 i) then ⟨i, Nat.le_of_lt hn, he, stop _ _ h he⟩ else - have : inv (run s (i+1)) := by - rw [← next_run]; exact step _ h he + have : inv s0 (run s0 (i+1)) := by + rw [← next_run]; exact step _ _ h he find (i+1) this hn else have := Nat.ge_of_not_lt hn have := Nat.le_antisymm this hle - have hinv : inv (run s n) := by subst this; assumption - have hpost : post s (run s n) := stop _ _ hinv he + have hinv : inv s0 (run s0 n) := by subst this; assumption + have hpost : post s0 (run s0 n) := stop _ _ hinv he ⟨n, Nat.le_refl _, he, hpost⟩ find 0 (ini _ hp) (Nat.zero_le _) +---------------------------------------------------------------------- + +-- Prove partial correctness using inductive assertions using the +-- functions `csteps` (to characterize the number of steps to reach +-- the next cutpoint) and `nextc` (to characterize the next cutpoint +-- state). Note that the function `csteps` is partial: if no cutpoint +-- is reachable from `s`, then the recursion does not terminate. + noncomputable def csteps [Sys σ] [Spec' σ] (s : σ) (i : Nat) : Nat := iterate (fun (s, i) => if cut s then .inl i else .inr (next s, i + 1)) (s, i) @@ -110,10 +134,10 @@ theorem csteps_not_cut [Sys σ] [Spec' σ] {s : σ} (h₁ : ¬ cut s) assumption /-- -Helper theorem. Given a state `s` and `cut (run s n)` for `n`, then there is -`k` such that `csteps s 0 = k` and `k ≤ n`. -Note that `cut (run s n)` ensures `csteps s 0` terminates because we will find a -`cut` in at most `n` steps. +Helper theorem. Given a state `s` and `cut (run s n)` for `n`, then +there is `k` such that `csteps s 0 = k` and `k ≤ n`. +Note that `cut (run s n)` ensures `csteps s 0` terminates because we +will find a `cut` in at most `n` steps. -/ theorem find_next_cut [Sys σ] [Spec' σ] (s : σ) (hc : cut (run s n)) : ∃ k : Nat, csteps s 0 = k ∧ k ≤ n ∧ cut (run s k) := @@ -133,10 +157,23 @@ theorem find_next_cut [Sys σ] [Spec' σ] (s : σ) (hc : cut (run s n)) : loop s 0 (Nat.zero_le ..) rfl /-- -Theorem 1 from page 5. If V1-V4 hold, then we have partial correctness. +Prove partial correctness using inductive assertions and functions +`csteps` and `nextc`. + +We use `s0`, `si`, and `sf` to refer to initial, intermediate, and +final (exit) states respectively. -We use `s0`, `si`, and `sf` to refer to initial, intermediate, and final (exit) -states respectively. +This is Theorem 1 from page 5 of the paper. This proof method is more +convenient to use than `partial_correctness_by_stepwise_invariants` +because we need only attach assertions at certain cutpoints. However, +it may still be tedious to use from the point of view of automation +because it is difficult to both symbolically simulate an instruction +and unwind `csteps` in tandem. 
So far, we have found that it is +easiest to determine what concrete value `csteps` yields (via symbolic +simulation), and then perform symbolic simulation -- however, then we +end up doing simulation twice, which is expensive. + +Also see `partial_correctness_from_assertions`. -/ theorem partial_correctness_from_verification_conditions [Sys σ] [Spec' σ] (v1 : ∀ s0 : σ, pre s0 → assert s0 s0) @@ -175,4 +212,175 @@ theorem partial_correctness_from_verification_conditions [Sys σ] [Spec' σ] ⟨n, Nat.le_refl _, hexit, hpost⟩ find 0 (v1 s0 hp) (Nat.zero_le ..) +---------------------------------------------------------------------- + +-- Prove partial correctness using inductive assertions using the +-- function `cassert` that checks if `assert` holds if at a cutpoint +-- state, or else recurs until it hits one. This function is also +-- partial: if no cutpoint is reachable, the recursion doesn't +-- terminate. + +/-- +`cassert s0 si i`: if this function terminates, it returns a pair, +whose first element is a `Nat = (i + the number of steps simulated +from `si` to reach the next cutpoint)`, and the second element is a +`Prop` that checks whether `assert` holds at that cutpoint state. +-/ +noncomputable def cassert [Sys σ] [Spec' σ] (s0 si : σ) (i : Nat) : Nat × Prop := + iterate (fun (si, i) => if cut si then .inl (i, assert s0 si) else .inr (next si, i + 1)) (si, i) + +theorem cassert_eq [Sys σ] [Spec' σ] (s0 si : σ) (i : Nat) + : cassert s0 si i = if cut si then (i, assert s0 si) + else cassert s0 (next si) (i + 1) := by + unfold cassert + conv => lhs; rw [iterate_eq] + by_cases cut si <;> simp [*] + done + +theorem cassert_cut [Sys σ] [Spec' σ] {s0 si : σ} (h : cut si) (i : Nat) : + (cassert s0 si i).fst = i ∧ + (cassert s0 si i).snd = assert s0 si := by + rw [cassert_eq] + simp [*] + done + +theorem cassert_not_cut [Sys σ] [Spec' σ] {s0 si : σ} (h₁ : ¬ cut si) + (h₂ : (cassert s0 (next si) (i+1)).fst = j) : + (cassert s0 si i).fst = j ∧ + (cassert s0 si i).snd = (cassert s0 (next si) (i + 1)).snd := by + rw [cassert_eq] + simp [h₁] + assumption + done + +theorem cassert_lower_bound [Sys σ] [Spec' σ] {s0 si : σ} (n : Nat) + (h0 : cut (run si n)) + (h1 : (cassert s0 si i).fst = j) : + i <= j := by + induction n generalizing i si + case zero => + unfold run at h0 + rw [cassert_eq] at h1 + simp_all only [ite_true, Nat.le_refl] + case succ => + rename_i _ _ n' h_inv + rw [run_succ] at h0 + rw [cassert_eq] at h1 + split at h1 + · omega + · have h_inv' := @h_inv (i + 1) (next si) h0 h1 + omega + done + +theorem find_next_cut_for_cassert [Sys σ] [Spec' σ] (s0 si : σ) (hc : cut (run si n)) : + ∃ k : Nat, (cassert s0 si 0).fst = k ∧ + (cassert s0 si 0).snd = assert s0 (run si k) ∧ + k ≤ n ∧ + cut (run si k) := + let rec loop (s' : σ) (i : Nat) (hle : i ≤ n) (heq : s' = run si i) : + ∃ k : Nat, (cassert s0 s' i).fst = k ∧ + (cassert s0 s' i).snd = assert s0 (run s' (k - i)) ∧ + k ≤ n ∧ cut (run si k) := + if hn : i < n then + if hc : cut s' then + have := cassert_cut hc i + have h_i_i : i - i = 0 := by simp only [Nat.sub_self] + ⟨i, this.left, h_i_i ▸ this.right, hle, by subst s'; assumption⟩ + else + have ⟨k, hs, hkn, hck⟩ := loop (next s') (i+1) hn (by simp [heq, run_succ']) + have h_not_cut := (cassert_not_cut hc hs).right + have : (cassert s0 s' i).snd = assert s0 (run s' (k - i)) := by + simp only [h_not_cut, hkn] + have h_next_cut : cut (run (next s') (n - i - 1)) := by + rw [heq] + have : (next (run si i)) = run (run si i) 1 := by + simp only [run] + simp only [this] + have : (run (run (run si 
i) 1) (n - i - 1)) = run si n := by + repeat (rw [run_run]) + have : (i + (1 + (n - i - 1))) = n := by omega + rw [this] + done + simpa only [this] + have h_lb := @cassert_lower_bound _ (i + 1) k _ _ s0 + (next s') (n - i - 1) h_next_cut hs + conv => + rhs + unfold run + have h_k_i : ¬ k - i = 0 := by omega + split + · contradiction + · rename_i x y h_k_i_y + simp at h_k_i_y + have : (k - (i + 1)) = k - i - 1 := by omega + simp [this, h_k_i_y] + done + ⟨k, (cassert_not_cut hc hs).left, this, hck.left, hck.right⟩ + else + have hin : i = n := by omega + have h_left : (cassert s0 s' i).fst = n := by + have := @cassert_cut _ _ _ s0 _ hc n + subst hin + rw [heq] + simp only [this] + have h_right : (cassert s0 s' i).snd = assert s0 (run s' (n - i)) := by + simp only [Nat.sub_self, eq_iff_iff] + have := @cassert_cut _ _ _ s0 _ hc + subst hin + rw [heq] + simp only [this, Nat.sub_self, eq_iff_iff, run] + done + ⟨n, h_left, h_right, Nat.le_refl .., by assumption⟩ + loop si 0 (Nat.zero_le ..) rfl + + +/-- +Prove partial correctness from inductive assertions using `cassert` +function. + +We use `s0`, `si`, and `sf` to refer to initial, intermediate, and +final (exit) states respectively. + +This is more convenient to use that +`partial_correctness_from_inductive_assertions` because we can do +symbolic simulation and open `cassert` in tandem. +-/ +theorem partial_correctness_from_assertions [Sys σ] [Spec' σ] + (v1 : ∀ s0 : σ, pre s0 → assert s0 s0) + (v2 : ∀ sf : σ, exit sf → cut sf) + (v3 : ∀ s0 sf : σ, assert s0 sf → exit sf → post s0 sf) + (v4 : ∀ s0 si : σ, assert s0 si → ¬ exit si → (cassert s0 (run si 1) 0).snd) + : PartialCorrectness σ := + fun s0 n hp hexit => + let rec find (i : Nat) (h : assert s0 (run s0 i)) (hle : i ≤ n) : + ∃ m : Nat, m ≤ n ∧ exit (run s0 m) ∧ post s0 (run s0 m) := + if hn : i < n then + if he : exit (run s0 i) then + ⟨i, Nat.le_of_lt hn, he, v3 _ _ h he⟩ + else + have : cut (run (run s0 (i + 1)) (n - Nat.succ i)) := by + rw [run_run, Nat.add_one, Nat.add_sub_cancel' hn] + exact v2 _ hexit + + have ⟨k, _hk, hlek, hck⟩ := find_next_cut_for_cassert s0 (run s0 (i+1)) this + have hle' : i + 1 + k ≤ n := by + omega + have : (cassert s0 (next (run s0 i)) 0).snd := v4 _ _ h he + have h' : assert s0 (run s0 (i + 1 + k)) := by + rw [run_run] at hck + rw [next_run] at this + rw [run_run] at hlek + simp_all only [Nat.succ_eq_add_one, eq_iff_iff, true_iff] + done + have : n - (i + 1 + k) < n - i := by + apply Nat.sub_lt_sub_left; assumption; simp_arith + find (i + 1 + k) h' hle' + else + have := Nat.ge_of_not_lt hn + have := Nat.le_antisymm this hle + have ha : assert s0 (run s0 n) := by subst this; assumption + have hpost : post s0 (run s0 n) := v3 _ _ ha hexit + ⟨n, Nat.le_refl _, hexit, hpost⟩ + find 0 (v1 s0 hp) (Nat.zero_le ..) + end Correctness diff --git a/Proofs/Experiments/Abs.lean b/Proofs/Experiments/Abs/Abs.lean similarity index 100% rename from Proofs/Experiments/Abs.lean rename to Proofs/Experiments/Abs/Abs.lean diff --git a/Proofs/Experiments/AbsVCG.lean b/Proofs/Experiments/Abs/AbsVCG.lean similarity index 100% rename from Proofs/Experiments/AbsVCG.lean rename to Proofs/Experiments/Abs/AbsVCG.lean diff --git a/Proofs/Experiments/Abs/AbsVCGTandem.lean b/Proofs/Experiments/Abs/AbsVCGTandem.lean new file mode 100644 index 00000000..7d36e2e1 --- /dev/null +++ b/Proofs/Experiments/Abs/AbsVCGTandem.lean @@ -0,0 +1,309 @@ +/- +Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Released under Apache 2.0 license as described in the file LICENSE. 
+Author(s): Shilpi Goel + +Experimental: Use the Correctness module to prove that this program implements +absolute value correctly. The objective of this exercise is to determine the +"shape" of the lemmas that are needed for automation and/or to modify the +definitions in Correctness, if needed. +-/ +import Arm +import Tactics.StepThms +import Tactics.Sym +import Correctness.ArmSpec + +namespace AbsVCGTandem + +def program : Program := + def_program + [ + (0x4005d0#64, 0x2a0003e1#32), -- mov w1, w0 + (0x4005d4#64, 0x131f7c00#32), -- asr w0, w0, #31 + (0x4005d8#64, 0x0b000021#32), -- add w1, w1, w0 + (0x4005dc#64, 0x4a000020#32), -- eor w0, w1, w0 + (0x4005e0#64, 0xd65f03c0#32)] -- ret + +/-- Precondition for the correctness of the `Abs` program. -/ +def abs_pre (s : ArmState) : Prop := + read_pc s = 0x4005d0#64 ∧ + s.program = program ∧ + read_err s = StateError.None ∧ + -- (FIXME) We don't really need the stack pointer to be aligned, but the + -- `sym_n` tactic expects this. Can we make this optional? + CheckSPAlignment s + +/-- Specification of the absolute value computation for a 32-bit bitvector. -/ +def spec (x : BitVec 32) : BitVec 32 := + -- We prefer the current definition as opposed to: + -- BitVec.ofNat 32 x.toInt.natAbs + -- because the above has functions like `toInt` that do not play well with + -- bitblasting/LeanSAT. + let msb := BitVec.extractLsb 31 31 x + if msb == 0#1 then + x + else + (0#32 - x) + +/-- Postcondition for the correctness of the `Abs` program. -/ +def abs_post (s0 sf : ArmState) : Prop := + read_gpr 32 0#5 sf = spec (read_gpr 32 0#5 s0) ∧ + read_pc sf = 0x4005e0#64 ∧ + read_err sf = StateError.None ∧ + CheckSPAlignment sf + +/-- Function identifying the exit state(s) of the program. -/ +def abs_exit (s : ArmState) : Prop := + -- (FIXME) Let's consider the state where we are poised to execute `ret` as an + -- exit state for now. + read_pc s = 0x4005e0#64 + +/-- Function identifying the cutpoints of the program. -/ +def abs_cut (s : ArmState) : Bool := + read_pc s = 0x4005d0#64 -- First instruction + || + read_pc s = 0x4005e0#64 -- Last instruction + +/-- Function that attaches assertions at the cutpoints of this program. -/ +def abs_assert (s0 si : ArmState) : Prop := + abs_pre s0 ∧ + if read_pc si = 0x4005d0#64 then + si = s0 + else if read_pc si = 0x4005e0#64 then + abs_post s0 si + else + False + +instance : Spec' ArmState where + pre := abs_pre + post := abs_post + exit := abs_exit + cut := abs_cut + assert := abs_assert + +theorem Abs.cassert_eq (s0 si : ArmState) (i : Nat) : + Correctness.cassert s0 si i = if abs_cut si then (i, abs_assert s0 si) + else Correctness.cassert s0 (run 1 si) (i + 1) := by + rw [Correctness.cassert_eq] + simp only [Sys.next, Spec'.cut, Spec'.assert, run] + done + +------------------------------------------------------------------------------- +-- Generating the program effects and non-effects + +-- set_option trace.gen_step.print_names true in +#genStepEqTheorems program + +-- (FIXME) Obtain *_cut theorems for each instruction automatically. 
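+-- Each `*_cut` theorem below captures one step of symbolic simulation: from a
+-- state at a fixed PC (with `program` loaded, no error, and an aligned SP),
+-- running one instruction yields the stated successor PC, preserves the
+-- error-freedom, program, and SP-alignment side conditions, and determines
+-- the value of `abs_cut` at the successor state (false for the intermediate
+-- instructions, true once the PC reaches 0x4005e0).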
+ +theorem program.stepi_0x4005d0_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x4005d0#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + abs_cut sn = false ∧ + r StateField.PC sn = 0x4005d4#64 ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x4005d0 h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, abs_cut, this, + state_simp_rules, bitvec_rules, minimal_theory] + done + +theorem program.stepi_0x4005d4_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x4005d4#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + abs_cut sn = false ∧ + r StateField.PC sn = 0x4005d8#64 ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x4005d4 h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, abs_cut, this, + state_simp_rules, bitvec_rules, minimal_theory] + done + +theorem program.stepi_0x4005d8_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x4005d8#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + abs_cut sn = false ∧ + r StateField.PC sn = 0x4005dc#64 ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x4005d8 h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, abs_cut, this, + state_simp_rules, bitvec_rules, minimal_theory] + done + +theorem program.stepi_0x4005dc_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x4005dc#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + abs_cut sn = true ∧ + r StateField.PC sn = 0x4005e0#64 ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x4005dc h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, abs_cut, this, + state_simp_rules, bitvec_rules, minimal_theory] + done + +------------------------------------------------------------------------------- + +theorem partial_correctness : + PartialCorrectness ArmState := by + apply Correctness.partial_correctness_from_assertions + case v1 => + intros s0 h_pre + simp only [Spec'.assert, abs_assert] + simp only [Spec.pre] at h_pre + simp_all only [abs_pre, minimal_theory] + case v2 => + intro sf h_exit + simp only [Spec.exit, abs_exit] at h_exit + simp only [Spec'.cut, abs_cut] + simp_all only [minimal_theory] + case v3 => + intro s0 sf + simp only [Spec'.assert, Spec.exit, Spec.post, abs_assert, abs_exit] + intros h1 h2 + simp_all (config := {decide := true}) only [minimal_theory] + case v4 => + intro s0 si h_assert h_exit + simp only [Correctness.arm_run] + simp [Spec.exit, abs_exit] at h_exit + simp only [Spec'.assert, abs_assert, h_exit, minimal_theory] at h_assert + obtain ⟨h_pre, h_assert_pc, h_assert_si_s0⟩ := h_assert + have ⟨h_s0_pc, h_s0_program, h_s0_err, h_s0_sp_aligned⟩ := h_pre + subst si + clear h_exit + + -- Begin: Symbolic Simulation + + -- Instruction 1 + -- The first instruction is a little special because we know we will a step + -- from the initial state: `assert` holds for the first cutpoint _after_ + -- 
this state. + -- However, the same tactic pattern works for simulating each of these + -- instructions. + rw [Abs.cassert_eq] + -- + -- Note: The `.fst` element of the conclusion tells us the number of steps + -- that have been simulated _after_ the first one; add one to this number to + -- get the total number of steps simulated from the initial state to reach + -- the next cutpoint. + -- + -- Introduce the next state variable `s1'` to avoid name clashes with the + -- state variable `s1` that `sym_n` introduces. + generalize h_s0_run : run 1 s0 = s1' + replace h_s0_run := h_s0_run.symm + sym_n 1 at s0 + have h_s1_cut := @program.stepi_0x4005d0_cut s0 s1 + h_s0_program h_s0_pc h_s0_err h_s0_sp_aligned + (by simp only [run, stepi_s0]) + -- Prove `s1'` and `s1` refer to the same state, and elide references to + -- `s1'`. + simp only [run_opener_zero] at h_s0_run + rw [h_s0_run] at * + -- Simplify the conclusion. + simp only [h_s1_cut, Nat.reduceAdd, minimal_theory] + clear s1' h_s0_run h_s1_cut stepi_s0 + + -- Instruction 2 + rw [Abs.cassert_eq] + -- We again use `s2'` to avoid name clashes with `s2` introduces by `sym_n`. + generalize h_s1_run : run 1 s1 = s2' + replace h_s1_run := h_s1_run.symm + sym_n 1 at s1 + have h_s2_cut := @program.stepi_0x4005d4_cut s1 s2 + h_s1_program h_s1_pc h_s1_err h_s1_sp_aligned + (by simp only [run, stepi_s1]) + -- Prove `s2'` and `s2` refer to the same state, and elide references to + -- `s2'`. + simp only [run_opener_zero] at h_s1_run + rw [h_s1_run] at * + -- Simplify the conclusion. + simp only [h_s2_cut, Nat.reduceAdd, minimal_theory] + clear s2' h_s1_run h_s2_cut stepi_s1 + + -- Instruction 3 + rw [Abs.cassert_eq] + generalize h_s2_run : run 1 s2 = s3' + replace h_s2_run := h_s2_run.symm + sym_n 1 at s2 + have h_s3_cut := @program.stepi_0x4005d8_cut s2 s3 + h_s2_program h_s2_pc h_s2_err h_s2_sp_aligned + (by simp only [run, stepi_s2]) + simp only [run_opener_zero] at h_s2_run + rw [h_s2_run] at * + simp only [h_s3_cut, Nat.reduceAdd, minimal_theory] + clear s3' h_s2_run h_s3_cut stepi_s2 + + -- Instruction 4 + rw [Abs.cassert_eq] + generalize h_s3_run : run 1 s3 = s4' + replace h_s3_run := h_s3_run.symm + sym_n 1 at s3 + have h_s4_cut := @program.stepi_0x4005dc_cut s3 s4 + h_s3_program h_s3_pc h_s3_err h_s3_sp_aligned + (by simp only [run, stepi_s3]) + simp only [run_opener_zero] at h_s3_run + rw [h_s3_run] at * + -- Note: from the conclusion, we see that we simulated 3 steps from s1 (or 4 + -- steps from s0) to reach the next cutpoint. We can use this to help + -- us come up with a clock in the termination proof. + simp only [h_s4_cut, Nat.reduceAdd, minimal_theory] + clear s4' h_s3_run h_s4_cut stepi_s3 + + -- End: Symbolic Simulation + + simp only [abs_assert] + simp only [h_pre, h_s4_pc, h_s4_err, h_s4_sp_aligned, + abs_post, state_simp_rules, bitvec_rules, minimal_theory] + -- Aggregate program effects here to obtain the value of x0(s4). + simp only [h_step_4, h_step_3, h_step_2, h_step_1, + state_simp_rules, bitvec_rules, minimal_theory] + simp (config := {ground := true}) only [AddWithCarry, spec] + split <;> bv_decide + done + +theorem termination : + Termination ArmState := by + simp [Termination, Spec.pre, Spec.exit, abs_exit, + state_simp_rules, bitvec_rules, minimal_theory] + intro s h_pre + have ⟨h_s0_pc, h_s0_program, h_s0_err, h_s0_sp_aligned⟩ := h_pre + -- While doing the partial correctness proof, we discovered that we need to + -- simulate 4 steps to reach an exit state. 
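+  -- (These four steps correspond to the instructions at 0x4005d0, 0x4005d4,
+  -- 0x4005d8, and 0x4005dc; after them the PC is 0x4005e0, the exit point.)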
+ apply Exists.intro 4 + simp only [Correctness.arm_run] + -- Unfortunately, after symbolically simulating the program to prove partial + -- correctness, we need to do that again to prove termination. + -- However, we can avoid this by providing a measure function and proving + -- that it decreases at each cutpoint. It'd be nice to have a more general + -- proof that having such a measure implies the `Termination` statement. + generalize h_run : run 4 s = sf + replace h_run := h_run.symm + sym_n 4 + simp only [run] at h_run + subst h_run + simp only [h_s4_pc] + done + +end AbsVCGTandem diff --git a/Proofs/Proofs.lean b/Proofs/Proofs.lean index a7132a87..4d304e64 100644 --- a/Proofs/Proofs.lean +++ b/Proofs/Proofs.lean @@ -13,7 +13,8 @@ import Proofs.Experiments.Summary1 import Proofs.Experiments.MemoryAliasing import Proofs.Experiments.SHA512MemoryAliasing import Proofs.Experiments.Max -import Proofs.Experiments.Abs -import Proofs.Experiments.AbsVCG +import Proofs.Experiments.Abs.Abs +import Proofs.Experiments.Abs.AbsVCG +import Proofs.Experiments.Abs.AbsVCGTandem import Proofs.Experiments.AddLoop import Proofs.Experiments.MemCpyVCG From ad37fb1832f9c7590a14ea0fe0488b40ab0e37f4 Mon Sep 17 00:00:00 2001 From: Shilpi Goel Date: Mon, 26 Aug 2024 15:25:25 -0500 Subject: [PATCH 02/18] Redo AddLoop proof using partial_correctness_from_assertions --- Proofs/Experiments/{ => AddLoop}/AddLoop.lean | 0 Proofs/Experiments/AddLoop/AddLoopTandem.lean | 551 ++++++++++++++++++ Proofs/Proofs.lean | 3 +- 3 files changed, 553 insertions(+), 1 deletion(-) rename Proofs/Experiments/{ => AddLoop}/AddLoop.lean (100%) create mode 100644 Proofs/Experiments/AddLoop/AddLoopTandem.lean diff --git a/Proofs/Experiments/AddLoop.lean b/Proofs/Experiments/AddLoop/AddLoop.lean similarity index 100% rename from Proofs/Experiments/AddLoop.lean rename to Proofs/Experiments/AddLoop/AddLoop.lean diff --git a/Proofs/Experiments/AddLoop/AddLoopTandem.lean b/Proofs/Experiments/AddLoop/AddLoopTandem.lean new file mode 100644 index 00000000..d94a1b06 --- /dev/null +++ b/Proofs/Experiments/AddLoop/AddLoopTandem.lean @@ -0,0 +1,551 @@ +/- +Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Author(s): Shilpi Goel + +Experimental: Use the Correctness module to prove that this program computes the +following via a naive loop that iterates over `x0`: +`x0 := x0 + x1` +-/ +import Arm +import Tactics.CSE +import Tactics.Sym +import Tactics.StepThms +import Correctness.ArmSpec + +namespace AddLoopTandem + +/- +Here is a C snippet that describes this program's behavior. + +while (x0 != 0) { + x1 := x1 + 1 + x0 := x0 - 1 +} +x0 := x1 +-/ +def program : Program := + def_program + [ + /- 00000000004005a4 : -/ + (0x4005a4#64, 0x14000003#32), /- b 4005b0 -/ + + /- 00000000004005a8 : -/ + (0x4005a8#64, 0x91000421#32), /- add x1, x1, #0x1 -/ + (0x4005ac#64, 0xd1000400#32), /- sub x0, x0, #0x1 -/ + + /- 00000000004005b0 : -/ + (0x4005b0#64, 0xf100001f#32), /- cmp x0, #0x0 -/ + (0x4005b4#64, 0x54ffffa1#32), /- b.ne 4005a8 -/ + (0x4005b8#64, 0xaa0103e0#32), /- mov x0, x1 -/ + (0x4005bc#64, 0xd65f03c0#32) /- ret -/ + ] + +/-- Precondition for the correctness of the Add program. -/ +def pre (s : ArmState) : Prop := + read_pc s = 0x4005a4#64 ∧ + s.program = program ∧ + read_err s = StateError.None ∧ + -- (FIXME) We don't really need the stack pointer to be aligned, but the + -- `sym_n` tactic expects this. Can we make this optional? 
+ CheckSPAlignment s + +/-- Specification function. -/ +def spec (x0 x1 : BitVec 64) : BitVec 64 := + x0 + x1 + +def post (s0 sf : ArmState) : Prop := + read_gpr 64 0#5 sf = spec (read_gpr 64 0#5 s0) (read_gpr 64 1#5 s0) ∧ + read_pc sf = 0x4005bc#64 ∧ + read_err sf = StateError.None ∧ + sf.program = program ∧ + -- (FIXME) We don't really need the stack pointer to be aligned, but the + -- `sym_n` tactic expects this. Can we make this optional? + CheckSPAlignment sf + +def exit (s : ArmState) : Prop := + -- (FIXME) Let's consider the state where we are poised to execute `ret` as an + -- exit state for now. + read_pc s = 0x4005bc#64 + +def cut (s : ArmState) : Bool := + -- First instruction + read_pc s = 0x4005a4#64 || + -- Loop guard (branch instruction) + read_pc s = 0x4005b4#64 || + -- First instruction following the loop + read_pc s = 0x4005b8#64 || + -- Last instruction + read_pc s = 0x4005bc#64 + +def loop_inv (s0 si : ArmState) : Prop := + let x0 := read_gpr 64 0#5 s0 + let x1 := read_gpr 64 1#5 s0 + let curr_x0 := read_gpr 64 0#5 si + let curr_x1 := read_gpr 64 1#5 si + let curr_zf := r (StateField.FLAG PFlag.Z) si + (curr_x0 <= x0) ∧ + ((curr_zf = 1#1) ↔ (curr_x0 = 0#64)) ∧ + (curr_x1 = x1 + (x0 - curr_x0)) ∧ + read_err si = .None ∧ + si.program = program ∧ + -- (FIXME) We don't really need the stack pointer to be aligned, but the + -- `sym_n` tactic expects this. Can we make this optional? + CheckSPAlignment si + +def loop_post (s0 si : ArmState) : Prop := + read_gpr 64 1#5 si = spec (read_gpr 64 0#5 s0) (read_gpr 64 1#5 s0) ∧ + read_err si = .None ∧ + si.program = program ∧ + -- (FIXME) We don't really need the stack pointer to be aligned, but the + -- `sym_n` tactic expects this. Can we make this optional? + CheckSPAlignment si + +def assert (s0 si : ArmState) : Prop := + pre s0 ∧ + -- Using `match` is preferable to `if` for the `split` tactic (and for + -- readability). + match (read_pc si) with + | 0x4005a4#64 => + -- First instruction + si = s0 + | 0x4005b4#64 => + -- Loop guard + loop_inv s0 si + | 0x4005b8#64 => + -- First instruction following the loop + loop_post s0 si + | 0x4005bc#64 => + -- Last instruction + post s0 si + | _ => False + +instance : Spec' ArmState where + pre := pre + post := post + exit := exit + cut := cut + assert := assert + +theorem AddLoop.csteps_eq (s : ArmState) (i : Nat) : + Correctness.csteps s i = if cut s then i + else Correctness.csteps (run 1 s) (i + 1) := by + rw [Correctness.csteps_eq] + simp only [Sys.next, Spec'.cut, run] + done + +------------------------------------------------------------------------------- +-- Generating the program effects and non-effects + +-- (FIXME) Print names of generated theorems. +-- set_option trace.gen_step.debug true in +#genStepEqTheorems program + +-- (FIXME) Obtain *_cut theorems for each instruction automatically. 
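+-- As in the Abs proof, each `*_cut` theorem below records, for a single
+-- program location, the PC of the successor state after one step, the facts
+-- that the error field stays `None` and that the program and SP alignment
+-- are preserved, and the value of `cut` at that successor state.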
+ +theorem program.stepi_0x4005a4_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x4005a4#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + cut sn = false ∧ + r StateField.PC sn = 0x4005b0#64 ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x4005a4 h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, cut, this, + state_simp_rules, bitvec_rules, minimal_theory] + done + +theorem program.stepi_0x4005a8_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x4005a8#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + cut sn = false ∧ + r StateField.PC sn = 0x4005ac#64 ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x4005a8 h_program h_pc h_err + simp only [minimal_theory, bitvec_rules] at this + simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] + done + +theorem program.stepi_0x4005ac_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x4005ac#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + cut sn = false ∧ + r StateField.PC sn = 0x4005b0#64 ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x4005ac h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] + done + +theorem program.stepi_0x4005b0_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x4005b0#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + cut sn = true ∧ + r StateField.PC sn = 0x4005b4#64 ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x4005b0 h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] + done + +-- Branch instruction! 
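+-- The conclusion below is conditional on the Z flag: if Z = 1, the `b.ne`
+-- falls through to 0x4005b8 (a cutpoint); otherwise it jumps back to
+-- 0x4005a8 inside the loop body, which is not a cutpoint.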
+theorem program.stepi_0x4005b4_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x4005b4#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + cut sn = (r (StateField.FLAG PFlag.Z) s = 1#1) ∧ + r StateField.PC sn = (if (r (StateField.FLAG PFlag.Z) s = 1#1) then + 0x4005b8#64 + else + 0x4005a8#64) ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x4005b4 h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] + split + · rename_i h + simp_all only [state_simp_rules, bitvec_rules, minimal_theory] + · rename_i h; simp only [minimal_theory] at h + simp_all only [state_simp_rules, minimal_theory] + done + +theorem program.stepi_0x4005b8_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x4005b8#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + cut sn = true ∧ + r StateField.PC sn = 0x4005bc#64 ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x4005b8 h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] + done + +-- (TODO) program.stepi_0x4005bc_cut elided. +-- theorem program.stepi_0x4005bc_cut (s sn : ArmState) +-- (h_program : s.program = program) +-- (h_pc : r StateField.PC s = 0x4005bc#64) +-- (h_err : r StateField.ERR s = StateError.None) +-- (h_sp_aligned : CheckSPAlignment s) +-- (h_step : sn = run 1 s) : +-- cut sn = false := by +-- have := program.stepi_eq_0x4005bc h_program h_pc h_err +-- simp only [minimal_theory] at this + +-- simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] +-- done + +------------------------------------------------------------------------------- + +-- Some helper lemmas to prove obligations stemming from `assert` + +private theorem AddWithCarry.all_ones_identity_64 (x : BitVec 64) : + (AddWithCarry x 0xffffffffffffffff#64 0x1#1).fst = x := by + simp only [AddWithCarry] + bv_decide + +private theorem AddWithCarry.all_ones_zero_flag_64 (x : BitVec 64) : + ((AddWithCarry x 0xffffffffffffffff#64 0x1#1).snd.z = 1#1) ↔ + (x = 0#64) := by + simp only [AddWithCarry, bitvec_rules, state_simp_rules] + repeat split + · bv_decide + · bv_decide + done + +private theorem non_zero_one_bit_is_one {x : BitVec 1} + (h : ¬ x = 0#1) : + x = 1#1 := by + bv_decide + +private theorem AddWithCarry.sub_one_64 (x : BitVec 64) : + (AddWithCarry x 0xfffffffffffffffe#64 0x1#1).fst = x - 1#64 := by + simp only [AddWithCarry] + bv_decide + +private theorem loop_inv_x0_le {x y : BitVec 64} + (h_assert_x0 : x ≤ y) + (h_assert_x0_nz : ¬x = 0x0#64) : + x - 0x1#64 ≤ y := by + bv_omega + +private theorem AddWithCarry.add_one_64 (x : BitVec 64) : + (AddWithCarry x 0x1#64 0x0#1).fst = x + 0x1#64 := by + simp only [AddWithCarry, bitvec_rules] + bv_omega + +private theorem crock_lemma (x y z : BitVec 64) : + x + (y - z) + 1#64 = x + (y - (z - 1#64)) := by + -- (FIXME) Without this apply below, bv_omega crashes my editor. + apply BitVec.eq_sub_iff_add_eq.mp + bv_omega + +-- TODO: Upstream? 
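+-- Registered under `bitvec_rules` so that reflexive bitvector goals of the
+-- form `x ≤ x` are discharged by `simp` in the proofs below.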
+@[bitvec_rules] +theorem BitVec.le_refl (x : BitVec n) : + x <= x := by + exact BitVec.le_of_eq x x rfl + +------------------------------------------------------------------------------- + +-- Throwaway tactic to do symbolic simulation in the context of +-- Correctness.partial_correctness_from_assertions. +open Lean (FVarId TSyntax logWarning) in +open Lean.Elab.Tactic (TacticM evalTactic withMainContext) in +def sym1_cassert (curr_state_number : Nat) + (cassert_eq : Lean.Name) + (cut_eq : Lean.Name) + : TacticM Unit := + withMainContext do + let n_str := toString curr_state_number + let n'_str := toString (curr_state_number + 1) + let mk_name (s : String) : Lean.Name := + Lean.Name.mkStr Lean.Name.anonymous s + -- h_st_*: name of the hypothesis about projections from state st. + let h_st_program := Lean.mkIdent (mk_name ("h_s" ++ n_str ++ "_program")) + let h_st_pc := Lean.mkIdent (mk_name ("h_s" ++ n_str ++ "_pc")) + let h_st_err := Lean.mkIdent (mk_name ("h_s" ++ n_str ++ "_err")) + let h_st_sp_aligned := Lean.mkIdent (mk_name ("h_s" ++ n_str ++ "_sp_aligned")) + -- st: name of the current state + let st := Lean.mkIdent (mk_name ("s" ++ n_str)) + -- stn: name of the next state + let stn := Lean.mkIdent (mk_name ("s" ++ n'_str)) + -- stn': temporary name of the next state + let stn' := Lean.mkIdent (mk_name ("s" ++ n'_str ++ "'")) + -- h_st_run: name of the hypothesis with the `run` function + let h_st_run := Lean.mkIdent (mk_name ("h_s" ++ n_str ++ "_run")) + -- stepi_st: name of the hypothesis which associates st and stn with a stepi + -- function. + let stepi_st := Lean.mkIdent (mk_name ("stepi_s" ++ n_str)) + -- cassert_eq lemma + let cassert_eq_lemma := Lean.mkIdent cassert_eq + -- cut_lemma + let cut_lemma := Lean.mkIdent cut_eq + evalTactic (← + `(tactic| + (rw [$cassert_eq_lemma:ident] + generalize $h_st_run:ident : run 1 $st:ident = $stn':ident + replace $h_st_run:ident := ($h_st_run:ident).symm + sym_n 1 at $st:ident + have h_cut := $cut_lemma:ident $st:ident $stn:ident + $h_st_program:ident $h_st_pc:ident + $h_st_err:ident $h_st_sp_aligned:ident + (by simp only [run, $stepi_st:ident]) + simp only [run_opener_zero] at $h_st_run:ident + rw [$h_st_run:ident] at * + simp only [h_cut, Nat.reduceAdd, minimal_theory] + clear $stn':ident h_cut $h_st_run:ident $stepi_st:ident + ))) + +-- sym_i_assert tactic symbolically simulates 1 instruction from the state +-- number `i`. +elab "sym_i_cassert" i:num cassert_eq:ident cut_eq:ident : tactic => do + sym1_cassert i.getNat cassert_eq.getId cut_eq.getId + +theorem cassert_eq (s0 si : ArmState) (i : Nat) : + Correctness.cassert s0 si i = if cut si then (i, assert s0 si) + else Correctness.cassert s0 (run 1 si) (i + 1) := by + rw [Correctness.cassert_eq] + simp only [Sys.next, Spec'.cut, Spec'.assert, run] + done + +theorem partial_correctness : + PartialCorrectness ArmState := by + apply Correctness.partial_correctness_from_assertions + case v1 => + intro s0 h_pre + simp_all only [Spec.pre, pre, Spec'.assert, assert, + minimal_theory] + case v2 => + intro sf h_exit + simp_all only [Spec.exit, exit, Spec'.cut, cut, + state_simp_rules, minimal_theory] + case v3 => + intro s0 sf h_assert h_exit + -- (FIXME) Remove Spec.post to replicate bug where simp_all somehow + -- aggressively makes the goal unprovable. 
+ simp_all only [Spec'.assert, Spec.exit, assert, exit, Spec.post] + case v4 => + intro s0 si h_assert h_exit + simp only [Correctness.arm_run] + simp [Spec.exit, exit] at h_exit + simp only [Spec'.assert, assert, h_exit, minimal_theory] at h_assert + obtain ⟨h_pre, h_assert⟩ := h_assert + have ⟨h_s0_pc, h_s0_program, h_s0_err, h_s0_sp_aligned⟩ := h_pre + simp_all only [Spec'.assert, Spec.exit, assert, exit, + minimal_theory, state_simp_rules] + split at h_assert + · -- Next cutpoint from 0x4005a4#64 (first instruction) + subst si + -- Begin: Symbolic simulation + sym_i_cassert 0 cassert_eq program.stepi_0x4005a4_cut + sym_i_cassert 1 cassert_eq program.stepi_0x4005b0_cut + -- End: Symbolic simulation + simp only [assert, h_pre, loop_inv, + h_s2_pc, h_s2_err, h_s2_program, h_s2_sp_aligned, + state_simp_rules, bitvec_rules, minimal_theory] + simp only [h_step_2, h_step_1, + state_simp_rules, bitvec_rules, minimal_theory] + simp (config := {ground := true}) only + rw [AddWithCarry.all_ones_zero_flag_64] + done + · -- Next cutpoint from 0x4005b4#64 (loop guard) + -- + -- (FIXME @shilpi) Hack for awful sym_i_cassert tactic which expects a + -- numeric suffix for the state variables. + -- generalize h_si_s1 : si = s1; subst si + -- + rename_i h_inv_pc + obtain ⟨h_inv_x0_lt, h_inv_zf_x0, h_inv_x1, + h_inv_err, h_inv_program, h_inv_sp_aligned⟩ := h_assert + simp only [state_simp_rules, bitvec_rules, minimal_theory] at * + by_cases h_cond_holds : r (StateField.FLAG PFlag.Z) si = 0x0#1 + case pos => + have h_inv_x0_nz : ¬ r (StateField.GPR 0#5) si = 0#64 := by + -- TODO: missing iff_rule? + simp (config := {decide := true}) only + [h_cond_holds, minimal_theory] at h_inv_zf_x0 + assumption + -- Begin: Symbolic simulation + -- Instruction 1 (branch instruction) + rw [cassert_eq] + generalize h_run : run 1 si = s1 + replace h_run := (h_run).symm + -- (TODO) Better handling of branch instructions. + -- sym_n 1 at si + have stepi_si : stepi si = s1 := by simp only [h_run, run] + have h_step_1 := @program.stepi_eq_0x4005b4 si + h_inv_program h_inv_pc h_inv_err + rw [stepi_si] at h_step_1 + have h_cut := @program.stepi_0x4005b4_cut si s1 + h_inv_program h_inv_pc h_inv_err h_inv_sp_aligned + h_run + simp only [h_cond_holds, bitvec_rules, minimal_theory] + at h_step_1 h_cut + have h_cut' : cut s1 = false := by + -- TODO: missing iff_rule? + simp (config := {decide := true}) only [minimal_theory] at h_cut + simp only [h_cut] + done + simp only [h_cut', minimal_theory] + clear stepi_si h_run + (intro_fetch_decode_lemmas h_step_1 h_inv_program "h_inv"; + (all_goals (try assumption))) + clear h_cut h_cut' + -- Instruction 2 + sym_i_cassert 1 cassert_eq program.stepi_0x4005a8_cut + sym_i_cassert 2 cassert_eq program.stepi_0x4005ac_cut + sym_i_cassert 3 cassert_eq program.stepi_0x4005b0_cut + -- End: Symbolic simulation + simp only [assert, loop_inv, h_pre, + h_s4_pc, h_s4_err, h_s4_program, h_s4_sp_aligned, + state_simp_rules, bitvec_rules, minimal_theory] + -- Aggregate program effects here. 
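+        -- That is, fold the single-step equations `h_step_1` .. `h_step_4`
+        -- together so that the registers of `s4` are expressed directly in
+        -- terms of the state at the loop guard.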
+ simp (config := {ground := true}) only at h_step_4 h_step_3 h_step_2 + simp only [h_step_4, h_step_3, h_step_2, h_step_1, + state_simp_rules, bitvec_rules, minimal_theory] + clear h_step_4 h_step_3 h_step_2 h_step_1 + rw [AddWithCarry.sub_one_64, + AddWithCarry.all_ones_zero_flag_64, + AddWithCarry.add_one_64] + -- cse (config := { processHyps := .allHyps }) + simp only [h_inv_x1, crock_lemma, minimal_theory] + apply loop_inv_x0_le h_inv_x0_lt h_inv_x0_nz + done + case neg => + have h_inv_zf : r (StateField.FLAG PFlag.Z) si = 1#1 := by + -- (FIXME) Ugh, tedious. + rw [@non_zero_one_bit_is_one (r (StateField.FLAG PFlag.Z) si)] + assumption + simp only [h_inv_zf, minimal_theory] at h_inv_zf_x0 + -- Begin: Symbolic simulation + -- Instruction 1 (branch instruction) + rw [cassert_eq] + generalize h_run : run 1 si = s1 + replace h_run := (h_run).symm + -- (TODO) Better handling of branch instructions. + -- sym_n 1 at si + have stepi_si : stepi si = s1 := by simp only [h_run, run] + have h_step_1 := @program.stepi_eq_0x4005b4 si + h_inv_program h_inv_pc h_inv_err + rw [stepi_si] at h_step_1 + have h_cut := @program.stepi_0x4005b4_cut si s1 + h_inv_program h_inv_pc h_inv_err h_inv_sp_aligned + h_run + simp only [h_cond_holds, h_inv_zf, bitvec_rules, minimal_theory] + at h_step_1 h_cut + simp only [h_cut, minimal_theory] + clear h_cut stepi_si h_run + (intro_fetch_decode_lemmas h_step_1 h_inv_program "h_inv"; + all_goals (try assumption)) + -- End: Symbolic simulation + simp only [assert, loop_post, + h_pre, h_s1_pc, h_s1_err, h_s1_program, h_s1_sp_aligned, + state_simp_rules, bitvec_rules, minimal_theory] + -- Aggregate program effects here. + simp only [h_step_1, state_simp_rules, bitvec_rules, minimal_theory] + simp only [spec, h_inv_x1, h_inv_zf_x0, bitvec_rules, BitVec.add_comm] + done + · -- Next cutpoint from 0x4005b8#64 (first instruction after loop) + -- + -- (FIXME @shilpi) Hack for awful sym_i_cassert tactic which expects a + -- numeric suffix for the state variables. + generalize h_si_s1 : si = s1; subst si + -- + rename_i h_s1_pc + obtain ⟨h_s1_x1, h_s1_err, h_s1_program, h_s1_sp_aligned⟩ := h_assert + simp only [state_simp_rules, bitvec_rules, minimal_theory] at * + -- Begin: Symbolic simulation + sym_i_cassert 1 cassert_eq program.stepi_0x4005b8_cut + -- End: Symbolic simulation + simp only [assert, post, + h_pre, h_s2_pc, h_s2_err, h_s2_sp_aligned, h_s2_program, + state_simp_rules, bitvec_rules, minimal_theory] + simp only [h_step_2, spec, h_s1_x1, + state_simp_rules, bitvec_rules, minimal_theory] + done + · -- Next cutpoint from 0x4005bc#64 (last instruction) + -- No such cutpoint exists. + contradiction + · -- No further cutpoints exist. 
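+    -- At any other PC, `assert` reduces to `False`, so the hypothesis is
+    -- contradictory and `simp_all` closes the goal.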
+ simp_all only + done + +------------------------------------------------------------------------------- + +theorem termination : + Termination ArmState := by + simp [Termination, Spec.pre, Spec.exit, exit, + state_simp_rules, bitvec_rules, minimal_theory] + intro s h_pre + sorry + +end AddLoopTandem diff --git a/Proofs/Proofs.lean b/Proofs/Proofs.lean index 4d304e64..8108ccb1 100644 --- a/Proofs/Proofs.lean +++ b/Proofs/Proofs.lean @@ -16,5 +16,6 @@ import Proofs.Experiments.Max import Proofs.Experiments.Abs.Abs import Proofs.Experiments.Abs.AbsVCG import Proofs.Experiments.Abs.AbsVCGTandem -import Proofs.Experiments.AddLoop +import Proofs.Experiments.AddLoop.AddLoop +import Proofs.Experiments.AddLoop.AddLoopTandem import Proofs.Experiments.MemCpyVCG From 7bac006e0ca34f3a1d1133afc179d26c595e1c56 Mon Sep 17 00:00:00 2001 From: Shilpi Goel Date: Tue, 27 Aug 2024 11:04:50 -0500 Subject: [PATCH 03/18] Testing a strategy for termination proofs that can borrow from the same program's partial correctness proof --- Correctness/Correctness.lean | 25 ++ Proofs/Experiments/AddLoop/AddLoopTandem.lean | 225 +++++++++++++++++- 2 files changed, 239 insertions(+), 11 deletions(-) diff --git a/Correctness/Correctness.lean b/Correctness/Correctness.lean index 1de2100f..66bc69bf 100644 --- a/Correctness/Correctness.lean +++ b/Correctness/Correctness.lean @@ -383,4 +383,29 @@ theorem partial_correctness_from_assertions [Sys σ] [Spec' σ] ⟨n, Nat.le_refl _, hexit, hpost⟩ find 0 (v1 s0 hp) (Nat.zero_le ..) +---------------------------------------------------------------------- + +noncomputable def rank_decreases [Sys σ] [Spec' σ] (rank : σ → Nat) (si sn : σ) (i : Nat) : Nat × Prop := + iterate (fun (sn, i) => + if cut sn then + .inl (i, rank sn < rank si) + else + .inr (next sn, i + 1)) + (sn, i) + +theorem rank_decreases_eq [Sys σ] [Spec' σ] (rank : σ → Nat) (si sn : σ) (i : Nat) : + rank_decreases rank si sn i = + if cut sn then (i, rank sn < rank si) + else rank_decreases rank si (next sn) (i + 1) := by + unfold rank_decreases + conv => lhs; rw [iterate_eq] + by_cases cut sn <;> simp [*] + done + +theorem termination_from_decreasing_rank [Sys σ] [Spec' σ] (rank : σ → Nat) + (v1 : ∀ s0 : σ, pre s0 → assert s0 s0) + (v2 : ∀ s0 si : σ, assert s0 si → ¬ exit si → (rank_decreases rank si (run si 1) 0).snd) + : Termination σ := by + sorry + end Correctness diff --git a/Proofs/Experiments/AddLoop/AddLoopTandem.lean b/Proofs/Experiments/AddLoop/AddLoopTandem.lean index d94a1b06..460f26ef 100644 --- a/Proofs/Experiments/AddLoop/AddLoopTandem.lean +++ b/Proofs/Experiments/AddLoop/AddLoopTandem.lean @@ -127,13 +127,6 @@ instance : Spec' ArmState where cut := cut assert := assert -theorem AddLoop.csteps_eq (s : ArmState) (i : Nat) : - Correctness.csteps s i = if cut s then i - else Correctness.csteps (run 1 s) (i + 1) := by - rw [Correctness.csteps_eq] - simp only [Sys.next, Spec'.cut, run] - done - ------------------------------------------------------------------------------- -- Generating the program effects and non-effects @@ -541,11 +534,221 @@ theorem partial_correctness : ------------------------------------------------------------------------------- +def loop_clock (x0 : BitVec 64) : Nat := + if h : x0 = 0#64 then + 1 + else + have : x0 - 0x1#64 < x0 := by + bv_omega + 4 + loop_clock (x0 - 1) + termination_by x0.toNat + +theorem loop_clock_inv_lemma (h : ¬x = 0x0#64) : + loop_clock (x - 0x1#64) < loop_clock x := by + generalize h_xn : x.toNat = xn + have h_xn_ge_1 : 1 ≤ xn := by bv_omega + induction xn, 
h_xn_ge_1 using Nat.le_induction generalizing x + case base => + have h_x_eq_1 : x = 1#64 := by bv_omega + unfold loop_clock + simp only [h_x_eq_1, BitVec.sub_self, BitVec.ofNat_eq_ofNat, + reduceDIte, gt_iff_lt, BitVec.reduceEq] + omega + case succ => + rename_i xn' h_xn' h_inv + unfold loop_clock + simp only [BitVec.ofNat_eq_ofNat, dite_eq_ite, gt_iff_lt] + have h1 : ¬ x - 0x1#64 = 0x0#64 := by bv_omega + have h2 : (x - 0x1#64).toNat = xn' := by bv_omega + simp only [h, h1, h2, h_inv, + reduceIte, Nat.add_lt_add_iff_left, not_false_eq_true] + done + +def clock (s : ArmState) : Nat := + let x0 := r (StateField.GPR 0#5) s + if x0 = 0#64 then + 2 + 1 + 1 + else + 2 + ((loop_clock x0) + 1) + +/-- +Our formalization of the rank of a cutpoint for this program is the number of +instructions that remain to be executed from that point. +-/ +def rank (si : ArmState) : Nat := + match (read_pc si) with + | 0x4005a4#64 => + -- First instruction + clock si + | 0x4005b4#64 => + -- Loop guard + (loop_clock (r (StateField.GPR 0#5) si)) + 1 + | 0x4005b8#64 => + -- First instruction following the loop + 1 + | 0x4005bc#64 => + -- Last instruction + 0 + | _ => + -- No cutpoints remain + 0 + +theorem loop_clk_plus_one_lt_clock : + (loop_clock (r (StateField.GPR 0x0#5) s)) + 1 < clock s := by + simp only [clock] + unfold loop_clock + split <;> omega + +theorem rank_decreases_eq (si sn : ArmState) (i : Nat) : + Correctness.rank_decreases rank si sn i = + if cut sn then (i, rank sn < rank si) + else Correctness.rank_decreases rank si (run 1 sn) (i + 1) := by + rw [Correctness.rank_decreases_eq] + simp only [Spec'.cut, Sys.next, run] + done + +-- (FIXME) The termination proof looks very similar to the partial correctness +-- one, and we ought to do them in one swoop. theorem termination : Termination ArmState := by - simp [Termination, Spec.pre, Spec.exit, exit, - state_simp_rules, bitvec_rules, minimal_theory] - intro s h_pre - sorry + apply Correctness.termination_from_decreasing_rank rank + case v1 => + intro s0 h_pre + simp_all only [Spec.pre, pre, Spec'.assert, assert, + minimal_theory] + case v2 => + intro s0 si h_assert h_not_exit + simp only [Correctness.arm_run] + simp [Spec.exit, exit] at h_not_exit + simp only [Spec'.assert, assert, h_not_exit, minimal_theory] at h_assert + obtain ⟨h_pre, h_assert⟩ := h_assert + have ⟨h_s0_pc, h_s0_program, h_s0_err, h_s0_sp_aligned⟩ := h_pre + simp_all only [Spec'.assert, Spec.exit, assert, exit, + minimal_theory, state_simp_rules] + split at h_assert + · -- Next cutpoint from 0x4005a4#64 (first instruction) + subst si + -- Begin: Symbolic simulation + sym_i_cassert 0 rank_decreases_eq program.stepi_0x4005a4_cut + sym_i_cassert 1 rank_decreases_eq program.stepi_0x4005b0_cut + -- End: Symbolic simulation + simp only [rank, h_s0_pc, h_s2_pc, state_simp_rules] + simp (config := {ground := true}) only at h_step_2 + simp only [h_step_2, h_step_1, loop_clk_plus_one_lt_clock, + state_simp_rules, bitvec_rules, minimal_theory] + done + · -- Next cutpoint from 0x4005b4#64 (loop guard) + -- + -- (FIXME @shilpi) Hack for awful sym_i_cassert tactic which expects a + -- numeric suffix for the state variables. 
+ -- generalize h_si_s1 : si = s1; subst si + -- + rename_i h_inv_pc + obtain ⟨h_inv_x0_lt, h_inv_zf_x0, h_inv_x1, + h_inv_err, h_inv_program, h_inv_sp_aligned⟩ := h_assert + simp only [state_simp_rules, bitvec_rules, minimal_theory] at * + by_cases h_cond_holds : r (StateField.FLAG PFlag.Z) si = 0x0#1 + case pos => + have h_inv_x0_nz : ¬ r (StateField.GPR 0#5) si = 0#64 := by + -- TODO: missing iff_rule? + simp (config := {decide := true}) only + [h_cond_holds, minimal_theory] at h_inv_zf_x0 + assumption + -- Begin: Symbolic simulation + -- Instruction 1 (branch instruction) + rw [rank_decreases_eq] + generalize h_run : run 1 si = s1 + replace h_run := (h_run).symm + -- (TODO) Better handling of branch instructions. + -- sym_n 1 at si + have stepi_si : stepi si = s1 := by simp only [h_run, run] + have h_step_1 := @program.stepi_eq_0x4005b4 si + h_inv_program h_inv_pc h_inv_err + rw [stepi_si] at h_step_1 + have h_cut := @program.stepi_0x4005b4_cut si s1 + h_inv_program h_inv_pc h_inv_err h_inv_sp_aligned + h_run + simp only [h_cond_holds, bitvec_rules, minimal_theory] + at h_step_1 h_cut + have h_cut' : cut s1 = false := by + -- TODO: missing iff_rule? + simp (config := {decide := true}) only [minimal_theory] at h_cut + simp only [h_cut] + done + simp only [h_cut', minimal_theory] + clear stepi_si h_run + (intro_fetch_decode_lemmas h_step_1 h_inv_program "h_inv"; + (all_goals (try assumption))) + clear h_cut h_cut' + -- Instruction 2 + sym_i_cassert 1 rank_decreases_eq program.stepi_0x4005a8_cut + sym_i_cassert 2 rank_decreases_eq program.stepi_0x4005ac_cut + sym_i_cassert 3 rank_decreases_eq program.stepi_0x4005b0_cut + -- End: Symbolic simulation + simp only [rank, + h_s4_pc, h_inv_pc, + state_simp_rules, bitvec_rules, minimal_theory] + -- Aggregate program effects here. + simp (config := {ground := true}) only at h_step_4 h_step_3 h_step_2 + simp only [h_step_4, h_step_3, h_step_2, h_step_1, + state_simp_rules, bitvec_rules, minimal_theory] + clear h_step_4 h_step_3 h_step_2 h_step_1 + rw [AddWithCarry.sub_one_64] + have := loop_clock_inv_lemma h_inv_x0_nz + omega + done + case neg => + have h_inv_zf : r (StateField.FLAG PFlag.Z) si = 1#1 := by + -- (FIXME) Ugh, tedious. + rw [@non_zero_one_bit_is_one (r (StateField.FLAG PFlag.Z) si)] + assumption + simp only [h_inv_zf, minimal_theory] at h_inv_zf_x0 + -- Begin: Symbolic simulation + -- Instruction 1 (branch instruction) + rw [rank_decreases_eq] + generalize h_run : run 1 si = s1 + replace h_run := (h_run).symm + -- (TODO) Better handling of branch instructions. + -- sym_n 1 at si + have stepi_si : stepi si = s1 := by simp only [h_run, run] + have h_step_1 := @program.stepi_eq_0x4005b4 si + h_inv_program h_inv_pc h_inv_err + rw [stepi_si] at h_step_1 + have h_cut := @program.stepi_0x4005b4_cut si s1 + h_inv_program h_inv_pc h_inv_err h_inv_sp_aligned + h_run + simp only [h_cond_holds, h_inv_zf, bitvec_rules, minimal_theory] + at h_step_1 h_cut + simp only [h_cut, minimal_theory] + clear h_cut stepi_si h_run + (intro_fetch_decode_lemmas h_step_1 h_inv_program "h_inv"; + all_goals (try assumption)) + -- End: Symbolic simulation + simp only [rank, + h_s1_pc, h_inv_pc, h_inv_zf_x0, + state_simp_rules, bitvec_rules, minimal_theory] + simp (config := {ground := true}) only + done + · -- Next cutpoint from 0x4005b8#64 (first instruction after loop) + -- + -- (FIXME @shilpi) Hack for awful sym_i_cassert tactic which expects a + -- numeric suffix for the state variables. 
+ generalize h_si_s1 : si = s1; subst si + -- + rename_i h_s1_pc + obtain ⟨h_s1_x1, h_s1_err, h_s1_program, h_s1_sp_aligned⟩ := h_assert + simp only [state_simp_rules, bitvec_rules, minimal_theory] at * + -- Begin: Symbolic simulation + sym_i_cassert 1 rank_decreases_eq program.stepi_0x4005b8_cut + -- End: Symbolic simulation + simp only [rank, h_s2_pc, h_s1_pc, + state_simp_rules, bitvec_rules, minimal_theory] + done + · -- Next cutpoint from 0x4005bc#64 (last instruction) + -- No such cutpoint exists. + contradiction + · -- No further cutpoints exist. + simp_all only + done end AddLoopTandem From d47b19960c9458ee22e957de9ab7312b00b99242 Mon Sep 17 00:00:00 2001 From: Siddharth Bhat Date: Tue, 3 Sep 2024 14:25:31 -0500 Subject: [PATCH 04/18] chore: first commit, porting code over --- Proofs/Experiments/Max.lean | 47 -- Proofs/Experiments/Max/MaxProgram.lean | 44 ++ Proofs/Experiments/Max/MaxTandem.lean | 929 +++++++++++++++++++++++++ Proofs/Proofs.lean | 2 +- 4 files changed, 974 insertions(+), 48 deletions(-) delete mode 100644 Proofs/Experiments/Max.lean create mode 100644 Proofs/Experiments/Max/MaxProgram.lean create mode 100644 Proofs/Experiments/Max/MaxTandem.lean diff --git a/Proofs/Experiments/Max.lean b/Proofs/Experiments/Max.lean deleted file mode 100644 index fc38f615..00000000 --- a/Proofs/Experiments/Max.lean +++ /dev/null @@ -1,47 +0,0 @@ -/- -Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. -Released under Apache 2.0 license as described in the file LICENSE. -Author(s): Shilpi Goel, Siddharth Bhat - -The goal is to prove that this program implements max correctly. --/ -import Arm -import Arm.BitVec - -namespace Max - -def program : Program := - def_program [ - (0x894#64, 0xd10083ff#32), -- sub sp, sp, #0x20 - (0x898#64, 0xb9000fe0#32), -- str w0, [sp, #12] - (0x89c#64, 0xb9000be1#32), -- str w1, [sp, #8] - (0x8a0#64, 0xb9400fe1#32), -- ldr w1, [sp, #12] - (0x8a4#64, 0xb9400be0#32), -- ldr w0, [sp, #8] - (0x8a8#64, 0x6b00003f#32), -- cmp w1, w0 - (0x8ac#64, 0x5400008d#32), -- b.le 8bc - (0x8b0#64, 0xb9400fe0#32), -- ldr w0, [sp, #12] - (0x8b4#64, 0xb9001fe0#32), -- str w0, [sp, #28] - (0x8b8#64, 0x14000003#32), -- b 8c4 - (0x8bc#64, 0xb9400be0#32), -- ldr w0, [sp, #8] - (0x8c0#64, 0xb9001fe0#32), -- str w0, [sp, #28] - (0x8c4#64, 0xb9401fe0#32), -- ldr w0, [sp, #28] - (0x8c8#64, 0x910083ff#32), -- add sp, sp, #0x20 - (0x8cc#64, 0xd65f03c0#32) -- ret -] - -def spec (x y : BitVec 32) : BitVec 32 := - if BitVec.slt y x then x else y - -theorem correct - {s0 sf : ArmState} - (h_s0_pc : read_pc s0 = 0x4005d0#64) - (h_s0_program : s0.program = program) - (h_s0_err : read_err s0 = StateError.None) - (h_run : sf = run program.length s0) : - read_gpr 32 0 sf = spec (read_gpr 32 0 s0) (read_gpr 32 1 s0) ∧ - read_err sf = StateError.None := by sorry - -/-- info: 'Max.correct' depends on axioms: [propext, sorryAx, Classical.choice, Quot.sound] -/ -#guard_msgs in #print axioms correct - -end Max diff --git a/Proofs/Experiments/Max/MaxProgram.lean b/Proofs/Experiments/Max/MaxProgram.lean new file mode 100644 index 00000000..f88a1cb3 --- /dev/null +++ b/Proofs/Experiments/Max/MaxProgram.lean @@ -0,0 +1,44 @@ +/- +Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Author(s): Shilpi Goel, Siddharth Bhat + +The goal is to prove that this program implements max correctly. 
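+
+The functional specification is `spec` below: it returns the signed maximum of
+its two 32-bit inputs (`BitVec.slt` is signed less-than). For example,
+`spec 0xffffffff#32 0x1#32 = 0x1#32`, since `0xffffffff#32` is `-1` when read
+as a signed value.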
+-/ +import Arm +import Arm.BitVec +import Tactics.Sym +import Tactics.StepThms + + +namespace Max + +def program : Program := + def_program [ +/- 0x0 -/ (0x894#64, 0xd10083ff#32), -- sub sp, sp, #0x20 ; +/- 0x4 -/ (0x898#64, 0xb9000fe0#32), -- str w0, [sp, #12] ; sp[12] = w0_a +/- 0x8 -/ (0x89c#64, 0xb9000be1#32), -- str w1, [sp, #8] ; sp[8] = w1_a +/- 0xc -/ (0x8a0#64, 0xb9400fe1#32), -- ldr w1, [sp, #12] ; w1_b = sp[12] = w0_a +/- 0x10 -/ (0x8a4#64, 0xb9400be0#32), -- ldr w0, [sp, #8] ; w0_b = sp[8] = w1_a +/- 0x14 -/ (0x8a8#64, 0x6b00003f#32), -- cmp w1, w0 ; w1_b - w0_b = w0_a - w1_a +/- 0x18 -/ (0x8ac#64, 0x5400008d#32), -- b.le 8bc ; w0_a ≤ w1_a: br ... -- entry end. +-- LOAD FROM sp[8] = w1_a (which is > w0_a) AND STORE IN w0 +/- 0x1c -/ (0x8b0#64, 0xb9400fe0#32), -- ldr w0, [sp, #12] ; w0_c = sp[12] = w0_a -- then start +/- 0x20 -/ (0x8b4#64, 0xb9001fe0#32), -- str w0, [sp, #28] ; sp[28] = w0_c = w0_a +/- 0x24 -/ (0x8b8#64, 0x14000003#32), -- b 8c4 ; - then end +-- LOAD FROM sp[8] = w1_a (which is > w0_a) AND STORE IN w0 +/- 0x28 -/ (0x8bc#64, 0xb9400be0#32), -- ldr w0, [sp, #8] ; w0_d = sp[8] = w1_a -- else start +/- 0x2c -/ (0x8c0#64, 0xb9001fe0#32), -- str w0, [sp, #28] ; sp[28] = w0_d = w1_a -- else end +-- LOAD FROM sp[28] AND STORE IN w0 +/- 0x30 -/ (0x8c4#64, 0xb9401fe0#32), -- ldr w0, [sp, #28] ; w0 = sp[28] -- merge start +/- 0x34 -/ (0x8c8#64, 0x910083ff#32), -- add sp, sp, #0x20 ; sp = sp + 0x20 +/- 0x38 -/ (0x8cc#64, 0xd65f03c0#32) -- ret -- return -- merge end [TODO: should this be ret?] +] + +#genStepEqTheorems program + +def spec (x y : BitVec 32) : BitVec 32 := + if BitVec.slt y x then x else y + + +end Max diff --git a/Proofs/Experiments/Max/MaxTandem.lean b/Proofs/Experiments/Max/MaxTandem.lean new file mode 100644 index 00000000..5a50a1b0 --- /dev/null +++ b/Proofs/Experiments/Max/MaxTandem.lean @@ -0,0 +1,929 @@ +/- +Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Author(s): Shilpi Goel + +Experimental: Use the Correctness module to prove that this program computes the +following via a naive loop that iterates over `x0`: +`x0 := x0 + x1` +-/ +import Arm +import Tactics.CSE +import Tactics.Sym +import Tactics.StepThms +import Correctness.ArmSpec +import Proofs.Experiments.Max.MaxProgram +import Arm.Insts.Common +import Arm.Memory.SeparateAutomation + +namespace MaxTandem +open Max + + +section PC + +scoped notation "entry_start" => 0x894#64 +scoped notation "then_start" => 0x8b0#64 +scoped notation "else_start" => 0x8bc#64 +scoped notation "merge_end" => 0x8cc#64 + + +namespace ArmStateNotation +@[inherit_doc read_mem_bytes] +syntax:max term noWs "[" withoutPosition(term) "," withoutPosition(term) noWs "]" : term +macro_rules | `($s[$base,$n]) => `(read_mem_bytes $n $base $s) +end ArmStateNotation +open ArmStateNotation + + + +def pcs : List (BitVec 64) := [entry_start, + -- entry_end, + then_start, + -- then_end, + else_start, + -- else_end, + -- merge_start, + merge_end + ] + +end PC + +def cut (s : ArmState) : Bool := decide (read_pc s ∈ pcs) + +section Invariants +-- variable (s0 si : ArmState) + +/-- Precondition for the correctness of the Add program. -/ +def pre (s : ArmState) : Prop := + read_pc s = entry_start ∧ + s.program = program ∧ + read_err s = StateError.None ∧ + -- (FIXME) We don't really need the stack pointer to be aligned, but the + -- `sym_n` tactic expects this. Can we make this optional? 
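+  -- The program allocates a 32-byte stack frame (`sub sp, sp, #0x20`), so we
+  -- additionally require `s.sp ≥ 32` to rule out stack-pointer underflow; the
+  -- 80-byte `mem_legal'` bound below is a conservative over-approximation.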
+ CheckSPAlignment s ∧ + s.sp ≥ 32 ∧ + mem_legal' s.sp 80 -- TODO: find the correct smallest bound we need here. + + +/-- Specification function. -/ +def spec (x0 x1 : BitVec 64) : BitVec 64 := + if BitVec.slt (x0.zeroExtend 32) (x1.zeroExtend 32) + then (x1.zeroExtend 32).zeroExtend 64 + else (x0.zeroExtend 32).zeroExtend 64 + +def post (s0 sf : ArmState) : Prop := + sf.x0 = BitVec.zeroExtend 64 (BitVec.zeroExtend 32 (spec s0.x0 s0.x1)) ∧ + read_pc sf = 0x8cc#64 ∧ + read_err sf = StateError.None ∧ + sf.program = program ∧ + -- (FIXME) We don't really need the stack pointer to be aligned, but the + -- `sym_n` tactic expects this. Can we make this optional? + CheckSPAlignment sf + +def entry_start_inv (s0 si : ArmState) : Prop := si = s0 + +def entry_end_inv (s0 si : ArmState): Prop := + read_err si = .None ∧ + si.program = program ∧ + -- (FIXME) We don't really need the stack pointer to be aligned, but the + -- `sym_n` tactic expects this. Can we make this optional? + CheckSPAlignment si ∧ + si[s0.sp - 32#64 + 12#64, 4] = s0.x0.zeroExtend 32 ∧ + si[s0.sp - 32#64 + 8#64, 4] = s0.x1.zeroExtend 32 ∧ + si.sp = s0.sp - 32#64 -- TODO: read flags. + -- ∧ read_mem_bytes 4 (r (Spec) si) si = 0x0#32 + +def then_start_inv (s0 si : ArmState): Prop := + entry_end_inv s0 si ∧ + ¬ ((BitVec.zeroExtend 32 s0.x0).sle (BitVec.zeroExtend 32 s0.x1) = true) + +def else_start_inv (s0 si : ArmState) : Prop := + entry_end_inv s0 si ∧ + ((BitVec.zeroExtend 32 s0.x0).sle (BitVec.zeroExtend 32 s0.x1) = true) + +def exit (s : ArmState) : Prop := + -- (FIXME) Let's consider the state where we are poised to execute `ret` as an + -- exit state for now. + read_pc s = 0x8cc#64 + +end Invariants + + +def assert (s0 si : ArmState) : Prop := + pre s0 ∧ + -- Using `match` is preferable to `if` for the `split` tactic (and for + -- readability). + match (read_pc si) with + | entry_start => entry_start_inv s0 si + -- | entry_end => entry_end_inv s0 si + | then_start => then_start_inv s0 si + -- | then_end => then_end_inv s0 si + | else_start => else_start_inv s0 si + -- | else_end => else_end_inv s0 si + -- | merge_start => merge_start_inv s0 si + | merge_end => post s0 si -- why do I need an `assert` here, if it's tracked by `post`? Confusing. + | _ => False + +instance : Spec' ArmState where + pre := pre + post := post + exit := exit + cut := cut + assert := assert + +------------------------------------------------------------------------------- +-- Generating the program effects and non-effects + +section CutTheorems + +-- (FIXME) Obtain *_cut theorems for each instruction automatically. +-- 1/15: sub sp, sp, #0x20 ; +theorem program.stepi_0x894_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x894#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + cut sn = false ∧ + r StateField.PC sn = 0x898#64 ∧ + r StateField.ERR sn = .None ∧ + (sn.x0) = (s.x0) ∧ + (sn.x1) = (s.x1) ∧ + (sn.sp) = (s.sp) - 0x20#64 ∧ + sn.mem = s.mem ∧ + sn.program = program ∧ + CheckSPAlignment sn := by + subst h_step + have := Max.program.stepi_eq_0x894 h_program h_pc h_err + -- clear Decidable.rec (...) 
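+  -- Normalize the step theorem `this`, discharge the `cut`/PC/register goals
+  -- with `simp`, and handle the remaining `CheckSPAlignment` obligation with
+  -- the `repeat` blocks below.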
+ simp (config := { decide := true, ground := true }) only [minimal_theory] at this + simp_all only [run, cut, this, + state_simp_rules, bitvec_rules, minimal_theory, pcs] + simp only [List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_self, not_false_eq_true, + true_and, List.not_mem_nil, or_self, not_false_eq_true, true_and] + repeat first + | simp (config := { ground := true, decide := true}) [aligned_rules, state_simp_rules] + | apply Aligned_BitVecSub_64_4; + | apply Aligned_BitVecAdd_64_4; + | apply Aligned_AddWithCarry_64_4; + | apply Aligned_AddWithCarry_64_4' + + repeat solve + | apply Aligned_of_extractLsb_eq_zero; bv_decide + | apply Aligned_of_toNat_mod_eq_zero; bv_omega + | decide + | bv_omega + | assumption + +/-- +info: 'MaxTandem.program.stepi_0x894_cut' depends on axioms: [propext, Classical.choice, Lean.ofReduceBool, Quot.sound] +-/ +#guard_msgs in #print axioms program.stepi_0x894_cut + +-- 2/15: str w0, [sp, #12] ; sp[12] = w0_a +theorem program.stepi_0x898_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x898#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_legal : mem_legal' s.sp 40) + (h_step : sn = run 1 s) : + cut sn = false ∧ + r StateField.PC sn = 0x89c#64 ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + sn[(sn.sp) + 8#64, 4] = s[s.sp + 8#64, 4] ∧ + sn[(sn.sp) + 12#64, 4] = (s.x0).truncate 32 ∧ + (sn.x0) = (s.x0) ∧ + (sn.x1) = (s.x1) ∧ + (sn.sp) = (s.sp) ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x898 h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, cut, this, + state_simp_rules, bitvec_rules, minimal_theory] + simp only [pcs, List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_self, not_false_eq_true, + true_and, List.not_mem_nil, or_self, not_false_eq_true, true_and] + simp only [memory_rules, state_simp_rules] + simp_mem + rfl + + +/-- +info: 'MaxTandem.program.stepi_0x898_cut' depends on axioms: [propext, Classical.choice, Lean.ofReduceBool, Quot.sound] +-/ +#guard_msgs in #print axioms program.stepi_0x898_cut + +-- 3/15: str w1, [sp, #8] ; sp[8] = w1_a +set_option trace.simp_mem.info true in +theorem program.stepi_0x89c_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x89c#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_legal : mem_legal' s.sp 40) + (h_step : sn = run 1 s) + : + cut sn = false ∧ + sn[(sn.sp) + 8#64, 4] = (s.x1).truncate 32 ∧ + sn[(sn.sp) + 12#64, 4] = s[s.sp + 12#64, 4] ∧ + (sn.x0) = (s.x0) ∧ + (sn.x1) = (s.x1) ∧ + (sn.sp) = (s.sp) ∧ + r StateField.PC sn = 0x8a0#64 ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x89c h_program h_pc h_err + simp only [minimal_theory, bitvec_rules] at this + simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] + simp only [pcs, List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_self, not_false_eq_true, + true_and, List.not_mem_nil, or_self, not_false_eq_true, true_and] + simp [memory_rules, state_simp_rules, minimal_theory] + simp_mem + rfl + +/-- +info: 'MaxTandem.program.stepi_0x89c_cut' depends on axioms: [propext, Classical.choice, Lean.ofReduceBool, Quot.sound] +-/ +#guard_msgs in #print axioms program.stepi_0x89c_cut + +-- 4/15: ldr w1, [sp, #12] ; w1_b = sp[12] +theorem program.stepi_0x8a0_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC 
s = 0x8a0#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + cut sn = false ∧ + sn[(sn.sp) + 8#64, 4] = s[s.sp + 8#64, 4] ∧ + sn[(sn.sp) + 12#64, 4] = s[s.sp + 12#64, 4] ∧ + sn.x0 = s.x0 ∧ + sn.x1 = BitVec.zeroExtend 64 (s[sn.sp + 12#64, 4]) ∧ -- TODO: change notation to work on Memory, rather than ArmSTate + read_bytes + sn.sp = s.sp ∧ + r StateField.PC sn = 0x8a4#64 ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x8a0 h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] + simp only [pcs, List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_self, not_false_eq_true, + true_and, List.not_mem_nil, or_self, not_false_eq_true, true_and] + done + +/-- +info: 'MaxTandem.program.stepi_0x8a0_cut' depends on axioms: [propext, Classical.choice, Quot.sound] +-/ +#guard_msgs in #print axioms program.stepi_0x8a0_cut + +-- 5/15: ldr w0, [sp, #8] +theorem program.stepi_0x8a4_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x8a4#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + cut sn = false ∧ + sn[(sn.sp) + 8#64, 4] = s[s.sp + 8#64, 4] ∧ + sn[(sn.sp) + 12#64, 4] = s[s.sp + 12#64, 4] ∧ + sn.x0 = BitVec.zeroExtend 64 (s[sn.sp + 8#64, 4]) ∧ -- TODO: change notation to work on Memory, rather than ArmSTate + read_bytes + sn.x1 = s.x1 ∧ -- TODO: change notation to work on Memory, rather than ArmSTate + read_bytes + sn.sp = s.sp ∧ + r StateField.PC sn = 0x8a8#64 ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x8a4 h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] + simp only [pcs, List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_self, not_false_eq_true, + true_and, List.not_mem_nil, or_self, not_false_eq_true, true_and] + +/-- +info: 'MaxTandem.program.stepi_0x8a4_cut' depends on axioms: [propext, Classical.choice, Quot.sound] +-/ +#guard_msgs in #print axioms program.stepi_0x8a4_cut + +-- 6/15: cmp w1, w0 -- | TODO: add necessary conditions +theorem program.stepi_0x8a8_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x8a8#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + cut sn = false ∧ + r StateField.PC sn = 0x8ac#64 ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + sn.C = (AddWithCarry (s.x1.zeroExtend 32) (~~~s.x0.zeroExtend 32) 1#1).snd.c ∧ + sn.V = (AddWithCarry (s.x1.zeroExtend 32) (~~~s.x0.zeroExtend 32) 1#1).snd.v ∧ + sn.Z = (AddWithCarry (s.x1.zeroExtend 32) (~~~s.x0.zeroExtend 32) 1#1).snd.z ∧ + sn.N = (AddWithCarry (s.x1.zeroExtend 32) (~~~s.x0.zeroExtend 32) 1#1).snd.n ∧ + sn[(sn.sp) + 8#64, 4] = s[s.sp + 8#64, 4] ∧ + sn[(sn.sp) + 12#64, 4] = s[s.sp + 12#64, 4] ∧ + sn.x0 = s.x0 ∧ -- TODO: change notation to work on Memory, rather than ArmSTate + read_bytes + sn.x1 = s.x1 ∧ -- TODO: change notation to work on Memory, rather than ArmSTate + read_bytes + sn.sp = s.sp ∧ + CheckSPAlignment sn ∧ + sn.mem = s.mem := by + have := program.stepi_eq_0x8a8 h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] + simp 
only [pcs, List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_self, not_false_eq_true, + true_and, List.not_mem_nil, or_self, not_false_eq_true, true_and] + -- simp only [or_false, or_true] + + +/-- +info: 'MaxTandem.program.stepi_0x8a8_cut' depends on axioms: [propext, Classical.choice, Quot.sound] +-/ +#guard_msgs in #print axioms program.stepi_0x8a8_cut + +-- 7/15 +-- @bollu: this is useless? we were able to make progress even without this? (branch.) +-- b.le 8bc +theorem program.stepi_0x8ac_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x8ac#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + cut sn = true ∧ + r StateField.PC sn = + (if ¬(s.N = s.V ∧ s.Z = 0#1) + then 2236#64 -- takes the branch + else 0x8b0#64) ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + sn[(sn.sp) + 8#64, 4] = s[s.sp + 8#64, 4] ∧ + sn[(sn.sp) + 12#64, 4] = s[s.sp + 12#64, 4] ∧ + sn.x0 = s.x0 ∧ -- TODO: change notation to work on Memory, rather than ArmSTate + read_bytes + sn.x1 = s.x1 ∧ -- TODO: change notation to work on Memory, rather than ArmSTate + read_bytes + sn.sp = s.sp ∧ + CheckSPAlignment sn ∧ + sn.mem = s.mem + := by + have := program.stepi_eq_0x8ac h_program h_pc h_err + simp only [minimal_theory] at this + simp only [← not_and] at * + simp only [cut, read_pc, pcs, List.mem_cons, List.mem_singleton, Bool.decide_or, Bool.or_eq_true, + decide_eq_true_eq, state_value] + split + case isTrue h => + have : sn = w StateField.PC (2236#64) s := by + simp only [state_simp_rules] at h + rw [h_step] + simp [h] at this + rw [← this] + rfl + simp [this, state_simp_rules, h_sp_aligned, h_err, h_program] + case isFalse h => + /- TODO: we have too many layers of abstraction. We need to choose a simp normal form + between `run` and `step`. 
-/ + have : sn = w StateField.PC (2224#64) s := by + simp only [state_simp_rules] at h + rw [h_step] + simp [h] at this + rw [← this] + rfl + simp [this, state_simp_rules, h_sp_aligned, h_err, h_program] + +/-- +info: 'MaxTandem.program.stepi_0x8ac_cut' depends on axioms: [propext, Classical.choice, Quot.sound] +-/ +#guard_msgs in #print axioms program.stepi_0x8ac_cut + +-- 8/15: ldr w0, [sp, #12] +theorem program.stepi_0x8b0_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x8b0#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + cut sn = false ∧ + r StateField.PC sn = 0x8b4#64 ∧ + r StateField.ERR sn = .None ∧ + sn[(sn.sp) + 8#64, 4] = s[s.sp + 8#64, 4] ∧ + sn[(sn.sp) + 12#64, 4] = s[s.sp + 12#64, 4] ∧ + sn.x0 = BitVec.zeroExtend 64 s[sn.sp + 12#64, 4] ∧ -- TODO: change notation to work on Memory, rather than ArmSTate + read_bytes + sn.x1 = s.x1 ∧ -- TODO: change notation to work on Memory, rather than ArmSTate + read_bytes + sn.sp = s.sp ∧ + sn.program = program ∧ + sn.mem = s.mem ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x8b0 h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] + simp only [pcs, List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_self, not_false_eq_true] + simp only [List.not_mem_nil, or_self, not_false_eq_true] + +/-- +info: 'MaxTandem.program.stepi_0x8b0_cut' depends on axioms: [propext, Classical.choice, Quot.sound] +-/ +#guard_msgs in #print axioms program.stepi_0x8b0_cut + +-- 9/15 :str w0, [sp, #28] +theorem program.stepi_0x8b4_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x8b4#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + cut sn = false ∧ + r StateField.PC sn = 0x8b8#64 ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + sn.x0 = s.x0 ∧ + sn[sn.sp + 28#64, 4] = sn.x0.zeroExtend 32 ∧ + sn.sp = s.sp ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x8b4 h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] + simp only [pcs, List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_self, not_false_eq_true] + simp only [List.not_mem_nil, or_self, or_false, or_true] + simp only [not_false_eq_true] + +/-- +info: 'MaxTandem.program.stepi_0x8b4_cut' depends on axioms: [propext, Classical.choice, Lean.ofReduceBool, Quot.sound] +-/ +#guard_msgs in #print axioms program.stepi_0x8b4_cut + +-- 10/15: b 8c4 +theorem program.stepi_0x8b8_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x8b8#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + r StateField.PC sn = 0x8c4#64 ∧ + r StateField.ERR sn = .None ∧ + cut sn = false ∧ -- TODO: why do we speak about the *next* state being a cut? 
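+    -- (Note: 0x8c4#64 is not in `pcs`, so the target of this unconditional
+    -- branch is not a cutpoint.)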
+ sn.program = program ∧ + sn.mem = s.mem ∧ + sn[sn.sp + 28#64, 4] = s[s.sp + 28#64, 4] ∧ + sn.sp = s.sp ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x8b8 h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] + simp only [pcs, List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_false, or_true] + simp only [List.not_mem_nil, or_self, or_false, or_true] + simp only [not_false_eq_true] +/-- +info: 'MaxTandem.program.stepi_0x8b8_cut' depends on axioms: [propext, Classical.choice, Quot.sound] +-/ +#guard_msgs in #print axioms program.stepi_0x8b8_cut + +-- ldr w0, [sp, #8] +theorem program.stepi_0x8bc_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x8bc#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + cut sn = false ∧ + r StateField.PC sn = 0x8c0#64 ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + -- @bollu: try and alwas write the RHS in terms of `s`, rather than `sn`. + sn.x0 = BitVec.zeroExtend 64 (s[s.sp + 8#64, 4]) ∧ + sn.sp = s.sp ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x8bc h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] + simp only [pcs, List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_self, or_false, or_true] + simp only [List.not_mem_nil, or_self, or_false, or_true] + simp only [not_false_eq_true] + +/-- +info: 'MaxTandem.program.stepi_0x8bc_cut' depends on axioms: [propext, Classical.choice, Quot.sound] +-/ +#guard_msgs in #print axioms program.stepi_0x8bc_cut + +--- str w0, [sp, #28] +theorem program.stepi_0x8c0_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x8c0#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + cut sn = false ∧ + r StateField.PC sn = 0x8c4#64 ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + sn[sn.sp + 28, 4] = s.x0.zeroExtend 32 ∧ + sn.sp = s.sp ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x8c0 h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] + simp [pcs] + + /-- +info: 'MaxTandem.program.stepi_0x8c0_cut' depends on axioms: [propext, Classical.choice, Lean.ofReduceBool, Quot.sound] +-/ +#guard_msgs in #print axioms program.stepi_0x8c0_cut + +-- ldr w0, [sp, #28] +theorem program.stepi_0x8c4_cut (s sn : ArmState) + (h_program : s.program = program) + (h_pc : r StateField.PC s = 0x8c4#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + r StateField.PC sn = 0x8c8#64 ∧ + r StateField.ERR sn = .None ∧ + sn.program = program ∧ + sn.mem = s.mem ∧ + cut sn = false ∧ + sn.sp = s.sp ∧ + sn[sn.sp + 28#64, 4] = s[s.sp + 28#64, 4] ∧ + sn.x0 = BitVec.zeroExtend 64 (sn[sn.sp + 28#64, 4]) ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x8c4 h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] + simp [pcs] + +/-- +info: 'MaxTandem.program.stepi_0x8c4_cut' depends on axioms: [propext, Classical.choice, Quot.sound] +-/ +#guard_msgs in #print axioms program.stepi_0x8c4_cut + +-- add sp, sp, #0x20 +theorem program.stepi_0x8c8_cut (s sn : ArmState) + (h_program : 
s.program = program) + (h_pc : r StateField.PC s = 0x8c8#64) + (h_err : r StateField.ERR s = StateError.None) + (h_sp_aligned : CheckSPAlignment s) + (h_step : sn = run 1 s) : + r StateField.PC sn = 0x8cc#64 ∧ + r StateField.ERR sn = .None ∧ + cut sn = true ∧ + sn.program = program ∧ + sn.x0 = s.x0 ∧ + sn.sp = s.sp + 0x20#64 ∧ + CheckSPAlignment sn := by + have := program.stepi_eq_0x8c8 h_program h_pc h_err + simp only [minimal_theory] at this + simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] + simp (config := {ground := true, decide := true}) + apply Aligned_BitVecAdd_64_4 + · assumption + · decide + +/-- +info: 'MaxTandem.program.stepi_0x8c8_cut' depends on axioms: [propext, Classical.choice, Lean.ofReduceBool, Quot.sound] +-/ +#guard_msgs in #print axioms program.stepi_0x8c8_cut + +end CutTheorems + +/-- +rename an old term `old` identifier a new identifier `new`, and replace +all occurrences of `old` with `new`. +-/ +macro "rename" old:ident "to" new:ident : tactic => + `(tactic| + generalize h_rename : $old = $new <;> subst $old) + +/-- define a value `x := val`, and name the hypothesis `heq : x := val` -/ +macro "name" heq:ident ":" x:ident " := " val:term : tactic => + `(tactic| generalize $heq : $val = $x) + + +/-- info: Correctness.cassert {σ : Type} [Sys σ] [Spec' σ] (s0 si : σ) (i : Nat) : Nat × Prop -/ +#guard_msgs in #check Correctness.cassert + +/-- info: MaxTandem.cut (s : ArmState) : Bool -/ +#guard_msgs in #check cut + +open Lean Elab Meta Tactic in +/-- If no name is supplied, then clears all inaccessible names. +If a list of names is supplied, then clear all inaccessible names that have +*any* one of the strings as prefixes. -/ +elab "clear_named" "[" names:(ident),* "]": tactic => do + withMainContext do + for hyp in (← getLCtx) do + -- trace[debug] "name: {hyp.userName}" + for name in names.getElems do + if name.getId.toString.isPrefixOf hyp.userName.toString then + replaceMainGoal [← (← getMainGoal).clear hyp.fvarId] + continue + + + +section ForLean + +theorem BitVec.not_slt {w} (a b : BitVec w) : ¬ (a.slt b) ↔ (b.sle a) := by + simp only [BitVec.slt, BitVec.sle] + by_cases h : a.toInt < b.toInt + · simp [h] + exact Int.not_le.mpr h + · simp [h] + exact Int.not_lt.mp h + +end ForLean + +/- +open Lean Meta Elab in +dsimproc [vcg_rules] reduce_snd_cassert_of_cut + (Correctness.cassert _ _ _) := fun e => do + let_expr Correctness.cassert s0 si i := e | return .continue + for hyp in (← getLCtx) do + -- cut si = true + -- match_expr hyp.type with + -- | cut s2 = true + pure () + return .continue +-/ +set_option trace.debug true in +theorem partial_correctness : + PartialCorrectness ArmState := by + apply Correctness.partial_correctness_from_assertions + case v1 => + intro s0 h_pre + simp only [Spec.pre] at h_pre + have ⟨h_pre_pc, h2⟩ := h_pre + simp only [Spec'.assert, assert, minimal_theory] + rw [h_pre_pc] + constructor + · assumption + · rfl + case v2 => + intro sf h_exit + simp_all (config := {decide := true}) only + [Spec.exit, exit, Spec'.cut, cut, + state_simp_rules, minimal_theory] + case v3 => + intro s0 sf h_assert h_exit + -- (FIXME) Remove Spec.post to replicate bug where simp_all somehow + -- aggressively makes the goal unprovable. + -- simp_all only [Spec'.assert, Spec.exit, assert, exit, Spec.post] + simp only [Spec'.assert, assert] at h_assert + simp only [Spec.exit, exit] at h_exit + -- Why do we see the hex values instead of PC notation? 
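+    -- Rewriting with `h_exit` selects the `merge_end` arm of `assert`, which is
+    -- exactly `post s0 sf`; the goal then follows by `simp_all`.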
+ simp only [h_exit] at h_assert + simp only [Spec.post] + simp_all + case v4 => + intro s0 si h_assert h_not_exit + simp only [Correctness.arm_run] + simp [Spec.exit, exit] at h_not_exit + simp [Spec'.assert, assert, h_not_exit, minimal_theory, read_pc] at h_assert + obtain ⟨h_pre, h_assert⟩ := h_assert + -- have h_pre_copy : pre s0 := h_pre + simp [pre] at h_pre + have ⟨h_s0_pc, h_s0_program, h_s0_err, h_s0_sp_aligned, h_sp_geq, h_mem_legal⟩ := h_pre + split at h_assert + · -- Next cutpoint from 0x894#64 (1/15) + subst si + rename_i x h_pc + name h_s0_run : s1 := run 1 s0 + obtain ⟨h_s1_cut, h_s1_pc, h_s1_err, h_s1_x0, h_s1_x1, h_s1_sp, h_s1_mem, h_s1_program, h_s1_sp_aligned⟩ := + program.stepi_0x894_cut s0 s1 h_s0_program h_pc h_s0_err h_s0_sp_aligned h_s0_run.symm + rw [Correctness.snd_cassert_of_not_cut h_s1_cut]; -- try rw [Correctness.snd_cassert_of_cut h_cut]; + simp [show Sys.next s1 = run 1 s1 by rfl] + clear_named [h_s0] + + -- 2/15 + name h_s1_run : s2 := run 1 s1 + obtain ⟨h_s2_cut, h_s2_pc, h_s2_err, h_s2_program, h_s2_read_sp_8, h_s2_read_sp_12, h_s2_x0, h_s2_x1, h_s2_sp, h_s2_sp_aligned⟩ := + program.stepi_0x898_cut s1 s2 h_s1_program h_s1_pc h_s1_err h_s1_sp_aligned (by simp_mem) h_s1_run.symm + rw [Correctness.snd_cassert_of_not_cut h_s2_cut]; -- try rw [Correctness.snd_cassert_of_cut h_cut]; + simp [show Sys.next s2 = run 1 s2 by rfl] + replace h_s2_sp : s2.sp = (s0.sp - 32#64) := by simp_all + replace h_s2_x0 : s2.x0 = s0.x0 := by simp_all + replace h_s2_x1 : s2.x1 = s0.x1 := by simp_all + replace h_s2_read_sp12 : read_mem_bytes 4 (s2.sp + 12#64) s2 = BitVec.truncate 32 s0.x0 := by simp_all + clear_named [h_s1] + + -- 3/15 + name h_run : s3 := run 1 s2 + obtain h := program.stepi_0x89c_cut s2 s3 h_s2_program h_s2_pc h_s2_err h_s2_sp_aligned (by simp_mem) h_run.symm + obtain ⟨h_s3_cut, h_s3_read_sp_8, h_s3_read_sp_12, h_s3_x0, h_s3_x1, h_s3_sp, h_s3_pc, h_s3_err, h_s3_program, h_s3_sp_aligned⟩ := h + rw [Correctness.snd_cassert_of_not_cut h_s3_cut]; -- try rw [Correctness.snd_cassert_of_cut h_cut]; + simp [show Sys.next s3 = run 1 s3 by rfl] + replace h_s3_x0 : s3.x0 = s0.x0 := by simp_all + replace h_s3_x1 : s3.x1 = s0.x1 := by simp_all + replace h_s3_sp : s3.sp = s0.sp - 32 := by simp_all + /- TODO: this should be s0.x0-/ + replace h_s3_read_sp12 : read_mem_bytes 4 (s3.sp + 12#64) s3 = BitVec.truncate 32 s0.x0 := by simp_all + replace h_s3_read_sp8 : read_mem_bytes 4 (s3.sp + 8#64) s3 = BitVec.truncate 32 s0.x1 := by simp_all + clear_named [h_s2] + + -- 4/15 + name h_run : s4 := run 1 s3 + obtain h := program.stepi_0x8a0_cut s3 s4 h_s3_program h_s3_pc h_s3_err h_s3_sp_aligned h_run.symm + obtain ⟨h_s4_cut, h_s4_read_sp_8, h_s4_read_sp_12, h_s4_x0, h_s4_x1, h_s4_sp, h_s4_pc, h_s4_err, h_s4_program, h_s4_sp_aligned⟩ := h + rw [Correctness.snd_cassert_of_not_cut h_s4_cut]; -- try rw [Correctness.snd_cassert_of_cut h_cut]; + simp [show Sys.next s4 = run 1 s4 by rfl] + replace h_s4_sp : s4.sp = s0.sp - 32 := by simp_all + replace h_s4_x0 : s4.x0 = s0.x0 := by simp_all + replace h_s4_x1 : s4.x1 = BitVec.zeroExtend 64 (BitVec.truncate 32 s0.x0) := by simp_all + replace h_s4_sp : s4.sp = s0.sp - 32 := by simp_all + replace h_s4_read_sp12 : read_mem_bytes 4 (s4.sp + 12#64) s4 = BitVec.truncate 32 s0.x0 := by simp_all + replace h_s4_read_sp8 : read_mem_bytes 4 (s4.sp + 8#64) s4 = BitVec.truncate 32 s0.x1 := by simp_all + clear_named [h_s3] + + -- 5/15 + name h_run : s5 := run 1 s4 + obtain h := program.stepi_0x8a4_cut s4 s5 h_s4_program h_s4_pc h_s4_err h_s4_sp_aligned 
h_run.symm + obtain ⟨h_s5_cut, h_s5_read_sp_8, h_s5_read_sp_12, h_s5_x0, h_s5_x1, h_s5_sp, h_s5_pc, h_s5_err, h_s5_program, h_s5_sp_aligned⟩ := h + rw [Correctness.snd_cassert_of_not_cut h_s5_cut]; -- try rw [Correctness.snd_cassert_of_cut h_cut]; + simp [show Sys.next s5 = run 1 s5 by rfl] + replace h_s5_sp : s5.sp = s0.sp - 32 := by simp_all + replace h_s5_x0 : s5.x0 = BitVec.zeroExtend 64 (BitVec.truncate 32 s0.x1) := by simp_all + replace h_s5_x1 : s5.x1 = BitVec.zeroExtend 64 (BitVec.truncate 32 s0.x0) := by simp_all + replace h_s5_sp : s5.sp = s0.sp - 32 := by simp_all + replace h_s5_read_sp12 : read_mem_bytes 4 (s5.sp + 12#64) s5 = BitVec.truncate 32 s0.x0 := by simp_all + replace h_s5_read_sp8 : read_mem_bytes 4 (s5.sp + 8#64) s5 = BitVec.truncate 32 s0.x1 := by simp_all + clear_named [h_s4] + + -- 6/15 + name h_run : s6 := run 1 s5 + obtain h := program.stepi_0x8a8_cut s5 s6 h_s5_program h_s5_pc h_s5_err h_s5_sp_aligned h_run.symm + obtain ⟨h_s6_cut, h_s6_pc, h_s6_err, h_s6_program, h_s6_c, h_s6_v, h_s6_z, h_s6_n, h_s6_read_sp_8, h_s6_read_sp_12, h_s6_x0, h_s6_x1, h_s6_sp, h_s6_sp_aligned, h_s6_mem⟩ := h + rw [Correctness.snd_cassert_of_not_cut h_s6_cut]; -- try rw [Correctness.snd_cassert_of_cut h_cut]; + simp [show Sys.next s6 = run 1 s6 by rfl] + replace h_s6_sp : s6.sp = s0.sp - 32 := by simp_all + replace h_s6_x0 : s6.x0 = BitVec.zeroExtend 64 (BitVec.truncate 32 s0.x1) := by simp_all + replace h_s6_x1 : s6.x1 = BitVec.zeroExtend 64 (BitVec.truncate 32 s0.x0) := by simp_all + replace h_s6_sp : s6.sp = s0.sp - 32 := by simp_all + replace h_s6_read_sp12 : read_mem_bytes 4 (s6.sp + 12#64) s6 = BitVec.truncate 32 s0.x0 := by simp_all + replace h_s6_read_sp8 : read_mem_bytes 4 (s6.sp + 8#64) s6 = BitVec.truncate 32 s0.x1 := by simp_all + replace h_s6_c : s6.C = (AddWithCarry (s0.x0.zeroExtend 32) (~~~s0.x1.zeroExtend 32) 1#1).snd.c := by simp_all + replace h_s6_n : s6.N = (AddWithCarry (s0.x0.zeroExtend 32) (~~~s0.x1.zeroExtend 32) 1#1).snd.n := by simp_all + replace h_s6_v : s6.V = (AddWithCarry (s0.x0.zeroExtend 32) (~~~s0.x1.zeroExtend 32) 1#1).snd.v := by simp_all + replace h_s6_z : s6.Z = (AddWithCarry (s0.x0.zeroExtend 32) (~~~s0.x1.zeroExtend 32) 1#1).snd.z := by simp_all + + clear_named [h_s5] + + -- 7/16 + name h_run : s7 := run 1 s6 + obtain h := program.stepi_0x8ac_cut s6 (by trivial) (by trivial) (by trivial) (by trivial) (by trivial) (by simp [*]) + obtain ⟨h_s7_cut, h_s7_pc, h_s7_err, h_s7_program, h_s7_read_sp_8, h_s7_read_sp_12, h_s7_x0, h_s7_x1, h_s7_sp, h_s7_sp_aligned, h_s7_mem⟩ := h + rw [Correctness.snd_cassert_of_cut h_s7_cut]; -- try rw [Correctness.snd_cassert_of_cut h_cut]; + replace h_s7_sp : s7.sp = s0.sp - 32 := by simp_all + replace h_s7_x0 : s7.x0 = BitVec.zeroExtend 64 (BitVec.truncate 32 s0.x1) := by simp_all + replace h_s7_x1 : s7.x1 = BitVec.zeroExtend 64 (BitVec.truncate 32 s0.x0) := by simp_all + replace h_s7_sp : s7.sp = s0.sp - 32 := by simp_all + replace h_s7_read_sp12 : read_mem_bytes 4 ((s0.sp - 32#64) + 12#64) s7 = BitVec.truncate 32 s0.x0 := by simp_all + replace h_s7_read_sp8 : read_mem_bytes 4 ((s0.sp - 32#64) + 8#64) s7 = BitVec.truncate 32 s0.x1 := by simp_all + have h_s7_s6_c := h_s6_c + have h_s7_s6_n := h_s6_n + have h_s7_s6_v := h_s6_v + have h_s7_s6_z := h_s6_z + clear_named [h_run, h_s6] -- TODO: I clear h_run here. Is this a good idea? 
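+      -- Case-split on the conditional branch at 0x8ac: if it is taken we land at
+      -- `else_start` (0x8bc), otherwise we fall through to `then_start` (0x8b0).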
+ split at h_s7_pc + case isTrue h => -- branch taken + simp only [Spec'.assert, assert, h_pre, read_pc, h_s7_pc, else_start_inv, read_err, + h_s7_err, h_s7_program, h_s7_sp_aligned, entry_end_inv, Nat.reduceMul, h_s7_read_sp12, + h_s7_read_sp8, and_self, true_and, h_s7_sp, BitVec.ofNat_eq_ofNat, true_and] + have h_s7_sle := sle_iff_not_n_eq_v_and_z_eq_0_32 (BitVec.zeroExtend 32 s0.x0) (BitVec.zeroExtend 32 s0.x1) + rw [← h_s7_s6_n, ← h_s7_s6_v, ← h_s7_s6_z] at h_s7_sle + replace h_s7_sle := h_s7_sle.mp h + simp [h_s7_sle] + simp [h_pre, pre] + case isFalse h => -- branch not taken + simp only [Spec'.assert, assert, h_pre, read_pc, h_s7_pc, then_start_inv, entry_end_inv, + read_err, h_s7_err, h_s7_program, h_s7_sp_aligned, Nat.reduceMul, h_s7_read_sp12, + h_s7_read_sp8, and_self, Bool.not_eq_true, true_and, h_s7_sp, BitVec.ofNat_eq_ofNat, true_and] + have h_s7_sgt := sgt_iff_n_eq_v_and_z_eq_0_32 (BitVec.zeroExtend 32 s0.x0) (BitVec.zeroExtend 32 s0.x1) + simp only [not_and, not_imp, Decidable.not_not] at h + rw [← h_s7_s6_n, ← h_s7_s6_v, ← h_s7_s6_z] at h_s7_sgt + replace h_s7_sgt := h_s7_sgt.mp h + simp [h_pre, pre] + bv_decide + · -- start @ then_start_inv + simp [then_start_inv] at h_assert + rename_i x h_s1_pc + rename si to s1 + obtain ⟨⟨h_s1_err, h_s1_program, h_s1_sp_aligned, h_s1_read_sp_12, h_s1_read_sp_8, h_s1_sp⟩, h_KEEP_x0_x1⟩ := h_assert + clear_named [h_assert] + name h_s1_run : s2 := run 1 s1 + obtain ⟨h_s2_cut, h_s2_pc, h_s2_err, h_s2_read_sp_8, h_s2_read_sp_12, h_s2_x0, h_s2_x1, h_s2_sp, h_s2_program, h_s2_mem, h_s2_sp_aligned⟩ := + program.stepi_0x8b0_cut s1 s2 h_s1_program h_s1_pc h_s1_err h_s1_sp_aligned h_s1_run.symm + rw [Correctness.snd_cassert_of_not_cut h_s2_cut]; -- try rw [Correctness.snd_cassert_of_cut h_cut]; + simp [show Sys.next s2 = run 1 s2 by rfl] + replace h_s2_x0 : s2.x0 = BitVec.zeroExtend 64 (BitVec.zeroExtend 32 s0.x0) := by + rw [h_s2_x0] + rw [h_s2_sp] + rw [h_s1_sp] + + simp_all + replace h_s2_x0 : s2.x0 = spec s0.x0 s0.x1 := by + simp [spec, h_s2_x0] + split <;> bv_decide + clear_named [h_s0] + + -- 3/15 + name h_run : s3 := run 1 s2 + obtain h := program.stepi_0x8b4_cut s2 s3 (by trivial) (by trivial) (by trivial) (by trivial) (h_run.symm) + obtain ⟨h_s3_cut, h_s3_pc, h_s3_err, h_s3_program, h_s3_x0, h_s3_sp_28, h_s3_sp, h_s3_sp_aliged⟩ := h + rw [Correctness.snd_cassert_of_not_cut h_s3_cut]; -- try rw [Correctness.snd_cassert_of_cut h_cut]; + simp [show Sys.next _ = run 1 _ by rfl] + replace h_s3_sp_28 : read_mem_bytes 4 (s3.sp + 28#64) s3 = BitVec.zeroExtend 32 (spec s0.x0 s0.x1) := by simp_all + replace h_s3_sp : s3.sp = s0.sp - 32#64 := by simp_all + -- /- TODO: this should be s0.x0-/ + -- replace h_s3_read_sp12 : read_mem_bytes 4 (s3.sp + 12#64) s3 = BitVec.truncate 32 s0.x0 := by simp_all + -- replace h_s3_read_sp8 : read_mem_bytes 4 (s3.sp + 8#64) s3 = BitVec.truncate 32 s0.x1 := by simp_all + clear_named [h_s2, h_s1] + + -- 4/15 + name h_run : s4 := run 1 s3 + obtain h_s4 := program.stepi_0x8b8_cut s3 s4 (by trivial) (by trivial) (by trivial) (by trivial) (h_run.symm) + rw [Correctness.snd_cassert_of_not_cut (si := s4) (by simp_all [Spec'.cut])]; + simp [show Sys.next _ = run 1 _ by rfl] + have h_s4_sp : s4.sp = s0.sp - 32#64 := by simp_all + have h_s4_sp_28 : read_mem_bytes 4 (s4.sp + 28#64) s4 = BitVec.zeroExtend 32 (spec s0.x0 s0.x1) := by simp_all + clear_named [h_s3] + + -- 5/15 + name h_run : s5 := run 1 s4 + obtain h_s5 := program.stepi_0x8c4_cut s4 s5 (by simp_all) (by simp_all) (by simp_all) (by simp_all) (h_run.symm) + rw 
[Correctness.snd_cassert_of_not_cut (si := s5) (by simp_all [Spec'.cut])]; + have h_s5_x0 : s5.x0 = BitVec.zeroExtend 64 (BitVec.zeroExtend 32 (spec s0.x0 s0.x1)) := by + simp only [show s5.x0 = BitVec.zeroExtend 64 (read_mem_bytes 4 (s5.sp + 28#64) s5) by simp_all] + simp only [Nat.reduceMul] + /- Damn, that the rewrite system is not confluent really fucks me over here ;_; + `simp` winds up rewriting `s5.sp` into `s4.sp` first because of the rule, and + fails to match `read_mem_bytes... (s5.sp + ...) = read_mem_bytes ... (s4.sp + ...)`. + One might say that this entire proof is stupid, but really, I 'just' want it to build an + e-graph and figure it out~ + -/ + have : (read_mem_bytes 4 (s5.sp + 28#64) s5) = read_mem_bytes 4 (s4.sp + 28#64) s4 := by + obtain ⟨_, _, _, _, _, _, h, _⟩ := h_s5 + exact h + simp [this] + rw [h_s4_sp_28] + -- simp only [show s5.x0 = BitVec.zeroExtend 64 (read_mem_bytes 4 (s5.sp + 28) s5) by simp_all] + -- simp only [show (read_mem_bytes 4 (s5.sp + 28) s5) = read_mem_bytes 4 (s4.sp + 28#64) s4 by simp_all] + simp [show Sys.next _ = run 1 _ by rfl] + -- clear_named [h_s4] + + -- 6/15 + name h_run : s6 := run 1 s5 + obtain h_s6 := program.stepi_0x8c8_cut s5 s6 (by simp_all) (by simp_all) (by simp_all) (by simp_all) (h_run.symm) + rw [Correctness.snd_cassert_of_cut (si := s6) (by simp_all [Spec'.cut])]; + simp [Spec'.assert, assert, read_pc, h_s6, h_pre, post, read_err] + simp [h_s5_x0, h_pre, pre] + · -- start @ else_start_inv + simp [else_start_inv] at h_assert + rename_i x h_s1_pc + rename si to s1 + obtain ⟨⟨h_s1_err, h_s1_program, h_s1_sp_aligned, h_s1_read_sp_12, h_s1_read_sp_8, h_s1_sp⟩, h_KEEP_x0_x1⟩ := h_assert + clear_named [h_assert] + name h_s1_run : s2 := run 1 s1 + obtain ⟨h_s2_cut, h_s2_pc, h_s2_err, h_s2_program, h_s2_x0, h_s2_sp_aligned⟩ := + program.stepi_0x8bc_cut s1 s2 h_s1_program h_s1_pc h_s1_err h_s1_sp_aligned h_s1_run.symm + rw [Correctness.snd_cassert_of_not_cut h_s2_cut]; -- try rw [Correctness.snd_cassert_of_cut h_cut]; + simp [show Sys.next s2 = run 1 s2 by rfl] + replace h_s2_x0 : s2.x0 = BitVec.zeroExtend 64 (BitVec.zeroExtend 32 s0.x1) := by simp_all + replace h_s2_x0 : s2.x0 = spec s0.x0 s0.x1 := by + simp [spec, h_s2_x0] + split <;> bv_decide + clear_named [h_s0] + + -- 3/15 + name h_run : s3 := run 1 s2 + obtain h := program.stepi_0x8c0_cut s2 s3 (by trivial) (by trivial) (by trivial) (by simp_all) (h_run.symm) + obtain ⟨h_s3_cut, h_s3_pc, h_s3_err, h_s3_program, h_s3_x0, h_s3_sp, h_s3_sp_aliged⟩ := h + rw [Correctness.snd_cassert_of_not_cut h_s3_cut]; -- try rw [Correctness.snd_cassert_of_cut h_cut]; + simp [show Sys.next _ = run 1 _ by rfl] + replace h_s3_sp_28 : read_mem_bytes 4 (s3.sp + 28#64) s3 = BitVec.zeroExtend 32 (spec s0.x0 s0.x1) := by simp_all + replace h_s3_sp : s3.sp = s0.sp - 32#64 := by simp_all + -- /- TODO: this should be s0.x0-/ + -- replace h_s3_read_sp12 : read_mem_bytes 4 (s3.sp + 12#64) s3 = BitVec.truncate 32 s0.x0 := by simp_all + -- replace h_s3_read_sp8 : read_mem_bytes 4 (s3.sp + 8#64) s3 = BitVec.truncate 32 s0.x1 := by simp_all + clear_named [h_s2, h_s1] + + -- 4/15 + name h_run : s4 := run 1 s3 + obtain h_s4 := program.stepi_0x8c4_cut s3 s4 (by trivial) (by trivial) (by trivial) (by trivial) (h_run.symm) + rw [Correctness.snd_cassert_of_not_cut (si := s4) (by simp_all [Spec'.cut])]; + simp [show Sys.next _ = run 1 _ by rfl] + have h_s4_sp : s4.sp = s0.sp - 32#64 := by simp_all + have h_s4_sp_28 : read_mem_bytes 4 (s4.sp + 28#64) s4 = BitVec.zeroExtend 32 (spec s0.x0 s0.x1) := by simp_all + 
clear_named [h_s3] + + -- 6/15 + name h_run : s5 := run 1 s4 + obtain h_s5 := program.stepi_0x8c8_cut s4 s5 (by simp_all) (by simp_all) (by simp_all) (by simp_all) (h_run.symm) + rw [Correctness.snd_cassert_of_cut (si := s5) (by simp_all [Spec'.cut])]; + simp [Spec'.assert, assert, read_pc, h_s5, h_pre, post, read_err] + simp_all [h_pre, pre] + · rename_i x si_pc + contradiction + · apply False.elim h_assert + +/-- +info: 'MaxTandem.partial_correctness' depends on axioms: [propext, Classical.choice, Lean.ofReduceBool, Quot.sound] +-/ +#guard_msgs in #print axioms partial_correctness + +end MaxTandem diff --git a/Proofs/Proofs.lean b/Proofs/Proofs.lean index 8108ccb1..11c4df13 100644 --- a/Proofs/Proofs.lean +++ b/Proofs/Proofs.lean @@ -12,10 +12,10 @@ import Proofs.Popcount32 import Proofs.Experiments.Summary1 import Proofs.Experiments.MemoryAliasing import Proofs.Experiments.SHA512MemoryAliasing -import Proofs.Experiments.Max import Proofs.Experiments.Abs.Abs import Proofs.Experiments.Abs.AbsVCG import Proofs.Experiments.Abs.AbsVCGTandem import Proofs.Experiments.AddLoop.AddLoop import Proofs.Experiments.AddLoop.AddLoopTandem +-- import Proofs.Experiments.Max.MaxTandem import Proofs.Experiments.MemCpyVCG From b9a2efb2b821cb1af4e5d79aa8073f69043b54f0 Mon Sep 17 00:00:00 2001 From: Siddharth Bhat Date: Tue, 3 Sep 2024 14:40:04 -0500 Subject: [PATCH 05/18] feat: add projections from state to common registers --- Arm/State.lean | 38 +++++++++++++++++++++++++++++++ Proofs/Experiments/MemCpyVCG.lean | 12 ---------- 2 files changed, 38 insertions(+), 12 deletions(-) diff --git a/Arm/State.lean b/Arm/State.lean index 7720ae74..a76fb4fd 100644 --- a/Arm/State.lean +++ b/Arm/State.lean @@ -289,6 +289,44 @@ def r (fld : StateField) (s : ArmState) : (state_value fld) := | PC => read_base_pc s | FLAG i => read_base_flag i s | ERR => read_base_error s +@[state_simp_rules] +def ArmState.x0 (s : ArmState) : BitVec 64 := r (StateField.GPR 0) s + +@[state_simp_rules] +def ArmState.x1 (s : ArmState) : BitVec 64 := r (StateField.GPR 1) s + +@[state_simp_rules] +def ArmState.x2 (s : ArmState) : BitVec 64 := r (StateField.GPR 2) s + +@[state_simp_rules] +def ArmState.sp (s : ArmState) : BitVec 64 := r (StateField.GPR 31) s + +@[state_simp_rules] +def ArmState.V (s : ArmState) : BitVec 1 := r (StateField.FLAG PFlag.V) s + +@[state_simp_rules] +def ArmState.C (s : ArmState) : BitVec 1 := r (StateField.FLAG PFlag.C) s + +@[state_simp_rules] +def ArmState.Z (s : ArmState) : BitVec 1 := r (StateField.FLAG PFlag.Z) s + +@[state_simp_rules] +def ArmState.N (s : ArmState) : BitVec 1 := r (StateField.FLAG PFlag.N) s + +def ArmState.r_GPR_0_eq_x0 (s : ArmState) : r (StateField.GPR 0) s = s.x0 := by rfl + +def ArmState.r_GPR_1_eq_x1 (s : ArmState) : r (StateField.GPR 1) s = s.x1 := by rfl + +def ArmState.r_GPR_31_eq_sp (s : ArmState) : r (StateField.GPR 31) s = s.sp := by rfl + +def ArmState.r_FLAG_V_eq_V (s : ArmState) : r (StateField.FLAG PFlag.V) s = s.V := by rfl + +def ArmState.r_FLAG_C_eq_C (s : ArmState) : r (StateField.FLAG PFlag.C) s = s.C := by rfl + +def ArmState.r_FLAG_Z_eq_Z (s : ArmState) : r (StateField.FLAG PFlag.Z) s = s.Z := by rfl + +def ArmState.r_FLAG_N_eq_N (s : ArmState) : r (StateField.FLAG PFlag.N) s = s.N := by rfl + @[irreducible] def w (fld : StateField) (v : (state_value fld)) (s : ArmState) : ArmState := diff --git a/Proofs/Experiments/MemCpyVCG.lean b/Proofs/Experiments/MemCpyVCG.lean index 85995eeb..1dc0b317 100644 --- a/Proofs/Experiments/MemCpyVCG.lean +++ 
b/Proofs/Experiments/MemCpyVCG.lean @@ -11,18 +11,6 @@ import Tactics.StepThms import Tactics.Sym import Correctness.ArmSpec -/-- Helper projections for registers `xn` -/ -@[state_simp_rules] -def ArmState.x (n : Nat) : ArmState → BitVec 64 -| s => read_gpr 64 n s - -@[state_simp_rules] -def ArmState.x0 := ArmState.x 0 -@[state_simp_rules] -def ArmState.x1 := ArmState.x 1 -@[state_simp_rules] -def ArmState.x2 := ArmState.x 2 - namespace Memcpy /- From 873f2e852ac1c794b652cbe3f9d3f30d57bc35ae Mon Sep 17 00:00:00 2001 From: Siddharth Bhat Date: Tue, 3 Sep 2024 14:44:32 -0500 Subject: [PATCH 06/18] feat: AddWithCarry lemmas --- Arm/Insts/Common.lean | 28 +++++++++++++++++++++++++-- Proofs/Experiments/Max/MaxTandem.lean | 1 + 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/Arm/Insts/Common.lean b/Arm/Insts/Common.lean index dd71910c..582c15ee 100644 --- a/Arm/Insts/Common.lean +++ b/Arm/Insts/Common.lean @@ -14,11 +14,11 @@ open BitVec ---------------------------------------------------------------------- -/-- +/-- `GPRIndex.rand` picks a safe GPR index for Arm-based Apple platforms i.e., one not reserved on them. Use this function instead of `BitVec.rand` to pick an appropriate random index for a source and -destination GPR during cosimulations. +destination GPR during cosimulations. See "NOTE: Considerations for running cosimulations on Arm-based Apple platforms" in Arm/Cosim.lean for details. @@ -58,6 +58,30 @@ def AddWithCarry (x : BitVec n) (y : BitVec n) (carry_in : BitVec 1) : let V := if signExtend (n + 1) result = signed_sum then 0#1 else 1#1 (result, (make_pstate N Z C V)) +theorem fst_AddWithCarry_eq_add (x : BitVec n) (y : BitVec n) : + (AddWithCarry x y 0#1).fst = x + y := by + simp [AddWithCarry, zeroExtend_eq, zeroExtend_zero, zeroExtend_zero] + apply BitVec.eq_of_toNat_eq + simp only [toNat_truncate, toNat_add, Nat.add_mod_mod, Nat.mod_add_mod] + have : 2^n < 2^(n + 1) := by + refine Nat.pow_lt_pow_of_lt (by omega) (by omega) + have : x.toNat + y.toNat < 2^(n + 1) := by omega + rw [Nat.mod_eq_of_lt this] + +theorem fst_AddWithCarry_eq_sub_neg (x : BitVec n) (y : BitVec n) : + (AddWithCarry x y 1#1).fst = x - ~~~y := by + simp [AddWithCarry, zeroExtend_eq, zeroExtend_zero, zeroExtend_zero] + apply BitVec.eq_of_toNat_eq + simp only [toNat_truncate, toNat_add, Nat.add_mod_mod, Nat.mod_add_mod, toNat_ofNat, Nat.pow_one, + Nat.reduceMod, toNat_sub, toNat_not] + simp only [show 2 ^ n - (2 ^ n - 1 - y.toNat) = 1 + y.toNat by omega] + have : 2^n < 2^(n + 1) := by + refine Nat.pow_lt_pow_of_lt (by omega) (by omega) + have : x.toNat + y.toNat + 1 < 2^(n + 1) := by omega + rw [Nat.mod_eq_of_lt this] + congr 1 + omega + -- TODO: Is this rule helpful at all? 
@[bitvec_rules] theorem zeroExtend_eq_of_AddWithCarry : diff --git a/Proofs/Experiments/Max/MaxTandem.lean b/Proofs/Experiments/Max/MaxTandem.lean index 5a50a1b0..9ed0e850 100644 --- a/Proofs/Experiments/Max/MaxTandem.lean +++ b/Proofs/Experiments/Max/MaxTandem.lean @@ -162,6 +162,7 @@ theorem program.stepi_0x894_cut (s sn : ArmState) state_simp_rules, bitvec_rules, minimal_theory, pcs] simp only [List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_self, not_false_eq_true, true_and, List.not_mem_nil, or_self, not_false_eq_true, true_and] + rw [fst_AddWithCarry_eq_sub_neg] repeat first | simp (config := { ground := true, decide := true}) [aligned_rules, state_simp_rules] | apply Aligned_BitVecSub_64_4; From 83c06e40760d548a1dccdfd7eb8c677320abc769 Mon Sep 17 00:00:00 2001 From: Siddharth Bhat Date: Tue, 3 Sep 2024 14:49:24 -0500 Subject: [PATCH 07/18] feat: ArmState.mem_w_eq_mem --- Arm/State.lean | 12 ++++++++++++ Proofs/Experiments/Max/MaxTandem.lean | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/Arm/State.lean b/Arm/State.lean index a76fb4fd..38b91629 100644 --- a/Arm/State.lean +++ b/Arm/State.lean @@ -740,6 +740,18 @@ def Memory.read (addr : BitVec 64) (m : Memory) : BitVec 8 := theorem ArmState.read_mem_eq_mem_read : read_mem addr s = s.mem.read addr := rfl +/-- `w` does not effect memory. -/ +@[memory_rules, state_simp_rules] +theorem ArmState.mem_w_eq_mem (fld : StateField) (v : state_value fld) (s : ArmState) : + (w fld v s).mem = s.mem := by + cases fld <;> + unfold w write_base_error + write_base_gpr + write_base_sfp + write_base_pc + write_base_flag <;> + simp + /-- A variant of `write_mem` that directly talks about writes to memory, instead of over the entire `ArmState` -/ diff --git a/Proofs/Experiments/Max/MaxTandem.lean b/Proofs/Experiments/Max/MaxTandem.lean index 9ed0e850..1b8546ec 100644 --- a/Proofs/Experiments/Max/MaxTandem.lean +++ b/Proofs/Experiments/Max/MaxTandem.lean @@ -162,7 +162,7 @@ theorem program.stepi_0x894_cut (s sn : ArmState) state_simp_rules, bitvec_rules, minimal_theory, pcs] simp only [List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_self, not_false_eq_true, true_and, List.not_mem_nil, or_self, not_false_eq_true, true_and] - rw [fst_AddWithCarry_eq_sub_neg] + simp only [fst_AddWithCarry_eq_sub_neg, BitVec.reduceNot, true_and] repeat first | simp (config := { ground := true, decide := true}) [aligned_rules, state_simp_rules] | apply Aligned_BitVecSub_64_4; From 2648d30d6e00fe267faa99dacbc39efb0693da6a Mon Sep 17 00:00:00 2001 From: Siddharth Bhat Date: Tue, 3 Sep 2024 14:54:13 -0500 Subject: [PATCH 08/18] feat: Aligned_BitVecSub_64_4 --- Arm/Insts/Common.lean | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/Arm/Insts/Common.lean b/Arm/Insts/Common.lean index 582c15ee..6e25832d 100644 --- a/Arm/Insts/Common.lean +++ b/Arm/Insts/Common.lean @@ -120,6 +120,13 @@ def Aligned (x : BitVec n) (a : Nat) : Prop := instance : Decidable (Aligned x a) := by cases a <;> simp [Aligned] <;> infer_instance +theorem Aligned_BitVecSub_64_4 {x : BitVec 64} {y : BitVec 64} + (x_aligned : Aligned x 4) + (y_aligned : Aligned y 4) + : Aligned (x - y) 4 := by + simp_all only [Aligned, Nat.sub_zero, zero_eq] + bv_decide + theorem Aligned_BitVecAdd_64_4 {x : BitVec 64} {y : BitVec 64} (x_aligned : Aligned x 4) (y_aligned : Aligned y 4) From 0da4d40007e3aa6c328fdffceaaad69fa80248d7 Mon Sep 17 00:00:00 2001 From: Siddharth Bhat Date: Tue, 3 Sep 2024 14:56:03 -0500 Subject: [PATCH 09/18] chore: eqn lemmas for (cassert s0 si i) --- 
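These equation lemmas are meant to drive symbolic simulation in the VCG-style
proofs: rewriting with `Correctness.snd_cassert_of_not_cut h_cut` steps past a
state that is not a cutpoint, while `Correctness.snd_cassert_of_cut h_cut`
reduces the goal to the assertion once a cutpoint is reached (see the uses in
Proofs/Experiments/Max/MaxTandem.lean; `h_cut` here stands for whatever
cut/not-cut fact is in context).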
Correctness/Correctness.lean | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/Correctness/Correctness.lean b/Correctness/Correctness.lean index 66bc69bf..529f00bf 100644 --- a/Correctness/Correctness.lean +++ b/Correctness/Correctness.lean @@ -244,6 +244,17 @@ theorem cassert_cut [Sys σ] [Spec' σ] {s0 si : σ} (h : cut si) (i : Nat) : simp [*] done +theorem snd_cassert_of_cut [Sys σ] [Spec' σ] {s0 si : σ} (h : cut si) (i : Nat) : + (cassert s0 si i).snd = assert s0 si := by + rw [cassert_eq] + simp [*] + done +theorem snd_cassert_of_not_cut [Sys σ] [Spec' σ] {s0 si : σ} (h : cut si = false) (i : Nat) : + (cassert s0 si i).snd = (cassert s0 (next si) (i + 1)).snd := by + rw [cassert_eq] + simp [*] + done + theorem cassert_not_cut [Sys σ] [Spec' σ] {s0 si : σ} (h₁ : ¬ cut si) (h₂ : (cassert s0 (next si) (i+1)).fst = j) : (cassert s0 si i).fst = j ∧ From b20e6de97746748f6495a905ac493b3cb9ce0cce Mon Sep 17 00:00:00 2001 From: Siddharth Bhat Date: Tue, 3 Sep 2024 14:58:26 -0500 Subject: [PATCH 10/18] =?UTF-8?q?chore:=20add=20proofs=20that=20`x=20>=20y?= =?UTF-8?q?`=20iff=20`(N=20=3D=20V)=20=E2=88=A7=20Z=20=3D=200`?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Arm/Insts/Common.lean | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/Arm/Insts/Common.lean b/Arm/Insts/Common.lean index 6e25832d..decf8b51 100644 --- a/Arm/Insts/Common.lean +++ b/Arm/Insts/Common.lean @@ -110,6 +110,45 @@ def ConditionHolds (cond : BitVec 4) (s : ArmState) : Bool := else result +-- https://github.com/awslabs/s2n-bignum/blob/main/arm/proofs/instruction.ml#L543C36-L543C62 +/- `x > y` iff `(N = V) ∧ Z = 0` . -/ +theorem sgt_iff_n_eq_v_and_z_eq_0_64 (x y : BitVec 64) : + (((AddWithCarry x (~~~y) 1#1).snd.n = (AddWithCarry x (~~~y) 1#1).snd.v) ∧ + (AddWithCarry x (~~~y) 1#1).snd.z = 0#1) ↔ BitVec.slt y x := by + simp [AddWithCarry, make_pstate] + split + · bv_decide + · bv_decide + +-- https://github.com/awslabs/s2n-bignum/blob/main/arm/proofs/instruction.ml#L543C36-L543C62 +/- `x > y` iff `(N = V) ∧ Z = 0` . -/ +theorem sgt_iff_n_eq_v_and_z_eq_0_32 (x y : BitVec 32) : + (((AddWithCarry x (~~~y) 1#1).snd.n = (AddWithCarry x (~~~y) 1#1).snd.v) ∧ + (AddWithCarry x (~~~y) 1#1).snd.z = 0#1) ↔ BitVec.slt y x := by + simp [AddWithCarry, make_pstate] + split + · bv_decide + · bv_decide + +/- `x ≤ y` iff `¬ ((N = V) ∧ (Z = 0))`. -/ +theorem sle_iff_not_n_eq_v_and_z_eq_0_64 (x y : BitVec 64) : + (¬(((AddWithCarry x (~~~y) 1#1).snd.n = (AddWithCarry x (~~~y) 1#1).snd.v) ∧ + (AddWithCarry x (~~~y) 1#1).snd.z = 0#1)) ↔ BitVec.sle x y := by + simp [AddWithCarry, make_pstate] + split + · bv_decide + · bv_decide + +/- `x ≤ y` iff `¬ ((N = V) ∧ (Z = 0))`. -/ +theorem sle_iff_not_n_eq_v_and_z_eq_0_32 (x y : BitVec 32) : + (¬(((AddWithCarry x (~~~y) 1#1).snd.n = (AddWithCarry x (~~~y) 1#1).snd.v) ∧ + (AddWithCarry x (~~~y) 1#1).snd.z = 0#1)) ↔ BitVec.sle x y := by + simp [AddWithCarry, make_pstate] + split + · bv_decide + · bv_decide + + /-- `Aligned x a` witnesses that the bitvector `x` is `a`-bit aligned. 
-/ def Aligned (x : BitVec n) (a : Nat) : Prop := match a with From 4242cda70de34820866eeef0e7a5cff75ca6b4d8 Mon Sep 17 00:00:00 2001 From: Siddharth Bhat Date: Tue, 3 Sep 2024 15:03:37 -0500 Subject: [PATCH 11/18] chore: add MaxTandem example --- Proofs/Experiments/Max/MaxTandem.lean | 3 +-- Proofs/Proofs.lean | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/Proofs/Experiments/Max/MaxTandem.lean b/Proofs/Experiments/Max/MaxTandem.lean index 1b8546ec..ecb27d12 100644 --- a/Proofs/Experiments/Max/MaxTandem.lean +++ b/Proofs/Experiments/Max/MaxTandem.lean @@ -171,8 +171,6 @@ theorem program.stepi_0x894_cut (s sn : ArmState) | apply Aligned_AddWithCarry_64_4' repeat solve - | apply Aligned_of_extractLsb_eq_zero; bv_decide - | apply Aligned_of_toNat_mod_eq_zero; bv_omega | decide | bv_omega | assumption @@ -573,6 +571,7 @@ theorem program.stepi_0x8c8_cut (s sn : ArmState) simp only [minimal_theory] at this simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] simp (config := {ground := true, decide := true}) + simp only [fst_AddWithCarry_eq_add, true_and] apply Aligned_BitVecAdd_64_4 · assumption · decide diff --git a/Proofs/Proofs.lean b/Proofs/Proofs.lean index 11c4df13..a830f314 100644 --- a/Proofs/Proofs.lean +++ b/Proofs/Proofs.lean @@ -17,5 +17,5 @@ import Proofs.Experiments.Abs.AbsVCG import Proofs.Experiments.Abs.AbsVCGTandem import Proofs.Experiments.AddLoop.AddLoop import Proofs.Experiments.AddLoop.AddLoopTandem --- import Proofs.Experiments.Max.MaxTandem +import Proofs.Experiments.Max.MaxTandem import Proofs.Experiments.MemCpyVCG From aabff6285529b2f24e42f423120db38397c0dee0 Mon Sep 17 00:00:00 2001 From: Siddharth Bhat Date: Wed, 4 Sep 2024 09:44:57 -0500 Subject: [PATCH 12/18] chore: cleanup --- Arm/Insts/Common.lean | 12 ++-- Arm/State.lean | 1 + Correctness/Correctness.lean | 5 +- Proofs/Experiments/Max/MaxTandem.lean | 86 +++++++++------------------ 4 files changed, 38 insertions(+), 66 deletions(-) diff --git a/Arm/Insts/Common.lean b/Arm/Insts/Common.lean index 7db709be..bda33d7a 100644 --- a/Arm/Insts/Common.lean +++ b/Arm/Insts/Common.lean @@ -55,6 +55,7 @@ def AddWithCarry (x : BitVec n) (y : BitVec n) (carry_in : BitVec 1) : let V := if signExtend (n + 1) result = signed_sum then 0#1 else 1#1 (result, (make_pstate N Z C V)) +/-- When the carry bit is `0`, `AddWithCarry x y 0 = x + y` -/ theorem fst_AddWithCarry_eq_add (x : BitVec n) (y : BitVec n) : (AddWithCarry x y 0#1).fst = x + y := by simp [AddWithCarry, zeroExtend_eq, zeroExtend_zero, zeroExtend_zero] @@ -65,6 +66,7 @@ theorem fst_AddWithCarry_eq_add (x : BitVec n) (y : BitVec n) : have : x.toNat + y.toNat < 2^(n + 1) := by omega rw [Nat.mod_eq_of_lt this] +/-- When the carry bit is `1`, `AddWithCarry x y 1 = x - ~~~y` -/ theorem fst_AddWithCarry_eq_sub_neg (x : BitVec n) (y : BitVec n) : (AddWithCarry x y 1#1).fst = x - ~~~y := by simp [AddWithCarry, zeroExtend_eq, zeroExtend_zero, zeroExtend_zero] @@ -107,8 +109,7 @@ def ConditionHolds (cond : BitVec 4) (s : ArmState) : Bool := else result --- https://github.com/awslabs/s2n-bignum/blob/main/arm/proofs/instruction.ml#L543C36-L543C62 -/- `x > y` iff `(N = V) ∧ Z = 0` . -/ +/-- `x > y` iff `(N = V) ∧ Z = 0` . 
-/ theorem sgt_iff_n_eq_v_and_z_eq_0_64 (x y : BitVec 64) : (((AddWithCarry x (~~~y) 1#1).snd.n = (AddWithCarry x (~~~y) 1#1).snd.v) ∧ (AddWithCarry x (~~~y) 1#1).snd.z = 0#1) ↔ BitVec.slt y x := by @@ -117,8 +118,7 @@ theorem sgt_iff_n_eq_v_and_z_eq_0_64 (x y : BitVec 64) : · bv_decide · bv_decide --- https://github.com/awslabs/s2n-bignum/blob/main/arm/proofs/instruction.ml#L543C36-L543C62 -/- `x > y` iff `(N = V) ∧ Z = 0` . -/ +/-- `x > y` iff `(N = V) ∧ Z = 0` . -/ theorem sgt_iff_n_eq_v_and_z_eq_0_32 (x y : BitVec 32) : (((AddWithCarry x (~~~y) 1#1).snd.n = (AddWithCarry x (~~~y) 1#1).snd.v) ∧ (AddWithCarry x (~~~y) 1#1).snd.z = 0#1) ↔ BitVec.slt y x := by @@ -127,7 +127,7 @@ theorem sgt_iff_n_eq_v_and_z_eq_0_32 (x y : BitVec 32) : · bv_decide · bv_decide -/- `x ≤ y` iff `¬ ((N = V) ∧ (Z = 0))`. -/ +/-- `x ≤ y` iff `¬ ((N = V) ∧ (Z = 0))`. -/ theorem sle_iff_not_n_eq_v_and_z_eq_0_64 (x y : BitVec 64) : (¬(((AddWithCarry x (~~~y) 1#1).snd.n = (AddWithCarry x (~~~y) 1#1).snd.v) ∧ (AddWithCarry x (~~~y) 1#1).snd.z = 0#1)) ↔ BitVec.sle x y := by @@ -136,7 +136,7 @@ theorem sle_iff_not_n_eq_v_and_z_eq_0_64 (x y : BitVec 64) : · bv_decide · bv_decide -/- `x ≤ y` iff `¬ ((N = V) ∧ (Z = 0))`. -/ +/-- `x ≤ y` iff `¬ ((N = V) ∧ (Z = 0))`. -/ theorem sle_iff_not_n_eq_v_and_z_eq_0_32 (x y : BitVec 32) : (¬(((AddWithCarry x (~~~y) 1#1).snd.n = (AddWithCarry x (~~~y) 1#1).snd.v) ∧ (AddWithCarry x (~~~y) 1#1).snd.z = 0#1)) ↔ BitVec.sle x y := by diff --git a/Arm/State.lean b/Arm/State.lean index 2b5aed10..b740eeff 100644 --- a/Arm/State.lean +++ b/Arm/State.lean @@ -289,6 +289,7 @@ def r (fld : StateField) (s : ArmState) : (state_value fld) := | PC => read_base_pc s | FLAG i => read_base_flag i s | ERR => read_base_error s + @[state_simp_rules] def ArmState.x0 (s : ArmState) : BitVec 64 := r (StateField.GPR 0) s diff --git a/Correctness/Correctness.lean b/Correctness/Correctness.lean index 23a66511..95c6274b 100644 --- a/Correctness/Correctness.lean +++ b/Correctness/Correctness.lean @@ -257,15 +257,16 @@ theorem cassert_cut [Sys σ] [Spec' σ] {s0 si : σ} (h : cut si) (i : Nat) : simp only [↓reduceIte, and_self, h] done +/-- If `si` is a cut-point, then `(cassert s0 si i).snd` is to verify the assertion for `si`. -/ theorem snd_cassert_of_cut [Sys σ] [Spec' σ] {s0 si : σ} (h : cut si) (i : Nat) : (cassert s0 si i).snd = assert s0 si := by rw [cassert_eq] simp [*] - done + +/-- If `si` is not a cut-point, then `(cassert s0 si i).snd` is to run the next state. -/ theorem snd_cassert_of_not_cut [Sys σ] [Spec' σ] {s0 si : σ} (h : cut si = false) (i : Nat) : (cassert s0 si i).snd = (cassert s0 (next si) (i + 1)).snd := by rw [cassert_eq] simp [*] - done theorem cassert_not_cut [Sys σ] [Spec' σ] {s0 si : σ} (h₁ : ¬ cut si) (h₂ : (cassert s0 (next si) (i+1)).fst = j) : diff --git a/Proofs/Experiments/Max/MaxTandem.lean b/Proofs/Experiments/Max/MaxTandem.lean index ecb27d12..534b70ad 100644 --- a/Proofs/Experiments/Max/MaxTandem.lean +++ b/Proofs/Experiments/Max/MaxTandem.lean @@ -22,6 +22,8 @@ open Max section PC +/--# We define scoped notation for our cutpoint PCs to use in pattern matching. -/ + scoped notation "entry_start" => 0x894#64 scoped notation "then_start" => 0x8b0#64 scoped notation "else_start" => 0x8bc#64 @@ -29,21 +31,19 @@ scoped notation "merge_end" => 0x8cc#64 namespace ArmStateNotation + +/-! 
We build a notation for `read_mem_bytes $n $base $s` as `$s[$base, $n]` -/ @[inherit_doc read_mem_bytes] syntax:max term noWs "[" withoutPosition(term) "," withoutPosition(term) noWs "]" : term macro_rules | `($s[$base,$n]) => `(read_mem_bytes $n $base $s) end ArmStateNotation -open ArmStateNotation - +open ArmStateNotation -def pcs : List (BitVec 64) := [entry_start, - -- entry_end, +def pcs : List (BitVec 64) := [ + entry_start, then_start, - -- then_end, else_start, - -- else_end, - -- merge_start, merge_end ] @@ -52,7 +52,6 @@ end PC def cut (s : ArmState) : Bool := decide (read_pc s ∈ pcs) section Invariants --- variable (s0 si : ArmState) /-- Precondition for the correctness of the Add program. -/ def pre (s : ArmState) : Prop := @@ -65,7 +64,6 @@ def pre (s : ArmState) : Prop := s.sp ≥ 32 ∧ mem_legal' s.sp 80 -- TODO: find the correct smallest bound we need here. - /-- Specification function. -/ def spec (x0 x1 : BitVec 64) : BitVec 64 := if BitVec.slt (x0.zeroExtend 32) (x1.zeroExtend 32) @@ -83,6 +81,7 @@ def post (s0 sf : ArmState) : Prop := def entry_start_inv (s0 si : ArmState) : Prop := si = s0 +/-- Invariant right at the conditional branch -/ def entry_end_inv (s0 si : ArmState): Prop := read_err si = .None ∧ si.program = program ∧ @@ -91,10 +90,14 @@ def entry_end_inv (s0 si : ArmState): Prop := CheckSPAlignment si ∧ si[s0.sp - 32#64 + 12#64, 4] = s0.x0.zeroExtend 32 ∧ si[s0.sp - 32#64 + 8#64, 4] = s0.x1.zeroExtend 32 ∧ - si.sp = s0.sp - 32#64 -- TODO: read flags. - -- ∧ read_mem_bytes 4 (r (Spec) si) si = 0x0#32 + si.sp = s0.sp - 32#64 + +/-! +Notice the pattern: The invariant after the conditional branch +is the invariant before the conditional branch, plus what the conditional branch taught us +-/ -def then_start_inv (s0 si : ArmState): Prop := +def then_start_inv (s0 si : ArmState) : Prop := entry_end_inv s0 si ∧ ¬ ((BitVec.zeroExtend 32 s0.x0).sle (BitVec.zeroExtend 32 s0.x1) = true) @@ -112,17 +115,12 @@ end Invariants def assert (s0 si : ArmState) : Prop := pre s0 ∧ - -- Using `match` is preferable to `if` for the `split` tactic (and for - -- readability). + -- Using `match` is preferable to `if` for the `split` tactic (and for readability). match (read_pc si) with | entry_start => entry_start_inv s0 si - -- | entry_end => entry_end_inv s0 si | then_start => then_start_inv s0 si - -- | then_end => then_end_inv s0 si | else_start => else_start_inv s0 si - -- | else_end => else_end_inv s0 si - -- | merge_start => merge_start_inv s0 si - | merge_end => post s0 si -- why do I need an `assert` here, if it's tracked by `post`? Confusing. + | merge_end => post s0 si -- @bollu: why do I need an `assert` here, if it's tracked by `post`? We should change VCG. 
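  -- `assert` is only consulted at cutpoint states, and `cut` checks membership in
  -- `pcs`, so every other program counter is mapped to `False`.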
| _ => False instance : Spec' ArmState where @@ -163,13 +161,13 @@ theorem program.stepi_0x894_cut (s sn : ArmState) simp only [List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_self, not_false_eq_true, true_and, List.not_mem_nil, or_self, not_false_eq_true, true_and] simp only [fst_AddWithCarry_eq_sub_neg, BitVec.reduceNot, true_and] + /- Toy automation for deciding Aligned, will be converted to a decision procedure in a follow up PR -/ repeat first | simp (config := { ground := true, decide := true}) [aligned_rules, state_simp_rules] | apply Aligned_BitVecSub_64_4; | apply Aligned_BitVecAdd_64_4; | apply Aligned_AddWithCarry_64_4; | apply Aligned_AddWithCarry_64_4' - repeat solve | decide | bv_omega @@ -208,14 +206,12 @@ theorem program.stepi_0x898_cut (s sn : ArmState) simp_mem rfl - /-- info: 'MaxTandem.program.stepi_0x898_cut' depends on axioms: [propext, Classical.choice, Lean.ofReduceBool, Quot.sound] -/ #guard_msgs in #print axioms program.stepi_0x898_cut -- 3/15: str w1, [sp, #8] ; sp[8] = w1_a -set_option trace.simp_mem.info true in theorem program.stepi_0x89c_cut (s sn : ArmState) (h_program : s.program = program) (h_pc : r StateField.PC s = 0x89c#64) @@ -332,8 +328,6 @@ theorem program.stepi_0x8a8_cut (s sn : ArmState) simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] simp only [pcs, List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_self, not_false_eq_true, true_and, List.not_mem_nil, or_self, not_false_eq_true, true_and] - -- simp only [or_false, or_true] - /-- info: 'MaxTandem.program.stepi_0x8a8_cut' depends on axioms: [propext, Classical.choice, Quot.sound] @@ -341,8 +335,6 @@ info: 'MaxTandem.program.stepi_0x8a8_cut' depends on axioms: [propext, Classical #guard_msgs in #print axioms program.stepi_0x8a8_cut -- 7/15 --- @bollu: this is useless? we were able to make progress even without this? (branch.) 
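-- The next instruction is the conditional branch of the program. Its two successor
-- cutpoints carry complementary assertions: the fall-through target `then_start`
-- (0x8b0) records `¬ (zeroExtend 32 s0.x0).sle (zeroExtend 32 s0.x1)` (see
-- `then_start_inv`), while the branch target `else_start` (0x8bc) records the
-- opposite inequality.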
--- b.le 8bc theorem program.stepi_0x8ac_cut (s sn : ArmState) (h_program : s.program = program) (h_pc : r StateField.PC s = 0x8ac#64) @@ -476,7 +468,7 @@ info: 'MaxTandem.program.stepi_0x8b8_cut' depends on axioms: [propext, Classical -/ #guard_msgs in #print axioms program.stepi_0x8b8_cut --- ldr w0, [sp, #8] +-- 11/15: ldr w0, [sp, #8] theorem program.stepi_0x8bc_cut (s sn : ArmState) (h_program : s.program = program) (h_pc : r StateField.PC s = 0x8bc#64) @@ -503,7 +495,7 @@ info: 'MaxTandem.program.stepi_0x8bc_cut' depends on axioms: [propext, Classical -/ #guard_msgs in #print axioms program.stepi_0x8bc_cut ---- str w0, [sp, #28] +--- 12/15: str w0, [sp, #28] theorem program.stepi_0x8c0_cut (s sn : ArmState) (h_program : s.program = program) (h_pc : r StateField.PC s = 0x8c0#64) @@ -527,7 +519,7 @@ info: 'MaxTandem.program.stepi_0x8c0_cut' depends on axioms: [propext, Classical -/ #guard_msgs in #print axioms program.stepi_0x8c0_cut --- ldr w0, [sp, #28] +-- 13/15: ldr w0, [sp, #28] theorem program.stepi_0x8c4_cut (s sn : ArmState) (h_program : s.program = program) (h_pc : r StateField.PC s = 0x8c4#64) @@ -553,7 +545,7 @@ info: 'MaxTandem.program.stepi_0x8c4_cut' depends on axioms: [propext, Classical -/ #guard_msgs in #print axioms program.stepi_0x8c4_cut --- add sp, sp, #0x20 +-- 14/15: add sp, sp, #0x20 theorem program.stepi_0x8c8_cut (s sn : ArmState) (h_program : s.program = program) (h_pc : r StateField.PC s = 0x8c8#64) @@ -629,26 +621,13 @@ theorem BitVec.not_slt {w} (a b : BitVec w) : ¬ (a.slt b) ↔ (b.sle a) := by end ForLean -/- -open Lean Meta Elab in -dsimproc [vcg_rules] reduce_snd_cassert_of_cut - (Correctness.cassert _ _ _) := fun e => do - let_expr Correctness.cassert s0 si i := e | return .continue - for hyp in (← getLCtx) do - -- cut si = true - -- match_expr hyp.type with - -- | cut s2 = true - pure () - return .continue --/ -set_option trace.debug true in theorem partial_correctness : PartialCorrectness ArmState := by apply Correctness.partial_correctness_from_assertions case v1 => intro s0 h_pre simp only [Spec.pre] at h_pre - have ⟨h_pre_pc, h2⟩ := h_pre + have ⟨h_pre_pc, _h2⟩ := h_pre simp only [Spec'.assert, assert, minimal_theory] rw [h_pre_pc] constructor @@ -766,7 +745,7 @@ theorem partial_correctness : -- 7/16 name h_run : s7 := run 1 s6 obtain h := program.stepi_0x8ac_cut s6 (by trivial) (by trivial) (by trivial) (by trivial) (by trivial) (by simp [*]) - obtain ⟨h_s7_cut, h_s7_pc, h_s7_err, h_s7_program, h_s7_read_sp_8, h_s7_read_sp_12, h_s7_x0, h_s7_x1, h_s7_sp, h_s7_sp_aligned, h_s7_mem⟩ := h + obtain ⟨h_s7_cut, h_s7_pc, h_s7_err, h_s7_program, h_s7_read_sp_8, h_s7_read_sp_12, h_s7_x0, h_s7_x1, h_s7_sp, h_s7_sp_aligned, _h_s7_mem⟩ := h rw [Correctness.snd_cassert_of_cut h_s7_cut]; -- try rw [Correctness.snd_cassert_of_cut h_cut]; replace h_s7_sp : s7.sp = s0.sp - 32 := by simp_all replace h_s7_x0 : s7.x0 = BitVec.zeroExtend 64 (BitVec.truncate 32 s0.x1) := by simp_all @@ -806,9 +785,9 @@ theorem partial_correctness : obtain ⟨⟨h_s1_err, h_s1_program, h_s1_sp_aligned, h_s1_read_sp_12, h_s1_read_sp_8, h_s1_sp⟩, h_KEEP_x0_x1⟩ := h_assert clear_named [h_assert] name h_s1_run : s2 := run 1 s1 - obtain ⟨h_s2_cut, h_s2_pc, h_s2_err, h_s2_read_sp_8, h_s2_read_sp_12, h_s2_x0, h_s2_x1, h_s2_sp, h_s2_program, h_s2_mem, h_s2_sp_aligned⟩ := + obtain ⟨h_s2_cut, h_s2_pc, h_s2_err, h_s2_read_sp_8, h_s2_read_sp_12, h_s2_x0, h_s2_x1, h_s2_sp, h_s2_program, _h_s2_mem, h_s2_sp_aligned⟩ := program.stepi_0x8b0_cut s1 s2 h_s1_program h_s1_pc h_s1_err h_s1_sp_aligned 
h_s1_run.symm - rw [Correctness.snd_cassert_of_not_cut h_s2_cut]; -- try rw [Correctness.snd_cassert_of_cut h_cut]; + rw [Correctness.snd_cassert_of_not_cut h_s2_cut]; simp [show Sys.next s2 = run 1 s2 by rfl] replace h_s2_x0 : s2.x0 = BitVec.zeroExtend 64 (BitVec.zeroExtend 32 s0.x0) := by rw [h_s2_x0] @@ -829,9 +808,6 @@ theorem partial_correctness : simp [show Sys.next _ = run 1 _ by rfl] replace h_s3_sp_28 : read_mem_bytes 4 (s3.sp + 28#64) s3 = BitVec.zeroExtend 32 (spec s0.x0 s0.x1) := by simp_all replace h_s3_sp : s3.sp = s0.sp - 32#64 := by simp_all - -- /- TODO: this should be s0.x0-/ - -- replace h_s3_read_sp12 : read_mem_bytes 4 (s3.sp + 12#64) s3 = BitVec.truncate 32 s0.x0 := by simp_all - -- replace h_s3_read_sp8 : read_mem_bytes 4 (s3.sp + 8#64) s3 = BitVec.truncate 32 s0.x1 := by simp_all clear_named [h_s2, h_s1] -- 4/15 @@ -850,21 +826,18 @@ theorem partial_correctness : have h_s5_x0 : s5.x0 = BitVec.zeroExtend 64 (BitVec.zeroExtend 32 (spec s0.x0 s0.x1)) := by simp only [show s5.x0 = BitVec.zeroExtend 64 (read_mem_bytes 4 (s5.sp + 28#64) s5) by simp_all] simp only [Nat.reduceMul] - /- Damn, that the rewrite system is not confluent really fucks me over here ;_; + /- Damn, that the rewrite system is not confluent really messes me up over here ;_; `simp` winds up rewriting `s5.sp` into `s4.sp` first because of the rule, and fails to match `read_mem_bytes... (s5.sp + ...) = read_mem_bytes ... (s4.sp + ...)`. One might say that this entire proof is stupid, but really, I 'just' want it to build an - e-graph and figure it out~ + e-graph and figure it out. -/ have : (read_mem_bytes 4 (s5.sp + 28#64) s5) = read_mem_bytes 4 (s4.sp + 28#64) s4 := by obtain ⟨_, _, _, _, _, _, h, _⟩ := h_s5 exact h simp [this] rw [h_s4_sp_28] - -- simp only [show s5.x0 = BitVec.zeroExtend 64 (read_mem_bytes 4 (s5.sp + 28) s5) by simp_all] - -- simp only [show (read_mem_bytes 4 (s5.sp + 28) s5) = read_mem_bytes 4 (s4.sp + 28#64) s4 by simp_all] simp [show Sys.next _ = run 1 _ by rfl] - -- clear_named [h_s4] -- 6/15 name h_run : s6 := run 1 s5 @@ -897,9 +870,6 @@ theorem partial_correctness : simp [show Sys.next _ = run 1 _ by rfl] replace h_s3_sp_28 : read_mem_bytes 4 (s3.sp + 28#64) s3 = BitVec.zeroExtend 32 (spec s0.x0 s0.x1) := by simp_all replace h_s3_sp : s3.sp = s0.sp - 32#64 := by simp_all - -- /- TODO: this should be s0.x0-/ - -- replace h_s3_read_sp12 : read_mem_bytes 4 (s3.sp + 12#64) s3 = BitVec.truncate 32 s0.x0 := by simp_all - -- replace h_s3_read_sp8 : read_mem_bytes 4 (s3.sp + 8#64) s3 = BitVec.truncate 32 s0.x1 := by simp_all clear_named [h_s2, h_s1] -- 4/15 From 45203d39cf92e76bb94df2ee9b114d5353aa0385 Mon Sep 17 00:00:00 2001 From: Siddharth Date: Wed, 4 Sep 2024 15:43:28 -0500 Subject: [PATCH 13/18] Update Arm/State.lean Co-authored-by: Alex Keizer --- Arm/State.lean | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Arm/State.lean b/Arm/State.lean index b740eeff..87569905 100644 --- a/Arm/State.lean +++ b/Arm/State.lean @@ -741,7 +741,7 @@ def Memory.read (addr : BitVec 64) (m : Memory) : BitVec 8 := theorem ArmState.read_mem_eq_mem_read : read_mem addr s = s.mem.read addr := rfl -/-- `w` does not effect memory. -/ +/-- `w` does not affect memory. 
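Writing any `StateField` (a GPR, an SFP register, the PC, a flag, or the error
field) leaves the `mem` component of the state unchanged, so memory reads can be
simplified across such writes.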
-/ @[memory_rules, state_simp_rules] theorem ArmState.mem_w_eq_mem (fld : StateField) (v : state_value fld) (s : ArmState) : (w fld v s).mem = s.mem := by From 8d8862883ebdd0df1bb9f52ff7ac390ce3a63451 Mon Sep 17 00:00:00 2001 From: Siddharth Date: Wed, 4 Sep 2024 15:44:57 -0500 Subject: [PATCH 14/18] Update Arm/State.lean Co-authored-by: Alex Keizer --- Arm/State.lean | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/Arm/State.lean b/Arm/State.lean index 87569905..5d066cf2 100644 --- a/Arm/State.lean +++ b/Arm/State.lean @@ -745,13 +745,13 @@ theorem ArmState.read_mem_eq_mem_read : read_mem addr s = s.mem.read addr := rfl @[memory_rules, state_simp_rules] theorem ArmState.mem_w_eq_mem (fld : StateField) (v : state_value fld) (s : ArmState) : (w fld v s).mem = s.mem := by - cases fld <;> - unfold w write_base_error - write_base_gpr - write_base_sfp - write_base_pc - write_base_flag <;> - simp + cases fld <;> ( + unfold w write_base_error + write_base_gpr + write_base_sfp + write_base_pc + write_base_flag + simp) /-- A variant of `write_mem` that directly talks about writes to memory, instead of over the entire `ArmState` From 5dafafc99b9b15e570824490078c826a7ce2738f Mon Sep 17 00:00:00 2001 From: Siddharth Bhat Date: Wed, 4 Sep 2024 15:45:09 -0500 Subject: [PATCH 15/18] chore: cleanups suggested --- Arm/State.lean | 32 +++++++++++++-------------- Proofs/Experiments/Max/MaxTandem.lean | 5 ++--- 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/Arm/State.lean b/Arm/State.lean index 5d066cf2..102bb155 100644 --- a/Arm/State.lean +++ b/Arm/State.lean @@ -290,29 +290,29 @@ def r (fld : StateField) (s : ArmState) : (state_value fld) := | FLAG i => read_base_flag i s | ERR => read_base_error s -@[state_simp_rules] -def ArmState.x0 (s : ArmState) : BitVec 64 := r (StateField.GPR 0) s +/-! -@[state_simp_rules] -def ArmState.x1 (s : ArmState) : BitVec 64 := r (StateField.GPR 1) s +We define helpers for reading and writing registers on the `ArmState` with the colloquial +names. For example, the stack pointer (`sp`) refers to register 31. +These mnemonics make it much easier to read and write theorems about assembly programs. 
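They are `abbrev`s tagged with `@[state_simp_rules]`, so `simp` with the state simp
set rewrites them to the underlying `r` reads whenever that is needed.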
-@[state_simp_rules] -def ArmState.x2 (s : ArmState) : BitVec 64 := r (StateField.GPR 2) s +-/ -@[state_simp_rules] -def ArmState.sp (s : ArmState) : BitVec 64 := r (StateField.GPR 31) s +@[state_simp_rules] abbrev ArmState.x0 (s : ArmState) : BitVec 64 := r (StateField.GPR 0) s -@[state_simp_rules] -def ArmState.V (s : ArmState) : BitVec 1 := r (StateField.FLAG PFlag.V) s +@[state_simp_rules] abbrev ArmState.x1 (s : ArmState) : BitVec 64 := r (StateField.GPR 1) s -@[state_simp_rules] -def ArmState.C (s : ArmState) : BitVec 1 := r (StateField.FLAG PFlag.C) s +@[state_simp_rules] abbrev ArmState.x2 (s : ArmState) : BitVec 64 := r (StateField.GPR 2) s -@[state_simp_rules] -def ArmState.Z (s : ArmState) : BitVec 1 := r (StateField.FLAG PFlag.Z) s +@[state_simp_rules] abbrev ArmState.sp (s : ArmState) : BitVec 64 := r (StateField.GPR 31) s -@[state_simp_rules] -def ArmState.N (s : ArmState) : BitVec 1 := r (StateField.FLAG PFlag.N) s +@[state_simp_rules] abbrev ArmState.V (s : ArmState) : BitVec 1 := r (StateField.FLAG PFlag.V) s + +@[state_simp_rules] abbrev ArmState.C (s : ArmState) : BitVec 1 := r (StateField.FLAG PFlag.C) s + +@[state_simp_rules] abbrev ArmState.Z (s : ArmState) : BitVec 1 := r (StateField.FLAG PFlag.Z) s + +@[state_simp_rules] abbrev ArmState.N (s : ArmState) : BitVec 1 := r (StateField.FLAG PFlag.N) s def ArmState.r_GPR_0_eq_x0 (s : ArmState) : r (StateField.GPR 0) s = s.x0 := by rfl diff --git a/Proofs/Experiments/Max/MaxTandem.lean b/Proofs/Experiments/Max/MaxTandem.lean index 534b70ad..2ce8416f 100644 --- a/Proofs/Experiments/Max/MaxTandem.lean +++ b/Proofs/Experiments/Max/MaxTandem.lean @@ -3,9 +3,8 @@ Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. Released under Apache 2.0 license as described in the file LICENSE. Author(s): Shilpi Goel -Experimental: Use the Correctness module to prove that this program computes the -following via a naive loop that iterates over `x0`: -`x0 := x0 + x1` +Experimental: Use the Correctness module to prove that this program +computes the maximum of two numbers. -/ import Arm import Tactics.CSE From 82ef7d69f2fdb219377fb398ccd44d6c01c07bb5 Mon Sep 17 00:00:00 2001 From: Siddharth Bhat Date: Wed, 4 Sep 2024 15:46:07 -0500 Subject: [PATCH 16/18] chore: check that we have all licenses --- Correctness/Correctness.lean | 2 +- Proofs/Experiments/Max/MaxTandem.lean | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Correctness/Correctness.lean b/Correctness/Correctness.lean index 95c6274b..47a89434 100644 --- a/Correctness/Correctness.lean +++ b/Correctness/Correctness.lean @@ -1,7 +1,7 @@ /- Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. Released under Apache 2.0 license as described in the file LICENSE. -Author(s): Leonardo de Moura, Shilpi Goel +Author(s): Leonardo de Moura, Shilpi Goel, Siddharth Bhat -/ /- diff --git a/Proofs/Experiments/Max/MaxTandem.lean b/Proofs/Experiments/Max/MaxTandem.lean index 2ce8416f..c1c05e6c 100644 --- a/Proofs/Experiments/Max/MaxTandem.lean +++ b/Proofs/Experiments/Max/MaxTandem.lean @@ -1,7 +1,7 @@ /- Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. Released under Apache 2.0 license as described in the file LICENSE. -Author(s): Shilpi Goel +Author(s): Shilpi Goel, Siddharth Bhat Experimental: Use the Correctness module to prove that this program computes the maximum of two numbers. 
From 7b789b87d62358a725fc511520a8915ef5a09675 Mon Sep 17 00:00:00 2001 From: Siddharth Bhat Date: Fri, 6 Sep 2024 16:29:24 -0500 Subject: [PATCH 17/18] chore: move to BitVec --- Arm/BitVec.lean | 9 +++++++++ Proofs/Experiments/Max/MaxTandem.lean | 14 -------------- 2 files changed, 9 insertions(+), 14 deletions(-) diff --git a/Arm/BitVec.lean b/Arm/BitVec.lean index 1d47a9a9..f6ebcc46 100644 --- a/Arm/BitVec.lean +++ b/Arm/BitVec.lean @@ -1052,6 +1052,15 @@ theorem extractLsBytes_ge (h : a ≥ n) (x : BitVec n) : apply BitVec.getLsb_ge omega +theorem not_slt {w} (a b : BitVec w) : ¬ (a.slt b) ↔ (b.sle a) := by + simp only [BitVec.slt, BitVec.sle] + by_cases h : a.toInt < b.toInt + · simp [h] + exact Int.not_le.mpr h + · simp [h] + exact Int.not_lt.mp h + + /-! ## `Quote` instance -/ instance (w : Nat) : Quote (BitVec w) `term where diff --git a/Proofs/Experiments/Max/MaxTandem.lean b/Proofs/Experiments/Max/MaxTandem.lean index c1c05e6c..6c235f34 100644 --- a/Proofs/Experiments/Max/MaxTandem.lean +++ b/Proofs/Experiments/Max/MaxTandem.lean @@ -606,20 +606,6 @@ elab "clear_named" "[" names:(ident),* "]": tactic => do replaceMainGoal [← (← getMainGoal).clear hyp.fvarId] continue - - -section ForLean - -theorem BitVec.not_slt {w} (a b : BitVec w) : ¬ (a.slt b) ↔ (b.sle a) := by - simp only [BitVec.slt, BitVec.sle] - by_cases h : a.toInt < b.toInt - · simp [h] - exact Int.not_le.mpr h - · simp [h] - exact Int.not_lt.mp h - -end ForLean - theorem partial_correctness : PartialCorrectness ArmState := by apply Correctness.partial_correctness_from_assertions From 291b802243b55492ea296fbbc9ad96c085a760ce Mon Sep 17 00:00:00 2001 From: Siddharth Bhat Date: Fri, 6 Sep 2024 16:30:11 -0500 Subject: [PATCH 18/18] chore: add TODO to upstream. --- Arm/BitVec.lean | 1 + 1 file changed, 1 insertion(+) diff --git a/Arm/BitVec.lean b/Arm/BitVec.lean index f6ebcc46..1c82f194 100644 --- a/Arm/BitVec.lean +++ b/Arm/BitVec.lean @@ -1052,6 +1052,7 @@ theorem extractLsBytes_ge (h : a ≥ n) (x : BitVec n) : apply BitVec.getLsb_ge omega +/-- TODO: upstream -/ theorem not_slt {w} (a b : BitVec w) : ¬ (a.slt b) ↔ (b.sle a) := by simp only [BitVec.slt, BitVec.sle] by_cases h : a.toInt < b.toInt