diff --git a/.gitignore b/.gitignore index 6c2506f9..1e3e3f4d 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ /lake-packages/* /.lake/* *.log +/data/* diff --git a/Arm/BitVec.lean b/Arm/BitVec.lean index 24dad21f..553848bb 100644 --- a/Arm/BitVec.lean +++ b/Arm/BitVec.lean @@ -20,8 +20,7 @@ open BitVec attribute [bitvec_rules] BitVec.ofFin_eq_ofNat attribute [bitvec_rules] BitVec.ofFin.injEq attribute [bitvec_rules] BitVec.cast_eq -attribute [bitvec_rules] BitVec.zeroExtend_eq -attribute [bitvec_rules] BitVec.truncate_eq +attribute [bitvec_rules] BitVec.setWidth_eq attribute [bitvec_rules] BitVec.getLsbD_ofFin attribute [bitvec_rules] BitVec.getLsbD_ge attribute [bitvec_rules] BitVec.getMsbD_ge @@ -47,19 +46,23 @@ attribute [bitvec_rules] BitVec.getMsbD_cast attribute [bv_toNat] BitVec.toNat_ofInt attribute [bitvec_rules] BitVec.toInt_ofInt attribute [bitvec_rules] BitVec.ofInt_natCast -attribute [bitvec_rules] BitVec.toNat_zeroExtend' --- attribute [bitvec_rules] BitVec.toNat_zeroExtend --- attribute [bitvec_rules] BitVec.toNat_truncate -attribute [bitvec_rules] BitVec.zeroExtend_zero attribute [bitvec_rules] BitVec.ofNat_toNat -attribute [bitvec_rules] BitVec.getLsbD_zeroExtend' -attribute [bitvec_rules] BitVec.getMsbD_zeroExtend' -attribute [bitvec_rules] BitVec.getLsbD_zeroExtend -attribute [bitvec_rules] BitVec.getMsbD_zeroExtend_add -attribute [bitvec_rules] BitVec.getLsbD_truncate -attribute [bitvec_rules] BitVec.zeroExtend_zeroExtend_of_le -attribute [bitvec_rules] BitVec.truncate_truncate_of_le -attribute [bitvec_rules] BitVec.truncate_cast + +/-! 
### setWidth / truncate / zeroExtend -/ + +-- We adopt `setWidth` as the simp normal form +attribute [bitvec_rules] BitVec.truncate_eq_setWidth +attribute [bitvec_rules] BitVec.zeroExtend_eq_setWidth + +attribute [bitvec_rules] BitVec.toNat_setWidth' +attribute [bitvec_rules] BitVec.setWidth_zero +attribute [bitvec_rules] BitVec.getLsbD_setWidth' +attribute [bitvec_rules] BitVec.getLsbD_setWidth +attribute [bitvec_rules] BitVec.getMsbD_setWidth' +attribute [bitvec_rules] BitVec.getMsbD_setWidth_add +attribute [bitvec_rules] BitVec.setWidth_setWidth_of_le +attribute [bitvec_rules] BitVec.setWidth_cast + attribute [bitvec_rules] BitVec.extractLsb_ofFin attribute [bitvec_rules] BitVec.extractLsb_ofNat attribute [bitvec_rules] BitVec.extractLsb'_toNat @@ -72,21 +75,21 @@ attribute [bitvec_rules] BitVec.toFin_or attribute [bitvec_rules] BitVec.getLsbD_or attribute [bitvec_rules] BitVec.getMsbD_or attribute [bitvec_rules] BitVec.msb_or -attribute [bitvec_rules] BitVec.truncate_or +attribute [bitvec_rules] BitVec.setWidth_or attribute [bitvec_rules] BitVec.toNat_and attribute [bitvec_rules] BitVec.toFin_and attribute [bitvec_rules] BitVec.getLsbD_and attribute [bitvec_rules] BitVec.getMsbD_and attribute [bitvec_rules] BitVec.msb_and -attribute [bitvec_rules] BitVec.truncate_and +attribute [bitvec_rules] BitVec.setWidth_and attribute [bitvec_rules] BitVec.toNat_xor attribute [bitvec_rules] BitVec.toFin_xor attribute [bitvec_rules] BitVec.getLsbD_xor -attribute [bitvec_rules] BitVec.truncate_xor +attribute [bitvec_rules] BitVec.setWidth_xor -- attribute [bitvec_rules] BitVec.toNat_not attribute [bitvec_rules] BitVec.toFin_not attribute [bitvec_rules] BitVec.getLsbD_not -attribute [bitvec_rules] BitVec.truncate_not +attribute [bitvec_rules] BitVec.setWidth_not attribute [bitvec_rules] BitVec.not_cast attribute [bitvec_rules] BitVec.and_cast attribute [bitvec_rules] BitVec.or_cast @@ -103,8 +106,8 @@ attribute [bitvec_rules] BitVec.getLsbD_ushiftRight attribute [bitvec_rules] 
BitVec.toNat_append attribute [bitvec_rules] BitVec.getLsbD_append attribute [bitvec_rules] BitVec.getMsbD_append -attribute [bitvec_rules] BitVec.truncate_append -attribute [bitvec_rules] BitVec.truncate_cons +attribute [bitvec_rules] BitVec.setWidth_append +attribute [bitvec_rules] BitVec.setWidth_cons attribute [bitvec_rules] BitVec.not_append attribute [bitvec_rules] BitVec.and_append attribute [bitvec_rules] BitVec.or_append @@ -162,7 +165,9 @@ attribute [bitvec_rules] BitVec.ofBool_true attribute [bitvec_rules] BitVec.ofBool_false attribute [bitvec_rules] BitVec.ofNat_eq_ofNat attribute [bitvec_rules] BitVec.zero_eq -attribute [bitvec_rules] BitVec.truncate_eq_zeroExtend +attribute [bitvec_rules] BitVec.zero_or +attribute [bitvec_rules] BitVec.or_zero +attribute [bitvec_rules] BitVec.or_self attribute [bitvec_rules] BitVec.add_sub_cancel attribute [bitvec_rules] BitVec.sub_add_cancel @@ -215,11 +220,11 @@ attribute [bitvec_rules] BitVec.reduceULT attribute [bitvec_rules] BitVec.reduceULE attribute [bitvec_rules] BitVec.reduceSLT attribute [bitvec_rules] BitVec.reduceSLE -attribute [bitvec_rules] BitVec.reduceZeroExtend' +attribute [bitvec_rules] BitVec.reduceSetWidth' attribute [bitvec_rules] BitVec.reduceShiftLeftZeroExtend attribute [bitvec_rules] BitVec.reduceExtracLsb' attribute [bitvec_rules] BitVec.reduceReplicate -attribute [bitvec_rules] BitVec.reduceZeroExtend +attribute [bitvec_rules] BitVec.reduceSetWidth attribute [bitvec_rules] BitVec.reduceSignExtend attribute [bitvec_rules] BitVec.reduceAllOnes attribute [bitvec_rules] BitVec.reduceBitVecOfFin @@ -293,22 +298,24 @@ abbrev ror (x : BitVec n) (r : Nat) : BitVec n := abbrev lsb (x : BitVec n) (i : Nat) : BitVec 1 := BitVec.extractLsb' i 1 x -abbrev partInstall (hi lo : Nat) (val : BitVec (hi - lo + 1)) (x : BitVec n): BitVec n := - let mask := allOnes (hi - lo + 1) - let val_aligned := (zeroExtend n val) <<< lo - let mask_with_hole := ~~~ ((zeroExtend n mask) <<< lo) +/-- `partInstall start len 
val x` returns a bitvector where bits +- `start` to `start+len` are equal to `val`, and +- all other bits are equal to the respective bit in `x` -/ +abbrev partInstall (start len : Nat) (val : BitVec len) (x : BitVec n): BitVec n := + let mask := allOnes len + let val_aligned := (zeroExtend n val) <<< start + let mask_with_hole := ~~~ ((zeroExtend n mask) <<< start) let x_with_hole := x &&& mask_with_hole x_with_hole ||| val_aligned -example : (partInstall 3 0 0xC#4 0xAB0D#16 = 0xAB0C#16) := rfl +example : (partInstall 0 4 0xC#4 0xAB0D#16 = 0xAB0C#16) := rfl def flattenTR {n : Nat} (xs : List (BitVec n)) (i : Nat) (acc : BitVec len) (H : n > 0) : BitVec len := match xs with | [] => acc | x :: rest => - have h : n = (i * n + n - 1 - i * n + 1) := by omega - let new_acc := (BitVec.partInstall (i * n + n - 1) (i * n) (BitVec.cast h x) acc) + let new_acc := (BitVec.partInstall (i * n) n x acc) flattenTR rest (i + 1) new_acc H /-- Reverse bits of a bit-vector. -/ @@ -317,8 +324,8 @@ def reverse (x : BitVec n) : BitVec n := match i with | 0 => acc | j + 1 => - let xi : BitVec 1 := extractLsb' (i - 1) 1 x - let acc := BitVec.partInstall (n - i) (n - i) (xi.cast (by omega)) acc + let xi := extractLsb' (i - 1) 1 x + let acc := BitVec.partInstall (n - i) 1 xi acc reverseTR x j acc reverseTR x n $ BitVec.zero n @@ -458,9 +465,6 @@ theorem toNat_ofNat_lt {n w₁ : Nat} (hn : n < 2^w₁) : ---------------------------- Comparison Lemmas ----------------------- -@[simp] protected theorem not_lt {n : Nat} {a b : BitVec n} : ¬ a < b ↔ b ≤ a := by - exact Fin.not_lt .. 
- theorem ge_of_not_lt (x y : BitVec w₁) (h : ¬ (x < y)) : x ≥ y := by simp_all only [BitVec.le_def, BitVec.lt_def] omega @@ -500,53 +504,30 @@ protected theorem zero_le_sub (x y : BitVec n) : refine (BitVec.nat_bitvec_le (0#n) (x - y)).mp ?a simp only [toNat_ofNat, Nat.zero_mod, toNat_sub, Nat.zero_le] ------------------------------ Logical Lemmas ------------------------ - -@[bitvec_rules] -protected theorem zero_or (x : BitVec n) : 0#n ||| x = x := by - unfold HOr.hOr instHOrOfOrOp OrOp.or instOrOp BitVec.or - simp only [toNat_ofNat, Nat.zero_mod, Nat.zero_or] - congr - -@[bitvec_rules] -protected theorem or_zero (x : BitVec n) : x ||| 0#n = x := by - rw [BitVec.or_comm] - rw [BitVec.zero_or] - done - -@[bitvec_rules] -protected theorem or_self (x : BitVec n) : - x ||| x = x := by - refine eq_of_toNat_eq ?_ - rw [BitVec.toNat_or] - apply Nat.eq_of_testBit_eq - simp only [Nat.testBit_or, Bool.or_self, implies_true] - done - --------------------- ZeroExtend/Append/Extract Lemmas ---------------- @[bitvec_rules] -theorem zeroExtend_zero_width : (zeroExtend 0 x) = 0#0 := by - unfold zeroExtend +theorem setWidth_zero_width : (setWidth 0 x) = 0#0 := by + unfold setWidth split <;> simp [bitvec_zero_is_unique] -- During symbolic simulation, we often encounter an `if` in the first argument --- of `zeroExtend` (e.g., `read_gpr` reads a specified `width` of the register, +-- of `setWidth` (e.g., `read_gpr` reads a specified `width` of the register, -- which is often computed by checking whether the `instruction.sf` bit is 0 or --- 1). I've found the rules `zeroExtend_if_true` and `zeroExtend_if_false` to be --- helpful to reduce such occurrences of `zeroExtend` terms. +-- 1). I've found the rules `setWidth_if_true` and `setWidth_if_false` to be +-- helpful to reduce such occurrences of `setWidth` terms. 
@[bitvec_rules] -theorem zeroExtend_if_true [Decidable p] (x : BitVec n) +theorem setWidth_if_true [Decidable p] (x : BitVec n) (h_eq : a = (if p then a else b)) : - (zeroExtend (if p then a else b) x) = BitVec.cast h_eq (zeroExtend a x) := by - simp only [toNat_eq, toNat_truncate, ← h_eq, toNat_cast] + (setWidth (if p then a else b) x) = BitVec.cast h_eq (setWidth a x) := by + simp only [toNat_eq, toNat_setWidth, ← h_eq, toNat_cast] @[bitvec_rules] -theorem zeroExtend_if_false [Decidable p] (x : BitVec n) +theorem setWidth_if_false [Decidable p] (x : BitVec n) (h_eq : b = (if p then a else b)) : - (zeroExtend (if p then a else b) x) = BitVec.cast h_eq (zeroExtend b x) := by - simp only [toNat_eq, toNat_truncate, ← h_eq, toNat_cast] + (setWidth (if p then a else b) x) = BitVec.cast h_eq (setWidth b x) := by + simp only [toNat_eq, toNat_setWidth, ← h_eq, toNat_cast] theorem extractLsb_eq (x : BitVec n) (h : n = n - 1 + 1) : BitVec.extractLsb (n - 1) 0 x = BitVec.cast h x := by @@ -554,14 +535,15 @@ theorem extractLsb_eq (x : BitVec n) (h : n = n - 1 + 1) : ext1 simp [←h] +@[simp, bitvec_rules] theorem extractLsb'_eq (x : BitVec n) : - BitVec.extractLsb' 0 n x = x := by + BitVec.extractLsb' 0 n x = x := by unfold extractLsb' - simp only [Nat.shiftRight_zero, ofNat_toNat, zeroExtend_eq] + simp only [Nat.shiftRight_zero, ofNat_toNat, setWidth_eq] @[bitvec_rules] -protected theorem extractLsb'_of_zeroExtend (x : BitVec n) (h : j ≤ i) : - extractLsb' 0 j (zeroExtend i x) = zeroExtend j x := by +protected theorem extractLsb'_of_setWidth (x : BitVec n) (h : j ≤ i) : + extractLsb' 0 j (setWidth i x) = setWidth j x := by apply BitVec.eq_of_getLsbD_eq intro k have q : k < i := by omega @@ -641,13 +623,13 @@ theorem append_of_extract_general_nat (high low n vn : Nat) (h : vn < 2 ^ n) : theorem append_of_extract (n : Nat) (v : BitVec n) (high0 : high = n - low) (h : high + low = n) : - BitVec.cast h (zeroExtend high (v >>> low) ++ extractLsb' 0 low v) = v := by + BitVec.cast h 
(setWidth high (v >>> low) ++ extractLsb' 0 low v) = v := by ext subst high have vlt := v.isLt have := append_of_extract_general_nat (n - low) low n (BitVec.toNat v) vlt have low_le : low ≤ n := by omega - simp_all [toNat_zeroExtend, Nat.sub_add_cancel, low_le] + simp_all [toNat_setWidth, Nat.sub_add_cancel, low_le] rw [Nat.mod_eq_of_lt (b := 2 ^ n)] at this · rw [this] · rw [Nat.shiftRight_eq_div_pow] @@ -656,13 +638,13 @@ theorem append_of_extract (n : Nat) (v : BitVec n) @[bitvec_rules] theorem append_of_extract_general (v : BitVec n) : - (zeroExtend high (v >>> low)) ++ extractLsb' 0 low v = + (setWidth high (v >>> low)) ++ extractLsb' 0 low v = extractLsb' 0 (high + low) v := by ext have := append_of_extract_general_nat high low n (BitVec.toNat v) have h_vlt := v.isLt; simp_all only [Nat.sub_zero] simp only [h_vlt, forall_prop_of_true] at this - simp_all [toNat_zeroExtend, Nat.sub_add_cancel] + simp_all [toNat_setWidth, Nat.sub_add_cancel] rw [Nat.mod_eq_of_lt (b := 2 ^ n)] at this · rw [this] · rw [Nat.shiftRight_eq_div_pow] @@ -685,9 +667,9 @@ theorem leftshift_n_or_mod_2n : simp [h₀] @[bitvec_rules] -protected theorem truncate_to_lsb_of_append (m n : Nat) (x : BitVec m) (y : BitVec n) : - truncate n (x ++ y) = y := by - simp only [truncate_append, Nat.le_refl, ↓reduceDIte, zeroExtend_eq] +protected theorem setWidth_to_lsb_of_append (m n : Nat) (x : BitVec m) (y : BitVec n) : + setWidth n (x ++ y) = y := by + simp only [setWidth_append, Nat.le_refl, ↓reduceDIte, setWidth_eq] ---------------------------- Shift Lemmas --------------------------- @@ -1102,20 +1084,13 @@ theorem BitVec.ofBool_getLsbD (a : BitVec w) (i : Nat) : intro ⟨0, _⟩ simp -/-- If multiplication does not overflow, -then `(x * y).toNat` equals `x.toNat * y.toNat` -/ -theorem toNat_mul_of_lt {w} {x y : BitVec w} (h : x.toNat * y.toNat < 2^w) : - (x * y).toNat = x.toNat * y.toNat := by - rw [BitVec.toNat_mul, Nat.mod_eq_of_lt h] - /-- If subtraction does not overflow, then `(x - y).toNat` 
equals `x.toNat - y.toNat` -/ -theorem toNat_sub_of_lt {w} {x y : BitVec w} (h : x.toNat < y.toNat) : +@[deprecated toNat_sub_of_le] +theorem toNat_sub_of_lt' {w} {x y : BitVec w} (h : x.toNat < y.toNat) : (y - x).toNat = y.toNat - x.toNat := by - rw [BitVec.toNat_sub, - show (2^w - x.toNat + y.toNat) = 2^w + (y.toNat - x.toNat) by omega, - Nat.add_mod, Nat.mod_self, Nat.zero_add, Nat.mod_mod, - Nat.mod_eq_of_lt (by omega)] + apply toNat_sub_of_le + bv_omega /-- `x.toNat * z.toNat ≤ k` if `z ≤ y` and `x.toNat * y.toNat ≤ k` -/ theorem toNat_mul_toNat_le_of_le_of_le {w} (x y z : BitVec w) diff --git a/Arm/Cfg/Cfg.lean b/Arm/Cfg/Cfg.lean index 9400882e..3af3c521 100644 --- a/Arm/Cfg/Cfg.lean +++ b/Arm/Cfg/Cfg.lean @@ -8,14 +8,20 @@ import Arm.Exec namespace Cfg open BitVec +open Std +open Std.Format -/-- Conditions under which a branch is taken; this is a function that -takes a state as input, and returns a boolean. -/ +/-- +Conditions under which a branch is taken; this is a function that +takes a state as input, and returns whether the branch was taken. +-/ abbrev CondHoldsFn := ArmState → Bool -/-- The general type of an instruction: for control-flow analysis, we +/-- +The general type of an instruction: for control-flow analysis, we only care about how an instruction affects the program's control -flow. -/ +flow. +-/ inductive InstType where -- Seq: Instruction at address pc after which control falls through -- to the next one, in program order (i.e., at pc + 4). @@ -38,6 +44,7 @@ instance : Repr InstType where | InstType.Ret pc => "" ++ repr pc instance : ToString InstType where toString i := toString (repr i) +instance : ToFormat InstType where format i := toString (repr i) def InstType.pc (x : InstType) : BitVec 64 := match x with @@ -66,11 +73,13 @@ def InstType.pc_and_type_equal (x y : InstType) : Bool := /-- An entry in the forward control-flow graph. 
-/ abbrev F_CFGentry := InstType × List InstType -/-- A forward control-flow graph maps an instruction (in its InstType +/-- +A forward control-flow graph maps an instruction (in its InstType form) to all possible next instructions (in their InstTypes forms). An edge from `from_pc` to `to_pc` means that control may transfer from -the former to the latter in one instruction step. -/ +the former to the latter in one instruction step. +-/ def ForwardGraph := Array F_CFGentry deriving Repr instance : ToString ForwardGraph where toString fg := toString (repr fg) @@ -88,25 +97,35 @@ deriving Repr instance : ToString LoopsInfo where toString li := toString (repr li) -/-- CFG collects all the control-flow information gleaned from the -program during static analysis. -/ +/-- +CFG collects all the control-flow information gleaned from the +program during static analysis. +-/ structure Cfg where - start_address : BitVec 64 - graph : ForwardGraph := #[] - loops_info : LoopsInfo := #[] + start_address : BitVec 64 + graph : ForwardGraph := #[] + loops_info : LoopsInfo := #[] + maybe_modified_regs : Array RegType := #[] deriving Repr instance : ToString Cfg where toString cfg := toString (repr cfg) +instance : ToFormat Cfg where format cfg := toString (repr cfg) -/-- We can detect a loop if we find an entry where some `to_pc` is +instance : Inhabited Cfg where + default := { start_address := 0#64 } + +/-- +We can detect a loop if we find an entry where some `to_pc` is less than its corresponding `from_pc`, i.e., there is a back-jump from `from_pc` to `to_pc`. In that case, `to_pc` has the loop target instruction and `from_pc` has the loop guard instruction. The first instruction that will be executed after the loop terminates will also be a member of `to_insts` -of type `Seq`. -/ -private def loop_detected (from_inst : InstType) (to_insts : List InstType) : IO (Option LoopInfo) := +of type `Seq`. 
+-/ +private def loop_detected (from_inst : InstType) (to_insts : List InstType) : + Except Format (Option LoopInfo) := let check := List.find? (fun x => compare x from_inst == .lt) to_insts @@ -120,9 +139,9 @@ private def loop_detected (from_inst : InstType) (to_insts : List InstType) : IO have h' : next.length > 0 := by simp_all pure (some { guard := from_inst, target := to_inst, next := next[0]'h' }) else - throw (IO.userError - ("We expected exactly one Seq instruction in the control-flow graph for this entry. " ++ - "Instead, we found {next.length}.")) + .error + f!"We expected exactly one Seq instruction in the control-flow graph \ + for this entry. Instead, we found {next.length}." private def addToLoopsInfo (entry : Option LoopInfo) (loops_info : LoopsInfo) : LoopsInfo := match entry with @@ -132,7 +151,7 @@ private def addToLoopsInfo (entry : Option LoopInfo) (loops_info : LoopsInfo) : Array.push loops_info (index, loop_info) private def addEntry (from_inst : InstType) (to_insts : List InstType) - (cfg : Cfg) : IO Cfg := do + (mod_regs : List RegType) (cfg : Cfg) : Except Format Cfg := do -- We crawl through the program in a linear manner, so by -- construction, we should not add a previously-added InstType to -- the graph. @@ -141,20 +160,38 @@ private def addEntry (from_inst : InstType) (to_insts : List InstType) let maybe_loop_info ← loop_detected from_inst to_insts let new_loops_info := addToLoopsInfo maybe_loop_info cfg.loops_info let new_graph := Array.push cfg.graph (from_inst, to_insts) - pure { cfg with graph := new_graph, loops_info := new_loops_info } + let maybe_modified_regs := + -- Would've been nice to be able to deduplicate and sort at the same time... 
+ Array.insertionSort (mod_regs_go mod_regs cfg.maybe_modified_regs) + (fun r1 r2 => + match r1, r2 with + | .GPR i, .GPR j => i ≤ j + | .GPR _, .SFP _ => true + | .SFP i, .SFP j => i ≤ j + | .SFP _, .GPR _ => false) + pure { cfg with graph := new_graph, + loops_info := new_loops_info, + maybe_modified_regs := maybe_modified_regs } else - throw (IO.userError - ("[ForwardGraph] Implementation Error: graph already contains " ++ - "an entry with PC {InstType.pc from_inst}! Here is the graph: ${cfg.graph}.")) - --- This function adds information for an Arm instruction into Cfg --- Inputs: pc -- current program counter --- arm_inst -- current Arm instruction --- cfg -- the control-flow graph --- outputs: haltp : Bool -- whether the program halts --- cfg : Cfg -- the updated control-flow graph -protected def addArmInstToCfg (pc : BitVec 64) (arm_inst : ArmInst) (cfg : Cfg) - : IO (Bool × Cfg) := do + .error + f!"[ForwardGraph] Implementation Error: graph already contains \ + an entry with PC {InstType.pc from_inst}! \ + Here is the graph: ${Format.indentD <| repr cfg.graph}." 
+ where mod_regs_go (mod_regs : List RegType) (all : Array RegType) : Array RegType := + match mod_regs with + | [] => all + | m :: ms => if m ∈ all then mod_regs_go ms all + else mod_regs_go ms (all.push m) +/-- +This function adds information for an Arm instruction into Cfg +Inputs: `pc` -- current program counter + `arm_inst` -- current Arm instruction + `cfg` -- the control-flow graph +outputs: `haltp` : `Bool` -- whether the program halts + `cfg` : `Cfg` -- the updated control-flow graph +-/ +protected def addArmInstToCfg (pc : BitVec 64) (raw_inst : BitVec 32) + (arm_inst : ArmInst) (cfg : Cfg) : Except Format (Bool × Cfg) := do let default_to_pc ← pure (pc + 4#64) -- variable pc_inst: the type of instruction InstType: Seq, BrOrg, BrTgt, Ret -- variable to_insts: an over-approximation of possible next pcs, @@ -164,7 +201,7 @@ protected def addArmInstToCfg (pc : BitVec 64) (arm_inst : ArmInst) (cfg : Cfg) open InstType ArmInst in match arm_inst with | DPI _ | DPR _ | LDST _ | DPSFP _ => - (false, Seq pc, [Seq default_to_pc]) + (false, Seq pc, [Seq default_to_pc]) | BR i => open BranchInst in match i with @@ -173,7 +210,7 @@ protected def addArmInstToCfg (pc : BitVec 64) (arm_inst : ArmInst) (cfg : Cfg) let (condition_holds : CondHoldsFn) := (fun state => BR.Compare_branch_inst.condition_holds inst state) (false, BrOrg pc condition_holds, - [Seq default_to_pc, BrTgt branch_taken_pc condition_holds]) + [Seq default_to_pc, BrTgt branch_taken_pc condition_holds]) | Uncond_branch_imm inst => -- B, BL let branch_taken_pc := BR.Uncond_branch_imm_inst.branch_taken_pc inst pc -- These are unconditional branch instructions; we do not add @@ -182,9 +219,9 @@ protected def addArmInstToCfg (pc : BitVec 64) (arm_inst : ArmInst) (cfg : Cfg) | Uncond_branch_reg _ => -- RET -- (FIXME) Extend CFG analysis when instructions other than -- RET are implemented. 
- + -- -- (FIXME) The to_inst for RET can be the value in the GPR - -- indexed by inst.Rn, but we can figure that value out only + -- indexed by `inst.Rn`, but we can figure that value out only -- after symbolic simulation. (true, Ret pc, [Ret pc]) | Cond_branch_imm inst => @@ -196,28 +233,32 @@ protected def addArmInstToCfg (pc : BitVec 64) (arm_inst : ArmInst) (cfg : Cfg) -- Currently only consider NOP | Hints _ => (false, Seq pc, [Seq default_to_pc]) - let new_cfg ← addEntry pc_inst to_insts cfg + do let maybe_modified_regs ← mayModifiedRegs raw_inst + let new_cfg ← addEntry pc_inst to_insts maybe_modified_regs cfg pure (haltp, new_cfg) protected def addToCfg (address : BitVec 64) (program : Program) (cfg : Cfg) - : IO (Bool × Cfg) := + : Except Format (Bool × Cfg) := let maybe_raw_inst := program.find? address match maybe_raw_inst with - | none => throw (IO.userError s!"No instruction found at address {address}!") + | none => .error f!"No instruction found at address {address}!" | some raw_inst => let maybe_arm_inst := decode_raw_inst raw_inst match maybe_arm_inst with - | none => throw (IO.userError - s!"Instruction {raw_inst} at address {address} could not be decoded!") + | none => + .error f!"Instruction {raw_inst} at {address} could not be decoded!" | some arm_inst => - Cfg.addArmInstToCfg address arm_inst cfg + Cfg.addArmInstToCfg address raw_inst arm_inst cfg --- Termination argument for the create' function below. This theorem --- is in terms of Fin so that we can take advantage of Fin lemmas. We --- will map this theorem to BitVecs (using lemmas like --- BitVec.fin_bitvec_lt) in create'. +/- +Termination argument for the `create'` function below. This theorem +is in terms of `Fin` so that we can take advantage of `Fin` lemmas. We +will map this theorem to `BitVecs` (using lemmas like +`BitVec.fin_bitvec_lt`) in `create'`. 
+-/ private theorem termination_lemma (i j max : Fin n) (h : n > 0) - (h0 : i < max) (h1 : j <= max - i) (h2 : ((Fin.ofNat' 0 h) : Fin n) < j) : + (h0 : i < max) (h1 : j <= max - i) + (h2 : ((@Fin.ofNat' n ⟨by omega⟩ 0) : Fin n) < j) : (max - (i + j)) < (max - i) := by -- Our strategy is to convert this proof obligation in terms of Nat, -- which is made possible by h0 and h1 hypotheses above. @@ -257,8 +298,15 @@ private theorem termination_lemma (i j max : Fin n) (h : n > 0) exact Nat.sub_lt_self h2 h1' done -private def create' (address : BitVec 64) (max_address : BitVec 64) - (program : Program) (cfg : Cfg) : IO Cfg := do +/-- +Create a `Cfg` structure for `program`, beginning at `start_address` until +`end_address`. +-/ +protected def create' (start_address end_address : BitVec 64) + (program : Program) : Except Format Cfg := + go start_address end_address program { start_address } + where go (address max_address : BitVec 64) + (program : Program) (cfg : Cfg) : Except Format Cfg := do if h₀ : max_address < address then pure cfg else @@ -271,28 +319,34 @@ private def create' (address : BitVec 64) (max_address : BitVec 64) have ?term_lemma : (max_address - (address + 4#64)).toNat < (max_address - address).toNat := by have := termination_lemma address.toFin (4#64).toFin max_address.toFin (by decide) - (by simp_all! only [BitVec.not_lt, BitVec.fin_bitvec_lt, not_false_eq_true, BitVec.lt_of_le_ne, h₁]) + (by simp_all! only [BitVec.not_lt, + BitVec.fin_bitvec_lt, + not_false_eq_true, + BitVec.lt_of_le_ne, h₁]) (by rw [← BitVec.toFin_sub]; exact h₂) - (by simp_arith) + (by simp_arith [Fin.ofNat']) simp [BitVec.fin_bitvec_le, BitVec.fin_bitvec_lt] at * exact this - Cfg.create' (address + 4#64) max_address program cfg + go (address + 4#64) max_address program cfg else - throw (IO.userError - ("We expect Arm instructions to be 32-bits wide; i.e., each " ++ - "program address should be 4-apart from its successor. 
" ++ - "This does not seem to be the case with this program for the " ++ - s!"successor of address {address}. Note that the highest " ++ - s!"address is {max_address}.")) + .error + f!"We expect Arm instructions to be 32-bits wide; i.e., each \ + program address should be 4-apart from its successor. \ + This does not seem to be the case with this program for the \ + successor of address {address}. Note that the highest \ + address is {max_address}." termination_by (max_address - address).toNat -protected def create (program : Program) : IO Cfg := +/-- +Create a `Cfg` structure for the program `program`. +-/ +protected def create (program : Program) : Except Format Cfg := let maybe_start_address := program.min? let maybe_max_address := program.max? match maybe_start_address, maybe_max_address with | some start_address, some max_address => - Cfg.create' start_address max_address program { start_address } + Cfg.create' start_address max_address program | _, _ => - throw (IO.userError s!"Could not determine the start/stop address for the program!") + .error f!"Could not determine the start/stop address for the program!" end Cfg diff --git a/Arm/Decode.lean b/Arm/Decode.lean index 36578c0b..aa1faf1e 100644 --- a/Arm/Decode.lean +++ b/Arm/Decode.lean @@ -14,6 +14,8 @@ import Arm.Decode.DPSFP section Decode open BitVec +open Std +open Std.Format -- We do not tag any of the decode functions (e.g., decode_raw_inst or -- its callees) with the `simp` attribute because we always expect @@ -25,8 +27,10 @@ open BitVec -- types, but their sub-categories' names (e.g., -- DataProcImmInst.Add_sub_imm) are longer and use underscores. -/-- A fully-decoded Arm instruction is represented by the ArmInst -structure. --/ +/-- +A fully-decoded Arm instruction is represented by the ArmInst +structure. 
+-/ inductive ArmInst where | DPI : DataProcImmInst → ArmInst | BR : BranchInst → ArmInst @@ -37,6 +41,17 @@ deriving DecidableEq, Repr instance : ToString ArmInst where toString a := toString (repr a) +/-- +Two main types of registers available on the machine. +-/ +inductive RegType where + | GPR : BitVec 5 → RegType + | SFP : BitVec 5 → RegType +deriving Repr, DecidableEq + +instance : ToFormat RegType where format r := toString (repr r) +instance : ToFormat (Array RegType) where format r := toString (repr r) + def decode_data_proc_imm (i : BitVec 32) : Option ArmInst := open ArmInst in open DataProcImmInst in @@ -53,6 +68,18 @@ def decode_data_proc_imm (i : BitVec 32) : Option ArmInst := DPI (Move_wide_imm {sf, opc, hw, imm16, Rd}) | _ => none +/-- +Return the indices of all the GPR/SFP registers that may be modified by the +DPI instruction `i`. +-/ +def DPI.mayModifyRegs (i : BitVec 32) : Except Format (List RegType) := + if let some _ := decode_data_proc_imm i then + -- The 5 LSBs of all DPI instructions give the index of the Rd register. + .ok [(.GPR (extractLsb' 0 5 i))] + else + .error + f!"Instruction 0x{i.toHex} is not of class Data Processing (Immediate)." + def decode_branch (i : BitVec 32) : Option ArmInst := open ArmInst in open BranchInst in @@ -69,6 +96,18 @@ def decode_branch (i : BitVec 32) : Option ArmInst := BR (Hints {CRm, op2}) | _ => none +/-- +Return the indices of all the GPR/SFP registers that may be modified by the +BR instruction `i`. +-/ +def BR.mayModifiedRegs (i : BitVec 32) : Except Format (List RegType) := + if let some _ := decode_branch i then + -- None of the branch instructions modify any GPR or SFP registers. + .ok [] + else + .error + f!"Instruction 0x{i.toHex} is not of class Branch Processing." 
+ def decode_data_proc_reg (i : BitVec 32) : Option ArmInst := open ArmInst in open DataProcRegInst in @@ -89,6 +128,18 @@ def decode_data_proc_reg (i : BitVec 32) : Option ArmInst := DPR (Data_processing_three_source {sf, op54, op31, Rm, o0, Ra, Rn, Rd}) | _ => none +/-- +Return the indices of all the GPR/SFP registers that may be modified by the +DPR instruction `i`. +-/ +def DPR.MayModifyRegs (i : BitVec 32) : Except Format (List RegType) := + if let some _ := decode_data_proc_reg i then + -- The 5 LSBs of all DPR instructions give the index of the Rd register. + .ok [(.GPR (extractLsb' 0 5 i))] + else + .error + f!"Instruction 0x{i.toHex} is not of class Data Processing (Register)." + def decode_data_proc_sfp (i : BitVec 32) : Option ArmInst := open ArmInst in open DataProcSFPInst in @@ -111,9 +162,9 @@ def decode_data_proc_sfp (i : BitVec 32) : Option ArmInst := DPSFP (Advanced_simd_permute {Q, size, Rm, opcode, Rn, Rd}) | [0, Q:1, op:1, 0111100000, a:1, b:1, c:1, cmode:4, o2:1, 1, d:1, e:1, f:1, g:1, h:1, Rd:5] => DPSFP (Advanced_simd_modified_immediate {Q, op, a, b, c, cmode, o2, d, e, f, g, h, Rd}) - -- Note: Advanced SIMD shift by immediate constraint immh != 0000 + -- Note: Advanced SIMD shift by immediate constraint `immh != 0000` -- An instruction will be matched against `Advanced_simd_modified_immediate` first, - -- if it doesn't match, then we know immh can't be 0b0000#4 + -- if it doesn't match, then we know `immh` can't be `0b0000#4` | [0, Q:1, U:1, 011110, immh:4, immb:3, opcode:5, 1, Rn:5, Rd:5] => DPSFP (Advanced_simd_shift_by_immediate {Q, U, immh, immb, opcode, Rn, Rd}) | [01, U:1, 111110, immh:4, immb:3, opcode:5, 1, Rn:5, Rd:5] => @@ -130,6 +181,41 @@ def decode_data_proc_sfp (i : BitVec 32) : Option ArmInst := DPSFP (Conversion_between_FP_and_Int {sf, S, ftype, rmode, opcode, Rn, Rd}) | _ => none +/-- +Does the `Rd` field of a DPSFP instruction indicate a GPR? +All other DPSFP instructions' `Rd` field indicate an SFP register. 
+-/ +def DPSFP.isGPRdest (inst : ArmInst) : Bool := + open ArmInst in + open DataProcSFPInst in + match inst with + | DPSFP (Advanced_simd_copy i) => + -- SMOV and UMOV write to a GPR. + -- Ref. the following in `exec_advanced_simd_copy`: + -- | [_Q:1, 0, _imm5:5, 0101] => exec_smov_umov inst s true + -- | [_Q:1, 0, _imm5:5, 0111] => exec_smov_umov inst s false + (i.Q = 1#1 ∧ i.op = 0#1 ∧ (i.imm4 = 0b0101#4 ∨ i.imm4 = 0b0111#4)) + | DPSFP (Conversion_between_FP_and_Int i) => + -- FPConvOp.FPConvOp_MOV_FtoI writes to a GPR. + -- Ref. `exec_fmov_general`. + ((extractLsb' 1 2 i.opcode) ++ i.rmode) ∈ [0b1100#4, 0b1101#4] ∧ + ¬(lsb i.opcode 0 = 1#1) + | _ => false + +/-- +Return the indices of all the GPR/SFP registers that may be modified by the +DPSFP instruction `i`. +-/ +def DPSFP.MayModifyRegs (i : BitVec 32) : Except Format (List RegType) := + if let some arm_inst := decode_data_proc_sfp i then + -- The 5 LSBs of all DPSFP instructions give the index of the Rd register. + let is_gpr := DPSFP.isGPRdest arm_inst + let idx := (extractLsb' 0 5 i) + .ok [(if is_gpr then (.GPR idx) else (.SFP idx))] + else + .error + f!"Instruction 0x{i.toHex} is not of class Data Processing (SIMD&FP)." 
+ def decode_ldst_inst (i : BitVec 32) : Option ArmInst := open ArmInst in open LDSTInst in @@ -152,6 +238,79 @@ def decode_ldst_inst (i : BitVec 32) : Option ArmInst := LDST (Advanced_simd_multiple_struct_post_indexed {Q, L, Rm, opcode, size, Rn, Rt}) | _ => none +@[state_simp_rules] +def LDST.multiple_struct_rpt_selem (opcode : BitVec 4) : Nat × Nat := + match opcode with + | 0b0000#4 => (1, 4) -- LD/ST4: 4 registers + | 0b0010#4 => (4, 1) -- LD/ST1: 4 registers + | 0b0100#4 => (1, 3) -- LD/ST3: 3 registers + | 0b0110#4 => (3, 1) -- LD/ST1: 3 registers + | 0b0111#4 => (1, 1) -- LD/ST1: 1 register + | 0b1000#4 => (1, 2) -- LD/ST2: 2 registers + | _ => (2, 1) -- LD/ST1: 2 registers (opcode: 0b1010#4) + +/-- +Return the indices of all the GPR/SFP registers that may be modified by the LDST +instruction `i`. +-/ +def LDST.mayModifiedRegs (i : BitVec 32) : Except Format (List RegType) := + if let some arm_inst := decode_ldst_inst i then + open ArmInst LDSTInst in + match arm_inst with + | LDST (Reg_imm_post_indexed x) => + if (getLsbD x.opc 0) then .ok [] -- Load + else if x.V = 1#1 then -- Store and SIMD + .ok [(.SFP x.Rt), (.GPR x.Rn)] + else .ok [(.GPR x.Rt), (.GPR x.Rn)] -- Store and non-SIMD + | LDST (Reg_unsigned_imm x) => + -- Similar to `Reg_imm_post_indexed`, except that `wback` is `false`, + -- which means that `x.Rn` isn't updated. 
+ if (getLsbD x.opc 0) then .ok [] -- Load + else if x.V = 1#1 then -- Store and SIMD + .ok [(.SFP x.Rt)] + else .ok [(.GPR x.Rt)] -- Store and non-SIMD + | LDST (Reg_unscaled_imm x) => + if getLsbD x.opc 0 then .ok [] -- Load + else .ok [(.SFP x.Rt)] -- Store + | LDST (Reg_pair_pre_indexed x) | LDST (Reg_pair_post_indexed x) => + if x.L = 0#1 then -- Store + if x.V = 1#1 then + .ok [(.SFP x.Rt), (.SFP x.Rt2), (.GPR x.Rn)] -- SIMD + else .ok [(.GPR x.Rt), (.GPR x.Rt2), (.GPR x.Rn)] -- non-SIMD + else .ok [] -- Load + | LDST (Reg_pair_signed_offset x) => + -- Similar to `Reg_pair_pre_indexed` and `Reg_pair_post_indexed`, except + -- `wback` is `false`, which means that `x.Rn` isn't updated. + if x.L = 0#1 then -- Store + if x.V = 1#1 then + .ok [(.SFP x.Rt), (.SFP x.Rt2)] -- SIMD + else .ok [(.GPR x.Rt), (.GPR x.Rt2)] -- non-SIMD + else .ok [] -- Load + | LDST (Advanced_simd_multiple_struct_post_indexed x) => + if x.L = 0#1 then -- Store + let (rpt, _) := LDST.multiple_struct_rpt_selem x.opcode + match rpt with + | 1 => .ok [(.SFP x.Rt), (.GPR x.Rn)] + | 2 => .ok [(.SFP x.Rt), (.SFP (x.Rt + 1)), (.GPR x.Rn)] + | 3 => .ok [(.SFP x.Rt), (.SFP (x.Rt + 1)), (.SFP (x.Rt + 2)), (.GPR x.Rn)] + | _ => .ok [(.SFP x.Rt), (.SFP (x.Rt + 1)), (.SFP (x.Rt + 2)), (.SFP (x.Rt + 3)), (.GPR x.Rn)] + else .ok [] -- Load + | LDST (Advanced_simd_multiple_struct x) => + -- Similar to `Advanced_simd_multiple_struct_post_indexed`, except + -- `wback` is `false`, which means that `x.Rn` isn't updated. + if x.L = 0#1 then -- Store + let (rpt, _) := LDST.multiple_struct_rpt_selem x.opcode + match rpt with + | 1 => .ok [(.SFP x.Rt)] + | 2 => .ok [(.SFP x.Rt), (.SFP (x.Rt + 1))] + | 3 => .ok [(.SFP x.Rt), (.SFP (x.Rt + 1)), (.SFP (x.Rt + 2))] + | _ => .ok [(.SFP x.Rt), (.SFP (x.Rt + 1)), (.SFP (x.Rt + 2)), (.SFP (x.Rt + 3))] + else .ok [] -- Load + | _ => .ok [] + else + .error + f!"Instruction 0x{i.toHex} is not of class LDST." + -- Decode a 32-bit instruction `i`. 
def decode_raw_inst (i : BitVec 32) : Option ArmInst := open ArmInst in @@ -166,6 +325,24 @@ def decode_raw_inst (i : BitVec 32) : Option ArmInst := | _, _ => none | _ => none +/-- +Return the indices of all the GPR/SFP registers that may be modified by the +instruction `i`. Note that this is an (over)approximation. The use-case is to +statically determine which register components can be affected by a program. +-/ +def mayModifiedRegs (i : BitVec 32) : Except Format (List RegType) := + open ArmInst in + match_bv i with + | [op0:1, _x:2, op1:4, _y:25] => + match op0, op1 with + | _, 0b1000#4 | _, 0b1001#4 => DPI.mayModifyRegs i + | _, 0b1010#4 | _, 0b1011#4 => BR.mayModifiedRegs i + | _, 0b1101#4 | _, 0b0101#4 => DPR.MayModifyRegs i + | _, 0b0111#4 | _, 0b1111#4 => DPSFP.MayModifyRegs i + | _, 0b0100#4 | _, 0b1100#4 | _, 0b0110#4 | _, 0b1110#4 => LDST.mayModifiedRegs i + | _, _ => .ok [] + | _ => .ok [] + ------------------------------------------------------------------------ -- add x1, x1, 1 diff --git a/Arm/Exec.lean b/Arm/Exec.lean index 18ff7562..d4de5a67 100644 --- a/Arm/Exec.lean +++ b/Arm/Exec.lean @@ -162,6 +162,13 @@ theorem run_onestep {s s': ArmState} {n : Nat} : (s' = run (n + 1) s) → ∃ s'', stepi s = s'' ∧ s' = run n s'' := by simp only [run, exists_eq_left', imp_self] +/-- helper lemma for automation -/ +theorem run_of_run_succ_of_stepi_eq {s0 s1 sf : ArmState} {n : Nat} + (h_run : sf = run (n + 1) s0) + (h_stepi : stepi s0 = s1) : + sf = run n s1 := by + simp_all only [run] + /-- helper lemma for automation -/ theorem stepi_eq_of_fetch_inst_of_decode_raw_inst (s : ArmState) (addr : BitVec 64) (rawInst : BitVec 32) (inst : ArmInst) diff --git a/Arm/Insts/Common.lean b/Arm/Insts/Common.lean index ec874fca..58c5068a 100644 --- a/Arm/Insts/Common.lean +++ b/Arm/Insts/Common.lean @@ -65,9 +65,9 @@ def AddWithCarry (x : BitVec n) (y : BitVec n) (carry_in : BitVec 1) : @[bitvec_rules, state_simp_rules] theorem fst_AddWithCarry_eq_add (x : BitVec n) (y : 
BitVec n) : (AddWithCarry x y 0#1).fst = x + y := by - simp [AddWithCarry, zeroExtend_eq, zeroExtend_zero, zeroExtend_zero] + simp [AddWithCarry, setWidth_eq, setWidth_zero, setWidth_zero] apply BitVec.eq_of_toNat_eq - simp only [toNat_truncate, toNat_add, Nat.add_mod_mod, Nat.mod_add_mod] + simp only [toNat_setWidth, toNat_add, Nat.add_mod_mod, Nat.mod_add_mod] have : 2^n < 2^(n + 1) := by refine Nat.pow_lt_pow_of_lt (by omega) (by omega) have : x.toNat + y.toNat < 2^(n + 1) := by omega @@ -77,9 +77,9 @@ theorem fst_AddWithCarry_eq_add (x : BitVec n) (y : BitVec n) : @[bitvec_rules, state_simp_rules] theorem fst_AddWithCarry_eq_sub_neg (x : BitVec n) (y : BitVec n) : (AddWithCarry x y 1#1).fst = x - ~~~y := by - simp [AddWithCarry, zeroExtend_eq, zeroExtend_zero, zeroExtend_zero] + simp [AddWithCarry, setWidth_eq, setWidth_zero, setWidth_zero] apply BitVec.eq_of_toNat_eq - simp only [toNat_truncate, toNat_add, Nat.add_mod_mod, Nat.mod_add_mod, toNat_ofNat, Nat.pow_one, + simp only [toNat_setWidth, toNat_add, Nat.add_mod_mod, Nat.mod_add_mod, toNat_ofNat, Nat.pow_one, Nat.reduceMod, toNat_sub, toNat_not] simp only [show 2 ^ n - (2 ^ n - 1 - y.toNat) = 1 + y.toNat by omega] have : 2^n < 2^(n + 1) := by @@ -91,10 +91,10 @@ theorem fst_AddWithCarry_eq_sub_neg (x : BitVec n) (y : BitVec n) : -- TODO: Is this rule helpful at all? 
@[bitvec_rules] -theorem zeroExtend_eq_of_AddWithCarry : - zeroExtend n (AddWithCarry x y carry_in).fst = +theorem setWidth_eq_of_AddWithCarry : + setWidth n (AddWithCarry x y carry_in).fst = (AddWithCarry x y carry_in).fst := by - simp only [zeroExtend_eq] + simp only [setWidth_eq] /-- Return `true` iff `cond` currently holds @@ -102,6 +102,7 @@ Return `true` iff `cond` currently holds Ref.: https://developer.arm.com/documentation/ddi0602/2024-06/Shared-Pseudocode/shared-functions-system?lang=en#impl-shared.ConditionHolds.1 -/ +@[state_simp_rules] def ConditionHolds (cond : BitVec 4) (s : ArmState) : Bool := open PFlag in let N := read_flag N s @@ -199,7 +200,7 @@ theorem Aligned_BitVecAdd_64_4 {x : BitVec 64} {y : BitVec 64} theorem Aligned_AddWithCarry_64_4 (x : BitVec 64) (y : BitVec 64) (carry_in : BitVec 1) (x_aligned : Aligned x 4) - (y_carry_in_aligned : Aligned (BitVec.add (extractLsb' 0 4 y) (zeroExtend 4 carry_in)) 4) + (y_carry_in_aligned : Aligned (BitVec.add (extractLsb' 0 4 y) (setWidth 4 carry_in)) 4) : Aligned (AddWithCarry x y carry_in).fst 4 := by unfold AddWithCarry Aligned at * simp_all only [Nat.sub_zero, zero_eq, add_eq] @@ -219,26 +220,42 @@ def CheckSPAlignment (s : ArmState) : Prop := instance : Decidable (CheckSPAlignment s) := by unfold CheckSPAlignment; infer_instance @[state_simp_rules] -theorem CheckSPAligment_of_w_different (h : StateField.GPR 31#5 ≠ fld) : +theorem CheckSPAlignment_w_different_eq (h : StateField.GPR 31#5 ≠ fld) : CheckSPAlignment (w fld v s) = CheckSPAlignment s := by simp_all only [CheckSPAlignment, state_simp_rules, minimal_theory, bitvec_rules] +/-- A rewording of `CheckSPAlignment_w_different_eq` as an implication, +to be used by proof automation in `AxEffects` -/ +theorem CheckSPAlignment_w_of_ne_sp_of (h : StateField.GPR 31#5 ≠ fld) : + CheckSPAlignment s → CheckSPAlignment (w fld v s) := by + simp only [CheckSPAlignment_w_different_eq h, imp_self] + @[state_simp_rules] -theorem CheckSPAligment_of_w_sp : 
+theorem CheckSPAlignment_of_w_sp : CheckSPAlignment (w (StateField.GPR 31#5) v s) = (Aligned v 4) := by simp_all only [CheckSPAlignment, state_simp_rules, minimal_theory, bitvec_rules] +/-- A rewording of `CheckSPAlignment_of_w_sp` as an implication, +to be used by proof automation in `AxEffects` -/ +theorem CheckSPAlignment_w_sp_of (h : Aligned v 4) : + CheckSPAlignment (w (StateField.GPR 31#5) v s) := by + simpa only [CheckSPAlignment_of_w_sp] using h + @[state_simp_rules] -theorem CheckSPAligment_of_write_mem_bytes : +theorem CheckSPAlignment_write_mem_bytes_eq : CheckSPAlignment (write_mem_bytes n addr v s) = CheckSPAlignment s := by simp_all only [CheckSPAlignment, state_simp_rules, minimal_theory, bitvec_rules] +theorem CheckSPAlignment_write_mem_bytes_of : + CheckSPAlignment s → CheckSPAlignment (write_mem_bytes n addr v s) := by + simp only [CheckSPAlignment_write_mem_bytes_eq, imp_self] + @[state_simp_rules] theorem CheckSPAlignment_AddWithCarry_64_4 (st : ArmState) (y : BitVec 64) (carry_in : BitVec 1) (x_aligned : CheckSPAlignment st) - (y_carry_in_aligned : Aligned (BitVec.add (extractLsb' 0 4 y) (zeroExtend 4 carry_in)) 4) + (y_carry_in_aligned : Aligned (BitVec.add (extractLsb' 0 4 y) (setWidth 4 carry_in)) 4) : Aligned (AddWithCarry (r (StateField.GPR 31#5) st) y carry_in).fst 4 := by - simp_all only [CheckSPAlignment, read_gpr, zeroExtend_eq, Nat.sub_zero, add_eq, + simp_all only [CheckSPAlignment, read_gpr, setWidth_eq, Nat.sub_zero, add_eq, Aligned_AddWithCarry_64_4] @[state_simp_rules] @@ -253,7 +270,7 @@ theorem CheckSPAlignment_of_r_sp_aligned {s : ArmState} {value} (h_eq : r (StateField.GPR 31#5) s = value) (h_aligned : Aligned value 4) : CheckSPAlignment s := by - simp only [CheckSPAlignment, read_gpr, h_eq, zeroExtend_eq, h_aligned] + simp only [CheckSPAlignment, read_gpr, h_eq, setWidth_eq, h_aligned] ---------------------------------------------------------------------- @@ -367,7 +384,7 @@ def invalid_bit_masks (immN : BitVec 1) (imms : 
BitVec 6) (immediate : Bool) else if len < 1 ∧ M < (1 <<< len) then true else - let levels := zeroExtend 6 (allOnes len) + let levels := setWidth 6 (allOnes len) if immediate ∧ (imms &&& levels = levels) then true else @@ -382,25 +399,16 @@ dsimproc [state_simp_rules] reduceInvalidBitMasks (invalid_bit_masks _ _ _ _) := let imm ← simp imm let M ← simp M let some ⟨immN_width, immN⟩ ← getBitVecValue? immN.expr | return .continue - if h1 : ¬ (immN_width = 1) then - return .continue - else - let some ⟨imms_width, imms⟩ ← getBitVecValue? imms.expr | return .continue - if h2 : ¬ (imms_width = 6) then - return .continue - else - let some M ← Nat.fromExpr? M.expr | return .continue - have h1' : immN_width = 1 := by simp_all only [Decidable.not_not] - have h2' : imms_width = 6 := by simp_all only [Decidable.not_not] - return .done <| - toExpr (invalid_bit_masks - (BitVec.cast h1' immN) - (BitVec.cast h2' imms) - imm.expr.isTrue - M) - -theorem Nat.lt_one_iff {n : Nat} : n < 1 ↔ n = 0 := by - omega + let some ⟨imms_width, imms⟩ ← getBitVecValue? imms.expr | return .continue + if h : immN_width = 1 ∧ imms_width = 6 then + let some M ← Nat.fromExpr? 
M.expr | return .continue + return .done <| + toExpr (invalid_bit_masks + (BitVec.cast (by simp_all only) immN) + (BitVec.cast (by simp_all only) imms) + imm.expr.isTrue + M) + else return .continue theorem M_divisible_by_esize_of_valid_bit_masks (immN : BitVec 1) (imms : BitVec 6) (immediate : Bool) (M : Nat): @@ -426,19 +434,20 @@ theorem M_divisible_by_esize_of_valid_bit_masks (immN : BitVec 1) (imms : BitVec -- https://kddnewton.com/2022/08/11/aarch64-bitmask-immediates.html -- Arm Implementation: -- https://developer.arm.com/documentation/ddi0602/2023-12/Shared-Pseudocode/aarch64-functions-bitmasks?lang=en#impl-aarch64.DecodeBitMasks.5 -def decode_bit_masks (immN : BitVec 1) (imms : BitVec 6) (immr : BitVec 6) - (immediate : Bool) (M : Nat) : Option (BitVec M × BitVec M) := +def decode_bit_masks (immN : BitVec 1) (imms immr : BitVec 6) + (immediate : Bool) (M : Nat) : + Option (BitVec M × BitVec M) := if h0 : invalid_bit_masks immN imms immediate M then none else let len := Option.get! 
$ highest_set_bit $ immN ++ ~~~imms - let levels := zeroExtend 6 (allOnes len) + let levels := setWidth 6 (allOnes len) let s := imms &&& levels let r := immr &&& levels let diff := s - r let esize := 1 <<< len let d := extractLsb' 0 len diff - let welem := zeroExtend esize (allOnes (s.toNat + 1)) - let telem := zeroExtend esize (allOnes (d.toNat + 1)) + let welem := setWidth esize (allOnes (s.toNat + 1)) + let telem := setWidth esize (allOnes (d.toNat + 1)) let wmask := replicate (M/esize) $ rotateRight welem r.toNat let tmask := replicate (M/esize) telem have h : esize * (M / esize) = M := by @@ -447,7 +456,8 @@ def decode_bit_masks (immN : BitVec 1) (imms : BitVec 6) (immr : BitVec 6) some (BitVec.cast h wmask, BitVec.cast h tmask) open Lean Meta Simp in -dsimproc [state_simp_rules] reduceDecodeBitMasks (decode_bit_masks _ _ _ _ _) := fun e => do +dsimproc [state_simp_rules] reduceDecodeBitMasks (decode_bit_masks _ _ _ _ _) := + fun e => do let_expr decode_bit_masks immN imms immr imm M ← e | return .continue let immN ← simp immN let imms ← simp imms @@ -455,28 +465,18 @@ dsimproc [state_simp_rules] reduceDecodeBitMasks (decode_bit_masks _ _ _ _ _) := let imm ← simp imm let M ← simp M let some ⟨immN_width, immN⟩ ← getBitVecValue? immN.expr | return .continue - if h1 : ¬ (immN_width = 1) then - return .continue - else - let some ⟨imms_width, imms⟩ ← getBitVecValue? imms.expr | return .continue - if h2 : ¬ (imms_width = 6) then - return .continue - else - let some ⟨immr_width, immr⟩ ← getBitVecValue? immr.expr | return .continue - if h3 : ¬ (immr_width = 6) then - return .continue - else - let some M ← Nat.fromExpr? 
M.expr | return .continue - have h1' : immN_width = 1 := by simp_all only [Decidable.not_not] - have h2' : imms_width = 6 := by simp_all only [Decidable.not_not] - have h3' : immr_width = 6 := by simp_all only [Decidable.not_not] - return .done <| - toExpr (decode_bit_masks - (BitVec.cast h1' immN) - (BitVec.cast h2' imms) - (BitVec.cast h3' immr) - imm.expr.isTrue - M) + let some ⟨imms_width, imms⟩ ← getBitVecValue? imms.expr | return .continue + let some ⟨immr_width, immr⟩ ← getBitVecValue? immr.expr | return .continue + if h : immN_width = 1 ∧ imms_width = 6 ∧ immr_width = 6 then + let some M ← Nat.fromExpr? M.expr | return .continue + return .done <| + toExpr (decode_bit_masks + (BitVec.cast (by simp_all only) immN) + (BitVec.cast (by simp_all only) imms) + (BitVec.cast (by simp_all only) immr) + imm.expr.isTrue + M) + else return .continue ---------------------------------------------------------------------- @@ -560,8 +560,8 @@ def rev_elems (n esize : Nat) (x : BitVec n) (h₀ : esize ∣ n) (h₁ : 0 < es if h0 : n <= esize then x else - let element := BitVec.zeroExtend esize x - let rest_x := BitVec.zeroExtend (n - esize) (x >>> esize) + let element := BitVec.setWidth esize x + let rest_x := BitVec.setWidth (n - esize) (x >>> esize) have h1 : esize <= n := by simp at h0; exact Nat.le_of_lt h0; done have h2 : esize ∣ (n - esize) := by @@ -596,10 +596,10 @@ def rev_vector (datasize container_size esize : Nat) (x : BitVec datasize) if h0 : container_size = datasize then BitVec.cast h0 (rev_elems container_size esize (BitVec.cast h0.symm x) h₃ h₀) else - let container := BitVec.zeroExtend container_size x + let container := BitVec.setWidth container_size x let new_container := rev_elems container_size esize container h₃ h₀ let new_datasize := datasize - container_size - let rest_x := BitVec.zeroExtend new_datasize (x >>> container_size) + let rest_x := BitVec.setWidth new_datasize (x >>> container_size) have h₄' : container_size ∣ new_datasize := by have h : 
container_size ∣ container_size := Nat.dvd_refl _ exact Nat.dvd_sub h₂ h₄ h @@ -633,12 +633,10 @@ def elem_get (vector : BitVec n) (e : Nat) (size : Nat) : BitVec size := the `e`'th element in the `vector`. -/ @[state_simp_rules] def elem_set (vector : BitVec n) (e : Nat) (size : Nat) - (value : BitVec size) (h: size > 0): BitVec n := + (value : BitVec size) : BitVec n := -- assert (e+1)*size <= n let lo := e * size - let hi := lo + size - 1 - have h : size = hi - lo + 1 := by simp only [hi, lo]; omega - BitVec.partInstall hi lo (BitVec.cast h value) vector + BitVec.partInstall lo size value vector ---------------------------------------------------------------------- @@ -650,7 +648,6 @@ structure ShiftInfo where unsigned := true round := false accumulate := false - h : esize > 0 deriving DecidableEq, Repr export ShiftInfo (esize elements shift unsigned round accumulate) @@ -670,7 +667,7 @@ def RShr (unsigned : Bool) (value : Int) (shift : Nat) (round : Bool) @[state_simp_rules] def Int_with_unsigned (unsigned : Bool) (value : BitVec n) : Int := - if unsigned then value.toNat else value.toInt + if unsigned then Int.ofNat value.toNat else value.toInt def shift_right_common_aux (e : Nat) (info : ShiftInfo) (operand : BitVec datasize) @@ -681,11 +678,196 @@ def shift_right_common_aux let elem := Int_with_unsigned info.unsigned $ elem_get operand e info.esize let shift_elem := RShr info.unsigned elem info.shift info.round let acc_elem := elem_get operand2 e info.esize + shift_elem - let result := elem_set result e info.esize acc_elem info.h + let result := elem_set result e info.esize acc_elem have _ : info.elements - (e + 1) < info.elements - e := by omega shift_right_common_aux (e + 1) info operand operand2 result termination_by (info.elements - e) +-- FIXME: should this be upstreamed? 
+theorem shift_le (x : Nat) (shift :Nat) : + x >>> shift ≤ x := by + simp only [Nat.shiftRight_eq_div_pow] + exact Nat.div_le_self x (2 ^ shift) + +set_option bv.ac_nf false + +@[state_simp_rules] +theorem shift_right_common_aux_64_2_tff (operand : BitVec 128) + (shift : Nat) (result : BitVec 128): + shift_right_common_aux 0 + {esize := 64, elements := 2, shift := shift, + unsigned := true, round := false, accumulate := false} + operand 0#128 result = + (ushiftRight (extractLsb' 64 64 operand) shift) + ++ (ushiftRight (extractLsb' 0 64 operand) shift) := by + unfold shift_right_common_aux + simp only [minimal_theory, bitvec_rules] + unfold shift_right_common_aux + simp only [minimal_theory, bitvec_rules] + unfold shift_right_common_aux + simp only [minimal_theory, bitvec_rules] + simp only [state_simp_rules, + minimal_theory, + -- FIXME: simply using bitvec_rules will expand out setWidth + -- bitvec_rules, + BitVec.cast_eq, + Nat.shiftRight_zero, + Nat.zero_shiftRight, + Nat.reduceMul, + Nat.reduceAdd, + Nat.add_one_sub_one, + Nat.sub_zero, + reduceAllOnes, + reduceZeroExtend, + Nat.zero_mul, + shiftLeft_zero_eq, + reduceNot, + BitVec.extractLsb_ofNat, + Nat.reducePow, + Nat.zero_mod, + Int.ofNat_emod, + Int.Nat.cast_ofNat_Int, + BitVec.zero_add, + Nat.reduceSub, + Nat.one_mul, + reduceHShiftLeft, + -- FIXME: should partInstall be state_simp_rules? 
+ partInstall, + -- Eliminating casting functions + Int.ofNat_eq_coe, ofInt_natCast, ofNat_toNat + ] + generalize (extractLsb' 64 64 operand) = x + generalize (extractLsb' 0 64 operand) = y + have h0 : ∀ (z : BitVec 64), extractLsb' 0 64 ((zeroExtend 65 z).ushiftRight shift) + = z.ushiftRight shift := by + intro z + simp only [ushiftRight, toNat_setWidth] + have h1: z.toNat % 2 ^ 65 = z.toNat := by omega + simp only [h1] + simp only [Std.Tactic.BVDecide.Normalize.BitVec.ofNatLt_reduce] + simp only [Nat.sub_zero, Nat.reduceAdd, BitVec.extractLsb'_ofNat, Nat.shiftRight_zero] + have h2 : z.toNat >>> shift % 2 ^ 65 = z.toNat >>> shift := by + refine Nat.mod_eq_of_lt ?h3 + have h4 : z.toNat >>> shift ≤ z.toNat := by exact shift_le z.toNat shift + omega + simp only [h2] + simp only [h0] + clear h0 + generalize x.ushiftRight shift = p + generalize y.ushiftRight shift = q + -- FIXME: This proof can be simplified once bv_decide supports shift + -- operations with variable offsets + bv_decide + +-- FIXME: where to put this? +theorem ofInt_eq_signExtend (x : BitVec 32) : + BitVec.ofInt 33 x.toInt = signExtend 33 x := by + exact rfl + +-- FIXME: where to put this? 
+theorem msb_signExtend (x : BitVec n) (hw: n < n'): + (signExtend n' x).msb = x.msb := by + rcases n' with rfl | n' + · simp only [show n = 0 by omega, + msb_eq_getLsbD_last, Nat.zero_sub, Nat.le_refl, + getLsbD_ge] + · simp only [msb_eq_getLsbD_last, Nat.add_one_sub_one, + getLsbD_signExtend, Nat.lt_add_one, + decide_True, Bool.true_and, ite_eq_right_iff] + by_cases h : n' < n + · rcases n with rfl | n + · simp + · simp only [h, Nat.add_one_sub_one, true_implies] + omega + · simp [h] + +theorem shift_right_common_aux_32_4_fff (operand : BitVec 128) + (shift : Nat) (result : BitVec 128): + shift_right_common_aux 0 + { esize := 32, elements := 4, shift := shift, + unsigned := false, round := false, accumulate := false} + operand 0#128 result = + (sshiftRight (extractLsb' 96 32 operand) shift) + ++ (sshiftRight (extractLsb' 64 32 operand) shift) + ++ (sshiftRight (extractLsb' 32 32 operand) shift) + ++ (sshiftRight (extractLsb' 0 32 operand) shift) := by + unfold shift_right_common_aux + simp only [minimal_theory, bitvec_rules] + unfold shift_right_common_aux + simp only [minimal_theory, bitvec_rules] + unfold shift_right_common_aux + simp only [minimal_theory, bitvec_rules] + unfold shift_right_common_aux + simp only [minimal_theory, bitvec_rules] + unfold shift_right_common_aux + simp only [minimal_theory, bitvec_rules] + simp only [state_simp_rules, + minimal_theory, + -- FIXME: simply using bitvec_rules will expand out setWidth + -- bitvec_rules, + BitVec.cast_eq, + Nat.shiftRight_zero, + Nat.zero_shiftRight, + Nat.reduceMul, + Nat.reduceAdd, + Nat.add_one_sub_one, + Nat.sub_zero, + reduceAllOnes, + reduceZeroExtend, + Nat.zero_mul, + shiftLeft_zero_eq, + reduceNot, + BitVec.extractLsb_ofNat, + Nat.reducePow, + Nat.zero_mod, + Int.ofNat_emod, + Int.Nat.cast_ofNat_Int, + BitVec.zero_add, + Nat.reduceSub, + Nat.one_mul, + reduceHShiftLeft, + partInstall, + -- Eliminating casting functions + ofInt_eq_signExtend + ] + generalize extractLsb' 0 32 operand = a + 
generalize extractLsb' 32 32 operand = b + generalize extractLsb' 64 32 operand = c + generalize extractLsb' 96 32 operand = d + have h : ∀ (x : BitVec 32), + extractLsb' 0 32 ((signExtend 33 x).sshiftRight shift) + = x.sshiftRight shift := by + intros x + apply eq_of_getLsbD_eq; intros i; simp at i + simp only [getLsbD_sshiftRight] + simp only [Nat.sub_zero, Nat.reduceAdd, getLsbD_extractLsb', Nat.zero_add, + getLsbD_sshiftRight, getLsbD_signExtend] + simp only [show (i : Nat) < 32 by omega, + decide_True, Bool.true_and] + simp only [show ¬33 ≤ (i : Nat) by omega, + decide_False, Bool.not_false, Bool.true_and] + simp only [show ¬32 ≤ (i : Nat) by omega, + decide_False, Bool.not_false, Bool.true_and] + by_cases h : shift + (i : Nat) < 32 + · simp only [h, reduceIte] + simp only [show shift + (i : Nat) < 33 by omega, + ↓reduceIte, decide_True, Bool.true_and] + · simp only [h, reduceIte] + have icases : shift + (i : Nat) = 32 ∨ 32 < shift + (i : Nat) := by omega + rcases icases with (h' | h') + · simp only [h', Nat.lt_add_one, ↓reduceIte, decide_True, Bool.true_and] + · simp only [show ¬(shift + (i : Nat) < 33) by omega, ↓reduceIte] + apply msb_signExtend; trivial + simp only [h] + clear h + generalize a.sshiftRight shift = a + generalize b.sshiftRight shift = b + generalize c.sshiftRight shift = c + generalize d.sshiftRight shift = d + -- FIXME: This proof can be simplified once bv_decide supports shift + -- operations with variable offsets + bv_decide + @[state_simp_rules] def shift_right_common (info : ShiftInfo) (datasize : Nat) (Rn : BitVec 5) (Rd : BitVec 5) @@ -703,11 +885,29 @@ def shift_left_common_aux else let elem := elem_get operand e info.esize let shift_elem := elem <<< info.shift - let result := elem_set result e info.esize shift_elem info.h + let result := elem_set result e info.esize shift_elem have _ : info.elements - (e + 1) < info.elements - e := by omega shift_left_common_aux (e + 1) info operand result termination_by (info.elements - e) +theorem 
shift_left_common_aux_64_2 (operand : BitVec 128) + (shift : Nat) (unsigned: Bool) (round : Bool) (accumulate : Bool) + (result : BitVec 128): + shift_left_common_aux 0 + {esize := 64, elements := 2, shift := shift, + unsigned := unsigned, round := round, accumulate := accumulate} + operand result = + (extractLsb' 64 64 operand <<< shift) + ++ (extractLsb' 0 64 operand <<< shift) := by + unfold shift_left_common_aux + simp only [minimal_theory, bitvec_rules] + unfold shift_left_common_aux + simp only [minimal_theory, bitvec_rules] + unfold shift_left_common_aux + simp only [minimal_theory, bitvec_rules] + simp only [state_simp_rules, minimal_theory, bitvec_rules, partInstall] + bv_decide + @[state_simp_rules] def shift_left_common (info : ShiftInfo) (datasize : Nat) (Rn : BitVec 5) (s : ArmState) diff --git a/Arm/Insts/DPI/Move_wide_imm.lean b/Arm/Insts/DPI/Move_wide_imm.lean index 7b72326e..6681cac4 100644 --- a/Arm/Insts/DPI/Move_wide_imm.lean +++ b/Arm/Insts/DPI/Move_wide_imm.lean @@ -25,8 +25,7 @@ def exec_move_wide_imm (inst : Move_wide_imm_cls) (s : ArmState) : ArmState := let result := if inst.opc = 0b11#2 then read_gpr datasize inst.Rd s else BitVec.zero datasize - have h : 16 = pos + 15 - pos + 1 := by omega - let result := partInstall (pos + 15) pos (BitVec.cast h inst.imm16) result + let result := partInstall pos 16 inst.imm16 result let result := if inst.opc = 0b00#2 then ~~~result else result -- State Update let s := write_gpr datasize inst.Rd result s diff --git a/Arm/Insts/DPI/PC_rel_addressing.lean b/Arm/Insts/DPI/PC_rel_addressing.lean index 82f63a35..b3c83348 100644 --- a/Arm/Insts/DPI/PC_rel_addressing.lean +++ b/Arm/Insts/DPI/PC_rel_addressing.lean @@ -24,7 +24,7 @@ def exec_pc_rel_addressing (inst : PC_rel_addressing_cls) (s : ArmState) : ArmSt let result := if inst.op = 0#1 then orig_pc + imm -- ADR else - (BitVec.partInstall 11 0 0#12 orig_pc) + imm + (BitVec.partInstall 0 12 0#12 orig_pc) + imm -- State Updates let s := write_gpr_zr 64 
inst.Rd result s let s := write_pc (orig_pc + 4#64) s diff --git a/Arm/Insts/DPSFP/Advanced_simd_copy.lean b/Arm/Insts/DPSFP/Advanced_simd_copy.lean index ffd077a1..bb383d7c 100644 --- a/Arm/Insts/DPSFP/Advanced_simd_copy.lean +++ b/Arm/Insts/DPSFP/Advanced_simd_copy.lean @@ -17,15 +17,32 @@ namespace DPSFP open BitVec def dup_aux (e : Nat) (elements : Nat) (esize : Nat) - (element : BitVec esize) (result : BitVec datasize) (H : 0 < esize) : BitVec datasize := - if h₀ : elements <= e then + (element : BitVec esize) (result : BitVec datasize) : BitVec datasize := + if elements <= e then result else - let result := elem_set result e esize element H - have h : elements - (e + 1) < elements - e := by omega - dup_aux (e + 1) elements esize element result H + let result := elem_set result e esize element + dup_aux (e + 1) elements esize element result termination_by (elements - e) +set_option bv.ac_nf false + +theorem dup_aux_0_4_32 (element : BitVec 32) (result : BitVec 128) : + dup_aux 0 4 32 element result + = element ++ element ++ element ++ element := by + unfold dup_aux + simp [minimal_theory] + unfold dup_aux + simp [minimal_theory] + unfold dup_aux + simp [minimal_theory] + unfold dup_aux + simp [minimal_theory] + unfold dup_aux + simp [minimal_theory] + simp [state_simp_rules, partInstall] + bv_decide + @[state_simp_rules] def exec_dup_element (inst : Advanced_simd_copy_cls) (s : ArmState) : ArmState := let size := lowest_set_bit inst.imm5 @@ -38,9 +55,8 @@ def exec_dup_element (inst : Advanced_simd_copy_cls) (s : ArmState) : ArmState : let datasize := 64 <<< inst.Q.toNat let elements := datasize / esize let operand := read_sfp idxdsize inst.Rn s - have h₀ : esize > 0 := by apply zero_lt_shift_left_pos (by decide) let element := elem_get operand index esize - let result := dup_aux 0 elements esize element (BitVec.zero datasize) h₀ + let result := dup_aux 0 elements esize element (BitVec.zero datasize) -- State Updates let s := write_pc ((read_pc s) + 4#64) s let 
s := write_sfp datasize inst.Rd result s @@ -56,8 +72,7 @@ def exec_dup_general (inst : Advanced_simd_copy_cls) (s : ArmState) : ArmState : let datasize := 64 <<< inst.Q.toNat let elements := datasize / esize let element := read_gpr esize inst.Rn s - have h₀ : 0 < esize := by apply zero_lt_shift_left_pos (by decide) - let result := dup_aux 0 elements esize element (BitVec.zero datasize) h₀ + let result := dup_aux 0 elements esize element (BitVec.zero datasize) -- State Updates let s := write_pc ((read_pc s) + 4#64) s let s := write_sfp datasize inst.Rd result s @@ -75,9 +90,8 @@ def exec_ins_element (inst : Advanced_simd_copy_cls) (s : ArmState) : ArmState : let esize := 8 <<< size let operand := read_sfp idxdsize inst.Rn s let result := read_sfp 128 inst.Rd s - have h₀ : esize > 0 := by apply zero_lt_shift_left_pos (by decide) let elem := elem_get operand src_index esize - let result := elem_set result dst_index esize elem h₀ + let result := elem_set result dst_index esize elem -- State Updates let s := write_pc ((read_pc s) + 4#64) s let s := write_sfp 128 inst.Rd result s @@ -93,8 +107,7 @@ def exec_ins_general (inst : Advanced_simd_copy_cls) (s : ArmState) : ArmState : let esize := 8 <<< size let element := read_gpr esize inst.Rn s let result := read_sfp 128 inst.Rd s - have h₀ : esize > 0 := by apply zero_lt_shift_left_pos (by decide) - let result := elem_set result index esize element h₀ + let result := elem_set result index esize element -- State Updates let s := write_pc ((read_pc s) + 4#64) s let s := write_sfp 128 inst.Rd result s diff --git a/Arm/Insts/DPSFP/Advanced_simd_modified_immediate.lean b/Arm/Insts/DPSFP/Advanced_simd_modified_immediate.lean index a46404a8..3f9feabf 100644 --- a/Arm/Insts/DPSFP/Advanced_simd_modified_immediate.lean +++ b/Arm/Insts/DPSFP/Advanced_simd_modified_immediate.lean @@ -90,6 +90,18 @@ def AdvSIMDExpandImm (op : BitVec 1) (cmode : BitVec 4) (imm8 : BitVec 8) : BitV lsb imm8 7 ++ ~~~(lsb imm8 6) ++ (replicate 8 $ lsb imm8 
6) ++ extractLsb' 0 6 imm8 ++ BitVec.zero 48 +open Lean Meta Simp in +dsimproc [state_simp_rules] reduceAdvSIMDExpandImm (AdvSIMDExpandImm _ _ _) := fun e => do + let_expr AdvSIMDExpandImm op cmode imm8 ← e | return .continue + let some ⟨op_n, op⟩ ← getBitVecValue? op | return .continue + let some ⟨cmode_n, cmode⟩ ← getBitVecValue? cmode | return .continue + let some ⟨imm8_n, imm8⟩ ← getBitVecValue? imm8 | return .continue + if h : op_n = 1 ∧ cmode_n = 4 ∧ imm8_n = 8 then + return .done <| toExpr (AdvSIMDExpandImm + (BitVec.cast (by simp_all only) op) + (BitVec.cast (by simp_all only) cmode) + (BitVec.cast (by simp_all only) imm8)) + else return .continue private theorem mul_div_norm_form_lemma (n m : Nat) (_h1 : 0 < m) (h2 : n ∣ m) : (n * (m / n)) = n * m / n := by diff --git a/Arm/Insts/DPSFP/Advanced_simd_permute.lean b/Arm/Insts/DPSFP/Advanced_simd_permute.lean index 95a2920c..d3ded2eb 100644 --- a/Arm/Insts/DPSFP/Advanced_simd_permute.lean +++ b/Arm/Insts/DPSFP/Advanced_simd_permute.lean @@ -17,17 +17,16 @@ open BitVec def trn_aux (p : Nat) (pairs : Nat) (esize : Nat) (part : Nat) (operand1 : BitVec datasize) (operand2 : BitVec datasize) - (result : BitVec datasize) (h : esize > 0) : BitVec datasize := - if h₀ : pairs <= p then + (result : BitVec datasize) : BitVec datasize := + if pairs <= p then result else let idx_from := 2 * p + part let op1_part := elem_get operand1 idx_from esize let op2_part := elem_get operand2 idx_from esize - let result := elem_set result (2 * p) esize op1_part h - let result := elem_set result (2 * p + 1) esize op2_part h - have h₁ : pairs - (p + 1) < pairs - p := by omega - trn_aux (p + 1) pairs esize part operand1 operand2 result h + let result := elem_set result (2 * p) esize op1_part + let result := elem_set result (2 * p + 1) esize op2_part + trn_aux (p + 1) pairs esize part operand1 operand2 result termination_by (pairs - p) @[state_simp_rules] @@ -43,8 +42,7 @@ def exec_trn (inst : Advanced_simd_permute_cls) (s : ArmState) : 
ArmState := let pairs := elements / 2 let operand1 := read_sfp datasize inst.Rn s let operand2 := read_sfp datasize inst.Rm s - have h : esize > 0 := by apply zero_lt_shift_left_pos (by decide) - let result := trn_aux 0 pairs esize part operand1 operand2 (BitVec.zero datasize) h + let result := trn_aux 0 pairs esize part operand1 operand2 (BitVec.zero datasize) -- Update States let s := write_sfp datasize inst.Rd result s let s := write_pc ((read_pc s) + 4#64) s diff --git a/Arm/Insts/DPSFP/Advanced_simd_scalar_shift_by_immediate.lean b/Arm/Insts/DPSFP/Advanced_simd_scalar_shift_by_immediate.lean index 26684d78..585c796c 100644 --- a/Arm/Insts/DPSFP/Advanced_simd_scalar_shift_by_immediate.lean +++ b/Arm/Insts/DPSFP/Advanced_simd_scalar_shift_by_immediate.lean @@ -22,7 +22,6 @@ def exec_shift_right_scalar write_err (StateError.Illegal s!"Illegal {inst} encountered!") s else let esize := 8 <<< 3 - have h : esize > 0 := by decide let datasize := esize let (info : ShiftInfo) := { esize := esize, @@ -30,9 +29,7 @@ def exec_shift_right_scalar shift := (esize * 2) - (inst.immh ++ inst.immb).toNat, unsigned := inst.U = 0b1#1, round := (lsb inst.opcode 2) = 0b1#1, - accumulate := (lsb inst.opcode 1) = 0b1#1, - h := h - } + accumulate := (lsb inst.opcode 1) = 0b1#1 } let result := shift_right_common info datasize inst.Rn inst.Rd s -- State Update let s := write_sfp datasize inst.Rd result s @@ -46,14 +43,11 @@ def exec_shl_scalar write_err (StateError.Illegal s!"Illegal {inst} encountered!") s else let esize := 8 <<< 3 - have h : esize > 0 := by decide let datasize := esize let (info : ShiftInfo) := { esize := esize, elements := 1, - shift := (inst.immh ++ inst.immb).toNat - esize, - h := h - } + shift := (inst.immh ++ inst.immb).toNat - esize } let result := shift_left_common info datasize inst.Rn s -- State Update let s := write_sfp datasize inst.Rd result s diff --git a/Arm/Insts/DPSFP/Advanced_simd_shift_by_immediate.lean 
b/Arm/Insts/DPSFP/Advanced_simd_shift_by_immediate.lean index 7716b4b8..5da1f99c 100644 --- a/Arm/Insts/DPSFP/Advanced_simd_shift_by_immediate.lean +++ b/Arm/Insts/DPSFP/Advanced_simd_shift_by_immediate.lean @@ -32,9 +32,6 @@ def exec_shift_right_vector else let l := highest_set_bit inst.immh let esize := 8 <<< l - have h : esize > 0 := by - simp only [esize] - apply zero_lt_shift_left_pos (by decide) let datasize := 64 <<< inst.Q.toNat let (info : ShiftInfo) := { esize := esize, @@ -42,8 +39,7 @@ def exec_shift_right_vector shift := (2 * esize) - (inst.immh ++ inst.immb).toNat, unsigned := inst.U = 0b1#1, round := (lsb inst.opcode 2) = 0b1#1, - accumulate := (lsb inst.opcode 1) = 0b1#1, - h := h } + accumulate := (lsb inst.opcode 1) = 0b1#1 } let result := shift_right_common info datasize inst.Rn inst.Rd s -- State Update let s := write_sfp datasize inst.Rd result s @@ -58,15 +54,11 @@ def exec_shl_vector else let l := highest_set_bit inst.immh let esize := 8 <<< l - have h : esize > 0 := by - simp only [esize] - apply zero_lt_shift_left_pos (by decide) let datasize := 64 <<< inst.Q.toNat let (info : ShiftInfo) := { esize := esize, elements := datasize / esize, - shift := (inst.immh ++ inst.immb).toNat - esize, - h := h } + shift := (inst.immh ++ inst.immb).toNat - esize } let result := shift_left_common info datasize inst.Rn s -- State Update let s := write_sfp datasize inst.Rd result s diff --git a/Arm/Insts/DPSFP/Advanced_simd_table_lookup.lean b/Arm/Insts/DPSFP/Advanced_simd_table_lookup.lean index 16024f23..beb4fa78 100644 --- a/Arm/Insts/DPSFP/Advanced_simd_table_lookup.lean +++ b/Arm/Insts/DPSFP/Advanced_simd_table_lookup.lean @@ -21,8 +21,7 @@ def create_table (i : Nat) (regs : Nat) (Rn : BitVec 5) (table : BitVec (128 * r table else let val := read_sfp 128 Rn s - have h₁ : 128 = 128 * i + 127 - 128 * i + 1 := by omega - let table := BitVec.partInstall (128 * i + 127) (128 * i) (BitVec.cast h₁ val) table + let table := BitVec.partInstall (128 * i) 128 val 
table let Rn := (Rn + 1) % 32 have h₂ : regs - (i + 1) < regs - i := by omega create_table (i + 1) regs Rn table s @@ -31,18 +30,16 @@ def create_table (i : Nat) (regs : Nat) (Rn : BitVec 5) (table : BitVec (128 * r def tblx_aux (i : Nat) (elements : Nat) (indices : BitVec datasize) (regs : Nat) (table : BitVec (128 * regs)) (result: BitVec datasize) : BitVec datasize := - if h₀ : elements <= i then + if elements <= i then result else - have h₁ : 8 > 0 := by decide let index := (elem_get indices i 8).toNat let result := if index < 16 * regs then let val := elem_get table index 8 - elem_set result i 8 val h₁ + elem_set result i 8 val else result - have h₂ : elements - (i + 1) < elements - i := by omega tblx_aux (i + 1) elements indices regs table result termination_by (elements - i) diff --git a/Arm/Insts/DPSFP/Advanced_simd_three_different.lean b/Arm/Insts/DPSFP/Advanced_simd_three_different.lean index 87dede23..d9489a1a 100644 --- a/Arm/Insts/DPSFP/Advanced_simd_three_different.lean +++ b/Arm/Insts/DPSFP/Advanced_simd_three_different.lean @@ -4,7 +4,9 @@ Released under Apache 2.0 license as described in the file LICENSE. Author(s): Yan Peng -/ -- PMULL and PMULL2 --- Polynomial arithmetic over {0,1}: https://tiny.amazon.com/5h01fjm6/devearmdocuddi0cApplApplPoly +-- Polynomial arithmetic over {0,1}: +-- Ref.: +-- https://developer.arm.com/documentation/ddi0602/2024-09/SIMD-FP-Instructions/PMULL--PMULL2--Polynomial-multiply-long-?lang=en import Arm.Decode import Arm.State @@ -23,28 +25,31 @@ def polynomial_mult_aux (i : Nat) (result : BitVec (m+n)) result else let new_res := if lsb op1 i = 1 then result ^^^ (op2 <<< i) else result - have h : m - (i + 1) < m - i := by omega + -- We replace `if` below by bitvec operations. 
+ -- have h : (1 * (m + n)) = m + n := by simp only [Nat.one_mul] + -- let test := (replicate (m+n) (lsb op1 i)).cast h + -- let then_branch := test &&& (result ^^^ (op2 <<< i)) + -- let else_branch := ~~~test &&& result + -- let new_res := then_branch ||| else_branch polynomial_mult_aux (i+1) new_res op1 op2 termination_by (m - i) def polynomial_mult (op1 : BitVec m) (op2 : BitVec n) : BitVec (m+n) := - let result := BitVec.zero (m+n) + let result := 0#(m+n) let extended_op2 := zeroExtend (m+n) op2 polynomial_mult_aux 0 result op1 extended_op2 def pmull_op (e : Nat) (esize : Nat) (elements : Nat) (x : BitVec n) - (y : BitVec n) (result : BitVec (n*2)) (H : 0 < esize) : BitVec (n*2) := - if h₀ : elements <= e then + (y : BitVec n) (result : BitVec (n*2)) : BitVec (n*2) := + if elements <= e then result else let element1 := elem_get x e esize let element2 := elem_get y e esize let elem_result := polynomial_mult element1 element2 have h₁ : esize + esize = 2 * esize := by omega - have h₂ : 2 * esize > 0 := by omega - let result := elem_set result e (2 * esize) (BitVec.cast h₁ elem_result) h₂ - have _ : elements - (e + 1) < elements - e := by omega - pmull_op (e + 1) esize elements x y result H + let result := elem_set result e (2 * esize) (BitVec.cast h₁ elem_result) + pmull_op (e + 1) esize elements x y result termination_by (elements - e) @[state_simp_rules] @@ -54,14 +59,13 @@ def exec_pmull (inst : Advanced_simd_three_different_cls) (s : ArmState) : ArmSt write_err (StateError.Illegal s!"Illegal {inst} encountered!") s else let esize := 8 <<< inst.size.toNat - have h₀ : 0 < esize := by apply zero_lt_shift_left_pos (by decide) let datasize := 64 let part := inst.Q.toNat let elements := datasize / esize let operand1 := Vpart_read inst.Rn part datasize s let operand2 := Vpart_read inst.Rm part datasize s let result := - pmull_op 0 esize elements operand1 operand2 (BitVec.zero (2*datasize)) h₀ + pmull_op 0 esize elements operand1 operand2 (BitVec.zero (2*datasize)) 
let s := write_sfp (datasize*2) inst.Rd result s let s := write_pc ((read_pc s) + 4#64) s s diff --git a/Arm/Insts/DPSFP/Advanced_simd_three_same.lean b/Arm/Insts/DPSFP/Advanced_simd_three_same.lean index 7da3cc42..87e8d8b9 100644 --- a/Arm/Insts/DPSFP/Advanced_simd_three_same.lean +++ b/Arm/Insts/DPSFP/Advanced_simd_three_same.lean @@ -19,18 +19,15 @@ open BitVec def binary_vector_op_aux (e : Nat) (elems : Nat) (esize : Nat) (op : BitVec esize → BitVec esize → BitVec esize) - (x : BitVec n) (y : BitVec n) (result : BitVec n) - (H : esize > 0) : BitVec n := - if h₀ : elems ≤ e then + (x : BitVec n) (y : BitVec n) (result : BitVec n) : BitVec n := + if elems ≤ e then result else - have h₁ : e < elems := by omega let element1 := elem_get x e esize let element2 := elem_get y e esize let elem_result := op element1 element2 - let result := elem_set result e esize elem_result H - have ht1 : elems - (e + 1) < elems - e := by omega - binary_vector_op_aux (e + 1) elems esize op x y result H + let result := elem_set result e esize elem_result + binary_vector_op_aux (e + 1) elems esize op x y result termination_by (elems - e) /-- @@ -38,8 +35,8 @@ def binary_vector_op_aux (e : Nat) (elems : Nat) (esize : Nat) -/ @[state_simp_rules] def binary_vector_op (esize : Nat) (op : BitVec esize → BitVec esize → BitVec esize) - (x : BitVec n) (y : BitVec n) (H : 0 < esize) : BitVec n := - binary_vector_op_aux 0 (n / esize) esize op x y (BitVec.zero n) H + (x : BitVec n) (y : BitVec n) : BitVec n := + binary_vector_op_aux 0 (n / esize) esize op x y (BitVec.zero n) @[state_simp_rules] def exec_binary_vector (inst : Advanced_simd_three_same_cls) (s : ArmState) : ArmState := @@ -48,12 +45,11 @@ def exec_binary_vector (inst : Advanced_simd_three_same_cls) (s : ArmState) : Ar else let datasize := if inst.Q = 1#1 then 128 else 64 let esize := 8 <<< (BitVec.toNat inst.size) - have h_esize : 0 < esize := by simp [esize]; apply zero_lt_shift_left_pos (by decide) let sub_op := inst.U = 1 let 
operand1 := read_sfp datasize inst.Rn s let operand2 := read_sfp datasize inst.Rm s let op := if sub_op then BitVec.sub else BitVec.add - let result := binary_vector_op esize op operand1 operand2 h_esize + let result := binary_vector_op esize op operand1 operand2 let s := write_sfp datasize inst.Rd result s s diff --git a/Arm/Insts/DPSFP/Advanced_simd_two_reg_misc.lean b/Arm/Insts/DPSFP/Advanced_simd_two_reg_misc.lean index 19a4ada3..3b1d8a28 100644 --- a/Arm/Insts/DPSFP/Advanced_simd_two_reg_misc.lean +++ b/Arm/Insts/DPSFP/Advanced_simd_two_reg_misc.lean @@ -75,6 +75,18 @@ theorem container_size_dvd_datasize (x : BitVec 2) (q : BitVec 1) : repeat trivial done +/-- +Vector instruction `REV64` that reverses the order of 1-byte elements in each +64-bit slice of the 128-bit input. + +Ref.: +https://developer.arm.com/documentation/ddi0602/2024-06/SIMD-FP-Instructions/REV64--Reverse-elements-in-64-bit-doublew +-/ +def vrev128_64_8 (x : BitVec 128) : BitVec 128 := + rev_vector 128 64 8 x + (by decide) (by decide) (by decide) + (by decide) (by decide) + @[state_simp_rules] def exec_advanced_simd_two_reg_misc (inst : Advanced_simd_two_reg_misc_cls) (s : ArmState) : ArmState := @@ -100,12 +112,20 @@ def exec_advanced_simd_two_reg_misc have h4 : container_size <= datasize := by refine Nat.le_of_dvd ?h h2; simp [datasize]; split <;> trivial let operand := read_sfp datasize inst.Rn s - let result := - match inst.U, inst.opcode with - | 0#1, 0b00000#5 -- REV64 + let result : Option (BitVec datasize) := + -- (FIXME) Define wrappers around `rev_vector` (like + -- `vrev128_64_8`) to see cleaner terms during proofs. 
+ match inst.U, inst.opcode with + | 0#1, 0b00000#5 => -- REV64 + if h_vrev128_64_8 : datasize = 128 ∧ container_size = 64 ∧ esize = 8 then + have h_datasize : datasize = 128 := by simp_all only + some ((vrev128_64_8 (operand.cast h_datasize)).cast h_datasize.symm) + else + some (rev_vector datasize container_size esize operand + h3 h0' h4 h1 h2) | 0#1, 0b00001#5 -- REV16 | 1#1, 0b00000#5 => -- REV32 - some (rev_vector datasize container_size esize operand + some (rev_vector datasize container_size esize operand h3 h0' h4 h1 h2) | _, _ => none -- State Updates diff --git a/Arm/Insts/DPSFP/Conversion_between_FP_and_Int.lean b/Arm/Insts/DPSFP/Conversion_between_FP_and_Int.lean index b1f60918..366405c2 100644 --- a/Arm/Insts/DPSFP/Conversion_between_FP_and_Int.lean +++ b/Arm/Insts/DPSFP/Conversion_between_FP_and_Int.lean @@ -44,20 +44,20 @@ def exec_fmov_general let intsize := 32 <<< inst.sf.toNat let decode_fltsize := if inst.ftype = 0b10#2 then 64 else (8 <<< (inst.ftype ^^^ 0b10#2).toNat) match (extractLsb' 1 2 inst.opcode) ++ inst.rmode with - | 1100 => -- FMOV + | 0b1100 => -- FMOV if decode_fltsize ≠ 16 ∧ decode_fltsize ≠ intsize then write_err (StateError.Illegal s!"Illegal {inst} encountered!") s else - let op := if lsb inst.opcode 0 = 1 + let op := if lsb inst.opcode 0 = 1#1 then FPConvOp.FPConvOp_MOV_ItoF else FPConvOp.FPConvOp_MOV_FtoI let part := 0 fmov_general_aux intsize decode_fltsize op part inst s - | 1101 => -- FMOV D[1] + | 0b1101 => -- FMOV D[1] if intsize ≠ 64 ∨ inst.ftype ≠ 0b10#2 then write_err (StateError.Illegal s!"Illegal {inst} encountered!") s else - let op := if lsb inst.opcode 0 = 1 + let op := if lsb inst.opcode 0 = 1#1 then FPConvOp.FPConvOp_MOV_ItoF else FPConvOp.FPConvOp_MOV_FtoI let part := 1 diff --git a/Arm/Insts/LDST/Advanced_simd_multiple_struct.lean b/Arm/Insts/LDST/Advanced_simd_multiple_struct.lean index 7ebb7340..732ec8f2 100644 --- a/Arm/Insts/LDST/Advanced_simd_multiple_struct.lean +++ 
b/Arm/Insts/LDST/Advanced_simd_multiple_struct.lean @@ -38,15 +38,7 @@ def ld1_st1_operation (wback : Bool) (inst : Multiple_struct_inst_fields) let t2 := t + 1 let t3 := t + 2 let t4 := t + 3 - let (rpt, selem) := - match inst.opcode with - | 0b0000#4 => (1, 4) -- LD/ST4: 4 registers - | 0b0010#4 => (4, 1) -- LD/ST1: 4 registers - | 0b0100#4 => (1, 3) -- LD/ST3: 3 registers - | 0b0110#4 => (3, 1) -- LD/ST1: 3 registers - | 0b0111#4 => (1, 1) -- LD/ST1: 1 register - | 0b1000#4 => (1, 2) -- LD/ST2: 2 registers - | _ => (2, 1) -- LD/ST1: 2 registers (opcode: 0b1010#4) + let (rpt, selem) := multiple_struct_rpt_selem inst.opcode if inst.size = 0b11#2 && datasize = 64 && selem ≠ 1 then write_err (StateError.Illegal s!"Illegal instruction {inst_str} encountered!") s else diff --git a/Arm/Memory/MemoryProofs.lean b/Arm/Memory/MemoryProofs.lean index 3d53b9d1..6a1a9ee6 100644 --- a/Arm/Memory/MemoryProofs.lean +++ b/Arm/Memory/MemoryProofs.lean @@ -7,7 +7,7 @@ import Arm.FromMathlib import Arm.State import Arm.Memory.Separate import Arm.Memory.SeparateProofs - +import Tactics.BvOmegaBench -- In this file, we have memory (non-)interference proofs. ---------------------------------------------------------------------- @@ -16,6 +16,27 @@ section MemoryProofs open BitVec +/-! 
## One byte read/write lemmas-/ +namespace Memory + +theorem read_write_same : + read addr (write addr v mem) = v := by + simp [read, write, store_read_over_write_same] + +theorem read_write_different (h : addr1 ≠ addr2) : + read addr1 (write addr2 v s) = read addr1 s := by + simp [read, write, store_read_over_write_different (h := h)] + +theorem write_write_shadow : + write addr val2 (write addr val1 s) = write addr val2 s := by + unfold write write_store; simp_all + +theorem write_irrelevant : + write addr (read addr s) s = s := by + simp [read, write, store_write_irrelevant] + +end Memory + ---------------------------------------------------------------------- -- Key theorem: read_mem_bytes_of_write_mem_bytes_same @@ -34,82 +55,84 @@ theorem mem_separate_preserved_second_start_addr_add_one apply BitVec.val_nat_le 1 m 64 h0 (_ : 1 < 2^64) h1 decide -theorem read_mem_of_write_mem_bytes_different (hn1 : n <= 2^64) - (h : mem_separate addr1 addr1 addr2 (addr2 + (BitVec.ofNat 64 (n - 1)))) : - read_mem addr1 (write_mem_bytes n addr2 v s) = read_mem addr1 s := by - by_cases hn0 : n = 0 - case pos => -- n = 0 - subst n; simp only [write_mem_bytes] - case neg => -- n ≠ 0 - have hn0' : 0 < n := by omega - induction n, hn0' using Nat.le_induction generalizing addr2 s - case base => - have h' : addr1 ≠ addr2 := by apply mem_separate_starting_addresses_neq h - simp only [write_mem_bytes] - apply read_mem_of_write_mem_different h' - case succ => - have h' : addr1 ≠ addr2 := by refine mem_separate_starting_addresses_neq h - rename_i m hn n_ih - simp_all only [Nat.succ_sub_succ_eq_sub, Nat.sub_zero, - Nat.succ_ne_zero, not_false_eq_true, ne_eq, - write_mem_bytes, Nat.add_eq, Nat.add_zero] - rw [n_ih] - · rw [read_mem_of_write_mem_different h'] - · omega - · rw [addr_add_one_add_m_sub_one m addr2 hn hn1] - rw [mem_separate_preserved_second_start_addr_add_one hn hn1 h] - · omega - done +theorem Memory.read_write_bytes_different (hn1 : n ≤ 2^64) + (h : mem_separate addr1 addr1 
addr2 (addr2 + (BitVec.ofNat 64 (n - 1)))) : + read addr1 (write_bytes n addr2 v mem) = read addr1 mem := by + induction n generalizing mem addr1 addr2 + case zero => simp only [write_bytes] + case succ n ih => + have h_neq : addr1 ≠ addr2 := + mem_separate_starting_addresses_neq h + rw [Nat.add_one_sub_one] at h + cases n + case zero => + simp [write_bytes, read_write_different h_neq] + case succ n => + have h_sep : mem_separate addr1 addr1 (addr2 + 1#64) + (addr2 + 1#64 + BitVec.ofNat 64 n) := by + unfold mem_separate mem_overlap at h ⊢ + simp only [BitVec.sub_self, ofNat_add, Bool.or_self_right, Bool.not_or, + Bool.and_eq_true, Bool.not_eq_eq_eq_not, Bool.not_true, + decide_eq_false_iff_not, BitVec.not_le] at h ⊢ + generalize hn' : BitVec.ofNat 64 n = n' at * + have : n' ≠ -1 := by bv_omega + clear hn1 ih + bv_decide + have h_neq : addr1 ≠ addr2 := + mem_separate_starting_addresses_neq h + rw [write_bytes, ih (by omega) h_sep, Memory.read_write_different h_neq] + +theorem read_mem_of_write_mem_bytes_different (hn1 : n ≤ 2^64) + (h : mem_separate addr1 addr1 addr2 (addr2 + (BitVec.ofNat 64 (n - 1)))) : + read_mem addr1 (write_mem_bytes n addr2 v s) = read_mem addr1 s := by + simp only [ArmState.read_mem_eq_mem_read, + Memory.write_mem_bytes_eq_mem_write_bytes] + exact Memory.read_write_bytes_different hn1 h theorem append_byte_of_extract_rest_same_cast (n : Nat) (v : BitVec ((n + 1) * 8)) (hn0 : Nat.succ 0 ≤ n) (h : (n * 8 + 8) = (n + 1) * 8) : - BitVec.cast h (zeroExtend (n * 8) (v >>> 8) ++ extractLsb' 0 8 v) = v := by + BitVec.cast h (setWidth (n * 8) (v >>> 8) ++ extractLsb' 0 8 v) = v := by apply BitVec.append_of_extract · omega done +example (s : ArmState) : + read_mem_bytes n addr s = s.mem.read_bytes n addr := by + exact Memory.State.read_mem_bytes_eq_mem_read_bytes s + @[state_simp_rules] -theorem read_mem_bytes_of_write_mem_bytes_same (hn1 : n <= 2^64) : - read_mem_bytes n addr (write_mem_bytes n addr v s) = v := by - by_cases hn0 : n = 0 - case pos => 
- subst n - unfold read_mem_bytes - simp only [of_length_zero] - case neg => -- n ≠ 0 - have hn0' : 0 < n := by omega - induction n, hn0' using Nat.le_induction generalizing addr s - case base => - simp only [read_mem_bytes, write_mem_bytes, - read_mem_of_write_mem_same, BitVec.cast_eq] - have l1 := BitVec.extractLsb'_eq v - simp only [Nat.reduceSucc, Nat.one_mul, Nat.succ_sub_succ_eq_sub, - Nat.sub_zero, Nat.reduceAdd, BitVec.cast_eq, - forall_const] at l1 - rw [l1] - have l2 := BitVec.empty_bitvector_append_left v - simp only [Nat.reduceSucc, Nat.one_mul, Nat.zero_add, - BitVec.cast_eq, forall_const] at l2 - exact l2 - case succ => - rename_i n hn n_ih - simp only [read_mem_bytes, Nat.add_eq, Nat.add_zero, write_mem_bytes] - rw [n_ih] - rw [read_mem_of_write_mem_bytes_different] - · simp only [Nat.add_eq, Nat.add_zero, read_mem_of_write_mem_same] - rw [append_byte_of_extract_rest_same_cast n v hn] - · omega - · have := mem_separate_contiguous_regions addr 0#64 (BitVec.ofNat 64 (n - 1)) - simp only [Nat.reducePow, Nat.succ_sub_succ_eq_sub, Nat.sub_zero, - BitVec.sub_zero, ofNat_lt_ofNat, Nat.reduceMod, - BitVec.add_zero] at this - apply this - simp only [Nat.reducePow] at hn1 - omega - · omega - · omega - done +theorem Memory.read_bytes_write_bytes_same (hn1 : n ≤ 2^64) : + read_bytes n addr (write_bytes n addr v mem) = v := by + induction n generalizing addr mem + case zero => + simp [read_bytes, of_length_zero] + case succ n ih => + simp only [read_bytes, write_bytes] + rw [ih (by omega)] + have h_sep : + let m := BitVec.ofNat 64 (n - 1) + mem_separate addr addr (addr + 1#64) (addr + 1#64 + m) := by + rw [← mem_separate_contiguous_regions addr 0#64 _] + · simp; rfl + · bv_omega + rw [read_write_bytes_different (by omega) h_sep, read_write_same] + apply BitVec.eq_of_getLsbD_eq + intro i + simp only [getLsbD_cast, getLsbD_append] + by_cases hi : i.val < 8 + · simp [hi] + · have h₁ : i.val - 8 < n * 8 := by omega + have h₂ : 8 + (i.val - 8) = i.val := by omega + 
simp [hi, h₁, h₂] + +@[state_simp_rules, memory_rules] +theorem read_mem_bytes_of_write_mem_bytes_same (hn1 : n ≤ 2^64) : + read_mem_bytes n addr (write_mem_bytes n addr v s) = v := by + open Memory in + rw [State.read_mem_bytes_eq_mem_read_bytes, + write_mem_bytes_eq_mem_write_bytes, + Memory.read_bytes_write_bytes_same hn1] ---------------------------------------------------------------------- -- Key theorem: read_mem_bytes_of_write_mem_bytes_different @@ -156,7 +179,7 @@ theorem write_mem_of_write_mem_commute (h : mem_separate addr2 addr2 addr1 addr1) : write_mem addr2 val2 (write_mem addr1 val1 s) = write_mem addr1 val1 (write_mem addr2 val2 s) := by - simp_all only [write_mem, ArmState.mk.injEq, and_self, and_true, true_and] + simp_all only [write_mem, ArmState.mk.injEq, BitVec.and_self, and_true, true_and] unfold write_store have := @mem_separate_starting_addresses_neq addr2 addr2 addr1 addr1 simp [h] at this @@ -321,7 +344,7 @@ private theorem mem_subset_neq_first_addr_small_second_region omega simp only [l1, Nat.lt_irrefl] at h1 · rename_i h - bv_omega + bv_omega_bench done private theorem write_mem_bytes_of_write_mem_bytes_shadow_general_n2_lt @@ -428,7 +451,7 @@ private theorem write_mem_bytes_of_write_mem_bytes_shadow_general_n2_eq case succ => rename_i n n_ih conv in write_mem_bytes (Nat.succ n) .. 
=> simp only [write_mem_bytes] - have n_ih' := @n_ih (addr1 + 1#64) val2 (zeroExtend (n * 8) (val1 >>> 8)) + have n_ih' := @n_ih (addr1 + 1#64) val2 (setWidth (n * 8) (val1 >>> 8)) (write_mem addr1 (extractLsb' 0 8 val1) s) (by omega) simp only [Nat.succ_sub_succ_eq_sub, Nat.sub_zero] at h3 @@ -540,7 +563,7 @@ theorem read_mem_bytes_of_write_mem_bytes_subset_same_first_address erw [Nat.mod_eq_of_lt h1] at hn exact hn rw [n_ih (by omega) (by omega) (by omega) _] - · rw [BitVec.extractLsb'_of_zeroExtend (v >>> 8)] + · rw [BitVec.extractLsb'_of_setWidth (v >>> 8)] · have l1 := @BitVec.append_of_extract_general ((n1_1 + 1) * 8) (n*8) 8 v simp (config := { decide := true }) only [Nat.zero_lt_succ, Nat.mul_pos_iff_of_pos_left, Nat.succ_sub_succ_eq_sub, @@ -551,7 +574,7 @@ theorem read_mem_bytes_of_write_mem_bytes_subset_same_first_address · omega · have rw_lemma2 := @read_mem_of_write_mem_bytes_same_first_address n1_1 (addr + 1#64) - (zeroExtend (n1_1 * 8) (v >>> 8)) + (setWidth (n1_1 * 8) (v >>> 8)) (write_mem addr (extractLsb' 0 8 v) s) simp only [Nat.reducePow, Nat.sub_zero, Nat.reduceAdd, BitVec.cast_eq, forall_const] at rw_lemma2 @@ -662,7 +685,7 @@ theorem read_mem_of_write_mem_bytes_subset rw [n_ih] · ext -- simp only [bv_toNat] - simp only [toNat_cast, extractLsb', toNat_zeroExtend] + simp only [toNat_cast, extractLsb', toNat_setWidth] simp only [toNat_ushiftRight] simp_all only [toNat_ofNat, toNat_ofNatLt] simp only [BitVec.sub_of_add_is_sub_sub, Nat.succ_sub_succ_eq_sub, @@ -756,7 +779,7 @@ private theorem mem_legal_lemma (h0 : 0 < n) (h1 : n < 2^64) revert h0 h1 h2 have : 2^64 = 18446744073709551616 := by decide simp_all [mem_legal] - bv_omega + bv_omega_bench private theorem addr_diff_upper_bound_lemma (h0 : 0 < n1) (h1 : n1 ≤ 2 ^ 64) (h2 : 0 < n2) (h3 : n2 < 2^64) @@ -768,7 +791,7 @@ private theorem addr_diff_upper_bound_lemma (h0 : 0 < n1) (h1 : n1 ≤ 2 ^ 64) have _ : 2^64 = 18446744073709551616 := by decide have _ : 2^64 - 1 = 18446744073709551615 := by 
decide simp_all [mem_subset, mem_legal] - bv_omega + bv_omega_bench private theorem read_mem_bytes_of_write_mem_bytes_subset_n2_lt (h0 : 0 < n1) (h1 : n1 <= 2^64) (h2 : 0 < n2) (h3 : n2 < 2^64) @@ -850,7 +873,7 @@ theorem entire_memory_subset_of_only_itself have : 2^64 = 18446744073709551616 := by decide unfold my_pow at * simp_all [mem_subset, BitVec.add_sub_self_left_64] - bv_omega + bv_omega_bench theorem entire_memory_subset_legal_regions_eq_addr (h1 : mem_subset addr2 (addr2 + (BitVec.ofNat 64 (my_pow 2 64 - 1))) addr1 (addr1 + (BitVec.ofNat 64 (my_pow 2 64 - 1)))) @@ -860,7 +883,7 @@ theorem entire_memory_subset_legal_regions_eq_addr have : 2^64-1 = 18446744073709551615 := by decide unfold my_pow at * simp_all [mem_subset, mem_legal] - bv_omega + bv_omega_bench private theorem read_mem_bytes_of_write_mem_bytes_subset_n2_eq_alt (h0 : 0 < n1) (h1 : n1 <= my_pow 2 64) (h2 : 0 < n2) (h3 : n2 = my_pow 2 64) @@ -921,7 +944,7 @@ theorem leftshift_n_or_rightshift_n (n x y : Nat) (h : y < 2^n) : Nat.le_add_right, Bool.true_and, Bool.or_false] private theorem write_mem_bytes_irrelevant_helper (h : n * 8 + 8 = (n + 1) * 8) : - (zeroExtend (n * 8) + (setWidth (n * 8) ((BitVec.cast h (read_mem_bytes n (addr + 1#64) s ++ read_mem addr s)) >>> 8)) = read_mem_bytes n (addr + 1#64) s := by ext diff --git a/Arm/Memory/Separate.lean b/Arm/Memory/Separate.lean index a05cc413..784a6c55 100644 --- a/Arm/Memory/Separate.lean +++ b/Arm/Memory/Separate.lean @@ -5,7 +5,7 @@ Author(s): Shilpi Goel, Siddharth Bhat -/ import Arm.State import Arm.BitVec - +import Tactics.BvOmegaBench section Separate open BitVec @@ -137,7 +137,7 @@ theorem lt_or_gt_of_mem_separate_of_mem_legal_of_mem_legal (h : mem_separate a1 · by_cases h₆ : a1.toNat > b2.toNat · simp only [BitVec.val_bitvec_lt, gt_iff_lt, h₆, or_true] · exfalso - bv_omega + bv_omega_bench /-- Given two legal memory regions `[a1, a2]` and `[b1, b2]`, @@ -153,7 +153,7 @@ theorem mem_separate_of_lt_or_gt_of_mem_legal_of_mem_legal (h : a2 < 
b1 ∨ a1 > unfold mem_legal at ha hb simp only [decide_eq_true_eq] at ha hb rw [BitVec.le_def] at ha hb - bv_omega + bv_omega_bench /-- Given two legal memory regions `[a1, a2]` and `[b1, b2]`, @@ -183,7 +183,7 @@ theorem add_lt_of_mem_legal_of_lt by_cases hadd : a.toNat + n.toNat < 2^64 · assumption · exfalso - bv_omega + bv_omega_bench /-- If we express a memory region as `[a..(a+n)]` for `(n : Nat)`, @@ -274,7 +274,7 @@ theorem mem_legal_of_mem_legal' (h : mem_legal' a n) : simp only [mem_legal', mem_legal, BitVec.le_def] at h ⊢ rw [BitVec.toNat_add_eq_toNat_add_toNat] simp only [BitVec.toNat_ofNat, Nat.reducePow, Nat.le_add_right, decide_True] - bv_omega + bv_omega_bench /-- Legal in the new sense implies legal in the old sense. @@ -283,7 +283,7 @@ Note that the subtraction could also have been written as `(b - a).toNat + 1` theorem mem_legal'_of_mem_legal (h: mem_legal a b) : mem_legal' a (b.toNat - a.toNat + 1) := by simp only [mem_legal, decide_eq_true_eq] at h rw [mem_legal'] - bv_omega + bv_omega_bench def mem_legal'_of_mem_legal'_of_lt (h : mem_legal' a n) (m : Nat) (hm : m ≤ n) : mem_legal' a m := by @@ -304,7 +304,7 @@ theorem mem_legal_iff_mem_legal' : mem_legal a b ↔ · intros h simp only [mem_legal'] at h simp only [mem_legal, BitVec.le_def, decide_eq_true_eq] - bv_omega + bv_omega_bench /-- `mem_separate' a an b bn` asserts that two memory regions [a..an) and [b..bn) are separate. @@ -367,26 +367,32 @@ theorem mem_separate'_comm (h : mem_separate' a an b bn) : omega /-# -This is a theorem we ought to prove, which establishes the equivalence -between the old and new defintions of 'mem_separate'. -However, the proof is finicky, and so we leave it commented for now. +This theorem establishes the equivalence between the old and new definitions of 'mem_separate'. 
-/ -/- -theorem mem_separate_of_mem_separate' (h : mem_separate' a an b bn) - (ha' : a' = a + (BitVec.ofNat w₁ (an - 1) ) (hb' : b' = b + (BitVec.ofNat w₁ (bn - 1))) - (hlegala : mem_legal a an) (hlegalb: mem_legal b bn) : - mem_separate a a' b b' := by - simp [mem_separate] - simp [mem_overlap] +theorem mem_separate_of_mem_separate' (a b : BitVec 64) + (an bn : Nat) + (han : an > 0) (hbn : bn > 0) + (h : mem_separate' a an b bn) : + mem_separate a (a + an - 1) b (b + bn - 1) := by + simp only [mem_separate, BitVec.natCast_eq_ofNat, BitVec.ofNat_eq_ofNat, + Bool.not_eq_eq_eq_not, Bool.not_true] + simp only [mem_overlap, Bool.or_eq_false_iff, decide_eq_false_iff_not] obtain ⟨ha, hb, hsep⟩ := h - simp [mem_legal'] at ha hb - subst ha' - subst hb' - apply Classical.byContradiction - intro hcontra - · sorry - · sorry --/ + simp only [mem_legal', Nat.reducePow] at ha hb + + obtain ⟨an, rfl⟩ : ∃ an' : BitVec 64, an = an'.toNat := + have han_lt : an < 2^64 := by omega + ⟨BitVec.ofNatLt _ han_lt, rfl⟩ + obtain ⟨bn, rfl⟩ : ∃ bn' : BitVec 64, bn = bn'.toNat := + have han_lt : bn < 2^64 := by omega + ⟨BitVec.ofNatLt _ han_lt, rfl⟩ + change an > 0 at han + change bn > 0 at hbn + simp only [BitVec.ofNat_toNat, BitVec.setWidth_eq, BitVec.not_le] + apply And.intro + · bv_omega + · rw [show ∀ {x y}, b + x - y - b = x - y by bv_omega] + bv_omega /-- `mem_subset' a an b bn` witnesses that `[a..a+an)` is a subset of `[b..b+bn)`. In prose, we may notate this as `[a..an) ≤ [b..bn)`. 
@@ -449,7 +455,7 @@ theorem mem_subset'_refl (h : mem_legal' a an) : mem_subset' a an a an where theorem mem_separate'.symm (h : mem_separate' addr₁ n₁ addr₂ n₂) : mem_separate' addr₂ n₂ addr₁ n₁ := by have := h.omega_def apply mem_separate'.of_omega - bv_omega + bv_omega_bench theorem mem_separate'.of_subset'_of_subset' (h : mem_separate' addr₁ n₁ addr₂ n₂) @@ -460,7 +466,7 @@ theorem mem_separate'.of_subset'_of_subset' have := h₁.omega_def have := h₂.omega_def apply mem_separate'.of_omega - bv_omega + bv_omega_bench /-- If `[a'..a'+an')` begins at least where `[a..an)` begins, @@ -522,8 +528,8 @@ theorem mem_subset_of_mem_subset' (h : mem_subset' a an b bn) (han : an > 0) (hb simp only [bitvec_rules, minimal_theory] by_cases hb : bn = 2^64 · left - bv_omega - · bv_omega + bv_omega_bench + · bv_omega_bench /- value of read_mem_bytes when separate from the write. -/ theorem Memory.read_bytes_write_bytes_eq_read_bytes_of_mem_separate' @@ -541,7 +547,7 @@ theorem Memory.read_bytes_write_bytes_eq_read_bytes_of_mem_separate' simp only [decide_True, ite_eq_left_iff, Bool.true_and] intros h₁ intros h₂ - bv_omega + bv_omega_bench /- value of `read_mem_bytes'` when subset of the write. 
-/ theorem Memory.read_bytes_write_bytes_eq_of_mem_subset' @@ -619,17 +625,17 @@ theorem Memory.read_bytes_eq_extractLsBytes_sub_of_mem_subset' have ⟨h1, h2, h3, h4⟩ := hsubset.omega_def apply BitVec.eq_of_extractLsByte_eq intros i - rw [extractLsByte_read_bytes (by bv_omega)] + rw [extractLsByte_read_bytes (by bv_omega_bench)] rw [BitVec.extractLsByte_extractLsBytes] by_cases h : i < an · simp only [h, ↓reduceIte] apply BitVec.eq_of_getLsbD_eq intros j rw [← hread] - rw [extractLsByte_read_bytes (by bv_omega)] - simp only [show a.toNat - b.toNat + i < bn by bv_omega, if_true] + rw [extractLsByte_read_bytes (by bv_omega_bench)] + simp only [show a.toNat - b.toNat + i < bn by bv_omega_bench, if_true] congr 2 - bv_omega + bv_omega_bench · simp only [h, ↓reduceIte] /-- A region of memory, given by (base pointer, length) -/ diff --git a/Arm/Memory/SeparateAutomation.lean b/Arm/Memory/SeparateAutomation.lean index 29ac674c..e0149a8b 100644 --- a/Arm/Memory/SeparateAutomation.lean +++ b/Arm/Memory/SeparateAutomation.lean @@ -20,10 +20,10 @@ import Lean.Meta.Tactic.Rewrites import Lean.Elab.Tactic.Conv import Lean.Elab.Tactic.Conv.Basic import Tactics.Simp +import Tactics.BvOmegaBench open Lean Meta Elab Tactic - /-! ## Memory Separation Automation ##### A Note on Notation @@ -122,9 +122,18 @@ structure SimpMemConfig where structure Context where /-- User configurable options for `simp_mem`. -/ cfg : SimpMemConfig - -def Context.init (cfg : SimpMemConfig) : Context where - cfg := cfg + /-- Cache of `bv_toNat` simp context. -/ + bvToNatSimpCtx : Simp.Context + /-- Cache of `bv_toNat` simprocs. 
-/ + bvToNatSimprocs : Array Simp.Simprocs + +def Context.init (cfg : SimpMemConfig) : MetaM Context := do + let (bvToNatSimpCtx, bvToNatSimprocs) ← + LNSymSimpContext + (config := {failIfUnchanged := false}) + (simp_attrs := #[`bv_toNat]) + (useDefaultSimprocs := false) + return {cfg, bvToNatSimpCtx, bvToNatSimprocs} /-- a Proof of `e : α`, where `α` is a type such as `MemLegalProp`. -/ structure Proof (α : Type) (e : α) where @@ -305,7 +314,7 @@ we can have some kind of funny situation where both LHS and RHS are ReadBytes. For example, `mem1.read base1 n = mem2.read base2 n`. In such a scenario, we should record both reads. -/ -def ReadBytesEqProof.ofExpr? (eval : Expr) (etype : Expr) : Array ReadBytesEqProof := Id.run do +def ReadBytesEqProof.ofExpr? (eval : Expr) (etype : Expr) : MetaM (Array ReadBytesEqProof) := do let mut out := #[] if let .some ⟨_ty, lhs, rhs⟩ := etype.eq? then do let lhs := lhs @@ -314,7 +323,7 @@ def ReadBytesEqProof.ofExpr? (eval : Expr) (etype : Expr) : Array ReadBytesEqPr out := out.push { val := rhs, read := read, h := eval } if let .some read := ReadBytesExpr.ofExpr? rhs then - out:= out.push { val := lhs, read := read, h := eval } + out:= out.push { val := lhs, read := read, h := ← mkEqSymm eval } return out inductive Hypothesis @@ -349,8 +358,8 @@ def State.init (cfg : SimpMemConfig) : State := abbrev SimpMemM := StateRefT State (ReaderT Context TacticM) -def SimpMemM.run (m : SimpMemM α) (cfg : SimpMemConfig) : TacticM α := - m.run' (State.init cfg) |>.run (Context.init cfg) +def SimpMemM.run (m : SimpMemM α) (cfg : SimpMemConfig) : TacticM α := do + m.run' (State.init cfg) |>.run (← Context.init cfg) /-- Add a `Hypothesis` to our hypothesis cache. 
-/ def SimpMemM.addHypothesis (h : Hypothesis) : SimpMemM Unit := @@ -369,6 +378,14 @@ def SimpMemM.withTraceNode (header : MessageData) (k : SimpMemM α) (traceClass : Name := `simp_mem.info) : SimpMemM α := Lean.withTraceNode traceClass (fun _ => return header) k (collapsed := collapsed) +/-- Get the cached simp context for bv_toNat -/ +def SimpMemM.getBvToNatSimpCtx : SimpMemM Simp.Context := do + return (← read).bvToNatSimpCtx + +/-- Get the cached simpprocs for bv_toNat -/ +def SimpMemM.getBvToNatSimprocs : SimpMemM (Array Simp.Simprocs) := do + return (← read).bvToNatSimprocs + def processingEmoji : String := "⚙️" def consumeRewriteFuel : SimpMemM Unit := @@ -428,10 +445,18 @@ def simpAndIntroDef (name : String) (hdefVal : Expr) : SimpMemM FVarId := do /-- SimpMemM's omega invoker -/ def omega : SimpMemM Unit := do - -- https://leanprover.zulipchat.com/#narrow/stream/326056-ICERM22-after-party/topic/Regression.20tests/near/290131280 - -- @bollu: TODO: understand what precisely we are recovering from. - withoutRecover do - evalTactic (← `(tactic| bv_omega)) + SimpMemM.withMainContext do + -- https://leanprover.zulipchat.com/#narrow/stream/326056-ICERM22-after-party/topic/Regression.20tests/near/290131280 + let bvToNatSimpCtx ← SimpMemM.getBvToNatSimpCtx + let bvToNatSimprocs ← SimpMemM.getBvToNatSimprocs + let .some goal ← LNSymSimpAtStar (← getMainGoal) bvToNatSimpCtx bvToNatSimprocs + | trace[simp_mem.info] "simp [bv_toNat] at * managed to close goal." + replaceMainGoal [goal] + SimpMemM.withTraceNode m!"goal post `bv_toNat` reductions (Note: can be large)" do + trace[simp_mem.info] "{goal}" + -- @bollu: TODO: understand what precisely we are recovering from. + withoutRecover do + evalTactic (← `(tactic| bv_omega_bench)) section Hypotheses @@ -558,7 +583,7 @@ def hypothesisOfExpr (h : Expr) (hyps : Array Hypothesis) : MetaM (Array Hypothe return hyps else let mut hyps := hyps - for eqProof in ReadBytesEqProof.ofExpr? 
h ht do + for eqProof in ← ReadBytesEqProof.ofExpr? h ht do let proof : Hypothesis := .read_eq eqProof hyps := hyps.push proof return hyps @@ -787,8 +812,6 @@ def proveWithOmega? {α : Type} [ToMessageData α] [OmegaReducible α] (e : α) SimpMemM.withMainContext do let _ ← Hypothesis.addOmegaFactsOfHyps hyps.toList #[] trace[simp_mem.info] m!"Executing `omega` to close {e}" - SimpMemM.withTraceNode m!"goal (Note: can be large)" do - trace[simp_mem.info] "{← getMainGoal}" omega trace[simp_mem.info] "{checkEmoji} `omega` succeeded." return (.some <| Proof.mk (← instantiateMVars factProof)) diff --git a/Arm/Memory/SeparateProofs.lean b/Arm/Memory/SeparateProofs.lean index c9439cbb..ab1e52fd 100644 --- a/Arm/Memory/SeparateProofs.lean +++ b/Arm/Memory/SeparateProofs.lean @@ -6,6 +6,7 @@ Author(s): Shilpi Goel import Arm.State import Arm.Memory.Separate import Std.Tactic.BVDecide +import Tactics.BvOmegaBench ---------------------------------------------------------------------- @@ -14,6 +15,7 @@ section MemoryProofs open BitVec set_option sat.timeout 60 +set_option bv.ac_nf false ---------------------------------------------------------------------- ---- Some helpful bitvector lemmas ---- @@ -30,101 +32,101 @@ theorem n_minus_1_lt_2_64_1 (n : Nat) -- (FIXME) Prove for all bitvector widths. theorem BitVec.add_sub_self_left_64 (a m : BitVec 64) : a + m - a = m := by - bv_omega + bv_omega_bench -- (FIXME) Prove for all bitvector widths. theorem BitVec.add_sub_self_right_64 (a m : BitVec 64) : a + m - m = a := by - bv_omega + bv_omega_bench -- (FIXME) Prove for all bitvector widths. theorem BitVec.add_sub_add_left (a m n : BitVec 64) : a + m - (a + n) = m - n := by - bv_omega + bv_omega_bench -- (FIXME) Prove for all bitvector widths, using general assoc/comm -- BitVec lemmas. 
theorem BitVec.sub_of_add_is_sub_sub (a b c : BitVec 64) : (a - (b + c)) = a - b - c := by - bv_omega + bv_omega_bench -- (FIXME) Prove for all bitvector widths, using general assoc/comm -- BitVec lemmas. theorem BitVec.add_of_sub_sub_of_add (a b c : BitVec 64) : (a + b - c) = a - c + b := by - bv_omega + bv_omega_bench theorem nat_bitvec_sub1 (x y : BitVec 64) (_h : y.toNat <= x.toNat) : (x - y).toNat = (x.toNat - y.toNat) % 2^64 := by - bv_omega + bv_omega_bench theorem nat_bitvec_sub2 (x y : Nat) (h : y <= x) (xub : x < 2^64) : BitVec.ofNat 64 (x - y) = (BitVec.ofNat 64 x) - (BitVec.ofNat 64 y) := by - bv_omega + bv_omega_bench theorem addr_add_one_add_m_sub_one (n : Nat) (addr : BitVec 64) (h_lb : Nat.succ 0 ≤ n) (h_ub : n + 1 ≤ 2 ^ 64) : (addr + 1#64 + (BitVec.ofNat 64 (n - 1))) = addr + (BitVec.ofNat 64 n) := by - bv_omega + bv_omega_bench ---------------------------------------------------------------------- ---- mem_subset ---- theorem mem_subset_refl : mem_subset a1 a2 a1 a2 := by simp [mem_subset] - bv_check "lrat_files/SeparateProofs.lean-mem_subset_refl-77-2.lrat" + bv_check "lrat_files/SeparateProofs.lean-mem_subset_refl-78-2.lrat" theorem mem_subsets_overlap (h : mem_subset a1 a2 b1 b2) : mem_overlap a1 a2 b1 b2 := by revert h simp [mem_subset, mem_overlap] - bv_check "lrat_files/SeparateProofs.lean-mem_subsets_overlap-83-2.lrat" + bv_check "lrat_files/SeparateProofs.lean-mem_subsets_overlap-84-2.lrat" theorem mem_subset_eq : mem_subset a a b b = (a = b) := by simp [mem_subset] - bv_check "lrat_files/SeparateProofs.lean-mem_subset_eq-87-2.lrat" + bv_check "lrat_files/SeparateProofs.lean-mem_subset_eq-88-2.lrat" theorem mem_subset_first_address (h : mem_subset a b c d) : mem_subset a a c d := by revert h simp_all [mem_subset] - bv_check "lrat_files/SeparateProofs.lean-mem_subset_first_address-93-2.lrat" + bv_check "lrat_files/SeparateProofs.lean-mem_subset_first_address-94-2.lrat" theorem mem_subset_one_addr_neq (h1 : a ≠ b1) (h : mem_subset a a b1 b2) 
: mem_subset a a (b1 + 1#64) b2 := by revert h simp_all [mem_subset] - bv_check "lrat_files/SeparateProofs.lean-mem_subset_one_addr_neq-100-2.lrat" + bv_check "lrat_files/SeparateProofs.lean-mem_subset_one_addr_neq-101-2.lrat" theorem mem_subset_same_address_different_sizes (h : mem_subset addr (addr + n1) addr (addr + n2)) : n1 <= n2 := by revert h simp [mem_subset] - bv_check "lrat_files/SeparateProofs.lean-mem_subset_same_address_different_sizes-107-2.lrat" + bv_check "lrat_files/SeparateProofs.lean-mem_subset_same_address_different_sizes-108-2.lrat" theorem first_address_is_subset_of_region : mem_subset a a a (a + n) := by simp [mem_subset] - bv_check "lrat_files/SeparateProofs.lean-first_address_is_subset_of_region-112-2.lrat" + bv_check "lrat_files/SeparateProofs.lean-first_address_is_subset_of_region-113-2.lrat" private theorem first_address_add_one_is_subset_of_region_helper (n addr : BitVec 64) (_h_lb : 0#64 < n) : addr + n - addr = 18446744073709551615#64 ∨ - addr + n - addr ≤ addr + n - addr ∧ addr + 1#64 - addr ≤ addr + n - addr := by + addr + 1#64 - addr ≤ addr + n - addr := by bv_check - "lrat_files/SeparateProofs.lean-_private.Arm.SeparateProofs.0.first_address_add_one_is_subset_of_region_helper-118-2.lrat" + "lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.first_address_add_one_is_subset_of_region_helper-119-2.lrat" theorem first_address_add_one_is_subset_of_region (n : Nat) (addr : BitVec 64) (_h_lb : 0 < n) (h_ub : n < 2 ^ 64) : mem_subset (addr + 1#64) (addr + (BitVec.ofNat 64 n)) addr (addr + (BitVec.ofNat 64 n)) := by simp [mem_subset] apply first_address_add_one_is_subset_of_region_helper - bv_omega + bv_omega_bench private theorem first_addresses_add_one_is_subset_of_region_general_helper (n m addr1 addr2 : BitVec 64) (h0 : 0#64 < m) : @@ -133,7 +135,7 @@ private theorem first_addresses_add_one_is_subset_of_region_general_helper addr2 + n - addr2 = 18446744073709551615#64 ∨ addr1 + m - addr2 ≤ addr2 + n - addr2 ∧ addr1 + 
1#64 - addr2 ≤ addr1 + m - addr2 := by bv_check - "lrat_files/SeparateProofs.lean-_private.Arm.SeparateProofs.0.first_addresses_add_one_is_subset_of_region_general_helper-134-4.lrat" + "lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.first_addresses_add_one_is_subset_of_region_general_helper-135-4.lrat" theorem first_addresses_add_one_is_subset_of_region_general (h0 : 0 < m) (h1 : m < 2 ^ 64) (h2 : n < 2 ^ 64) @@ -146,13 +148,13 @@ theorem first_addresses_add_one_is_subset_of_region_general revert h3 simp [mem_subset] apply first_addresses_add_one_is_subset_of_region_general_helper - bv_omega + bv_omega_bench private theorem first_addresses_add_one_preserves_subset_same_addr_helper (h1l : 0#64 < m) : - m - 1#64 ≤ (BitVec.ofNat 64 (2^64 - 1)) - 1#64 := by + m - 1#64 ≤ 18446744073709551614#64 := by revert h1l bv_check - "lrat_files/SeparateProofs.lean-_private.Arm.SeparateProofs.0.first_addresses_add_one_preserves_subset_same_addr_helper-153-2.lrat" + "lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.first_addresses_add_one_preserves_subset_same_addr_helper-154-2.lrat" theorem first_addresses_add_one_preserves_subset_same_addr (h1l : 0 < m) (h1u : m < 2 ^ 64) @@ -200,34 +202,34 @@ theorem first_addresses_add_one_preserves_subset_same_addr private theorem mem_subset_one_addr_region_lemma_helper (n1 addr1 addr2 : BitVec 64) : addr1 + n1 - 1#64 - addr2 ≤ 0#64 ∧ addr1 - addr2 ≤ addr1 + n1 - 1#64 - addr2 → - n1 = 1 ∧ addr1 = addr2 := by + n1 = 1#64 ∧ addr1 = addr2 := by bv_check - "lrat_files/SeparateProofs.lean-_private.Arm.SeparateProofs.0.mem_subset_one_addr_region_lemma_helper-203-2.lrat" + "lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.mem_subset_one_addr_region_lemma_helper-204-2.lrat" theorem mem_subset_one_addr_region_lemma (addr1 addr2 : BitVec 64) (h : n1 <= 2 ^ 64) : mem_subset addr1 (addr1 + (BitVec.ofNat 64 n1) - 1#64) addr2 addr2 → (n1 = 1) ∧ (addr1 = addr2) := by simp [mem_subset] have h0 := 
mem_subset_one_addr_region_lemma_helper (BitVec.ofNat 64 n1) addr1 addr2 - have h1 : 0#64 ≠ 18446744073709551615#64 := by bv_omega + have h1 : 0#64 ≠ 18446744073709551615#64 := by bv_omega_bench simp_all only [ofNat_eq_ofNat, and_imp, ne_eq, false_or] - have h2 : (BitVec.ofNat 64 n1) = 1#64 → n1 = 1 := by bv_omega + have h2 : (BitVec.ofNat 64 n1) = 1#64 → n1 = 1 := by bv_omega_bench intro h₀ h₁ - simp_all only [true_implies, BitVec.sub_self, and_self] + simp_all only [true_implies, BitVec.sub_self, _root_.and_self] theorem mem_subset_one_addr_region_lemma_alt (addr1 addr2 : BitVec 64) (h : n1 < 2 ^ 64) : mem_subset addr1 (addr1 + (BitVec.ofNat 64 n1)) addr2 addr2 → (n1 = 0) ∧ (addr1 = addr2) := by simp only [mem_subset, bitvec_rules, minimal_theory] - have h1 : 0#64 ≠ 18446744073709551615#64 := by bv_omega + have h1 : 0#64 ≠ 18446744073709551615#64 := by bv_omega_bench simp_all only [ne_eq, false_or, and_imp] - bv_omega + bv_omega_bench theorem mem_subset_same_region_lemma (h0 : 0 < n) (h1 : Nat.succ n ≤ 2 ^ 64) : mem_subset (addr + 1#64) (addr + 1#64 + (BitVec.ofNat 64 (n - 1))) addr (addr + (BitVec.ofNat 64 (Nat.succ n - 1))) := by simp [mem_subset] - bv_omega + bv_omega_bench done theorem mem_subset_trans @@ -270,18 +272,18 @@ theorem mem_separate_contiguous_regions (a k n : BitVec 64) bv_check "lrat_files/SeparateProofs.lean-mem_separate_contiguous_regions-270-2.lrat" private theorem mem_separate_contiguous_regions_one_address_helper (n' addr : BitVec 64) - (h : n' < 0xffffffffffffffff) : -(¬addr + 1#64 - addr ≤ 0#64 ∧ ¬addr + 1#64 + n' - addr ≤ 0#64) ∧ - ¬addr - (addr + 1#64) ≤ addr + 1#64 + n' - (addr + 1#64) := by + (h : n' < 0xffffffffffffffff) : + (0#64 < addr + 1#64 - addr ∧ 0#64 < addr + 1#64 + n' - addr) + ∧ addr + 1#64 + n' - (addr + 1#64) < addr - (addr + 1#64) := by bv_check - "lrat_files/SeparateProofs.lean-_private.Arm.SeparateProofs.0.mem_separate_contiguous_regions_one_address_helper-276-2.lrat" + 
"lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.mem_separate_contiguous_regions_one_address_helper-276-2.lrat" -- TODO: Perhaps use/modify mem_separate_contiguous_regions instead? theorem mem_separate_contiguous_regions_one_address (addr : BitVec 64) (h : n' < 2 ^ 64) : mem_separate addr addr (addr + 1#64) (addr + 1#64 + (BitVec.ofNat 64 (n' - 1))) := by simp [mem_separate, mem_overlap] have h' : (BitVec.ofNat 64 (n' - 1)) < 0xffffffffffffffff#64 := by - bv_omega + bv_omega_bench apply mem_separate_contiguous_regions_one_address_helper assumption @@ -293,7 +295,7 @@ theorem mem_separate_for_subset2 mem_separate a1 a2 c1 c2 := by revert h1 h2 simp [mem_subset, mem_separate, mem_overlap] - bv_check "lrat_files/SeparateProofs.lean-mem_separate_for_subset2-296-2.lrat" + bv_check "lrat_files/SeparateProofs.lean-mem_separate_for_subset2-295-2.lrat" theorem mem_separate_for_subset1 (h1 : mem_separate a1 a2 b1 b2) (h2 : mem_subset c1 c2 a1 a2) : diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.first_address_add_one_is_subset_of_region_helper-119-2.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.first_address_add_one_is_subset_of_region_helper-119-2.lrat new file mode 100644 index 00000000..149dc7e6 Binary files /dev/null and b/Arm/Memory/lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.first_address_add_one_is_subset_of_region_helper-119-2.lrat differ diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.first_addresses_add_one_is_subset_of_region_general_helper-135-4.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.first_addresses_add_one_is_subset_of_region_general_helper-135-4.lrat new file mode 100644 index 00000000..9b933ed6 Binary files /dev/null and 
b/Arm/Memory/lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.first_addresses_add_one_is_subset_of_region_general_helper-135-4.lrat differ diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.first_addresses_add_one_preserves_subset_same_addr_helper-154-2.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.first_addresses_add_one_preserves_subset_same_addr_helper-154-2.lrat new file mode 100644 index 00000000..f5c74c32 Binary files /dev/null and b/Arm/Memory/lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.first_addresses_add_one_preserves_subset_same_addr_helper-154-2.lrat differ diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.mem_separate_contiguous_regions_one_address_helper-276-2.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.mem_separate_contiguous_regions_one_address_helper-276-2.lrat new file mode 100644 index 00000000..58514b79 Binary files /dev/null and b/Arm/Memory/lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.mem_separate_contiguous_regions_one_address_helper-276-2.lrat differ diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.mem_separate_contiguous_regions_one_address_helper-277-2.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.mem_separate_contiguous_regions_one_address_helper-277-2.lrat new file mode 100644 index 00000000..58514b79 Binary files /dev/null and b/Arm/Memory/lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.mem_separate_contiguous_regions_one_address_helper-277-2.lrat differ diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.mem_subset_one_addr_region_lemma_helper-204-2.lrat 
b/Arm/Memory/lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.mem_subset_one_addr_region_lemma_helper-204-2.lrat new file mode 100644 index 00000000..ea3efa11 Binary files /dev/null and b/Arm/Memory/lrat_files/SeparateProofs.lean-_private.Arm.Memory.SeparateProofs.0.mem_subset_one_addr_region_lemma_helper-204-2.lrat differ diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-first_address_is_subset_of_region-113-2.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-first_address_is_subset_of_region-113-2.lrat new file mode 100644 index 00000000..3b8d1276 Binary files /dev/null and b/Arm/Memory/lrat_files/SeparateProofs.lean-first_address_is_subset_of_region-113-2.lrat differ diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_commutative-247-2.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_commutative-247-2.lrat index 650d7ae9..06f5dfff 100644 Binary files a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_commutative-247-2.lrat and b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_commutative-247-2.lrat differ diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_contiguous_regions-270-2.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_contiguous_regions-270-2.lrat index 62bc21ea..e0894c93 100644 Binary files a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_contiguous_regions-270-2.lrat and b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_contiguous_regions-270-2.lrat differ diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_first_address_separate-263-2.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_first_address_separate-263-2.lrat index 595fcd37..40570881 100644 Binary files a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_first_address_separate-263-2.lrat and b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_first_address_separate-263-2.lrat differ diff --git 
a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_for_subset2-295-2.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_for_subset2-295-2.lrat new file mode 100644 index 00000000..f62822ae Binary files /dev/null and b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_for_subset2-295-2.lrat differ diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_for_subset2-296-2.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_for_subset2-296-2.lrat index 90f64eb7..a0d22380 100644 Binary files a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_for_subset2-296-2.lrat and b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_for_subset2-296-2.lrat differ diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_neq-257-2.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_neq-257-2.lrat index 9856c249..0d3cffc1 100644 Binary files a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_neq-257-2.lrat and b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_neq-257-2.lrat differ diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_starting_addresses_neq-252-2.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_starting_addresses_neq-252-2.lrat index 444388fd..4751e957 100644 Binary files a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_starting_addresses_neq-252-2.lrat and b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_separate_starting_addresses_neq-252-2.lrat differ diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subset_eq-88-2.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subset_eq-88-2.lrat new file mode 100644 index 00000000..643b7172 Binary files /dev/null and b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subset_eq-88-2.lrat differ diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subset_first_address-94-2.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subset_first_address-94-2.lrat new file mode 100644 index 
00000000..bea25d28 Binary files /dev/null and b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subset_first_address-94-2.lrat differ diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subset_one_addr_neq-101-2.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subset_one_addr_neq-101-2.lrat new file mode 100644 index 00000000..14232b67 Binary files /dev/null and b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subset_one_addr_neq-101-2.lrat differ diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subset_refl-78-2.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subset_refl-78-2.lrat new file mode 100644 index 00000000..ee2ac623 Binary files /dev/null and b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subset_refl-78-2.lrat differ diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subset_same_address_different_sizes-108-2.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subset_same_address_different_sizes-108-2.lrat new file mode 100644 index 00000000..1e9ebd17 Binary files /dev/null and b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subset_same_address_different_sizes-108-2.lrat differ diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subset_trans-239-2.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subset_trans-239-2.lrat index fc6f28bf..d7227cbc 100644 Binary files a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subset_trans-239-2.lrat and b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subset_trans-239-2.lrat differ diff --git a/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subsets_overlap-84-2.lrat b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subsets_overlap-84-2.lrat new file mode 100644 index 00000000..c94adf7e Binary files /dev/null and b/Arm/Memory/lrat_files/SeparateProofs.lean-mem_subsets_overlap-84-2.lrat differ diff --git a/Arm/State.lean b/Arm/State.lean index 6b559099..4f264183 100644 --- a/Arm/State.lean +++ b/Arm/State.lean @@ -343,7 +343,7 @@ These mnemonics make it much easier to read and write 
theorems about assembly pr @[state_simp_rules] abbrev ArmState.x0 (s : ArmState) : BitVec 64 := r (StateField.GPR 0) s @[state_simp_rules] abbrev ArmState.w0 (s : ArmState) : BitVec 32 := - (r (StateField.GPR 0) s).zeroExtend 32 + (r (StateField.GPR 0) s).setWidth 32 @[state_simp_rules] abbrev ArmState.x1 (s : ArmState) : BitVec 64 := r (StateField.GPR 1) s @@ -395,18 +395,18 @@ def w (fld : StateField) (v : (state_value fld)) (s : ArmState) : ArmState := | ERR => write_base_error v s @[state_simp_rules] -theorem zeroExtend_eq_of_r_gpr : - zeroExtend 64 (r (StateField.GPR i) s) = (r (StateField.GPR i) s) := by +theorem setWidth_eq_of_r_gpr : + setWidth 64 (r (StateField.GPR i) s) = (r (StateField.GPR i) s) := by simp only [bitvec_rules] @[state_simp_rules] -theorem zeroExtend_eq_of_r_sfp : - zeroExtend 128 (r (StateField.SFP i) s) = (r (StateField.SFP i) s) := by +theorem setWidth_eq_of_r_sfp : + setWidth 128 (r (StateField.SFP i) s) = (r (StateField.SFP i) s) := by simp only [bitvec_rules] @[state_simp_rules] -theorem zeroExtend_eq_of_r_pc : - zeroExtend 64 (r (StateField.PC) s) = (r (StateField.PC) s) := by +theorem setWidth_eq_of_r_pc : + setWidth 64 (r (StateField.PC) s) = (r (StateField.PC) s) := by simp only [bitvec_rules] @[state_simp_rules] @@ -479,7 +479,7 @@ theorem w_program : (w fld v s).program = s.program := by def read_gpr (width : Nat) (idx : BitVec 5) (s : ArmState) : BitVec width := let val := r (StateField.GPR idx) s - BitVec.zeroExtend width val + BitVec.setWidth width val -- Use read_gpr_zr when register 31 is mapped to the zero register ZR, -- instead of the default (Stack pointer). 
@@ -497,7 +497,7 @@ def read_gpr_zr (width : Nat) (idx : BitVec 5) (s : ArmState) @[state_simp_rules] def write_gpr (width : Nat) (idx : BitVec 5) (val : BitVec width) (s : ArmState) : ArmState := - let val := BitVec.zeroExtend 64 val + let val := BitVec.setWidth 64 val w (StateField.GPR idx) val s -- Use write_gpr_zr when register 31 is mapped to the zero register @@ -514,19 +514,19 @@ def write_gpr_zr (n : Nat) (idx : BitVec 5) (val : BitVec n) (s : ArmState) -- (see simp?). example (n : Nat) (idx : BitVec 5) (val : BitVec n) (s : ArmState) : read_gpr n idx (write_gpr n idx val s) = - BitVec.zeroExtend n (BitVec.zeroExtend 64 val) := by + BitVec.setWidth n (BitVec.setWidth 64 val) := by simp [state_simp_rules, minimal_theory] @[state_simp_rules] def read_sfp (width : Nat) (idx : BitVec 5) (s : ArmState) : BitVec width := let val := r (StateField.SFP idx) s - BitVec.zeroExtend width val + BitVec.setWidth width val -- Write `val` to the `idx`-th SFP, zeroing the upper bits, if -- applicable. 
@[state_simp_rules] def write_sfp (n : Nat) (idx : BitVec 5) (val : BitVec n) (s : ArmState) : ArmState := - let val := BitVec.zeroExtend 128 val + let val := BitVec.setWidth 128 val w (StateField.SFP idx) val s @[state_simp_rules] @@ -679,7 +679,7 @@ def write_mem_bytes (n : Nat) (addr : BitVec 64) (val : BitVec (n * 8)) (s : Arm | n' + 1 => let byte := BitVec.extractLsb' 0 8 val let s := write_mem addr byte s - let val_rest := BitVec.zeroExtend (n' * 8) (val >>> 8) + let val_rest := BitVec.setWidth (n' * 8) (val >>> 8) write_mem_bytes n' (addr + 1#64) val_rest s @@ -747,6 +747,12 @@ theorem read_mem_bytes_w_of_read_mem_eq = read_mem_bytes n₁ addr₁ s₂ := by simp only [read_mem_bytes_of_w, h] +@[state_simp_rules] +theorem mem_w_of_mem_eq {s₁ s₂ : ArmState} (h : s₁.mem = s₂.mem) (fld val) : + (w fld val s₁).mem = s₂.mem := by + unfold w; + cases fld <;> exact h + @[state_simp_rules] theorem write_mem_bytes_program {n : Nat} (addr : BitVec 64) (bytes : BitVec (n * 8)): (write_mem_bytes n addr bytes s).program = s.program := by @@ -838,6 +844,9 @@ def read_bytes (n : Nat) (addr : BitVec 64) (m : Memory) : BitVec (n * 8) := have h : n' * 8 + 8 = (n' + 1) * 8 := by simp_arith BitVec.cast h (rest ++ byte) +-- TODO (@bollu): we should drop the `State` namespace here, given that +-- this namespace is used nowhere else. Also, `ArmState.read_mem_eq_mem_read` +-- should probably live under the `Memory` namespace. @[memory_rules] theorem State.read_mem_bytes_eq_mem_read_bytes (s : ArmState) : read_mem_bytes n addr s = s.mem.read_bytes n addr := by @@ -966,7 +975,7 @@ def write_bytes (n : Nat) (addr : BitVec 64) | n' + 1 => let byte := BitVec.extractLsb' 0 8 val let m := m.write addr byte - let val_rest := BitVec.zeroExtend (n' * 8) (val >>> 8) + let val_rest := BitVec.setWidth (n' * 8) (val >>> 8) m.write_bytes n' (addr + 1#64) val_rest /-- Writing zero bytes does not change memory. 
-/ @@ -992,7 +1001,7 @@ theorem write_bytes_succ {mem : Memory} : mem.write_bytes (n + 1) addr val = let byte := BitVec.extractLsb' 0 8 val let mem := mem.write addr byte - let val_rest := BitVec.zeroExtend (n * 8) (val >>> 8) + let val_rest := BitVec.setWidth (n * 8) (val >>> 8) mem.write_bytes n (addr + 1#64) val_rest := rfl theorem write_bytes_eq_of_le {mem : Memory} {ix base : BitVec 64} @@ -1033,13 +1042,13 @@ theorem write_bytes_eq_of_ge {mem : Memory} {ix base : BitVec 64} (by simp only [BitVec.toNat_ofNat, Nat.reducePow, Nat.reduceMod]; omega)] simp only [BitVec.toNat_ofNat, Nat.reducePow, Nat.reduceMod]; omega -theorem extractLsByte_zeroExtend_shiftLeft (data : BitVec ((n + 1) * 8)) (hi : i > 0): - (BitVec.zeroExtend (n * 8) (data >>> 8)).extractLsByte (i - 1) = data.extractLsByte i := by +theorem extractLsByte_setWidth_shiftLeft (data : BitVec ((n + 1) * 8)) (hi : i > 0): + (BitVec.setWidth (n * 8) (data >>> 8)).extractLsByte (i - 1) = data.extractLsByte i := by rcases i with rfl | i · simp at hi · apply BitVec.eq_of_getLsbD_eq intros j - simp only [Nat.add_one_sub_one, BitVec.getLsbD_extractLsByte, BitVec.getLsbD_zeroExtend, + simp only [Nat.add_one_sub_one, BitVec.getLsbD_extractLsByte, BitVec.getLsbD_setWidth, BitVec.getLsbD_ushiftRight] by_cases hj : (j : Nat) ≤ 7 · simp only [hj, decide_True, Bool.true_and] @@ -1088,7 +1097,7 @@ theorem write_bytes_eq_extractLsByte {ix base : BitVec 64} {m : Memory} rw [show (ix - base).toNat = ix.toNat - base.toNat by bv_omega] rw [Nat.sub_add_eq, show ix.toNat - base.toNat - 1 = (ix.toNat - base.toNat) - 1 by omega] - apply extractLsByte_zeroExtend_shiftLeft + apply extractLsByte_setWidth_shiftLeft omega /-- @@ -1163,13 +1172,10 @@ theorem Memory.mem_eq_iff_read_mem_bytes_eq {s₁ s₂ : ArmState} : · intro h _ _; rw[h] · exact Memory.eq_of_read_mem_bytes_eq -theorem read_mem_bytes_write_mem_bytes_of_read_mem_eq - (h : ∀ n addr, read_mem_bytes n addr s₁ = read_mem_bytes n addr s₂) - (n₂ addr₂ val n₁ addr₁) : - 
read_mem_bytes n₁ addr₁ (write_mem_bytes n₂ addr₂ val s₁) - = read_mem_bytes n₁ addr₁ (write_mem_bytes n₂ addr₂ val s₂) := by - revert n₁ addr₁ - simp only [← Memory.mem_eq_iff_read_mem_bytes_eq] at h ⊢ +theorem mem_write_mem_bytes_of_mem_eq + (h : s₁.mem = s₂.mem) (n addr val) : + (write_mem_bytes n addr val s₁).mem + = (write_mem_bytes n addr val s₂).mem := by simp only [memory_rules, h] /- Helper lemma for `state_eq_iff_components_eq` -/ diff --git a/Arm/Syntax.lean b/Arm/Syntax.lean index 0c1ca562..99658f43 100644 --- a/Arm/Syntax.lean +++ b/Arm/Syntax.lean @@ -10,10 +10,10 @@ import Arm.Memory.Separate namespace ArmStateNotation -/-! We build a notation for `read_mem_bytes $n $base $s` as `$s[$base, $n]` -/ +/-! We build a notation for `$s.mem.read_bytes $n $base $s` as `$s[$base, $n]` -/ @[inherit_doc read_mem_bytes] syntax:max term noWs "[" withoutPosition(term) "," withoutPosition(term) noWs "]" : term -macro_rules | `($s[$base,$n]) => `(read_mem_bytes $n $base $s) +macro_rules | `($s[$base,$n]) => `(Memory.read_bytes $n $base (ArmState.mem $s)) /-! Notation to specify the frame condition for non-memory state components. E.g., diff --git a/Benchmarks.lean b/Benchmarks.lean index 7b7fcc0a..cc1f1c6b 100644 --- a/Benchmarks.lean +++ b/Benchmarks.lean @@ -3,6 +3,11 @@ Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. Released under Apache 2.0 license as described in the file LICENSE. 
Author(s): Alex Keizer -/ +import Benchmarks.SHA512_75 +import Benchmarks.SHA512_75_noKernel_noLint import Benchmarks.SHA512_150 +import Benchmarks.SHA512_150_noKernel_noLint import Benchmarks.SHA512_225 +import Benchmarks.SHA512_225_noKernel_noLint import Benchmarks.SHA512_400 +import Benchmarks.SHA512_400_noKernel_noLint diff --git a/Benchmarks/Command.lean b/Benchmarks/Command.lean index bf605c43..87dc9582 100644 --- a/Benchmarks/Command.lean +++ b/Benchmarks/Command.lean @@ -12,58 +12,102 @@ initialize defValue := false descr := "enables/disables benchmarking in `withBenchmark` combinator" } + registerOption `benchmark.runs { + defValue := (5 : Nat) + descr := "controls how many runs the `benchmark` command does. \ + NOTE: Benchmarks are run only once when the `profiler` option is true, \ + regardless of the value of `benchmark.runs`" + } + /- Shouldn't be set directly, instead, use the `benchmark` command -/ + registerTraceClass `benchmark variable {m} [Monad m] [MonadLiftT BaseIO m] in -def withHeartbeatsAndMs (x : m α) : m (α × Nat × Nat) := do +/-- Measure the heartbeats and time (in milliseconds) taken by `x` -/ +def withHeartbeatsAndMilliseconds (x : m α) : m (α × Nat × Nat) := do let start ← IO.monoMsNow let (a, heartbeats) ← withHeartbeats x let endTime ← IO.monoMsNow return ⟨a, heartbeats, endTime - start⟩ -elab "benchmark" id:ident declSig:optDeclSig val:declVal : command => do - logInfo m!"Running {id} benchmark\n" +/-- Adds a trace node with the `benchmark` class, but only if the profiler +option is *not* set. + +We deliberately suppress benchmarking nodes when profiling, since it generally +only adds noise +-/ +def withBenchTraceNode (msg : MessageData) (x : CommandElabM α ) + : CommandElabM α := do + if (← getBoolOption `profiler) then + x + else + withTraceNode `benchmark (fun _ => pure msg) x (collapsed := false) + +/-- +Run a benchmark for a set number of times, and report the average runtime. 
+If the `profiler` option is set true, we run the benchmark only once, +with `trace.profiler` to true. -/ +elab "benchmark" id:ident declSig:optDeclSig val:declVal : command => do + let originalOpts ← getOptions + let mut n := originalOpts.getNat `benchmark.runs 5 + let mut opts := originalOpts + opts := opts.setBool `benchmark true let stx ← `(command| - set_option benchmark true in example $declSig:optDeclSig $val:declVal ) - let n := 5 - let mut totalRunTime := 0 - -- geomean = exp(log((a₁ a₂ ... aₙ)^1/n)) = - -- exp(1/n * (log a₁ + log a₂ + log aₙ)). - let mut totalRunTimeLog := 0 - for i in [0:n] do - logInfo m!"\n\nRun {i} (out of {n}):\n" - let ((), _, runTime) ← withHeartbeatsAndMs <| - elabCommand stx - - logInfo m!"Proof took {runTime / 1000}s in total" - totalRunTime := totalRunTime + runTime - totalRunTimeLog := totalRunTimeLog + Float.log runTime.toFloat - - let avg := totalRunTime.toFloat / n.toFloat / 1000 - let geomean := (Float.exp (totalRunTimeLog / n.toFloat)) / 1000.0 - logInfo m!"\ -{id}: - average runtime over {n} runs: - {avg}s - geomean over {n} runs: - {geomean}s -" + if (← getBoolOption `profiler) then + opts := opts.setBool `trace.profiler true + opts := opts.setNat `trace.profiler.threshold 1 + n := 1 -- only run once, if `profiler` is set to true + else + opts := opts.setBool `trace.benchmark true + + if n = 0 then + return () + + -- Set options + modifyScope fun scope => { scope with opts } + + withBenchTraceNode m!"Running {id} benchmark" <| do + let mut totalRunTime := 0 + -- geomean = exp(log((a₁ a₂ ... aₙ)^1/n)) = + -- exp(1/n * (log a₁ + log a₂ + log aₙ)). 
+ let mut totalRunTimeLog : Float := 0 + for i in [0:n] do + let runTime ← withBenchTraceNode m!"Run {i+1} (out of {n}):" <| do + let ((), _, runTime) ← withHeartbeatsAndMilliseconds <| + elabCommand stx + + trace[benchmark] m!"Proof took {runTime / 1000}s in total" + pure runTime + totalRunTime := totalRunTime + runTime + totalRunTimeLog := totalRunTimeLog + Float.log runTime.toFloat + + let avg := totalRunTime.toFloat / n.toFloat / 1000 + let geomean := (Float.exp (totalRunTimeLog / n.toFloat)) / 1000.0 + trace[benchmark] m!"\ + {id}: + average runtime over {n} runs: + {avg}s + geomean over {n} runs: + {geomean}s + " + -- Restore options + modifyScope fun scope => { scope with opts := originalOpts } /-- Set various options to disable linters -/ macro "disable_linters" "in" cmd:command : command => `(command| - set_option linter.constructorNameAsVariable false in - set_option linter.deprecated false in - set_option linter.missingDocs false in - set_option linter.omit false in - set_option linter.suspiciousUnexpanderPatterns false in - set_option linter.unnecessarySimpa false in - set_option linter.unusedRCasesPattern false in - set_option linter.unusedSectionVars false in - set_option linter.unusedVariables false in - $cmd +set_option linter.constructorNameAsVariable false in +set_option linter.deprecated false in +set_option linter.missingDocs false in +set_option linter.omit false in +set_option linter.suspiciousUnexpanderPatterns false in +set_option linter.unnecessarySimpa false in +set_option linter.unusedRCasesPattern false in +set_option linter.unusedSectionVars false in +set_option linter.unusedVariables false in +$cmd ) /-- The default `maxHeartbeats` setting. 
@@ -96,7 +140,7 @@ private def withBenchmarkAux (x : m α) (f : Nat → Nat → m Unit) : m α := if (← getBoolOption `benchmark) = false then x else - let (a, heartbeats, t) ← withHeartbeatsAndMs x + let (a, heartbeats, t) ← withHeartbeatsAndMilliseconds x f heartbeats t return a diff --git a/Benchmarks/SHA512.lean b/Benchmarks/SHA512.lean index b0901b0c..685a18b7 100644 --- a/Benchmarks/SHA512.lean +++ b/Benchmarks/SHA512.lean @@ -15,11 +15,15 @@ namespace Benchmarks def SHA512Bench (nSteps : Nat) : Prop := ∀ (s0 sf : ArmState) - (_h_s0_num_blocks : r (.GPR 2#5) s0 = 10) + (_h_s0_num_blocks : r (.GPR 2#5) s0 = 10#64) (_h_s0_pc : read_pc s0 = 0x1264c0#64) (_h_s0_err : read_err s0 = StateError.None) (_h_s0_sp_aligned : CheckSPAlignment s0) (_h_s0_program : s0.program = SHA512.program) (_h_run : sf = run nSteps s0), r StateField.ERR sf = StateError.None - ∧ r (.GPR 2#5) sf = BitVec.ofNat _ (9 - (nSteps / 485)) + ∧ r (.GPR 2#5) sf = BitVec.ofNat 64 (10 - ((nSteps + 467) / 485)) + -- / -------------------------------^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + -- | This computes the expected value of x2, taking into account that + -- | the loop body is 485 instructions long, and that x2 is first + -- | decremented after 18 instructions (485 - 18 = 467). diff --git a/Benchmarks/SHA512_150.lean b/Benchmarks/SHA512_150.lean index da549001..0cf74cff 100644 --- a/Benchmarks/SHA512_150.lean +++ b/Benchmarks/SHA512_150.lean @@ -12,5 +12,6 @@ open Benchmarks benchmark sha512_150_instructions : SHA512Bench 150 := fun s0 _ h => by intros sym_n 150 - · exact (sorry : Aligned ..) + simp (config := {failIfUnchanged := false}) only [h, bitvec_rules] + all_goals exact (sorry : Aligned ..) done diff --git a/Benchmarks/SHA512_150_noKernel_noLint.lean b/Benchmarks/SHA512_150_noKernel_noLint.lean new file mode 100644 index 00000000..faec9e9f --- /dev/null +++ b/Benchmarks/SHA512_150_noKernel_noLint.lean @@ -0,0 +1,19 @@ +/- +Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+Released under Apache 2.0 license as described in the file LICENSE. +Author(s): Alex Keizer +-/ +import Tactics.Sym +import Benchmarks.Command +import Benchmarks.SHA512 + +open Benchmarks + +disable_linters in +set_option debug.skipKernelTC true in +benchmark sha512_150_noKernel_noLint : SHA512Bench 150 := fun s0 _ h => by + intros + sym_n 150 + simp (config := {failIfUnchanged := false}) only [h, bitvec_rules] + all_goals exact (sorry : Aligned ..) + done diff --git a/Benchmarks/SHA512_225_noKernel_noLint.lean b/Benchmarks/SHA512_225_noKernel_noLint.lean new file mode 100644 index 00000000..df2b7a88 --- /dev/null +++ b/Benchmarks/SHA512_225_noKernel_noLint.lean @@ -0,0 +1,19 @@ +/- +Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Author(s): Alex Keizer +-/ +import Tactics.Sym +import Benchmarks.Command +import Benchmarks.SHA512 + +open Benchmarks + +disable_linters in +set_option debug.skipKernelTC true in +benchmark sha512_225_noKernel_noLint : SHA512Bench 225 := fun s0 _ h => by + intros + sym_n 225 + simp (config := {failIfUnchanged := false}) only [h, bitvec_rules] + all_goals exact (sorry : Aligned ..) + done diff --git a/Benchmarks/SHA512_400.lean b/Benchmarks/SHA512_400.lean index 3be508ee..ae26c4e9 100644 --- a/Benchmarks/SHA512_400.lean +++ b/Benchmarks/SHA512_400.lean @@ -12,5 +12,6 @@ open Benchmarks benchmark sha512_400_instructions : SHA512Bench 400 := fun s0 _ h => by intros sym_n 400 - · exact (sorry : Aligned ..) + simp (config := {failIfUnchanged := false}) only [h, bitvec_rules] + all_goals exact (sorry : Aligned ..) done diff --git a/Benchmarks/SHA512_400_noKernel_noLint.lean b/Benchmarks/SHA512_400_noKernel_noLint.lean new file mode 100644 index 00000000..cefce28c --- /dev/null +++ b/Benchmarks/SHA512_400_noKernel_noLint.lean @@ -0,0 +1,19 @@ +/- +Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+Released under Apache 2.0 license as described in the file LICENSE. +Author(s): Alex Keizer +-/ +import Tactics.Sym +import Benchmarks.Command +import Benchmarks.SHA512 + +open Benchmarks + +disable_linters in +set_option debug.skipKernelTC true in +benchmark sha512_400_noKernel_noLint : SHA512Bench 400 := fun s0 _ h => by + intros + sym_n 400 + simp (config := {failIfUnchanged := false}) only [h, bitvec_rules] + all_goals exact (sorry : Aligned ..) + done diff --git a/Benchmarks/SHA512_50.lean b/Benchmarks/SHA512_50.lean new file mode 100644 index 00000000..7e388869 --- /dev/null +++ b/Benchmarks/SHA512_50.lean @@ -0,0 +1,17 @@ +/- +Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Author(s): Alex Keizer +-/ +import Tactics.Sym +import Benchmarks.Command +import Benchmarks.SHA512 + +open Benchmarks + +benchmark sha512_50 : SHA512Bench 50 := fun s0 _ h => by + intros + sym_n 50 + simp (config := {failIfUnchanged := false}) only [h, bitvec_rules] + all_goals exact (sorry : Aligned ..) + done diff --git a/Benchmarks/SHA512_50_noKernel_noLint.lean b/Benchmarks/SHA512_50_noKernel_noLint.lean new file mode 100644 index 00000000..f08a6868 --- /dev/null +++ b/Benchmarks/SHA512_50_noKernel_noLint.lean @@ -0,0 +1,19 @@ +/- +Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Author(s): Alex Keizer +-/ +import Tactics.Sym +import Benchmarks.Command +import Benchmarks.SHA512 + +open Benchmarks + +disable_linters in +set_option debug.skipKernelTC true in +benchmark sha512_50_noKernel_noLint : SHA512Bench 50 := fun s0 _ h => by + intros + sym_n 50 + simp (config := {failIfUnchanged := false}) only [h, bitvec_rules] + all_goals exact (sorry : Aligned ..) 
+ done diff --git a/Benchmarks/SHA512_75.lean b/Benchmarks/SHA512_75.lean new file mode 100644 index 00000000..068b06b3 --- /dev/null +++ b/Benchmarks/SHA512_75.lean @@ -0,0 +1,17 @@ +/- +Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Author(s): Alex Keizer +-/ +import Tactics.Sym +import Benchmarks.Command +import Benchmarks.SHA512 + +open Benchmarks + +benchmark sha512_75 : SHA512Bench 75 := fun s0 _ h => by + intros + sym_n 75 + simp (config := {failIfUnchanged := false}) only [h, bitvec_rules] + all_goals exact (sorry : Aligned ..) + done diff --git a/Benchmarks/SHA512_75_noKernel_noLint.lean b/Benchmarks/SHA512_75_noKernel_noLint.lean new file mode 100644 index 00000000..516880c8 --- /dev/null +++ b/Benchmarks/SHA512_75_noKernel_noLint.lean @@ -0,0 +1,19 @@ +/- +Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Author(s): Alex Keizer +-/ +import Tactics.Sym +import Benchmarks.Command +import Benchmarks.SHA512 + +open Benchmarks + +disable_linters in +set_option debug.skipKernelTC true in +benchmark sha512_75_noKernel_noLint : SHA512Bench 75 := fun s0 _ h => by + intros + sym_n 75 + simp (config := {failIfUnchanged := false}) only [h, bitvec_rules] + all_goals exact (sorry : Aligned ..) 
+ done diff --git a/Makefile b/Makefile index 00cf37c6..f3588a57 100644 --- a/Makefile +++ b/Makefile @@ -5,6 +5,7 @@ SHELL := /bin/bash LAKE = lake +LEAN = $(LAKE) env lean NUM_TESTS?=3 VERBOSE?=--verbose @@ -37,9 +38,25 @@ awslc_elf: cosim: time -p lake exe lnsym $(VERBOSE) --num-tests $(NUM_TESTS) +BENCHMARKS = \ + Benchmarks/SHA512_50.lean \ + Benchmarks/SHA512_50_noKernel_noLint.lean \ + Benchmarks/SHA512_75.lean \ + Benchmarks/SHA512_75_noKernel_noLint.lean \ + Benchmarks/SHA512_150.lean \ + Benchmarks/SHA512_150_noKernel_noLint.lean \ + Benchmarks/SHA512_225.lean \ + Benchmarks/SHA512_225_noKernel_noLint.lean \ + Benchmarks/SHA512_400.lean \ + Benchmarks/SHA512_400_noKernel_noLint.lean + .PHONY: benchmarks benchmarks: - $(LAKE) build Benchmarks + ./scripts/benchmark.sh $(BENCHMARKS) + +.PHONY: profile +profile: + ./scripts/profile.sh $(BENCHMARKS) .PHONY: clean clean_all clean: @@ -51,3 +68,5 @@ clean_all: clean rm -rf lake-packages rm -rf .lake rm -rf lakefile.olean + rm -rf data/benchmarks + rm -rf data/profiles diff --git a/Proofs/AES-GCM/GCMGmultV8Sym.lean b/Proofs/AES-GCM/GCMGmultV8Sym.lean index 6a5ecc7a..d1c453c2 100644 --- a/Proofs/AES-GCM/GCMGmultV8Sym.lean +++ b/Proofs/AES-GCM/GCMGmultV8Sym.lean @@ -1,13 +1,15 @@ /- Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. Released under Apache 2.0 license as described in the file LICENSE. 
-Author(s): Alex Keizer +Author(s): Alex Keizer, Shilpi Goel -/ +import Specs.GCMV8 import Tests.«AES-GCM».GCMGmultV8Program import Tactics.Sym import Tactics.Aggregate import Tactics.StepThms import Tactics.CSE +import Tactics.ClearNamed import Arm.Memory.SeparateAutomation import Arm.Syntax @@ -16,9 +18,33 @@ open ArmStateNotation #genStepEqTheorems gcm_gmult_v8_program -/- -xxx: GCMGmultV8 Xi HTable --/ +private theorem lsb_from_extractLsb'_of_append_self (x : BitVec 128) : + BitVec.extractLsb' 64 64 (BitVec.extractLsb' 64 128 (x ++ x)) = + BitVec.extractLsb' 0 64 x := by + bv_decide + +private theorem msb_from_extractLsb'_of_append_self (x : BitVec 128) : + BitVec.extractLsb' 0 64 (BitVec.extractLsb' 64 128 (x ++ x)) = + BitVec.extractLsb' 64 64 x := by + bv_decide + +theorem extractLsb'_zero_extractLsb'_of_le (h : len1 ≤ len2) : + BitVec.extractLsb' 0 len1 (BitVec.extractLsb' start len2 x) = + BitVec.extractLsb' start len1 x := by + apply BitVec.eq_of_getLsbD_eq; intro i + simp only [BitVec.getLsbD_extractLsb', Fin.is_lt, + decide_True, Nat.zero_add, Bool.true_and, + Bool.and_iff_right_iff_imp, decide_eq_true_eq] + omega + +theorem extractLsb'_extractLsb'_zero_of_le (h : start + len1 ≤ len2): + BitVec.extractLsb' start len1 (BitVec.extractLsb' 0 len2 x) = + BitVec.extractLsb' start len1 x := by + apply BitVec.eq_of_getLsbD_eq; intro i + simp only [BitVec.getLsbD_extractLsb', Fin.is_lt, + decide_True, Nat.zero_add, Bool.true_and, + Bool.and_iff_right_iff_imp, decide_eq_true_eq] + omega set_option pp.deepTerms false in set_option pp.deepTerms.threshold 50 in @@ -29,10 +55,10 @@ theorem gcm_gmult_v8_program_run_27 (s0 sf : ArmState) (h_s0_pc : read_pc s0 = gcm_gmult_v8_program.min) (h_s0_sp_aligned : CheckSPAlignment s0) (h_Xi : Xi = s0[read_gpr 64 0#5 s0, 16]) - (h_HTable : HTable = s0[read_gpr 64 1#5 s0, 256]) + (h_HTable : HTable = s0[read_gpr 64 1#5 s0, 32]) (h_mem_sep : Memory.Region.pairwiseSeparate [(read_gpr 64 0#5 s0, 16), - (read_gpr 64 1#5 s0, 256)]) + 
(read_gpr 64 1#5 s0, 32)]) (h_run : sf = run gcm_gmult_v8_program.length s0) : -- The final state is error-free. read_err sf = .None ∧ @@ -42,8 +68,11 @@ theorem gcm_gmult_v8_program_run_27 (s0 sf : ArmState) CheckSPAlignment sf ∧ -- The final state returns to the address in register `x30` in `s0`. read_pc sf = r (StateField.GPR 30#5) s0 ∧ + -- (TODO) Delete the following conjunct because it is covered by the + -- MEM_UNCHANGED_EXCEPT frame condition. We keep it around because it + -- exposes the issue with `simp_mem` that @bollu will fix. -- HTable is unmodified. - sf[read_gpr 64 1#5 s0, 256] = HTable ∧ + sf[read_gpr 64 1#5 s0, 32] = HTable ∧ -- Frame conditions. -- Note that the following also covers that the Xi address in .GPR 0 -- is unmodified. @@ -52,11 +81,15 @@ theorem gcm_gmult_v8_program_run_27 (s0 sf : ArmState) .SFP 21, .PC] (sf, s0) ∧ -- Memory frame condition. - MEM_UNCHANGED_EXCEPT [(r (.GPR 0) s0, 128)] (sf, s0) := by - simp_all only [state_simp_rules, -h_run] -- prelude + MEM_UNCHANGED_EXCEPT [(r (.GPR 0) s0, 16)] (sf, s0) ∧ + sf[r (.GPR 0) s0, 16] = GCMV8.GCMGmultV8_alt (HTable.extractLsb' 0 128) Xi := by + -- Prelude + simp_all only [state_simp_rules, -h_run] + simp only [Nat.reduceMul] at Xi HTable simp (config := {ground := true}) only at h_s0_pc -- ^^ Still needed, because `gcm_gmult_v8_program.min` is somehow -- unable to be reflected + sym_n 27 -- Epilogue simp only [←Memory.mem_eq_iff_read_mem_bytes_eq] at * @@ -64,34 +97,53 @@ theorem gcm_gmult_v8_program_run_27 (s0 sf : ArmState) sym_aggregate -- Split conjunction repeat' apply And.intro - · -- Aggregate the memory (non)effects. - -- (FIXME) This will be tackled by `sym_aggregate` when `sym_n` and `simp_mem` - -- are merged. - simp only [*] - /- - (FIXME @bollu) `simp_mem; rfl` creates a malformed proof here. 
The tactic produces - no goals, but we get the following error message: - - application type mismatch - Memory.read_bytes_eq_extractLsBytes_sub_of_mem_subset' - (Eq.mp (congrArg (Eq HTable) (Memory.State.read_mem_bytes_eq_mem_read_bytes s0)) - (Eq.mp (congrArg (fun x => HTable = read_mem_bytes 256 x s0) zeroExtend_eq_of_r_gpr) h_HTable)) - argument has type - HTable = Memory.read_bytes 256 (r (StateField.GPR 1#5) s0) s0.mem - but function has type - Memory.read_bytes 256 (r (StateField.GPR 1#5) s0) s0.mem = HTable → - mem_subset' (r (StateField.GPR 1#5) s0) 256 (r (StateField.GPR 1#5) s0) 256 → - Memory.read_bytes 256 (r (StateField.GPR 1#5) s0) s0.mem = - HTable.extractLsBytes (BitVec.toNat (r (StateField.GPR 1#5) s0) - BitVec.toNat (r (StateField.GPR 1#5) s0)) 256 - - simp_mem; rfl - -/ - rw [Memory.read_bytes_write_bytes_eq_read_bytes_of_mem_separate'] - simp_mem + · simp_mem; rfl · simp only [List.mem_cons, List.mem_singleton, not_or, and_imp] sym_aggregate · intro n addr h_separate simp_mem (config := { useOmegaToClose := false }) -- Aggregate the memory (non)effects. simp only [*] + · clear_named [h_s, stepi_] + clear s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15 s16 s17 s18 s19 s20 s21 s22 s23 s24 s25 s26 + -- Simplifying the LHS + have h_HTable_low : + Memory.read_bytes 16 (r (StateField.GPR 1#5) s0) s0.mem = HTable.extractLsb' 0 128 := by + -- (FIXME @bollu) use `simp_mem` instead of the rw below. + rw [@Memory.read_bytes_eq_extractLsBytes_sub_of_mem_subset' + 32 (r (StateField.GPR 1#5) s0) HTable (r (StateField.GPR 1#5) s0) 16 _ h_HTable.symm] + · simp only [Nat.reduceMul, BitVec.extractLsBytes, Nat.sub_self, Nat.zero_mul] + · simp_mem + have h_HTable_high : + (Memory.read_bytes 16 (r (StateField.GPR 1#5) s0 + 16#64) s0.mem) = HTable.extractLsb' 128 128 := by + -- (FIXME @bollu) use `simp_mem` instead of the rw below. 
+ rw [@Memory.read_bytes_eq_extractLsBytes_sub_of_mem_subset' + 32 (r (StateField.GPR 1#5) s0) HTable (r (StateField.GPR 1#5) s0 + 16#64) 16 _ h_HTable.symm] + repeat sorry + simp only [h_HTable_high, h_HTable_low, ←h_Xi] + /- + simp/ground below to reduce + (BitVec.extractLsb' 0 64 + (shift_left_common_aux 0 + { esize := 64, elements := 2, shift := 57, unsigned := true, round := false, + accumulate := false } + 300249147283180997173565830086854304225#128 0#128)) + -/ + simp (config := {ground := true}) only + simp only [msb_from_extractLsb'_of_append_self, + lsb_from_extractLsb'_of_append_self, + BitVec.partInstall] + -- (FIXME @bollu) cse leaves the goal unchanged here, quietly, likely due to + -- subexpressions occurring in dep. contexts. Maybe a message here would be helpful. + generalize h_Xi_rev : DPSFP.vrev128_64_8 Xi = Xi_rev + -- Simplifying the RHS + simp only [←h_HTable, GCMV8.GCMGmultV8_alt, + GCMV8.lo, GCMV8.hi, + GCMV8.gcm_polyval] + repeat rw [extractLsb'_zero_extractLsb'_of_le (by decide)] + repeat rw [extractLsb'_extractLsb'_zero_of_le (by decide)] + + sorry done + +end GCMGmultV8Program diff --git a/Proofs/AES-GCM/GCMInitV8Sym.lean b/Proofs/AES-GCM/GCMInitV8Sym.lean index 5683d0c7..e4e47c8a 100644 --- a/Proofs/AES-GCM/GCMInitV8Sym.lean +++ b/Proofs/AES-GCM/GCMInitV8Sym.lean @@ -11,11 +11,12 @@ import Specs.GCMV8 namespace GCMInitV8Program +set_option bv.ac_nf false + abbrev H_addr (s : ArmState) : BitVec 64 := r (StateField.GPR 1#5) s abbrev Htable_addr (s : ArmState) : BitVec 64 := r (StateField.GPR 0#5) s --- set_option debug.byAsSorry true in -set_option maxRecDepth 1000000 in +set_option maxRecDepth 8000 in -- set_option profiler true in theorem gcm_init_v8_program_run_152 (s0 sf : ArmState) (h_s0_program : s0.program = gcm_init_v8_program) @@ -33,8 +34,13 @@ theorem gcm_init_v8_program_run_152 (s0 sf : ArmState) sym_n 152 done -set_option maxRecDepth 1000000 in -set_option maxHeartbeats 2000000 in +set_option maxRecDepth 100000 in +set_option 
maxHeartbeats 500000 in +set_option sat.timeout 120 in +-- set_option pp.deepTerms true in +-- set_option pp.maxSteps 10000 in +-- set_option trace.profiler true in +-- set_option linter.unusedVariables false in -- set_option profiler true in theorem gcm_init_v8_program_correct (s0 sf : ArmState) (h_s0_program : s0.program = gcm_init_v8_program) @@ -42,7 +48,7 @@ theorem gcm_init_v8_program_correct (s0 sf : ArmState) (h_s0_pc : read_pc s0 = gcm_init_v8_program.min) (h_s0_sp_aligned : CheckSPAlignment s0) (h_run : sf = run gcm_init_v8_program.length s0) - (_h_mem : Memory.Region.pairwiseSeparate + (h_mem : Memory.Region.pairwiseSeparate [ ⟨(H_addr s0), 128⟩, ⟨(Htable_addr s0), 2048⟩ ]) : -- effects @@ -59,8 +65,10 @@ theorem gcm_init_v8_program_correct (s0 sf : ArmState) -- H_addr ptr stays the same ∧ H_addr sf = H_addr s0 -- v20 - v31 stores results of Htable - ∧ read_sfp 128 20#5 sf = (GCMV8.GCMInitV8 (read_mem_bytes 16 (H_addr s0) s0)).get! 0 - -- ∧ read_sfp 128 21#5 sf = (GCMV8.GCMInitV8 (read_mem_bytes 16 (H_addr s0) s0)).get! 1 + ∧ let Hinit := (read_mem_bytes 16 (H_addr s0) s0) + read_sfp 128 20#5 sf = + (GCMV8.GCMInitV8 ((BitVec.extractLsb' 0 64 Hinit) ++ (BitVec.extractLsb' 64 64 Hinit))).get! 
0 + -- -- TODO: Commenting out memory related conjuncts since it seems -- to make symbolic execution stuck -- -- First 12 elements in Htable is correct @@ -73,6 +81,7 @@ theorem gcm_init_v8_program_correct (s0 sf : ArmState) -- ∧ (∀ (f : StateField), ¬ (f = StateField.PC) ∧ -- ¬ (f = (StateField.GPR 29#5)) → -- r f sf = r f s0) + -- -- -- Memory safety: memory location that should not change did -- -- not change -- -- The addr exclude output region Htable @@ -84,6 +93,32 @@ theorem gcm_init_v8_program_correct (s0 sf : ArmState) -- unable to be reflected sym_n 152 simp only [Htable_addr, state_value] -- TODO: state_value is needed, why - apply And.intro + apply And.intro · bv_decide · sorry + -- [Shilpi] Commenting out the following because the CI fails with + -- "INTERNAL PANIC: out of memory" + /- + simp only + [shift_left_common_aux_64_2 + , shift_right_common_aux_64_2_tff + , shift_right_common_aux_32_4_fff + , DPSFP.AdvSIMDExpandImm + , DPSFP.dup_aux_0_4_32] + generalize read_mem_bytes 16 (r (StateField.GPR 1#5) s0) s0 = Hinit + -- change the type of Hinit to be BitVec 128, assuming that's def-eq + change BitVec 128 at Hinit + simp only [GCMV8.GCMInitV8, GCMV8.lo, List.get!, GCMV8.hi, + GCMV8.gcm_init_H, GCMV8.refpoly, GCMV8.pmod, GCMV8.pmod.pmodTR, + GCMV8.reduce, GCMV8.degree, GCMV8.degree.degreeTR] + simp only [Nat.reduceAdd, BitVec.ushiftRight_eq, BitVec.reduceExtracLsb', + BitVec.reduceHShiftLeft, BitVec.reduceAppend, BitVec.reduceHShiftRight, BitVec.ofNat_eq_ofNat, + BitVec.reduceEq, ↓reduceIte, BitVec.zero_eq, Nat.sub_self, BitVec.ushiftRight_zero_eq, + BitVec.reduceAnd, BitVec.toNat_ofNat, Nat.pow_one, Nat.reduceMod, Nat.mul_zero, Nat.add_zero, + Nat.zero_mod, Nat.zero_add, Nat.sub_zero, Nat.mul_one, Nat.zero_mul, Nat.one_mul, + Nat.reduceSub, BitVec.reduceMul, BitVec.reduceXOr, BitVec.mul_one, Nat.add_one_sub_one, + BitVec.one_mul] + -- bv_check "GCMInitV8Sym.lean-GCMInitV8Program.gcm_init_v8_program_correct-117-4.lrat" + -- TODO: proof works in vscode 
but timeout in the CI -- need to investigate further + -/ + diff --git a/Proofs/AES-GCM/GCMInitV8Sym.lean-GCMInitV8Program.gcm_init_v8_program_correct-117-4.lrat b/Proofs/AES-GCM/GCMInitV8Sym.lean-GCMInitV8Program.gcm_init_v8_program_correct-117-4.lrat new file mode 100644 index 00000000..a639618a Binary files /dev/null and b/Proofs/AES-GCM/GCMInitV8Sym.lean-GCMInitV8Program.gcm_init_v8_program_correct-117-4.lrat differ diff --git a/Proofs/Experiments/AddLoop/AddLoop.lean b/Proofs/Experiments/AddLoop/AddLoop.lean index 732f21d6..eb647fd0 100644 --- a/Proofs/Experiments/AddLoop/AddLoop.lean +++ b/Proofs/Experiments/AddLoop/AddLoop.lean @@ -11,6 +11,7 @@ import Arm import Tactics.StepThms import Tactics.Sym import Correctness.ArmSpec +import Tactics.BvOmegaBench namespace AddLoop @@ -523,18 +524,18 @@ private theorem loop_inv_x0_le (x y : BitVec 64) (h_assert_x0 : x ≤ y) (h_assert_x0_nz : ¬x = 0x0#64) : x - 0x1#64 ≤ y := by - bv_omega + bv_omega_bench private theorem AddWithCarry.add_one_64 (x : BitVec 64) : (AddWithCarry x 0x1#64 0x0#1).fst = x + 0x1#64 := by simp only [AddWithCarry, bitvec_rules] - bv_omega + bv_omega_bench private theorem crock_lemma (x y z : BitVec 64) : x + (y - z) + 1#64 = x + (y - (z - 1#64)) := by - -- (FIXME) Without this apply below, bv_omega crashes my editor. + -- (FIXME) Without this apply below, bv_omega_bench crashes my editor. 
apply BitVec.eq_sub_iff_add_eq.mp - bv_omega + bv_omega_bench theorem partial_correctness : PartialCorrectness ArmState := by diff --git a/Proofs/Experiments/AddLoop/AddLoopTandem.lean b/Proofs/Experiments/AddLoop/AddLoopTandem.lean index ac58b4d9..a5aa02d6 100644 --- a/Proofs/Experiments/AddLoop/AddLoopTandem.lean +++ b/Proofs/Experiments/AddLoop/AddLoopTandem.lean @@ -12,6 +12,7 @@ import Tactics.CSE import Tactics.Sym import Tactics.StepThms import Correctness.ArmSpec +import Tactics.BvOmegaBench namespace AddLoopTandem @@ -123,7 +124,7 @@ instance : Spec' ArmState where private theorem AddWithCarry.add_one_64 (x : BitVec 64) : (AddWithCarry x 0x1#64 0x0#1).fst = x + 0x1#64 := by simp only [AddWithCarry, bitvec_rules] - bv_omega + bv_omega_bench private theorem AddWithCarry.sub_one_64 (x : BitVec 64) : (AddWithCarry x 0xfffffffffffffffe#64 0x1#1).fst = x - 1#64 := by @@ -317,14 +318,14 @@ private theorem non_one_bit_is_zero {x : BitVec 1} private theorem crock_lemma (x y z : BitVec 64) : x + (y - z) + 1#64 = x + (y - (z - 1#64)) := by - -- (FIXME) Without this apply below, bv_omega crashes my editor. + -- (FIXME) Without this apply below, bv_omega_bench crashes my editor. 
apply BitVec.eq_sub_iff_add_eq.mp - bv_omega + bv_omega_bench private theorem loop_inv_x0_helper_lemma {x y : BitVec 64} (h1 : x ≤ y) (h2 : ¬(x = 0#64)) : x - 0x1#64 ≤ y := by - bv_omega + bv_omega_bench ------------------------------------------------------------------------------- @@ -340,17 +341,17 @@ def loop_clock (x0 : BitVec 64) : Nat := 1 else have : x0 - 0x1#64 < x0 := by - bv_omega + bv_omega_bench 4 + loop_clock (x0 - 1) termination_by x0.toNat theorem loop_clock_inv_lemma (h : ¬x = 0x0#64) : loop_clock (x - 0x1#64) < loop_clock x := by generalize h_xn : x.toNat = xn - have h_xn_ge_1 : 1 ≤ xn := by bv_omega + have h_xn_ge_1 : 1 ≤ xn := by bv_omega_bench induction xn, h_xn_ge_1 using Nat.le_induction generalizing x case base => - have h_x_eq_1 : x = 1#64 := by bv_omega + have h_x_eq_1 : x = 1#64 := by bv_omega_bench unfold loop_clock simp only [h_x_eq_1, BitVec.sub_self, BitVec.ofNat_eq_ofNat, reduceDIte, gt_iff_lt, BitVec.reduceEq] @@ -359,8 +360,8 @@ theorem loop_clock_inv_lemma (h : ¬x = 0x0#64) : rename_i xn' h_xn' h_inv unfold loop_clock simp only [BitVec.ofNat_eq_ofNat, dite_eq_ite, gt_iff_lt] - have h1 : ¬ x - 0x1#64 = 0x0#64 := by bv_omega - have h2 : (x - 0x1#64).toNat = xn' := by bv_omega + have h1 : ¬ x - 0x1#64 = 0x0#64 := by bv_omega_bench + have h2 : (x - 0x1#64).toNat = xn' := by bv_omega_bench simp only [h, h1, h2, h_inv, reduceIte, Nat.add_lt_add_iff_left, not_false_eq_true] done diff --git a/Proofs/Experiments/Max/MaxTandem.lean b/Proofs/Experiments/Max/MaxTandem.lean index f8aac756..b3083461 100644 --- a/Proofs/Experiments/Max/MaxTandem.lean +++ b/Proofs/Experiments/Max/MaxTandem.lean @@ -164,7 +164,7 @@ theorem program.stepi_0x894_cut (s sn : ArmState) | apply Aligned_AddWithCarry_64_4' repeat solve | decide - | bv_omega + | bv_omega_bench | assumption /-- @@ -194,14 +194,17 @@ theorem program.stepi_0x898_cut (s sn : ArmState) simp only [minimal_theory] at this simp_all only [run, cut, this, state_simp_rules, bitvec_rules, 
minimal_theory] - simp only [pcs, List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_self, not_false_eq_true, - true_and, List.not_mem_nil, or_self, not_false_eq_true, true_and] - simp only [memory_rules, state_simp_rules] + simp only [pcs, List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_self, + not_false_eq_true, true_and, List.not_mem_nil, or_self, not_false_eq_true, + true_and] + simp only [Memory.write_mem_bytes_eq_mem_write_bytes] simp_mem - rfl + simp only [Nat.reduceMul, BitVec.toNat_add, BitVec.toNat_ofNat, Nat.reducePow, + Nat.reduceMod, Nat.sub_self, BitVec.extractLsBytes_eq_self, BitVec.cast_eq, + and_true] /-- -info: 'MaxTandem.program.stepi_0x898_cut' depends on axioms: [propext, Classical.choice, Lean.ofReduceBool, Quot.sound] +info: 'MaxTandem.program.stepi_0x898_cut' depends on axioms: [propext, Classical.choice, Quot.sound] -/ #guard_msgs in #print axioms program.stepi_0x898_cut @@ -260,6 +263,7 @@ theorem program.stepi_0x8a0_cut (s sn : ArmState) simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] simp only [pcs, List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_self, not_false_eq_true, true_and, List.not_mem_nil, or_self, not_false_eq_true, true_and] + simp only [Memory.State.read_mem_bytes_eq_mem_read_bytes] done /-- @@ -289,6 +293,7 @@ theorem program.stepi_0x8a4_cut (s sn : ArmState) simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] simp only [pcs, List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_self, not_false_eq_true, true_and, List.not_mem_nil, or_self, not_false_eq_true, true_and] + simp only [Memory.State.read_mem_bytes_eq_mem_read_bytes] /-- info: 'MaxTandem.program.stepi_0x8a4_cut' depends on axioms: [propext, Classical.choice, Quot.sound] @@ -403,6 +408,7 @@ theorem program.stepi_0x8b0_cut (s sn : ArmState) simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] simp only [pcs, List.mem_cons, BitVec.reduceEq, List.mem_singleton, 
or_self, not_false_eq_true] simp only [List.not_mem_nil, or_self, not_false_eq_true] + simp only [Memory.State.read_mem_bytes_eq_mem_read_bytes, and_true] /-- info: 'MaxTandem.program.stepi_0x8b0_cut' depends on axioms: [propext, Classical.choice, Quot.sound] @@ -430,6 +436,8 @@ theorem program.stepi_0x8b4_cut (s sn : ArmState) simp only [pcs, List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_self, not_false_eq_true] simp only [List.not_mem_nil, or_self, or_false, or_true] simp only [not_false_eq_true] + simp only [Memory.write_mem_bytes_eq_mem_write_bytes, true_and] + rw [Memory.read_bytes_write_bytes_same (by omega)] /-- info: 'MaxTandem.program.stepi_0x8b4_cut' depends on axioms: [propext, Classical.choice, Lean.ofReduceBool, Quot.sound] @@ -483,6 +491,7 @@ theorem program.stepi_0x8bc_cut (s sn : ArmState) simp only [pcs, List.mem_cons, BitVec.reduceEq, List.mem_singleton, or_self, or_false, or_true] simp only [List.not_mem_nil, or_self, or_false, or_true] simp only [not_false_eq_true] + simp only [Memory.State.read_mem_bytes_eq_mem_read_bytes, and_self] /-- info: 'MaxTandem.program.stepi_0x8bc_cut' depends on axioms: [propext, Classical.choice, Quot.sound] @@ -506,6 +515,8 @@ theorem program.stepi_0x8c0_cut (s sn : ArmState) have := program.stepi_eq_0x8c0 h_program h_pc h_err simp only [minimal_theory] at this simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] + simp only [Memory.write_mem_bytes_eq_mem_write_bytes] + rw [Memory.read_bytes_write_bytes_same (by omega)] simp [pcs] /-- @@ -532,6 +543,7 @@ theorem program.stepi_0x8c4_cut (s sn : ArmState) have := program.stepi_eq_0x8c4 h_program h_pc h_err simp only [minimal_theory] at this simp_all only [run, cut, this, state_simp_rules, bitvec_rules, minimal_theory] + simp only [Memory.State.read_mem_bytes_eq_mem_read_bytes] simp [pcs] /-- @@ -632,7 +644,8 @@ theorem partial_correctness : replace h_s2_sp : s2.sp = (s0.sp - 32#64) := by simp_all replace h_s2_x0 : s2.x0 = s0.x0 := by 
simp_all replace h_s2_x1 : s2.x1 = s0.x1 := by simp_all - replace h_s2_read_sp12 : read_mem_bytes 4 (s2.sp + 12#64) s2 = BitVec.truncate 32 s0.x0 := by simp_all + replace h_s2_read_sp12 : s2.mem.read_bytes 4 (s2.sp + 12#64) = BitVec.truncate 32 s0.x0 := by + simp_all clear_named [h_s1] -- 3/15 @@ -645,8 +658,8 @@ theorem partial_correctness : replace _h_s3_x1 : s3.x1 = s0.x1 := by simp_all replace h_s3_sp : s3.sp = s0.sp - 32 := by simp_all /- TODO: this should be s0.x0-/ - replace h_s3_read_sp12 : read_mem_bytes 4 (s3.sp + 12#64) s3 = BitVec.truncate 32 s0.x0 := by simp_all - replace _h_s3_read_sp8 : read_mem_bytes 4 (s3.sp + 8#64) s3 = BitVec.truncate 32 s0.x1 := by simp_all + replace h_s3_read_sp12 : s3.mem.read_bytes 4 (s3.sp + 12#64) = BitVec.truncate 32 s0.x0 := by simp_all + replace _h_s3_read_sp8 : s3.mem.read_bytes 4 (s3.sp + 8#64) = BitVec.truncate 32 s0.x1 := by simp_all clear_named [h_s2] -- 4/15 @@ -659,8 +672,8 @@ theorem partial_correctness : replace _h_s4_x0 : s4.x0 = s0.x0 := by simp_all replace h_s4_x1 : s4.x1 = BitVec.zeroExtend 64 (BitVec.truncate 32 s0.x0) := by simp_all replace h_s4_sp : s4.sp = s0.sp - 32 := by simp_all - replace h_s4_read_sp12 : read_mem_bytes 4 (s4.sp + 12#64) s4 = BitVec.truncate 32 s0.x0 := by simp_all - replace _h_s4_read_sp8 : read_mem_bytes 4 (s4.sp + 8#64) s4 = BitVec.truncate 32 s0.x1 := by simp_all + replace h_s4_read_sp12 : s4.mem.read_bytes 4 (s4.sp + 12#64) = BitVec.truncate 32 s0.x0 := by simp_all + replace _h_s4_read_sp8 : s4.mem.read_bytes 4 (s4.sp + 8#64) = BitVec.truncate 32 s0.x1 := by simp_all clear_named [h_s3] -- 5/15 @@ -673,8 +686,8 @@ theorem partial_correctness : replace h_s5_x0 : s5.x0 = BitVec.zeroExtend 64 (BitVec.truncate 32 s0.x1) := by simp_all replace h_s5_x1 : s5.x1 = BitVec.zeroExtend 64 (BitVec.truncate 32 s0.x0) := by simp_all replace h_s5_sp : s5.sp = s0.sp - 32 := by simp_all - replace h_s5_read_sp12 : read_mem_bytes 4 (s5.sp + 12#64) s5 = BitVec.truncate 32 s0.x0 := by simp_all - 
replace _h_s5_read_sp8 : read_mem_bytes 4 (s5.sp + 8#64) s5 = BitVec.truncate 32 s0.x1 := by simp_all + replace h_s5_read_sp12 : s5.mem.read_bytes 4 (s5.sp + 12#64) = BitVec.truncate 32 s0.x0 := by simp_all + replace _h_s5_read_sp8 : s5.mem.read_bytes 4 (s5.sp + 8#64) = BitVec.truncate 32 s0.x1 := by simp_all clear_named [h_s4] -- 6/15 @@ -687,8 +700,8 @@ theorem partial_correctness : replace h_s6_x0 : s6.x0 = BitVec.zeroExtend 64 (BitVec.truncate 32 s0.x1) := by simp_all replace h_s6_x1 : s6.x1 = BitVec.zeroExtend 64 (BitVec.truncate 32 s0.x0) := by simp_all replace h_s6_sp : s6.sp = s0.sp - 32 := by simp_all - replace h_s6_read_sp12 : read_mem_bytes 4 (s6.sp + 12#64) s6 = BitVec.truncate 32 s0.x0 := by simp_all - replace _h_s6_read_sp8 : read_mem_bytes 4 (s6.sp + 8#64) s6 = BitVec.truncate 32 s0.x1 := by simp_all + replace h_s6_read_sp12 : s6.mem.read_bytes 4 (s6.sp + 12#64) = BitVec.truncate 32 s0.x0 := by simp_all + replace _h_s6_read_sp8 : s6.mem.read_bytes 4 (s6.sp + 8#64) = BitVec.truncate 32 s0.x1 := by simp_all replace h_s6_c : s6.C = (AddWithCarry (s0.x0.zeroExtend 32) (~~~s0.x1.zeroExtend 32) 1#1).snd.c := by simp_all replace h_s6_n : s6.N = (AddWithCarry (s0.x0.zeroExtend 32) (~~~s0.x1.zeroExtend 32) 1#1).snd.n := by simp_all replace h_s6_v : s6.V = (AddWithCarry (s0.x0.zeroExtend 32) (~~~s0.x1.zeroExtend 32) 1#1).snd.v := by simp_all @@ -705,8 +718,8 @@ theorem partial_correctness : replace h_s7_x0 : s7.x0 = BitVec.zeroExtend 64 (BitVec.truncate 32 s0.x1) := by simp_all replace h_s7_x1 : s7.x1 = BitVec.zeroExtend 64 (BitVec.truncate 32 s0.x0) := by simp_all replace h_s7_sp : s7.sp = s0.sp - 32 := by simp_all - replace h_s7_read_sp12 : read_mem_bytes 4 ((s0.sp - 32#64) + 12#64) s7 = BitVec.truncate 32 s0.x0 := by simp_all - replace h_s7_read_sp8 : read_mem_bytes 4 ((s0.sp - 32#64) + 8#64) s7 = BitVec.truncate 32 s0.x1 := by simp_all + replace h_s7_read_sp12 : s7.mem.read_bytes 4 ((s0.sp - 32#64) + 12#64) = BitVec.truncate 32 s0.x0 := by simp_all + 
replace h_s7_read_sp8 : s7.mem.read_bytes 4 ((s0.sp - 32#64) + 8#64) = BitVec.truncate 32 s0.x1 := by simp_all have h_s7_s6_c := h_s6_c have h_s7_s6_n := h_s6_n have h_s7_s6_v := h_s6_v @@ -760,7 +773,7 @@ theorem partial_correctness : obtain ⟨h_s3_cut, h_s3_pc, h_s3_err, h_s3_program, h_s3_x0, h_s3_sp_28, h_s3_sp, h_s3_sp_aliged⟩ := h rw [Correctness.snd_cassert_of_not_cut h_s3_cut]; -- try rw [Correctness.snd_cassert_of_cut h_cut]; simp [show Sys.next _ = run 1 _ by rfl] - replace h_s3_sp_28 : read_mem_bytes 4 (s3.sp + 28#64) s3 = BitVec.zeroExtend 32 (spec s0.x0 s0.x1) := by simp_all + replace h_s3_sp_28 : s3.mem.read_bytes 4 (s3.sp + 28#64) = BitVec.zeroExtend 32 (spec s0.x0 s0.x1) := by simp_all replace h_s3_sp : s3.sp = s0.sp - 32#64 := by simp_all clear_named [h_s2, h_s1] @@ -770,7 +783,7 @@ theorem partial_correctness : rw [Correctness.snd_cassert_of_not_cut (si := s4) (by simp_all [Spec'.cut])]; simp [show Sys.next _ = run 1 _ by rfl] have h_s4_sp : s4.sp = s0.sp - 32#64 := by simp_all - have h_s4_sp_28 : read_mem_bytes 4 (s4.sp + 28#64) s4 = BitVec.zeroExtend 32 (spec s0.x0 s0.x1) := by simp_all + have h_s4_sp_28 : s4.mem.read_bytes 4 (s4.sp + 28#64) = BitVec.zeroExtend 32 (spec s0.x0 s0.x1) := by simp_all clear_named [h_s3] -- 5/15 @@ -778,7 +791,7 @@ theorem partial_correctness : obtain h_s5 := program.stepi_0x8c4_cut s4 s5 (by simp_all) (by simp_all) (by simp_all) (by simp_all) (h_run.symm) rw [Correctness.snd_cassert_of_not_cut (si := s5) (by simp_all [Spec'.cut])]; have h_s5_x0 : s5.x0 = BitVec.zeroExtend 64 (BitVec.zeroExtend 32 (spec s0.x0 s0.x1)) := by - simp only [show s5.x0 = BitVec.zeroExtend 64 (read_mem_bytes 4 (s5.sp + 28#64) s5) by simp_all] + simp only [show s5.x0 = BitVec.zeroExtend 64 (s5.mem.read_bytes 4 (s5.sp + 28#64)) by simp_all] simp only [Nat.reduceMul] /- Damn, that the rewrite system is not confluent really messes me up over here ;_; `simp` winds up rewriting `s5.sp` into `s4.sp` first because of the rule, and @@ -786,10 +799,11 
@@ theorem partial_correctness : One might say that this entire proof is stupid, but really, I 'just' want it to build an e-graph and figure it out. -/ - have : (read_mem_bytes 4 (s5.sp + 28#64) s5) = read_mem_bytes 4 (s4.sp + 28#64) s4 := by + have : (s5.mem.read_bytes 4 (s5.sp + 28#64)) = read_mem_bytes 4 (s4.sp + 28#64) s4 := by obtain ⟨_, _, _, _, _, _, h, _⟩ := h_s5 exact h - simp [this] + simp only [this, Memory.State.read_mem_bytes_eq_mem_read_bytes, + BitVec.truncate_eq_setWidth] rw [h_s4_sp_28] simp [show Sys.next _ = run 1 _ by rfl] @@ -822,7 +836,7 @@ theorem partial_correctness : obtain ⟨h_s3_cut, h_s3_pc, h_s3_err, h_s3_program, h_s3_x0, h_s3_sp, h_s3_sp_aliged⟩ := h rw [Correctness.snd_cassert_of_not_cut h_s3_cut]; -- try rw [Correctness.snd_cassert_of_cut h_cut]; simp [show Sys.next _ = run 1 _ by rfl] - replace h_s3_sp_28 : read_mem_bytes 4 (s3.sp + 28#64) s3 = BitVec.zeroExtend 32 (spec s0.x0 s0.x1) := by simp_all + replace h_s3_sp_28 : s3.mem.read_bytes 4 (s3.sp + 28#64) = BitVec.zeroExtend 32 (spec s0.x0 s0.x1) := by simp_all replace h_s3_sp : s3.sp = s0.sp - 32#64 := by simp_all clear_named [h_s2, h_s1] @@ -832,7 +846,7 @@ theorem partial_correctness : rw [Correctness.snd_cassert_of_not_cut (si := s4) (by simp_all [Spec'.cut])]; simp [show Sys.next _ = run 1 _ by rfl] have h_s4_sp : s4.sp = s0.sp - 32#64 := by simp_all - have h_s4_sp_28 : read_mem_bytes 4 (s4.sp + 28#64) s4 = BitVec.zeroExtend 32 (spec s0.x0 s0.x1) := by simp_all + have h_s4_sp_28 : s4.mem.read_bytes 4 (s4.sp + 28#64) = BitVec.zeroExtend 32 (spec s0.x0 s0.x1) := by simp_all clear_named [h_s3] -- 6/15 diff --git a/Proofs/Experiments/Memcpy/MemCpyVCG.lean b/Proofs/Experiments/Memcpy/MemCpyVCG.lean index e12f2b2b..b4b001f0 100644 --- a/Proofs/Experiments/Memcpy/MemCpyVCG.lean +++ b/Proofs/Experiments/Memcpy/MemCpyVCG.lean @@ -270,8 +270,10 @@ theorem program.step_8e4_8e8_of_wellformed_of_stepped (scur snext : ArmState) have := program.stepi_eq_0x8e4 h_program h_pc h_err obtain 
⟨h_step⟩ := hstep subst h_step - constructor <;> simp only [*, cut, state_simp_rules, minimal_theory, bitvec_rules] - · constructor <;> simp [*, state_simp_rules, minimal_theory] + constructor + <;> simp only [*, cut, state_simp_rules, minimal_theory, bitvec_rules, + memory_rules] + · constructor <;> simp [*, state_simp_rules, minimal_theory, memory_rules] -- 3/7 (0x8e8#64, 0x3c810444#32), /- str q4, [x2], #16 -/ structure Step_8e8_8ec (scur : ArmState) (snext : ArmState) extends WellFormedAtPc snext 0x8ec : Prop where @@ -464,7 +466,7 @@ theorem Memcpy.extracted_2 (s0 si : ArmState) (Memory.write_bytes 16 (s0.x2 + 0x10#64 * (s0.x0 - si.x0)) (Memory.read_bytes 16 (s0.x1 + 0x10#64 * (s0.x0 - si.x0)) si.mem) si.mem) = Memory.read_bytes n addr s0.mem := by - have h_le : (s0.x0 - (si.x0 - 0x1#64)).toNat ≤ s0.x0.toNat := by bv_omega + have h_le : (s0.x0 - (si.x0 - 0x1#64)).toNat ≤ s0.x0.toNat := by bv_omega_bench have h_upper_bound := hsep.hb.omega_def have h_upper_bound₂ := h_pre_1.hb.omega_def have h_upper_bound₃ := hsep.ha.omega_def @@ -476,7 +478,7 @@ theorem Memcpy.extracted_2 (s0 si : ArmState) apply mem_separate'.symm apply mem_separate'.of_subset'_of_subset' hsep · apply mem_subset'.of_omega - skip_proof refine ⟨?_, ?_, ?_, ?_⟩ <;> skip_proof bv_omega + skip_proof refine ⟨?_, ?_, ?_, ?_⟩ <;> skip_proof bv_omega_bench · apply mem_subset'_refl hsep.hb -- set_option skip_proof.skip true in @@ -519,9 +521,9 @@ theorem Memcpy.extracted_0 (s0 si : ArmState) skip_proof simp_mem have h_subset_1 : mem_subset' (s0.x1 + 0x10#64 * (s0.x0 - si.x0)) 16 s0.x1 (s0.x0.toNat * 16) := by skip_proof simp_mem - have icases : i = s0.x0 - si.x0 ∨ i < s0.x0 - si.x0 := by skip_proof bv_omega + have icases : i = s0.x0 - si.x0 ∨ i < s0.x0 - si.x0 := by skip_proof bv_omega_bench have s2_sum_inbounds := h_pre_1.hb.omega_def - have i_sub_x0_mul_16 : 16 * i.toNat < 16 * s0.x0.toNat := by skip_proof bv_omega + have i_sub_x0_mul_16 : 16 * i.toNat < 16 * s0.x0.toNat := by skip_proof 
bv_omega_bench rcases icases with hi | hi · subst hi @@ -541,13 +543,12 @@ theorem Memcpy.extracted_0 (s0 si : ArmState) -- proof states. skip_proof { have s2_sum_inbounds := h_pre_1.hb.omega_def - have i_sub_x0_mul_16 : 16 * i.toNat < 16 * s0.x0.toNat := by skip_proof bv_omega - rw [BitVec.toNat_add_eq_toNat_add_toNat (by bv_omega)] - rw [BitVec.toNat_add_eq_toNat_add_toNat (by bv_omega)] - rw [BitVec.toNat_mul_of_lt (by bv_omega)] - rw [BitVec.toNat_mul_of_lt (by bv_omega)] - rw [BitVec.toNat_sub_of_lt (by bv_omega)] - bv_omega + have i_sub_x0_mul_16 : 16 * i.toNat < 16 * s0.x0.toNat := by skip_proof bv_omega_bench + rw [BitVec.toNat_add_eq_toNat_add_toNat (by bv_omega_bench)] + rw [BitVec.toNat_add_eq_toNat_add_toNat (by bv_omega_bench)] + rw [BitVec.toNat_mul_of_lt (by bv_omega_bench)] + rw [BitVec.toNat_mul_of_lt (by bv_omega_bench)] + bv_omega_bench } · intros n addr hsep apply Memcpy.extracted_2 <;> assumption @@ -741,19 +742,19 @@ theorem partial_correctness : apply zero_iff_z_eq_one simp only [h_s5_z] - simp only [show s5.x0 ≤ s0.x0 by bv_omega, true_and] + simp only [show s5.x0 ≤ s0.x0 by bv_omega_bench, true_and] rw [h_s5_x0, h_s5_x1, h_si_x1] have h_s0_x1 : s0.x1 + 0x10#64 * (s0.x0 - si.x0) + 0x10#64 = s0.x1 + 0x10#64 * (s0.x0 - (si.x0 - 0x1#64)) := by - rw [show s0.x0 - (si.x0 - 0x1#64) = (s0.x0 - si.x0) + 0x1#64 by skip_proof bv_omega, + rw [show s0.x0 - (si.x0 - 0x1#64) = (s0.x0 - si.x0) + 0x1#64 by skip_proof bv_omega_bench, BitVec.BitVec.mul_add, BitVec.add_assoc, BitVec.mul_one] simp only [h_s0_x1, true_and] rw [h_s5_x2, h_si_x2] have h_s0_x2 : s0.x2 + 0x10#64 * (s0.x0 - si.x0) + 0x10#64 = s0.x2 + 0x10#64 * (s0.x0 - (si.x0 - 0x1#64)) := by - rw [show s0.x0 - (si.x0 - 0x1#64) = (s0.x0 - si.x0) + 0x1#64 by skip_proof bv_omega, + rw [show s0.x0 - (si.x0 - 0x1#64) = (s0.x0 - si.x0) + 0x1#64 by skip_proof bv_omega_bench, BitVec.BitVec.mul_add] - skip_proof bv_omega + skip_proof bv_omega_bench simp only [h_s0_x2, true_and] simp only [step_8f0_8f4.h_err, 
step_8f0_8f4.h_program, @@ -771,7 +772,7 @@ theorem partial_correctness : rw [step_8e4_8e8.h_q4] rw [h_si_x2] obtain ⟨h_assert_1, h_assert_2, h_assert_3, h_assert_4, h_assert_5, h_assert_6, h_assert_7⟩ := h_assert - simp only [memory_rules] + -- simp only [memory_rules] simp only [step_8f4_8e4.h_mem] simp only [step_8f4_8e4.h_x1] rw [h_si_x1] diff --git a/Proofs/Experiments/MemoryAliasing.lean b/Proofs/Experiments/MemoryAliasing.lean index a357172c..b7ac1b9c 100644 --- a/Proofs/Experiments/MemoryAliasing.lean +++ b/Proofs/Experiments/MemoryAliasing.lean @@ -12,6 +12,7 @@ import Arm.Memory.SeparateAutomation -- set_option trace.simp_mem true -- set_option trace.simp_mem.info true +-- set_option trace.Meta.Tactic.simp true namespace MemLegal /-- Show reflexivity of legality. -/ @@ -200,7 +201,7 @@ theorem mem_automation_test_4 simp only [memory_rules] simp_mem congr 1 - bv_omega -- TODO: address normalization. + bv_omega_bench -- TODO: address normalization. /-- info: 'mem_automation_test_4' depends on axioms: [propext, Classical.choice, Quot.sound] -/ #guard_msgs in #print axioms mem_automation_test_4 @@ -229,7 +230,7 @@ theorem overlapping_read_test_2 {out : BitVec (16 * 8)} simp_mem · congr -- ⊢ (src_addr + 6).toNat - src_addr.toNat = 6 - bv_omega + bv_omega_bench /-- info: 'ReadOverlappingRead.overlapping_read_test_2' depends on axioms: [propext, Classical.choice, Quot.sound] -/ @@ -248,16 +249,12 @@ theorem overlapping_read_test_3 simp_mem · congr -- ⊢ (src_addr + 6).toNat - src_addr.toNat = 6 - bv_omega + bv_omega_bench /-- info: 'ReadOverlappingRead.overlapping_read_test_3' depends on axioms: [propext, Classical.choice, Quot.sound] -/ #guard_msgs in #print axioms overlapping_read_test_3 -/- TODO(@bollu): This test case hangs at `bv_omega`. This is to be debugged. -/-- A read in the goal state overlaps with a read in the -right hand side of the hypotheis `h`. 
--/ theorem overlapping_read_test_4 (hlegal : mem_legal' src_addr 16) (h : read_mem_bytes 16 other_addr s = read_mem_bytes 16 src_addr s) : @@ -267,11 +264,11 @@ theorem overlapping_read_test_4 simp only [memory_rules] at h ⊢ simp_mem · congr - -- ⊢ (src_addr + 6).toNat - src_addr.toNat = 6 - bv_omega -- TODO: Lean gets stuck here? + bv_omega +/-- info: 'ReadOverlappingRead.overlapping_read_test_4' depends on axioms: [propext, Classical.choice, Quot.sound] -/ #guard_msgs in #print axioms overlapping_read_test_4 --/ + end ReadOverlappingRead namespace ReadOverlappingWrite @@ -290,7 +287,7 @@ theorem test_2 {val : BitVec _} Memory.read_bytes 6 (src_addr + 10) (Memory.write_bytes 16 src_addr val mem) = val.extractLsBytes 10 6 := by simp_mem - have : ((src_addr + 10).toNat - src_addr.toNat) = 10 := by bv_omega + have : ((src_addr + 10).toNat - src_addr.toNat) = 10 := by bv_omega_bench rw [this] /-- diff --git a/Proofs/Popcount32.lean b/Proofs/Popcount32.lean index 699e0147..d7857b79 100644 --- a/Proofs/Popcount32.lean +++ b/Proofs/Popcount32.lean @@ -70,7 +70,7 @@ def popcount32_program : Program := #genStepEqTheorems popcount32_program -set_option trace.simp_mem.info true in +-- set_option trace.simp_mem.info true in theorem popcount32_sym_meets_spec (s0 sf : ArmState) (h_s0_pc : read_pc s0 = 0x4005b4#64) (h_s0_program : s0.program = popcount32_program) diff --git a/Proofs/SHA512/SHA512Loop.lean b/Proofs/SHA512/SHA512Loop.lean index 2a9f715d..d4d8d031 100644 --- a/Proofs/SHA512/SHA512Loop.lean +++ b/Proofs/SHA512/SHA512Loop.lean @@ -4,6 +4,8 @@ Released under Apache 2.0 license as described in the file LICENSE. Author(s): Shilpi Goel -/ import Proofs.SHA512.SHA512Prelude +import Proofs.SHA512.SHA512_block_armv8_rules +import Arm.Memory.AddressNormalization open BitVec /-! ## Reasoning about the SHA512 loop @@ -14,110 +16,316 @@ is satisfied. 
namespace SHA512 -/-- -Vector instruction `REV64` that reverses the order of 16-byte elements in each -64-bit slice of the 128-bit input. - -Ref.: -https://developer.arm.com/documentation/ddi0602/2024-06/SIMD-FP-Instructions/REV64--Reverse-elements-in-64-bit-doublew --/ -def vrev64_16 (x : BitVec 128) : BitVec 128 := - rev_vector 128 64 16 x - (by decide) (by decide) (by decide) - (by decide) (by decide) - -/-- -Loop postcondition when exactly one block needs to be hashed. --/ -def loop_post (PC N SP CtxBase InputBase : BitVec 64) - (si : ArmState) : Prop := - -- TODO: Write a better spec. function. - -- let spec_digest := 0#512 - -- let impl_digest := - -- r (.SFP 3#5) si ++ r (.SFP 2#5) si ++ - -- r (.SFP 1#5) si ++ r (.SFP 0#5) si - -- All blocks must be hashed. - num_blocks si = 0 ∧ - si.program = program ∧ - r .PC si = PC ∧ - r .ERR si = .None ∧ - CheckSPAlignment si ∧ - ctx_addr si = CtxBase ∧ - stack_ptr si = SP - 16#64 ∧ - si[KtblAddr, (SHA2.k_512.length * 8)] = BitVec.flatten SHA2.k_512 ∧ - -- (TODO @alex @bollu Uncomment, please, for stress-testing) --- Memory.Region.pairwiseSeparate --- [(SP - 16#64, 16), --- (CtxBase, 64), --- (InputBase, (N.toNat * 128)), --- (KtblAddr, (SHA2.k_512.length * 8))] ∧ - r (.GPR 3#5) si = KtblAddr ∧ - input_addr si = InputBase + (N * 128#64) ∧ - -- Registers contain the last processed input block. 
- r (.SFP 16#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 0)), 16]) ∧ - r (.SFP 17#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 1)), 16]) ∧ - r (.SFP 18#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 2)), 16]) ∧ - r (.SFP 19#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 3)), 16]) ∧ - r (.SFP 20#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 4)), 16]) ∧ - r (.SFP 21#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 5)), 16]) ∧ - r (.SFP 22#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 6)), 16]) ∧ - r (.SFP 23#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 7)), 16]) - -- spec_digest = impl_digest -- TODO +def loop_post_1 (PC N SP CtxBase InputBase : BitVec 64) + (si sf : ArmState) : Prop := + -- We subtract from `num_blocks` early on in the loop body. + num_blocks sf = 0#64 ∧ + sf.program = program ∧ + r .PC sf = PC ∧ + r .ERR sf = .None ∧ + CheckSPAlignment sf ∧ + input_addr sf = InputBase ∧ + ctx_addr sf = CtxBase ∧ + stack_ptr sf = SP - 16#64 ∧ + sf[KtblAddr, (SHA2.k_512.length * 8)] = BitVec.flatten SHA2.k_512 ∧ + sf[InputBase, (N.toNat * 128)] = si[InputBase, (N.toNat * 128)] ∧ + Memory.Region.pairwiseSeparate + [(SP - 16#64, 16), + (CtxBase, 64), + (InputBase, (N.toNat * 128)), + (KtblAddr, (SHA2.k_512.length * 8))] ∧ + r (.GPR 3#5) sf = KtblAddr + 16#64 ∧ + r (.SFP 26#5) sf = r (.SFP 0#5) si ∧ + r (.SFP 27#5) sf = r (.SFP 1#5) si ∧ + r (.SFP 28#5) sf = r (.SFP 2#5) si ∧ + r (.SFP 29#5) sf = r (.SFP 3#5) si ∧ + r (StateField.SFP 0x10#5) sf = DPSFP.vrev128_64_8 si[InputBase, 16] ∧ + r (StateField.SFP 0x11#5) sf = DPSFP.vrev128_64_8 si[(InputBase + 0x10#64), 16] ∧ + r (StateField.SFP 0x12#5) sf = DPSFP.vrev128_64_8 si[(InputBase + 0x20#64), 16] ∧ + r (StateField.SFP 0x13#5) sf = DPSFP.vrev128_64_8 si[(InputBase + 0x30#64), 16] ∧ + r (StateField.SFP 0x14#5) sf = DPSFP.vrev128_64_8 si[(InputBase + 0x40#64), 16] ∧ + r (StateField.SFP 0x15#5) sf = DPSFP.vrev128_64_8 si[(InputBase + 
0x50#64), 16] ∧ + r (StateField.SFP 0x16#5) sf = DPSFP.vrev128_64_8 si[(InputBase + 0x60#64), 16] ∧ + r (StateField.SFP 0x17#5) sf = DPSFP.vrev128_64_8 si[(InputBase + 0x70#64), 16] ∧ + -- The following is true only when N = 1. + r (StateField.FLAG PFlag.Z) sf = 0x1#1 -/- TODO: Symbolically simulate (program.length - 16 - 3) = 485 instructions -here. We elide the 16 instructions before the loop and 3 instructions after it. -Note that this would involve automatically reasoning about the conditional -branch here: --- (0x126c90#64 , 0xb5ffc382#32) -- cbnz x2, 126500 --/ -set_option linter.unusedVariables false in -set_option debug.skipKernelTC true in --- set_option trace.Tactic.sym.heartbeats true in --- set_option profiler true in --- set_option profiler.threshold 1 in -set_option maxHeartbeats 0 in --- set_option maxRecDepth 8000 in -theorem sha512_block_armv8_loop_1block (si sf : ArmState) +theorem sha512_block_armv8_loop_1 (si sf : ArmState) (h_N : N = 1#64) (h_si_prelude : SHA512.prelude 0x126500#64 N SP CtxBase InputBase si) - -- TODO: Ideally, nsteps ought to be 485 to be able to simulate the loop to - -- completion. - (h_steps : nsteps = 400) + (h_steps : nsteps = 8) (h_run : sf = run nsteps si) : - -- (FIXME) PC should be 0x126c94#64 i.e., we are poised to execute the first - -- instruction following the loop. For now, we stop early on to remain in sync. - -- with the number of steps we simulate. 
- loop_post (0x126500#64 + nsteps*4) N SP CtxBase InputBase sf := by + loop_post_1 (0x126500#64 + nsteps*4) N SP CtxBase InputBase si sf := by -- Prelude subst h_N h_steps obtain ⟨h_si_program, h_si_pc, h_si_err, h_si_sp_aligned, - h_si_num_blocks, h_si_sp, h_si_ctx_base, - h_si_input_base, h_si_ctx, h_si_ktbl, h_si_separate⟩ := h_si_prelude + h_si_sp, h_si_num_blocks, h_si_ctx_base, + h_si_input_base, h_si_ktbl_base, + _h_si_ctx, h_si_ktbl, h_si_separate, + _h_si_q0, _h_si_q1, _h_si_q2, _h_si_q3, + h_si_16, h_si_17, h_si_18, h_si_19, + h_si_20, h_si_21, h_si_22, h_si_23⟩ := h_si_prelude simp only [num_blocks, ctx_addr, stack_ptr, input_addr] at * - simp only [loop_post] + simp only [loop_post_1] simp at h_si_separate -- Symbolic Simulation - /- - TODO @alex: The following aggregation fails with - "simp failed, maximum number of steps exceeded" - -/ - sym_n 100 - sym_n 100 - sym_n 100 - sym_n 100 - -- sym_aggregate + sym_n 8 + -- Epilogue + simp only [h_si_ktbl, h_si_separate, minimal_theory] + done + +------------------------------------------------------------------------------- + +def loop_post_2 (PC N SP CtxBase InputBase : BitVec 64) + (si sf : ArmState) : Prop := + -- We subtract from `num_blocks` early on in the loop body. 
+ num_blocks sf = 0#64 ∧ + sf.program = program ∧ + r .PC sf = PC ∧ + r .ERR sf = .None ∧ + CheckSPAlignment sf ∧ + input_addr sf = InputBase ∧ + ctx_addr sf = CtxBase ∧ + stack_ptr sf = SP - 16#64 ∧ + sf[KtblAddr, (SHA2.k_512.length * 8)] = BitVec.flatten SHA2.k_512 ∧ + sf[InputBase, (N.toNat * 128)] = si[InputBase, (N.toNat * 128)] ∧ + Memory.Region.pairwiseSeparate + [(SP - 16#64, 16), + (CtxBase, 64), + (InputBase, (N.toNat * 128)), + (KtblAddr, (SHA2.k_512.length * 8))] ∧ + r (.GPR 3#5) sf = KtblAddr + 16#64 + 16#64 ∧ + r (.SFP 3#5) sf = + DPSFP.sha512h2 (r (StateField.SFP 0x1#5) si) (r (StateField.SFP 0x0#5) si) + (DPSFP.sha512h (extractLsb' 64 128 (r (StateField.SFP 0x3#5) si ++ r (StateField.SFP 0x2#5) si)) + (extractLsb' 64 128 (r (StateField.SFP 0x2#5) si ++ r (StateField.SFP 0x1#5) si)) + (DPSFP.binary_vector_op_aux 0 2 64 BitVec.add (r (StateField.SFP 0x3#5) si) + (extractLsb' 64 128 + (DPSFP.binary_vector_op_aux 0 2 64 BitVec.add (r (StateField.SFP 0x18#5) si) + (DPSFP.vrev128_64_8 (read_mem_bytes 16 InputBase si)) 0x0#128 ++ + DPSFP.binary_vector_op_aux 0 2 64 BitVec.add (r (StateField.SFP 0x18#5) si) + (DPSFP.vrev128_64_8 (read_mem_bytes 16 InputBase si)) 0x0#128)) + 0x0#128)) ∧ + r (.SFP 4#5) sf = + DPSFP.binary_vector_op_aux 0 2 64 BitVec.add (r (StateField.SFP 0x1#5) si) + (DPSFP.sha512h (extractLsb' 64 128 (r (StateField.SFP 0x3#5) si ++ r (StateField.SFP 0x2#5) si)) + (extractLsb' 64 128 (r (StateField.SFP 0x2#5) si ++ r (StateField.SFP 0x1#5) si)) + (DPSFP.binary_vector_op_aux 0 2 64 BitVec.add (r (StateField.SFP 0x3#5) si) + (extractLsb' 64 128 + (DPSFP.binary_vector_op_aux 0 2 64 BitVec.add (r (StateField.SFP 0x18#5) si) + (DPSFP.vrev128_64_8 (read_mem_bytes 16 InputBase si)) 0x0#128 ++ + DPSFP.binary_vector_op_aux 0 2 64 BitVec.add (r (StateField.SFP 0x18#5) si) + (DPSFP.vrev128_64_8 (read_mem_bytes 16 InputBase si)) 0x0#128)) + 0x0#128)) + 0x0#128 ∧ + r (.SFP 5#5) sf = + extractLsb' 64 128 (r (StateField.SFP 0x3#5) si ++ r (StateField.SFP 
0x2#5) si) ∧ + r (.SFP 6#5) sf = + extractLsb' 64 128 (r (StateField.SFP 0x2#5) si ++ r (StateField.SFP 0x1#5) si) ∧ + r (.SFP 7#5) sf = + extractLsb' 64 128 + (DPSFP.vrev128_64_8 (read_mem_bytes 16 (InputBase + 0x50#64) si) ++ + DPSFP.vrev128_64_8 (read_mem_bytes 16 (InputBase + 0x40#64) si)) ∧ + r (.SFP 16#5) sf = + DPSFP.sha512su1 (DPSFP.vrev128_64_8 (read_mem_bytes 16 (InputBase + 0x70#64) si)) + (extractLsb' 64 128 + (DPSFP.vrev128_64_8 (read_mem_bytes 16 (InputBase + 0x50#64) si) ++ + DPSFP.vrev128_64_8 (read_mem_bytes 16 (InputBase + 0x40#64) si))) + (DPSFP.sha512su0 (DPSFP.vrev128_64_8 (read_mem_bytes 16 (InputBase + 0x10#64) si)) + (DPSFP.vrev128_64_8 (read_mem_bytes 16 InputBase si))) ∧ + r (.SFP 24#5) sf = + extractLsb' 64 128 + (DPSFP.binary_vector_op_aux 0 2 64 BitVec.add (r (StateField.SFP 0x18#5) si) + (DPSFP.vrev128_64_8 (read_mem_bytes 16 InputBase si)) 0x0#128 ++ + DPSFP.binary_vector_op_aux 0 2 64 BitVec.add (r (StateField.SFP 0x18#5) si) + (DPSFP.vrev128_64_8 (read_mem_bytes 16 InputBase si)) 0x0#128) ∧ + r (.SFP 25#5) sf = + read_mem_bytes 16 (KtblAddr + 0x10#64) si +theorem extractLsBytes_ge (h : a ≥ n) (x : BitVec n) : + x.extractLsBytes a n = 0#(n*8) := by + apply BitVec.eq_of_getLsbD_eq + intros i + simp only [getLsbD_extractLsBytes, Fin.is_lt, decide_True, Bool.true_and, getLsbD_zero] + apply BitVec.getLsbD_ge + omega +theorem extractLsBytes_of_read_bytes_le (n m : Nat) + (h_legal : mem_legal' addr n) + (h_m_le_n : m ≤ n) : + (Memory.read_bytes n addr mem).extractLsBytes 0 m = + Memory.read_bytes m addr mem := by + apply BitVec.eq_of_getLsbD_eq; intro i + simp only [extractLsBytes, bitvec_rules] + simp only [getLsbD_extractLsb', minimal_theory, Fin.isLt] + have h_n_upper : n ≤ 2^64 := by + simp only [mem_legal', Nat.reducePow] at h_legal; omega + have h_m_upper : m ≤ 2^64 := by + simp [mem_legal'] at h_legal; omega + have h_i_upper : i < m * 8 := Fin.isLt i + have h_i_upper2 : i < n * 8 := by omega + have h_lhs := 
@Memory.getLsbD_read_bytes n i addr mem h_n_upper + have h_rhs := @Memory.getLsbD_read_bytes m i addr mem h_m_upper + simp_all only + done + +/-- +info: #[RegType.SFP 0x03#5, RegType.SFP 0x04#5, RegType.SFP 0x05#5, RegType.SFP 0x06#5, RegType.SFP 0x07#5, + RegType.SFP 0x10#5, RegType.SFP 0x18#5, RegType.SFP 0x19#5] +-/ +#guard_msgs in +#eval ((Cfg.create' (0x126500#64 + 8*4) (0x126500#64 + 8*4 + 12*4) SHA512.program).toOption.get!).maybe_modified_regs + +theorem sha512_block_armv8_loop_2 (sprev si sf : ArmState) + (h_N : N = 1#64) + (h_si_prelude : loop_post_1 (0x126500#64 + 8*4) N SP CtxBase InputBase sprev si) + (h_steps : nsteps = 12) + (h_run : sf = run nsteps si) : + loop_post_2 (0x126500#64 + 8*4 + nsteps*4) N SP CtxBase InputBase + si sf := by + -- Prelude + subst h_N h_steps + simp at h_si_prelude ⊢ + obtain ⟨h_si_num_blocks, h_si_program, h_si_pc, h_si_err, + h_si_sp_aligned, h_si_input_base, h_si_ctx_base, + h_si_sp, h_si_ktbl, keep_h_si_input, + keep_h_si_separate, h_si_ktbl_base, + _h_si_q0, _h_si_q1, _h_si_q2, _h_si_q3, + h_si_16, h_si_17, h_si_18, h_si_19, + h_si_20, h_si_21, h_si_22, h_si_23, h_si_zf⟩ := h_si_prelude + simp only [input_addr, ctx_addr] at * + simp only [BitVec.reduceToNat, Nat.reduceMul] at keep_h_si_input + simp only [loop_post_2] + -- Symbolic Simulation + sym_n 12 -- Epilogue - -- cse (config := { processHyps := .allHyps }) - -- simp (config := {ground := true}) only - -- [fst_AddWithCarry_eq_sub_neg, - -- ConditionHolds, - -- state_simp_rules, - -- bitvec_rules, minimal_theory] - -- sym_aggregate - -- assumption - -- done - sorry + simp only [←Memory.mem_eq_iff_read_mem_bytes_eq] at * + simp only [memory_rules] at * + simp at keep_h_si_separate + simp only [h_si_ktbl, keep_h_si_separate, minimal_theory] + have h_si_input_1 : (Memory.read_bytes 16 InputBase sprev.mem) = + (Memory.read_bytes 16 InputBase si.mem) := by + clear_named [h_s, h_run, step, _h] + -- simp_mem (config := {useOmegaToClose := true}) + rw 
[@Memory.read_bytes_eq_extractLsBytes_sub_of_mem_subset' + 128 InputBase (Memory.read_bytes 128 InputBase sprev.mem)] + · simp only [Nat.reduceMul, Nat.sub_self, bitvec_rules] + -- (FIXME) Need a theorem about `extractLsBytes_of_read_bytes` to simplify + -- terms like + -- `(Memory.read_bytes 128 InputBase sprev.mem).extractLsBytes 0 16` + sorry + · rfl + · simp_mem + have h_si_input_2 : Memory.read_bytes 16 (InputBase + 0x10#64) sprev.mem = + Memory.read_bytes 16 (InputBase + 0x10#64) si.mem := by + sorry + have h_si_input_3 : Memory.read_bytes 16 (InputBase + 0x40#64) sprev.mem = + Memory.read_bytes 16 (InputBase + 0x40#64) si.mem := by + sorry + have h_si_input_4 : Memory.read_bytes 16 (InputBase + 0x50#64) sprev.mem = + Memory.read_bytes 16 (InputBase + 0x50#64) si.mem := by + sorry + have h_si_input_5 : Memory.read_bytes 16 (InputBase + 0x70#64) sprev.mem = + Memory.read_bytes 16 (InputBase + 0x70#64) si.mem := by + sorry + simp only [h_si_input_1, h_si_input_2, h_si_input_3, + h_si_input_4, h_si_input_5, + minimal_theory] + done +------------------------------------------------------------------------------- +--/-- +--Loop postcondition when exactly one block needs to be hashed. +---/ +---- def loop_post (PC N SP CtxBase InputBase : BitVec 64) +-- -- (si : ArmState) : Prop := +-- TODO: Write a better spec. function. +-- let spec_digest := 0#512 +-- let impl_digest := +-- r (.SFP 3#5) si ++ r (.SFP 2#5) si ++ +-- r (.SFP 1#5) si ++ r (.SFP 0#5) si +-- All blocks must be hashed. 
+-- -- num_blocks si = 0#64 ∧ +-- -- si.program = program ∧ +-- -- r .PC si = PC ∧ +-- -- r .ERR si = .None ∧ +-- -- CheckSPAlignment si ∧ +-- -- ctx_addr si = CtxBase ∧ +-- -- stack_ptr si = SP - 16#64 ∧ +-- -- si[KtblAddr, (SHA2.k_512.length * 8)] = BitVec.flatten SHA2.k_512 ∧ +-- (TODO @alex @bollu Uncomment, please, for stress-testing) +-- Memory.Region.pairwiseSeparate +-- [(SP - 16#64, 16), +-- (CtxBase, 64), +-- (InputBase, (N.toNat * 128)), +-- (KtblAddr, (SHA2.k_512.length * 8))] ∧ +-- -- r (.GPR 3#5) si = KtblAddr ∧ +-- -- input_addr si = InputBase + (N * 128#64) ∧ +-- Registers contain the last processed input block. +-- -- r (.SFP 16#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 0)), 16]) ∧ +-- -- r (.SFP 17#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 1)), 16]) ∧ +-- -- r (.SFP 18#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 2)), 16]) ∧ +-- -- r (.SFP 19#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 3)), 16]) ∧ +-- -- r (.SFP 20#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 4)), 16]) ∧ +-- -- r (.SFP 21#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 5)), 16]) ∧ +-- -- r (.SFP 22#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 6)), 16]) ∧ +-- -- r (.SFP 23#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 7)), 16]) +-- spec_digest = impl_digest -- TODO +---- +---- /- TODO: Symbolically simulate (program.length - 16 - 3) = 485 instructions +---- here. We elide the 16 instructions before the loop and 3 instructions after it. 
+---- Note that this would involve automatically reasoning about the conditional +---- branch here: +--(0x126c90#64 , 0xb5ffc382#32) -- cbnz x2, 126500 +---- -/ +---- set_option linter.unusedVariables false in +---- set_option debug.skipKernelTC true in +--set_option trace.Tactic.sym.heartbeats true in +--set_option profiler true in +--set_option profiler.threshold 1 in +---- set_option maxHeartbeats 0 in +--set_option maxRecDepth 8000 in +---- theorem sha512_block_armv8_loop_1block (si sf : ArmState) +-- -- (h_N : N = 1#64) +-- -- (h_si_prelude : SHA512.prelude 0x126500#64 N SP CtxBase InputBase si) +-- TODO: Ideally, nsteps ought to be 485 to be able to simulate the loop to +-- completion. +-- -- (h_steps : nsteps = 400) +-- -- (h_run : sf = run nsteps si) : +-- (FIXME) PC should be 0x126c94#64 i.e., we are poised to execute the first +-- instruction following the loop. For now, we stop early on to remain in sync. +-- with the number of steps we simulate. +-- -- loop_post (0x126500#64 + nsteps*4) N SP CtxBase InputBase sf := by +-- Prelude +-- -- subst h_N h_steps +-- -- obtain ⟨h_si_program, h_si_pc, h_si_err, h_si_sp_aligned, +-- -- h_si_num_blocks, h_si_sp, h_si_ctx_base, +-- -- h_si_input_base, h_si_ctx, h_si_ktbl, h_si_separate⟩ := h_si_prelude +-- -- simp only [num_blocks, ctx_addr, stack_ptr, input_addr] at * +-- -- simp only [loop_post] +-- -- simp at h_si_separate +-- Symbolic Simulation +-- -- /- +-- -- TODO @alex: The following aggregation fails with +-- -- "simp failed, maximum number of steps exceeded" +-- -- -/ +-- -- sym_n 100 +-- -- sym_n 100 +-- -- sym_n 100 +-- -- sym_n 100 +-- sym_aggregate +---- +---- +-- Epilogue +-- cse (config := { processHyps := .allHyps }) +-- simp (config := {ground := true}) only +-- [fst_AddWithCarry_eq_sub_neg, +-- ConditionHolds, +-- state_simp_rules, +-- bitvec_rules, minimal_theory] +-- sym_aggregate +-- assumption +-- done +-- -- sorry +-- end SHA512 diff --git a/Proofs/SHA512/SHA512Prelude.lean 
b/Proofs/SHA512/SHA512Prelude.lean index 9013de0c..25434718 100644 --- a/Proofs/SHA512/SHA512Prelude.lean +++ b/Proofs/SHA512/SHA512Prelude.lean @@ -68,7 +68,8 @@ def precondition Invariant that must hold after SHA512's first basic block is simulated, i.e., the basic block immediately preceding the loop. -/ -def prelude (PC N SP CtxBase InputBase : BitVec 64) (si : ArmState) : Prop := +def prelude (PC N SP CtxBase InputBase : BitVec 64) + (si : ArmState) : Prop := si.program = program ∧ r .PC si = PC ∧ r .ERR si = .None ∧ @@ -77,13 +78,26 @@ def prelude (PC N SP CtxBase InputBase : BitVec 64) (si : ArmState) : Prop := num_blocks si = N ∧ ctx_addr si = CtxBase ∧ input_addr si = InputBase + 128#64 ∧ + r (.GPR 3#5) si = KtblAddr ∧ si[CtxBase, 64] = SHA2.h0_512.toBitVec ∧ si[KtblAddr, (SHA2.k_512.length * 8)] = BitVec.flatten SHA2.k_512 ∧ Memory.Region.pairwiseSeparate [(SP - 16#64, 16), (CtxBase, 64), (InputBase, (N.toNat * 128)), - (KtblAddr, (SHA2.k_512.length * 8))] + (KtblAddr, (SHA2.k_512.length * 8))] ∧ + r (.SFP 0#5) si = si[CtxBase, 16] ∧ + r (.SFP 1#5) si = si[CtxBase + 16#64, 16] ∧ + r (.SFP 2#5) si = si[CtxBase + 32#64, 16] ∧ + r (.SFP 3#5) si = si[CtxBase + 48#64, 16] ∧ + r (.SFP 16#5) si = DPSFP.vrev128_64_8 si[InputBase, 16] ∧ + r (.SFP 17#5) si = DPSFP.vrev128_64_8 si[InputBase + 16#64, 16] ∧ + r (.SFP 18#5) si = DPSFP.vrev128_64_8 si[InputBase + 32#64, 16] ∧ + r (.SFP 19#5) si = DPSFP.vrev128_64_8 si[InputBase + 48#64, 16] ∧ + r (.SFP 20#5) si = DPSFP.vrev128_64_8 si[InputBase + 64#64, 16] ∧ + r (.SFP 21#5) si = DPSFP.vrev128_64_8 si[InputBase + 80#64, 16] ∧ + r (.SFP 22#5) si = DPSFP.vrev128_64_8 si[InputBase + 96#64, 16] ∧ + r (.SFP 23#5) si = DPSFP.vrev128_64_8 si[InputBase + 112#64, 16] -- private theorem add_eq_sub_16 (x : BitVec 64) : @@ -141,9 +155,8 @@ theorem sha512_block_armv8_prelude (s0 sf : ArmState) -- cse (config := { processHyps := .allHyps }) simp only [SHA512.prelude, bitvec_rules, minimal_theory] -- Opening up `prelude`: - -- (FIXME 
@alex) Why does `s16.program = program` remain even after aggregation? sym_aggregate - simp only [h_s16_program, ←add_eq_sub_16, minimal_theory] + simp only [←add_eq_sub_16, minimal_theory] -- The following discharges -- InputBase + 0x40#64 + 0x40#64 = -- InputBase + 0x80#64 @@ -153,7 +166,6 @@ theorem sha512_block_armv8_prelude (s0 sf : ArmState) -- Only memory-related obligations are left. -- (TODO @alex/@bollu) Remove ∀ in memory (non)effect hyps generated by -- `sym_n`. The user may still state memory properties using quantifiers. - simp only [←Memory.mem_eq_iff_read_mem_bytes_eq] at * -- Rewrite *_mem_bytes (in terms of ArmState) to *_bytes (in terms of Memory). simp only [memory_rules] at * -- (FIXME) Need to aggregate memory effects here automatically. @@ -197,9 +209,8 @@ theorem sha512_block_armv8_prelude (s0 sf : ArmState) ((SP + 0xfffffffffffffff0#64), 16) simp at this simp only [h_s0_sp, this] - · simp only [h_s0_sp, h_s0_num_blocks, h_s0_input_base, h_s0_ctx_base, - h_s0_mem_sep, - BitVec.add_assoc, bitvec_rules, minimal_theory] + · simp only [h_s0_mem_sep, minimal_theory] + done · intro n addr h simp only [←h_s0_sp] at h clear_named [h_, stepi] diff --git a/Proofs/SHA512/SHA512_block_armv8_rules.lean b/Proofs/SHA512/SHA512_block_armv8_rules.lean index 6d2375b1..de5fcb9c 100644 --- a/Proofs/SHA512/SHA512_block_armv8_rules.lean +++ b/Proofs/SHA512/SHA512_block_armv8_rules.lean @@ -8,6 +8,7 @@ import Specs.SHA512 import Std.Tactic.BVDecide set_option sat.timeout 60 +set_option bv.ac_nf false section sha512_block_armv8_rules @@ -27,8 +28,8 @@ theorem sha512_message_schedule_rule (a b c d : BitVec 128) : let d0 := extractLsb' 0 64 d message_schedule_word_aux a1 b1 c0 d1 ++ message_schedule_word_aux a0 b0 d1 d0 := by - simp [sha512su1, sha512su0, message_schedule_word_aux] - bv_check "lrat_files/Sha512_block_armv8_rules.lean-sha512_message_schedule_rule-31-2.lrat" + simp only [sha512su1, Nat.reduceAdd, sha512su0, message_schedule_word_aux] + bv_check 
"SHA512_block_armv8_rules.lean-sha512_message_schedule_rule-31-2.lrat" theorem sha512h2_rule (a b c : BitVec 128) : sha512h2 a b c = @@ -39,8 +40,8 @@ theorem sha512h2_rule (a b c : BitVec 128) : let c1 := extractLsb' 64 64 c ((compression_update_t2 b0 a0 b1) + c1) ++ ((compression_update_t2 ((compression_update_t2 b0 a0 b1) + c1) b0 b1) + c0) := by - simp [maj, compression_update_t2, sha512h2, sigma_big_0, ror] - bv_check "lrat_files/Sha512_block_armv8_rules.lean-sha512h2_rule-43-2.lrat" + simp only [sha512h2, Nat.reduceAdd, maj, sigma_big_0, ror, compression_update_t2] + bv_check "SHA512_block_armv8_rules.lean-sha512h2_rule-43-2.lrat" -- sha512h2 q3, q1, v0.2d: 0xce608423#32 -- theorem sha512h2_instruction_rewrite @@ -61,15 +62,15 @@ theorem sha512h2_rule (a b c : BitVec 128) : -- simp [sha512h2_rule] private theorem and_nop_lemma (x : BitVec 64) : - (zeroExtend 128 x) &&& 0xffffffffffffffff#128 = (zeroExtend 128 x) := by + (setWidth 128 x) &&& 0xffffffffffffffff#128 = (setWidth 128 x) := by bv_decide -private theorem extractLsb'_low_64_from_zeroExtend_128_or (x y : BitVec 64) : - extractLsb' 0 64 ((zeroExtend 128 x) ||| (zeroExtend 128 y <<< 64)) = x := by +private theorem extractLsb'_low_64_from_setWidth_128_or (x y : BitVec 64) : + extractLsb' 0 64 ((setWidth 128 x) ||| (setWidth 128 y <<< 64)) = x := by bv_decide -private theorem extractLsb'_high_64_from_zeroExtend_128_or (x y : BitVec 64) : - extractLsb' 64 64 ((zeroExtend 128 x) ||| (zeroExtend 128 y <<< 64)) = y := by +private theorem extractLsb'_high_64_from_setWidth_128_or (x y : BitVec 64) : + extractLsb' 64 64 ((setWidth 128 x) ||| (setWidth 128 y <<< 64)) = y := by bv_decide -- This lemma takes ~5min with bv_decide and the generated LRAT @@ -83,8 +84,8 @@ private theorem extractLsb'_high_64_from_zeroExtend_128_or (x y : BitVec 64) : theorem sha512h_rule_1 (a b c d e : BitVec 128) : let elements := 2 let esize := 64 - let inner_sum := (binary_vector_op_aux 0 elements esize BitVec.add c d (BitVec.zero 
128) H) - let outer_sum := (binary_vector_op_aux 0 elements esize BitVec.add inner_sum e (BitVec.zero 128) H) + let inner_sum := (binary_vector_op_aux 0 elements esize BitVec.add c d 0#128) + let outer_sum := (binary_vector_op_aux 0 elements esize BitVec.add inner_sum e 0#128) let a0 := extractLsb' 0 64 a let a1 := extractLsb' 64 64 a let b0 := extractLsb' 0 64 b @@ -102,7 +103,7 @@ theorem sha512h_rule_1 (a b c d e : BitVec 128) : repeat (unfold binary_vector_op_aux elem_set elem_get; simp) unfold BitVec.partInstall unfold sha512h compression_update_t1 sigma_big_1 ch allOnes ror - simp only [Nat.reduceAdd, Nat.reduceSub, Nat.sub_zero, Nat.reducePow, reduceZeroExtend, + simp only [Nat.reduceAdd, Nat.reduceSub, Nat.sub_zero, Nat.reducePow, reduceSetWidth, reduceHShiftLeft, reduceNot, reduceAnd, BitVec.zero_or, shiftLeft_zero_eq] generalize extractLsb' 0 64 a = a_lo generalize extractLsb' 64 64 a = a_hi @@ -116,100 +117,196 @@ theorem sha512h_rule_1 (a b c d e : BitVec 128) : generalize extractLsb' 64 64 e = e_hi -- simp at a_lo a_hi b_lo b_hi c_lo c_hi d_lo d_hi e_lo e_hi clear a b c d e - simp only [and_nop_lemma, extractLsb'_low_64_from_zeroExtend_128_or, extractLsb'_high_64_from_zeroExtend_128_or] + simp only [truncate_eq_setWidth, reduceSetWidth, reduceNot, zero_and, zero_or, + reduceHShiftLeft, and_nop_lemma, extractLsb'_low_64_from_setWidth_128_or, + extractLsb'_high_64_from_setWidth_128_or] + -- simp only [and_nop_lemma, extractLsb'_low_64_from_setWidth_128_or, extractLsb'_high_64_from_setWidth_128_or] generalize (b_hi.rotateRight 14 ^^^ b_hi.rotateRight 18 ^^^ b_hi.rotateRight 41) = aux0 generalize (b_hi &&& a_lo ^^^ ~~~b_hi &&& a_hi) = aux1 ac_rfl --- (FIXME) Generalize to arbitrary-length bitvecs. 
-theorem rev_elems_of_rev_elems_64_8 (x : BitVec 64) : - rev_elems 64 8 (rev_elems 64 8 x h₀ h₁) h₀ h₁ = x := by - repeat (unfold rev_elems; (simp (config := {ground := true, decide := true}))) - simp_arith at h₀ - simp_arith at h₁ - bv_check "lrat_files/Sha512_block_armv8_rules.lean-rev_elems_of_rev_elems_64_8-96-2.lrat" - --- (FIXME) Generalize to arbitrary-length bitvecs. -theorem concat_of_rsh_is_msb_128 (x y : BitVec 64) : - (x ++ y) >>> 64 = BitVec.zeroExtend 128 x := by - bv_check "lrat_files/Sha512_block_armv8_rules.lean-concat_of_rsh_is_msb_128-101-2.lrat" - --- (FIXME) Generalize to arbitrary-length bitvecs. -theorem truncate_of_concat_is_lsb_64 (x y : BitVec 64) : - BitVec.zeroExtend 64 (x ++ y) = y := by - bv_check "lrat_files/Sha512_block_armv8_rules.lean-truncate_of_concat_is_lsb_64-106-2.lrat" - --- (FIXME) Generalize to arbitrary-length bitvecs. -theorem zeroextend_bigger_smaller_64 (x : BitVec 64) : - BitVec.zeroExtend 64 (BitVec.zeroExtend 128 x) = - BitVec.zeroExtend 64 x := by - bv_omega - --- (FIXME) Generalize to arbitrary-length bitvecs. -theorem rsh_concat_identity_128 (x : BitVec 128) : - zeroExtend 64 (x >>> 64) ++ zeroExtend 64 x = x := by - bv_check "lrat_files/Sha512_block_armv8_rules.lean-rsh_concat_identity_128-117-2.lrat" - --- (FIXME) Generalize to arbitrary-length bitvecs. -theorem rev_vector_of_rev_vector_128_64_8 (x : BitVec 128) : - rev_vector 128 64 8 - (rev_vector 128 64 8 x h₀ h₁ h₂ h₃ h₄) h₀ h₁ h₂ h₃ h₄ = x := by - repeat (unfold rev_vector; simp) - rw [concat_of_rsh_is_msb_128, - truncate_of_concat_is_lsb_64, - rev_elems_of_rev_elems_64_8, - zeroextend_bigger_smaller_64, - @zeroExtend_eq 64, - rev_elems_of_rev_elems_64_8, - rsh_concat_identity_128] - done - -private theorem sha512h_rule_2_helper_1 (x y : BitVec 64) : - extractLsb' 0 64 - (extractLsb' 64 128 - ((zeroExtend 128 x ||| zeroExtend 128 y <<< 64) ++ - (zeroExtend 128 x ||| zeroExtend 128 y <<< 64))) - = - y := by - bv_decide +-- TODO: upstream? 
+@[simp] +theorem setWidth_append_right (x : BitVec n) (y : BitVec m) : + BitVec.setWidth m (x ++ y) = y := by + apply eq_of_toNat_eq + simp only [toNat_setWidth, toNat_append] + rw [← Nat.and_pow_two_sub_one_eq_mod, Nat.and_distrib_right] + suffices x.toNat <<< m &&& 2 ^ m - 1 = 0 + by simp [this] + apply Nat.eq_of_testBit_eq + intro i + simp only [Nat.and_pow_two_sub_one_eq_mod, Nat.testBit_mod_two_pow, Nat.testBit_shiftLeft, + ge_iff_le, Nat.zero_testBit, Bool.and_eq_false_imp, decide_eq_true_eq] + omega -private theorem sha512h_rule_2_helper_2 (x y : BitVec 64) : - extractLsb' 64 64 - (extractLsb' 64 128 - ((zeroExtend 128 x ||| zeroExtend 128 y <<< 64) ++ - (zeroExtend 128 x ||| zeroExtend 128 y <<< 64))) - = - x := by - bv_decide +theorem BitVec.extractLsb'_append (x : BitVec n) (y : BitVec m) : + (x ++ y).extractLsb' start len + = let len' := min len (m - start) + (x.extractLsb' (start - m) (len - len') + ++ y.extractLsb' start len' + ).cast (by omega) := by + apply eq_of_getLsbD_eq + intro i + simp [getLsbD_append] + by_cases h₁ : m - start ≥ len + · have len'_eq : min len (m - start) = len := Nat.min_eq_left h₁ + have : start + i.val < m := by omega + simp [len'_eq, this] + · have len'_eq : min len (m - start) = m - start := + Nat.min_eq_right (by omega) + simp only [len'_eq] + by_cases h₂ : start + i.val < m + · have h₃ : ↑i < m - start := by omega + simp [h₂, h₃] + · have h₃ : ¬(↑i < m - start) := by omega + have h₄ : ↑i - (m - start) < len - (m - start) := by omega + have h₅ : start - m + (↑i - (m - start)) = start + ↑i - m := by omega + simp [h₂, h₃, h₄, h₅] + +theorem BitVec.cast_eq_of_heq (x : BitVec n) (y : BitVec m) (h : n = m) : + HEq x y → x.cast h = y := by + cases h; simp + +@[simp] +theorem BitVec.cast_heq_iff (x : BitVec n) (y : BitVec m) (h : n = n') : + HEq (x.cast h) y ↔ HEq x y := by + cases h; simp + +theorem BitVec.extractLsb'_append_right_of_le (h : start + len ≤ m) + (x : BitVec n) (y : BitVec m) : + (x ++ y).extractLsb' start len = 
y.extractLsb' start len := by + have len'_eq : min len (m - start) = len := by omega + simp only [extractLsb'_append] + apply cast_eq_of_heq + rw [len'_eq, Nat.sub_self] + simp only [zero_width_append, heq_eq_eq, cast_heq_iff] + +@[simp] +theorem BitVec.extractLsb'_append_right (x : BitVec n) (y : BitVec m) : + (x ++ y).extractLsb' 0 m = y := by + rw [extractLsb'_append_right_of_le (by omega), extractLsb'_eq] + +@[simp] +theorem BitVec.extractLsb'_append_left_of_le (h : m ≤ start) + (x : BitVec n) (y : BitVec m) : + (x ++ y).extractLsb' start len = x.extractLsb' (start - m) len := by + have len'_eq : min len (m - start) = m - start := by omega + simp only [extractLsb'_append] + apply cast_eq_of_heq + rw [len'_eq, show m - start = 0 by omega] + simp only [append_zero_width, heq_eq_eq, cast_heq_iff, Nat.sub_zero] + +@[simp] +theorem BitVec.extractLsb'_append_left (x : BitVec n) (y : BitVec m) : + (x ++ y).extractLsb' m n = x := by + rw [extractLsb'_append_left_of_le (by omega), Nat.sub_self, extractLsb'_eq] +@[simp] +theorem BitVec.extractLsb'_extractLsb'_of_le {w : Nat} (start₁ len₁ start₂ len₂) + (h : start₂ + len₂ ≤ len₁) + (x : BitVec w) : + (x.extractLsb' start₁ len₁).extractLsb' start₂ len₂ + = x.extractLsb' (start₁ + start₂) len₂ := by + apply eq_of_getLsbD_eq + intro i + simp only [getLsbD_extractLsb', Fin.is_lt, decide_True, Bool.true_and, + Bool.and_iff_right_iff_imp, decide_eq_true_eq, + show start₁ + (start₂ + i.val) = start₁ + start₂ + i.val by ac_rfl] + omega + +theorem binary_vector_op_aux_of_lt {n} {e elems} (h : e < elems) (esize op) + (x y result : BitVec n) : + binary_vector_op_aux e elems esize op x y result + = let element1 := elem_get x e esize + let element2 := elem_get y e esize + let elem_result := op element1 element2 + let result := elem_set result e esize elem_result + binary_vector_op_aux (e + 1) elems esize op x y result := by + conv => { lhs; unfold binary_vector_op_aux } + have : ¬(elems ≤ e) := by omega + simp only [this, ↓reduceIte] 
+ +theorem binary_vector_op_aux_of_not_lt {n} {e elems} (h : ¬(e < elems)) + (esize op) (x y result : BitVec n) : + binary_vector_op_aux e elems esize op x y result = result := by + unfold binary_vector_op_aux + simp only [ite_eq_left_iff, Nat.not_le, h, false_implies] + +theorem BitVec.getLsbD_eq_false_of_le {w} (x : BitVec w) {i : Nat} (h : w ≤ i) : + x.getLsbD i = false := by + exact getLsbD_ge x i h + +theorem partInstall_partInstall (x : BitVec n) : + partInstall (start + len₁) len₂ val₂ (partInstall start len₁ val₁ x) + = (partInstall start _ (val₂ ++ val₁) x).cast (by omega) := by + apply BitVec.eq_of_getLsbD_eq + intro i + simp only [partInstall, truncate_eq_setWidth, getLsbD_or, getLsbD_and, + getLsbD_not, Fin.is_lt, decide_True, getLsbD_shiftLeft, Bool.true_and, + getLsbD_setWidth, getLsbD_allOnes, Bool.not_and, Bool.not_not, + getLsbD_cast, BitVec.getLsbD_append] + simp only [ + show ∀ m, i.val - m < n by omega, + decide_True, Bool.not_true, Bool.false_or, Bool.true_and] + by_cases h₁ : i < start + · simp [h₁, show i < start + len₁ by omega] + · simp only [h₁, decide_False, Bool.false_or, Bool.not_false, Bool.true_and, + show i < start + len₁ ↔ i - start < len₁ by omega] + by_cases h₂ : i - start < len₁ + · simp [h₂, show ↑i - start < len₂ + len₁ by omega] + · rw [BitVec.getLsbD_ge val₁ _ (by omega)] + simp [h₂, Nat.sub_add_eq, + show i - start < len₂ + len₁ ↔ i - start - len₁ < len₂ by omega] + +/-- `partInstall`ing at bit `0` a value `x` of the same width as the original +bitvector `y` returns exactly the value `x` -/ +@[simp] +theorem partInstall_eq (x y : BitVec n) : + partInstall 0 n x y = x := by + simp [partInstall] + +-- set_option maxHeartbeats 0 in -- This lemma takes 2min with bv_decide and the generated LRAT -- file is ~120MB. As with sha512h_rule_1 above, we prefer to just simplify and -- normalize here instead of doing bit-blasting. 
theorem sha512h_rule_2 (a b c d e : BitVec 128) : - let a0 := extractLsb' 0 64 a - let a1 := extractLsb' 64 64 a - let b0 := extractLsb' 0 64 b - let b1 := extractLsb' 64 64 b - let c0 := extractLsb' 0 64 c - let c1 := extractLsb' 64 64 c - let d0 := extractLsb' 0 64 d - let d1 := extractLsb' 64 64 d - let e0 := extractLsb' 0 64 e - let e1 := extractLsb' 64 64 e - let inner_sum := binary_vector_op_aux 0 2 64 BitVec.add d e (BitVec.zero 128) h1 - let concat := inner_sum ++ inner_sum - let operand := extractLsb' 64 128 concat - let hi64_spec := compression_update_t1 b1 a0 a1 c1 d0 e0 - let lo64_spec := compression_update_t1 (b0 + hi64_spec) b1 a0 c0 d1 e1 - sha512h a b (binary_vector_op_aux 0 2 64 BitVec.add c operand (BitVec.zero 128) h2) = - hi64_spec ++ lo64_spec := by - repeat (unfold binary_vector_op_aux; simp) - repeat (unfold BitVec.partInstall; simp) - unfold sha512h compression_update_t1 elem_set elem_get partInstall sigma_big_1 ch ror - simp only [Nat.reduceAdd, Nat.reduceSub, Nat.reduceMul, Nat.sub_zero, reduceAllOnes, - reduceZeroExtend, Nat.zero_mul, reduceHShiftLeft, reduceNot, reduceAnd, Nat.one_mul, - BitVec.cast_eq] - simp only [shiftLeft_zero_eq, BitVec.zero_or, and_nop_lemma] + let a0 := extractLsb' 0 64 a + let a1 := extractLsb' 64 64 a + let b0 := extractLsb' 0 64 b + let b1 := extractLsb' 64 64 b + let c0 := extractLsb' 0 64 c + let c1 := extractLsb' 64 64 c + let d0 := extractLsb' 0 64 d + let d1 := extractLsb' 64 64 d + let e0 := extractLsb' 0 64 e + let e1 := extractLsb' 64 64 e + let inner_sum := binary_vector_op_aux 0 2 64 BitVec.add d e 0#128 + let concat := inner_sum ++ inner_sum + let operand := extractLsb' 64 128 concat + let hi64_spec := compression_update_t1 b1 a0 a1 c1 d0 e0 + let lo64_spec := compression_update_t1 (b0 + hi64_spec) b1 a0 c0 d1 e1 + sha512h a b (binary_vector_op_aux 0 2 64 BitVec.add c operand 0#128) + = hi64_spec ++ lo64_spec := by + simp only + repeat ( + repeat rw [binary_vector_op_aux_of_lt (by omega)] + rw 
[binary_vector_op_aux_of_not_lt (by omega)] + ) + simp only [zero_eq, Nat.reduceAdd, add_eq, Nat.zero_add] + simp only [elem_set, Nat.one_mul, elem_get, Nat.zero_mul, Nat.reduceAdd, + Nat.le_refl, extractLsb'_extractLsb'_of_le, Nat.zero_add, Nat.reduceLeDiff, + Nat.add_zero] + rw [extractLsb'_append_left_of_le (by omega), Nat.sub_self, + partInstall_partInstall, partInstall_partInstall] + simp only [Nat.reduceAdd, BitVec.cast_eq, partInstall_eq] + + simp only [sha512h, compression_update_t1, elem_set, elem_get, partInstall, sigma_big_1, ch, ror] + simp only [Nat.reduceAdd, Nat.zero_add, zero_eq, reduceAllOnes, truncate_eq_setWidth, + reduceSetWidth, Nat.zero_mul, shiftLeft_zero_eq, reduceNot, zero_and, Nat.reduceLeDiff, + extractLsb'_extractLsb'_of_le, Nat.add_zero, add_eq, zero_or, Nat.one_mul, reduceHShiftLeft, + Nat.le_refl] + generalize extractLsb' 0 64 a = a_lo generalize extractLsb' 64 64 a = a_hi generalize extractLsb' 0 64 b = b_lo @@ -220,10 +317,15 @@ theorem sha512h_rule_2 (a b c d e : BitVec 128) : generalize extractLsb' 64 64 d = d_hi generalize extractLsb' 0 64 e = e_lo generalize extractLsb' 64 64 e = e_hi - clear a b c d e - simp only [extractLsb'_high_64_from_zeroExtend_128_or, extractLsb'_low_64_from_zeroExtend_128_or] - simp only [sha512h_rule_2_helper_1, sha512h_rule_2_helper_2] generalize (b_hi.rotateRight 14 ^^^ b_hi.rotateRight 18 ^^^ b_hi.rotateRight 41) = aux1 + clear a b c d e + + rw [BitVec.extractLsb'_append_left] + rw [BitVec.extractLsb'_append_right] + rw [BitVec.extractLsb'_append_right] + rw [BitVec.extractLsb'_append_right_of_le (by omega)] + rw [BitVec.extractLsb'_append_left] + ac_rfl end sha512_block_armv8_rules diff --git a/Proofs/SHA512/SHA512_block_armv8_rules.lean-concat_of_rsh_is_msb_128-140-2.lrat b/Proofs/SHA512/SHA512_block_armv8_rules.lean-concat_of_rsh_is_msb_128-140-2.lrat new file mode 100644 index 00000000..a3b6d5f3 Binary files /dev/null and 
b/Proofs/SHA512/SHA512_block_armv8_rules.lean-concat_of_rsh_is_msb_128-140-2.lrat differ diff --git a/Proofs/SHA512/SHA512_block_armv8_rules.lean-rev_elems_of_rev_elems_64_8-135-2.lrat b/Proofs/SHA512/SHA512_block_armv8_rules.lean-rev_elems_of_rev_elems_64_8-135-2.lrat new file mode 100644 index 00000000..e8925f2e Binary files /dev/null and b/Proofs/SHA512/SHA512_block_armv8_rules.lean-rev_elems_of_rev_elems_64_8-135-2.lrat differ diff --git a/Proofs/SHA512/SHA512_block_armv8_rules.lean-rsh_concat_identity_128-156-2.lrat b/Proofs/SHA512/SHA512_block_armv8_rules.lean-rsh_concat_identity_128-156-2.lrat new file mode 100644 index 00000000..a3b6d5f3 Binary files /dev/null and b/Proofs/SHA512/SHA512_block_armv8_rules.lean-rsh_concat_identity_128-156-2.lrat differ diff --git a/Proofs/SHA512/SHA512_block_armv8_rules.lean-sha512_message_schedule_rule-31-2.lrat b/Proofs/SHA512/SHA512_block_armv8_rules.lean-sha512_message_schedule_rule-31-2.lrat new file mode 100644 index 00000000..e1ec4390 Binary files /dev/null and b/Proofs/SHA512/SHA512_block_armv8_rules.lean-sha512_message_schedule_rule-31-2.lrat differ diff --git a/Proofs/SHA512/SHA512_block_armv8_rules.lean-sha512h2_rule-43-2.lrat b/Proofs/SHA512/SHA512_block_armv8_rules.lean-sha512h2_rule-43-2.lrat new file mode 100644 index 00000000..a5571282 Binary files /dev/null and b/Proofs/SHA512/SHA512_block_armv8_rules.lean-sha512h2_rule-43-2.lrat differ diff --git a/Proofs/SHA512/SHA512_block_armv8_rules.lean-truncate_of_concat_is_lsb_64-145-2.lrat b/Proofs/SHA512/SHA512_block_armv8_rules.lean-truncate_of_concat_is_lsb_64-145-2.lrat new file mode 100644 index 00000000..a3b6d5f3 Binary files /dev/null and b/Proofs/SHA512/SHA512_block_armv8_rules.lean-truncate_of_concat_is_lsb_64-145-2.lrat differ diff --git a/README.md b/README.md index 47939c18..4fdb2f68 100644 --- a/README.md +++ b/README.md @@ -28,7 +28,8 @@ The default `make` command corresponds to the following invocation: `clean`: remove build outputs. 
-`clean_all`: `clean` plus remove Lean dependencies. +`clean_all`: `clean` plus remove Lean dependencies and + all benchmarking and profiling data. `specs`: [run under `all`] builds only the specifications of native-code programs of interest. @@ -43,6 +44,8 @@ native-code programs of interest. `benchmarks`: run benchmarks for the symbolic simulator. +`profiler`: run a single round of each benchmark, with the profiler enabled + ### Makefile variables that can be passed in at the command line `VERBOSE`: Verbose mode; prints disassembly of the instructions being diff --git a/Specs/AESCommon.lean b/Specs/AESCommon.lean index 3c8a8ff0..73cc4265 100644 --- a/Specs/AESCommon.lean +++ b/Specs/AESCommon.lean @@ -50,8 +50,7 @@ def SubBytes_aux (i : Nat) (op : BitVec 128) (out : BitVec 128) let i := 16 - i let idx := (extractLsb' (i * 8) 8 op).toNat let val := extractLsb' (idx * 8) 8 $ BitVec.flatten SBOX - have h₁ : 8 = i * 8 + 7 - i * 8 + 1 := by omega - let out := BitVec.partInstall (i * 8 + 7) (i * 8) (BitVec.cast h₁ val) out + let out := BitVec.partInstall (i * 8) 8 val out SubBytes_aux i' op out def SubBytes (op : BitVec 128) : BitVec 128 := @@ -66,20 +65,18 @@ def MixColumns_aux (c : Nat) | 0 => (out0, out1, out2, out3) | c' + 1 => let lo := (4 - c) * 8 - let hi := lo + 7 let in0_byte := extractLsb' lo 8 in0 let in1_byte := extractLsb' lo 8 in1 let in2_byte := extractLsb' lo 8 in2 let in3_byte := extractLsb' lo 8 in3 - have h : 8 = hi - lo + 1 := by omega - let val0 := BitVec.cast h $ FFmul02 in0_byte ^^^ FFmul03 in1_byte ^^^ in2_byte ^^^ in3_byte - let out0 := BitVec.partInstall hi lo val0 out0 - let val1 := BitVec.cast h $ FFmul02 in1_byte ^^^ FFmul03 in2_byte ^^^ in3_byte ^^^ in0_byte - let out1 := BitVec.partInstall hi lo val1 out1 - let val2 := BitVec.cast h $ FFmul02 in2_byte ^^^ FFmul03 in3_byte ^^^ in0_byte ^^^ in1_byte - let out2 := BitVec.partInstall hi lo val2 out2 - let val3 := BitVec.cast h $ FFmul02 in3_byte ^^^ FFmul03 in0_byte ^^^ in1_byte ^^^ in2_byte 
- let out3 := BitVec.partInstall hi lo val3 out3 + let val0 := FFmul02 in0_byte ^^^ FFmul03 in1_byte ^^^ in2_byte ^^^ in3_byte + let out0 := BitVec.partInstall lo 8 val0 out0 + let val1 := FFmul02 in1_byte ^^^ FFmul03 in2_byte ^^^ in3_byte ^^^ in0_byte + let out1 := BitVec.partInstall lo 8 val1 out1 + let val2 := FFmul02 in2_byte ^^^ FFmul03 in3_byte ^^^ in0_byte ^^^ in1_byte + let out2 := BitVec.partInstall lo 8 val2 out2 + let val3 := FFmul02 in3_byte ^^^ FFmul03 in0_byte ^^^ in1_byte ^^^ in2_byte + let out3 := BitVec.partInstall lo 8 val3 out3 MixColumns_aux c' in0 in1 in2 in3 out0 out1 out2 out3 FFmul02 FFmul03 def MixColumns (op : BitVec 128) (FFmul02 : BitVec 8 -> BitVec 8) diff --git a/Specs/AESV8.lean b/Specs/AESV8.lean index 756acae8..c0c66e27 100644 --- a/Specs/AESV8.lean +++ b/Specs/AESV8.lean @@ -112,7 +112,6 @@ def AESHWCtr32EncryptBlocks_helper {Param : AESArm.KBR} (in_blocks : BitVec m) else let lo := m - (i + 1) * 128 let hi := lo + 127 - have h5 : hi - lo + 1 = 128 := by omega let curr_block : BitVec 128 := BitVec.extractLsb' lo 128 in_blocks have h4 : 128 = Param.block_size := by cases h3 @@ -126,7 +125,7 @@ def AESHWCtr32EncryptBlocks_helper {Param : AESArm.KBR} (in_blocks : BitVec m) (Param := Param) (BitVec.cast h4 ivec_rev) key.rd_key let res_block := rev_elems 128 8 res_block (by decide) (by decide) let res_block := res_block ^^^ curr_block - let new_acc := BitVec.partInstall hi lo (BitVec.cast h5.symm res_block) acc + let new_acc := BitVec.partInstall lo 128 res_block acc AESHWCtr32EncryptBlocks_helper (Param := Param) in_blocks (i + 1) len key (ivec + 1#128) new_acc h1 h2 h3 termination_by (len - i) diff --git a/Specs/GCM.lean b/Specs/GCM.lean index b7496b62..0a83fa5c 100644 --- a/Specs/GCM.lean +++ b/Specs/GCM.lean @@ -67,11 +67,9 @@ def GCTR_aux (CIPH : Cipher (n := 128) (m := m)) Y else let lo := (n - i - 1) * 128 - let hi := lo + 127 - have h : 128 = hi - lo + 1 := by omega let Xi := extractLsb' lo 128 X let Yi := Xi ^^^ CIPH ICB K - 
let Y := BitVec.partInstall hi lo (BitVec.cast h Yi) Y + let Y := BitVec.partInstall lo 128 Yi Y let ICB := inc_s 32 ICB (by omega) GCTR_aux CIPH (i + 1) n K ICB X Y termination_by (n - i) diff --git a/Specs/GCMV8.lean b/Specs/GCMV8.lean index 8b51ae15..f365cfff 100644 --- a/Specs/GCMV8.lean +++ b/Specs/GCMV8.lean @@ -42,28 +42,32 @@ def pmult (x: BitVec (m + 1)) (y : BitVec (n + 1)) : BitVec (m + n + 1) := | j + 1 => let acc := acc <<< 1 let tmp := if getMsbD y (n + 1 - i) - then (BitVec.zero n) ++ x - else BitVec.zero (n + (m + 1)) - have h : m + n + 1 = n + (m + 1) := by omega - let acc := (BitVec.cast h acc) ^^^ tmp - pmultTR x y j (BitVec.cast h.symm acc) - pmultTR x y (n + 1) (BitVec.zero (m + n + 1)) + then partInstall 0 (m + 1) x 0#(m + n + 1) + else 0#(m + n + 1) + let acc := acc ^^^ tmp + pmultTR x y j acc + pmultTR x y (n + 1) 0#(m + n + 1) example: pmult 0b1101#4 0b10#2 = 0b11010#5 := rfl -/-- Degree of x. -/ -private def degree (x : BitVec n) : Nat := - let rec degreeTR (x : BitVec n) (n : Nat) : Nat := +/-- Degree of x. Defined using non-ite statements. -/ +def degree (x : BitVec n) : Nat := + let rec degreeTR (x : BitVec n) (n : Nat) (i : Nat) (acc : Nat) : Nat := match n with - | 0 => 0 + | 0 => acc | m + 1 => - if getLsbD x n then n else degreeTR x m - degreeTR x (n - 1) + let is_one := extractLsb' 0 1 (x &&& 1) + degreeTR (x >>> 1) m (i + 1) (acc + is_one.toNat * (i - acc)) + degreeTR x n 0 0 + example: GCMV8.degree 0b0101#4 = 2 := rfl +example: GCMV8.degree 0b1101#4 = 3 := rfl -/-- Subtract x from y if y's x-degree-th bit is 1. -/ -private def reduce (x : BitVec n) (y : BitVec n) : BitVec n := - if getLsbD y (GCMV8.degree x) then y ^^^ x else y +/-- Subtract x from y if y's x-degree-th bit is 1. + Defined using non-ite statements. -/ +def reduce (x : BitVec n) (y : BitVec n) : BitVec n := + let is_one := (y >>> (GCMV8.degree x)) &&& 1 + y ^^^ (is_one * x) /-- Performs division of polynomials over GF(2). 
-/ def pdiv (x: BitVec n) (y : BitVec m): BitVec n := @@ -76,7 +80,7 @@ def pdiv (x: BitVec n) (y : BitVec m): BitVec n := let zi := extractLsb' 0 m ((GCMV8.reduce y z) ++ xi) let bit := extractLsb' (GCMV8.degree y) 1 zi let newacc : BitVec n := - partInstall (i - 1) (i - 1) (bit.cast (by omega)) acc + partInstall (i - 1) 1 bit acc pdivTR x y j zi newacc pdivTR x y n (BitVec.zero m) (BitVec.zero n) @@ -84,16 +88,16 @@ example : pdiv 0b1101#4 0b10#2 = 0b110#4 := rfl example : pdiv 0x1a#5 0b10#2 = 0b1101#5 := rfl example : pdiv 0b1#1 0b10#2 = 0b0#1 := rfl -/-- Performs modulus of polynomials over GF(2). -/ +/-- Performs modulus of polynomials over GF(2). + Defined using non-ite statements.-/ def pmod (x : BitVec n) (y : BitVec (m + 1)) (H : 0 < m) : BitVec m := let rec pmodTR (x : BitVec n) (y : BitVec (m + 1)) (p : BitVec (m + 1)) (i : Nat) (r : BitVec m) (H : 0 < m) : BitVec m := match i with | 0 => r | j + 1 => - let xi := getLsbD x (n - i) - let tmp := - if xi then extractLsb' 0 m p else BitVec.zero m + let is_one := extractLsb' 0 m ((x >>> (n - i)) &&& 1) + let tmp := is_one * extractLsb' 0 m p let r := r ^^^ tmp pmodTR x y (GCMV8.reduce y (p <<< 1)) j r H if y = 0 then 0 else pmodTR x y (GCMV8.reduce y 1) n (BitVec.zero m) H @@ -128,13 +132,13 @@ def refpoly : BitVec 129 := 0x1C2000000000000000000000000000001#129 See Remark 5 in paper "A New Interpretation for the GHASH Authenticator of AES-GCM" -/ -private def gcm_init_H (H : BitVec 128) : BitVec 128 := +def gcm_init_H (H : BitVec 128) : BitVec 128 := pmod (H ++ 0b0#1) refpoly (by omega) -private def gcm_polyval_mul (x : BitVec 128) (y : BitVec 128) : BitVec 256 := +def gcm_polyval_mul (x : BitVec 128) (y : BitVec 128) : BitVec 256 := 0b0#1 ++ pmult x y -private def gcm_polyval_red (x : BitVec 256) : BitVec 128 := +def gcm_polyval_red (x : BitVec 256) : BitVec 128 := reverse $ pmod (reverse x) irrepoly (by omega) /-- @@ -146,7 +150,7 @@ private def gcm_polyval_red (x : BitVec 256) : BitVec 128 := "A New 
Interpretation for the GHASH Authenticator of AES-GCM" 2. Lemma: reverse (pmult x y) = pmult (reverse x) (reverse y) -/ -private def gcm_polyval (x : BitVec 128) (y : BitVec 128) : BitVec 128 := +def gcm_polyval (x : BitVec 128) (y : BitVec 128) : BitVec 128 := GCMV8.gcm_polyval_red $ GCMV8.gcm_polyval_mul x y /-- GCMInitV8 specification: @@ -203,6 +207,15 @@ def GCMGmultV8 (H : BitVec 128) (Xi : List (BitVec 8)) (h : 8 * Xi.length = 128) let H := (lo H) ++ (hi H) split (GCMV8.gcm_polyval H (BitVec.cast h (BitVec.flatten Xi))) 8 (by omega) +/-- Alternative GCMGmultV8 specification that does not use lists: + H : BitVec 128 -- the first element in Htable, not the initial H input to GCMInitV8 + Xi : BitVec 128 -- current hash value + output : BitVec 128 -- next hash value +-/ +def GCMGmultV8_alt (H : BitVec 128) (Xi : BitVec 128) : BitVec 128 := + let H := (lo H) ++ (hi H) + gcm_polyval H Xi + set_option maxRecDepth 8000 in example : GCMGmultV8 0x1099f4b39468565ccdd297a9df145877#128 [ 0x10#8, 0x54#8, 0x43#8, 0xb0#8, 0x2c#8, 0x4b#8, 0x1f#8, 0x24#8, @@ -211,7 +224,7 @@ example : GCMGmultV8 0x1099f4b39468565ccdd297a9df145877#128 0x9e#8, 0x15#8, 0xa6#8, 0x00#8, 0x67#8, 0x29#8, 0x7e#8, 0x0f#8 ] := rfl -private def gcm_ghash_block (H : BitVec 128) (Xi : BitVec 128) +def gcm_ghash_block (H : BitVec 128) (Xi : BitVec 128) (inp : BitVec 128) : BitVec 128 := let H := (lo H) ++ (hi H) GCMV8.gcm_polyval H (Xi ^^^ inp) diff --git a/Tactics/Aggregate.lean b/Tactics/Aggregate.lean index 19afad66..f4197f0b 100644 --- a/Tactics/Aggregate.lean +++ b/Tactics/Aggregate.lean @@ -30,6 +30,9 @@ def aggregate (axHyps : Array LocalDecl) (location : Location) let config := simpConfig?.getD aggregate.defaultSimpConfig let (ctx, simprocs) ← LNSymSimpContext + -- https://github.com/leanprover/lean4/blob/94b1e512da9df1394350ab81a28deca934271f65/src/Lean/Meta/DiscrTree.lean#L371 + -- refines the discrimination tree to also index applied functions. 
+ (noIndexAtArgs := false) (config := config) (decls := axHyps) @@ -106,6 +109,15 @@ elab "sym_aggregate" simpConfig?:(config)? loc?:(location)? : tactic => withMain (expectedType := do let state ← mkFreshExprMVar mkArmState return mkApp (mkConst ``CheckSPAlignment) state) + -- `?state.program = ?program` + searchLCtxFor (whenFound := whenFound) + (expectedType := do + let mkProgramTy := mkConst ``Program + let state ← mkFreshExprMVar mkArmState + let program ← mkFreshExprMVar mkProgramTy + return mkApp3 (.const ``Eq [1]) mkProgramTy + (mkApp (mkConst ``ArmState.program) state) + program) let loc := (loc?.map expandLocation).getD (.targets #[] true) aggregate axHyps loc simpConfig? diff --git a/Tactics/Attr.lean b/Tactics/Attr.lean index e6b682e8..2c11c8fb 100644 --- a/Tactics/Attr.lean +++ b/Tactics/Attr.lean @@ -9,8 +9,11 @@ open Lean initialize -- CSE tactic's non-verbose summary logging. registerTraceClass `Tactic.cse.summary + -- enable tracing for `sym_n` tactic and related components registerTraceClass `Tactic.sym + -- enable verbose tracing + registerTraceClass `Tactic.sym.info -- enable tracing for heartbeat usage of `sym_n` registerTraceClass `Tactic.sym.heartbeats @@ -27,3 +30,31 @@ initialize generally not set this option, unless they are reporting a bug with \ `sym_n`" } + + -- enable extra checks for debugging `sym_n`, + -- see `AxEffects.validate` for more detail on what is being type-checked + + register_option Tactic.bv_omega_bench.filePath : String := { + defValue := "/tmp/omega-bench.txt" + descr := "File path that `omega-bench` writes its results to." + } + + register_option Tactic.bv_omega_bench.enabled : Bool := { + defValue := true, + descr := "Enable `bv_omega_bench`'s logging, which writes benchmarking data to `Tactic.bv_omega_bench.filePath`." 
+ } + + register_option Tactic.bv_omega_bench.minMs : Nat := { + defValue := 1000, + descr := "Log into `Tactic.bv_omega_bench.filePath` if the time spent in milliseconds is greater than or equal to `Tactic.bv_omega_bench.minMs`." + } + +def getBvOmegaBenchFilePath [Monad m] [MonadOptions m] : m String := do + return Tactic.bv_omega_bench.filePath.get (← getOptions) + + +def getBvOmegaBenchIsEnabled [Monad m] [MonadOptions m] : m Bool := do + return Tactic.bv_omega_bench.enabled.get (← getOptions) + +def getBvOmegaBenchMinMs [Monad m] [MonadOptions m] : m Nat := do + return Tactic.bv_omega_bench.minMs.get (← getOptions) diff --git a/Tactics/BvOmegaBench.lean b/Tactics/BvOmegaBench.lean new file mode 100644 index 00000000..1505335c --- /dev/null +++ b/Tactics/BvOmegaBench.lean @@ -0,0 +1,57 @@ +/- +Copyright (c) 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Author(s): Siddharth Bhat +-/ +/- +This module implements `bv_omega_bench`, which writes benchmarking results of `bv_omega` +into a user-defined file. This is used for extracting out calls to `bv_omega` that are slow, +and allows us to send bug reports to the Lean developers. +-/ +import Tactics.Attr +import Lean +open Lean Elab Meta Tactic + +namespace BvOmegaBench + +/-- +Run bv_omega, gather the results, and then store them at the value that is given by the option. +By default, it's stored at `pwd`, with filename `omega-bench`. The file is appended to, +with the goal state that is being run, and the time elapsed to solve the goal is written. 
+-/ +def run : TacticM Unit := do + let minMs ← getBvOmegaBenchMinMs + let goal ← getMainGoal + let goalStr ← ppGoal goal + let startTime ← IO.monoMsNow + try + withMainContext do + withoutRecover do + evalTactic (← `(tactic| bv_omega)) + if !(← getBvOmegaBenchIsEnabled) then + return () + let endTime ← IO.monoMsNow + let delta := endTime - startTime + let filePath ← getBvOmegaBenchFilePath + IO.FS.withFile filePath IO.FS.Mode.append fun h => do + if delta >= minMs then + h.putStrLn "\n---\n" + h.putStrLn s!"time" + h.putStrLn s!"{delta}" + h.putStrLn s!"endtime" + h.putStrLn s!"goal" + h.putStrLn goalStr.pretty + h.putStrLn s!"endgoal" + catch e => + throw e + return () + +end BvOmegaBench + +syntax (name := bvOmegaBenchTac) "bv_omega_bench" : tactic + +@[tactic bvOmegaBenchTac] +def bvOmegaBenchImpl : Tactic +| `(tactic| bv_omega_bench) => + BvOmegaBench.run +| _ => throwUnsupportedSyntax diff --git a/Tactics/CSE.lean b/Tactics/CSE.lean index 612f1559..59adcb09 100644 --- a/Tactics/CSE.lean +++ b/Tactics/CSE.lean @@ -10,6 +10,8 @@ import Lean import Std import Tactics.Attr import Lean.Meta.Tactic.Generalize +import Lean.Meta.Match +import Lean.Meta.LitValues open Lean Elab Tactic Expr Meta /-- An emoji for when we are processing or trying. -/ @@ -198,15 +200,6 @@ private def Array.replicate (n : Nat) (v : α) : Array α := List.replicate n v def ExprData.isProfitable? (data : ExprData) : CSEM Bool := return data.size > 1 && data.occs >= (← getConfig).minOccsToCSE -/-- Check if an expression is a nat literal, or a `OfNat.ofNat` of a nat literal. -This lets us avoid CSEing over nat literals, which are already in canonical form. 
--/ -def CSEM.isNatLit (e : Expr) : Bool := - if e.isRawNatLit then true - else - match_expr e with - | OfNat.ofNat _α x _inst => x.isRawNatLit -- @OfNat.ofNat Nat 1 (instOfNatNat 1) - | _ => false /-- The function is partial because of the call to `tryAddExpr` that @@ -219,8 +212,8 @@ partial def CSEM.tryAddExpr (e : Expr) : CSEM (Option ExprData) := do return .none let t ← inferType e - -- for now, we ignore function terms. - let relevant? := !t.isArrow && !t.isSort && !t.isForall && !isNatLit e + -- for now, we ignore function terms and all literals. + let relevant? := !t.isArrow && !t.isSort && !t.isForall && !(← isLitValue e) withTraceNode m!"({e}):({t}) [relevant? {if relevant? then checkEmoji else crossEmoji}] (unfold for subexpressions...)" do /- If we have an application, then only add its children diff --git a/Tactics/Common.lean b/Tactics/Common.lean index 50212ce9..5d0f78eb 100644 --- a/Tactics/Common.lean +++ b/Tactics/Common.lean @@ -277,6 +277,46 @@ def Lean.Expr.eqReadField? (e : Expr) : Option (Expr × Expr × Expr) := do | none some (field, state, value) +/-- Return the expression for `Memory` -/ +def mkMemory : Expr := mkConst ``Memory + +/-- Return a proof of type `x = x`, where `x : Memory` -/ +def mkEqReflMemory (x : Expr) : Expr := + mkApp2 (.const ``Eq.refl [1]) mkMemory x + +/-! ## Expr Helpers -/ + +/-- Throw an error if `e` is not of type `expectedType` -/ +def assertHasType (e expectedType : Expr) : MetaM Unit := do + let eType ← inferType e + if !(←isDefEq eType expectedType) then + throwError "{e} {← mkHasTypeButIsExpectedMsg eType expectedType}" + +/-- Throw an error if `e` is not def-eq to `expected` -/ +def assertIsDefEq (e expected : Expr) : MetaM Unit := do + if !(←isDefEq e expected) then + throwError "expected:\n {expected}\nbut found:\n {e}" + +/-- +Rewrites `e` via some `eq`, producing a proof `e = e'` for some `e'`. +Rewrites with a fresh metavariable as the ambient goal. +Fails if the rewrite produces any subgoals. 
+-/ +-- source: https://github.com/leanprover-community/mathlib4/blob/b35703fe5a80f1fa74b82a2adc22f3631316a5b3/Mathlib/Lean/Expr/Basic.lean#L476-L477 +def rewrite (e eq : Expr) : MetaM Expr := do + let ⟨_, eq', []⟩ ← (← mkFreshExprMVar none).mvarId!.rewrite e eq + | throwError "Expr.rewrite may not produce subgoals." + return eq' + +/-- +Rewrites the type of `e` via some `eq`, then moves `e` into the new type via `Eq.mp`. +Rewrites with a fresh metavariable as the ambient goal. +Fails if the rewrite produces any subgoals. +-/ +-- source: https://github.com/leanprover-community/mathlib4/blob/b35703fe5a80f1fa74b82a2adc22f3631316a5b3/Mathlib/Lean/Expr/Basic.lean#L476-L477 +def rewriteType (e eq : Expr) : MetaM Expr := do + mkEqMP (← rewrite (← inferType e) eq) e + /-! ## Tracing helpers -/ def traceHeartbeats (cls : Name) (header : Option String := none) : diff --git a/Tactics/Simp.lean b/Tactics/Simp.lean index c4b716fc..cd001369 100644 --- a/Tactics/Simp.lean +++ b/Tactics/Simp.lean @@ -52,12 +52,18 @@ def LNSymSimpContext (exprs : Array Expr := #[]) -- Simprocs to add to the default set. (simprocs : Array Name := #[]) + -- Whether the default simprocs should be used. + (useDefaultSimprocs : Bool := true) -- argument to `DiscrTree.mkPath` (noIndexAtArgs : Bool := true) : MetaM (Simp.Context × Array Simp.Simprocs) := do let mut ext_simpTheorems := #[] - let default_simprocs ← Simp.getSimprocs - let mut all_simprocs := (#[default_simprocs] : Simp.SimprocsArray) + /- Workaround for https://github.com/leanprover/lean4/issues/5607: Elaboration failure with let mut whose RHS is a do notation -/ + let all_simprocs ← do + if useDefaultSimprocs then + pure #[← Simp.getSimprocs] + else pure #[] + let mut all_simprocs := all_simprocs for a in simp_attrs do let some ext ← (getSimpExtension? a) | @@ -113,4 +119,21 @@ def LNSymSimp (goal : MVarId) | none => return none | some (_, goal') => return goal' +/-- +Invoke `simp [..] 
at *` at the given goal `g` with +simp context `ctx` and simprocs `simprocs`. +-/ +def LNSymSimpAtStar (g : MVarId) + (ctx : Simp.Context) + (simprocs : Array Simp.Simprocs) + : MetaM (Option MVarId) := do + g.withContext do + let fvars : Array FVarId := + (← getLCtx).foldl (init := #[]) fun fvars d => fvars.push d.fvarId + let (result, _stats) ← simpGoal g ctx simprocs (fvarIdsToSimp := fvars) + (simplifyTarget := true) (discharge? := none) + match result with + | none => return none + | some (_newHyps, g') => pure g' + ---------------------------------------------------------------------- diff --git a/Tactics/Sym.lean b/Tactics/Sym.lean index b0d5621f..e2af7599 100644 --- a/Tactics/Sym.lean +++ b/Tactics/Sym.lean @@ -11,15 +11,16 @@ import Tactics.Sym.Context import Lean open BitVec -open Lean Meta -open Lean.Elab.Tactic +open Lean +open Lean.Meta Lean.Elab.Tactic open AxEffects SymContext +open Sym (withTraceNode withInfoTraceNode) /-- A wrapper around `evalTactic` that traces the passed tactic script, executes those tactics, and then traces the new goal state -/ private def evalTacticAndTrace (tactic : TSyntax `tactic) : TacticM Unit := - withTraceNode `Tactic.sym (fun _ => pure m!"running: {tactic}") <| do + withTraceNode m!"running: {tactic}" <| do evalTactic tactic trace[Tactic.sym] "new goal state:\n{← getGoals}" @@ -50,7 +51,8 @@ to add a new local hypothesis in terms of `w` and `write_mem` `h_step : ?s' = w _ _ (w _ _ (... 
?s))` -/ def stepiTac (stepiEq : Expr) (hStep : Name) : SymReaderM Unit := fun ctx => - withMainContext' do + withMainContext' <| + withInfoTraceNode m!"stepiTac: {stepiEq}" (tag := "stepiTac") <| do let pc := (Nat.toDigits 16 ctx.pc.toNat).asString -- ^^ The PC in hex let stepLemma := Name.str ctx.program s!"stepi_eq_0x{pc}" @@ -71,94 +73,135 @@ def stepiTac (stepiEq : Expr) (hStep : Name) : SymReaderM Unit := fun ctx => end stepiTac -/-- Attempt to show that we have (at least) one more step available, -by ensuring that `h_run`'s type is def-eq to: +/-- +Return an expression `n` such that `hRun` (in the resulting state!) is of type: ` = run (_ + 1) ` +Thus showing we have at least one more step available. -If the number of steps is statically tracked in `runSteps?`, +NOTE: `hRun` might be modified in the process; this property is *not* +guaranteed for the original `hRun` expression, only for the `hRun` expression +*after* execution. + +- If the number of steps is statically tracked in `runSteps?`, (i.e., it is a literal that we managed to reflect) -we check that this number is non-zero, and leave the type of `h_run` unchanged. +we check that this number is non-zero, and leave the type of `hRun` unchanged. This means we trust that the reflected value is accurate w.r.t. to the current goal state. -Otherwise, if the number is steps is *not* statically known, we assert that -`c.h_run` is of type ` = run ?runSteps `, +- Otherwise, if the number of steps is *not* statically known, we assert that +`hRun` is of type ` = run ?runSteps `, for some metavariable `?runSteps`, then create the proof obligation `?runSteps = _ + 1`, and attempt to close it using `whileTac`. -Finally, we use this proof to change the type of `h_run` accordingly.
-/ -def unfoldRun (whileTac : Unit → TacticM Unit) : SymReaderM Unit := do +def unfoldRun (whileTac : Unit → TacticM Unit) : SymM Expr := do let c ← readThe SymContext - let msg := m!"unfoldRun (runSteps? := {c.runSteps?})" - withTraceNode `Tactic.sym (fun _ => pure msg) <| + withTraceNode m!"unfoldRun (runSteps? := {c.runSteps?})" (tag := "unfoldRun") <| match c.runSteps? with - | some (_ + 1) => do + | some (n + 1) => do trace[Tactic.sym] "runSteps is statically known to be non-zero, \ no further action required" - return + return toExpr n | some 0 => throwError "No more steps available to symbolically simulate!" -- NOTE: this error shouldn't occur, as we should have checked in -- `sym_n` that, if the number of runSteps is statically known, -- that we never simulate more than that many steps | none => withMainContext' do - let mut goal :: originalGoals ← getGoals - | throwNoGoalsToBeSolved - let hRunDecl ← c.hRunDecl + let hRun := c.hRun -- Assert that `h_run : = run ?runSteps ` let runSteps ← mkFreshExprMVar (mkConst ``Nat) - guard <|← isDefEq hRunDecl.type ( + guard <|← isDefEq hRun ( mkApp3 (.const ``Eq [1]) (mkConst ``ArmState) c.finalState (mkApp2 (mkConst ``_root_.run) runSteps (← getCurrentState))) - -- NOTE: ^^ Since we check for def-eq on SymContext construction, - -- this check should never fail + -- NOTE: Since we check for def-eq on SymContext construction, this + -- check should never fail. 
Still, we need it to assign `runSteps` -- Attempt to prove that `?runSteps` is non-zero - let runStepsPredId ← mkFreshMVarId - let runStepsPred ← mkFreshExprMVarWithId runStepsPredId (mkConst ``Nat) + let runStepsPred ← mkFreshExprMVar (mkConst ``Nat) let subGoalTyRhs := mkApp (mkConst ``Nat.succ) runStepsPred - let subGoalTy := -- `?runSteps = ?runStepsPred + 1` + let runStepsEq ← mkFreshMVarId + runStepsEq.setType <| -- `?runSteps = ?runStepsPred + 1` mkApp3 (.const ``Eq [1]) (mkConst ``Nat) runSteps subGoalTyRhs - let subGoal ← mkFreshMVarId - let _ ← mkFreshExprMVarWithId subGoal subGoalTy - let msg := m!"runSteps is not statically known, so attempt to prove:\ - {subGoal}" - withTraceNode `Tactic.sym (fun _ => pure msg) <| subGoal.withContext <| do - setGoals [subGoal] + withTraceNode m!"runSteps is not statically known, so attempt to prove:\ + {runStepsEq}" <| + runStepsEq.withContext <| do + setGoals [runStepsEq] whileTac () -- run `whileTac` to attempt to close `subGoal` -- Ensure `runStepsPred` is assigned, by giving it a default value -- This is important because of the use of `replaceLocalDecl` below - if !(← runStepsPredId.isAssigned) then + -- TODO(@alexkeizer): we got rid of replaceLocalDecl, so we probably + -- can get rid of this, too, leaving the mvar unassigned + if !(← runStepsPred.mvarId!.isAssigned) then let default := mkApp (mkConst ``Nat.pred) runSteps trace[Tactic.sym] "{runStepsPred} is unassigned, \ so we assign to the default value ({default})" - runStepsPredId.assign default - - -- Change the type of `h_run` - let state ← getCurrentState - let typeNew ← do - let rhs := mkApp2 (mkConst ``_root_.run) subGoalTyRhs state - mkEq c.finalState rhs - let eqProof ← do - let f := -- `fun s => = s` - let eq := mkApp3 (.const ``Eq [1]) (mkConst ``ArmState) - c.finalState (.bvar 0) - .lam `s (mkConst ``ArmState) eq .default - let g := mkConst ``_root_.run - let h ← instantiateMVars (.mvar subGoal) - mkCongrArg f (←mkCongrFun (←mkCongrArg g h) state) - 
let res ← goal.replaceLocalDecl hRunDecl.fvarId typeNew eqProof + runStepsPred.mvarId!.assign default + + -- Change the type of `hRun` + let goal :: originalGoals ← getGoals + | throwNoGoalsToBeSolved + let rwRes ← goal.rewrite hRun (.mvar runStepsEq) + modifyThe SymContext ({ · with + hRun := rwRes.eNew + }) -- Restore goal state - if !(←subGoal.isAssigned) then - trace[Tactic.sym] "Subgoal {subGoal} was not closed yet, \ - so add it as a goal for the user to solve" - originalGoals := originalGoals.concat subGoal - setGoals (res.mvarId :: originalGoals) + let newGoal ← do + if (←runStepsEq.isAssigned) then + pure [] + else + trace[Tactic.sym] "Subgoal {runStepsEq} was not closed yet, \ + so add it as a goal for the user to solve" + pure [runStepsEq] + setGoals (rwRes.mvarIds ++ originalGoals ++ newGoal) + instantiateMVars runStepsPred + +/-- TODO: better docstring +`initNextStep` returns expressions +- `nextState : ArmState`, and +- `stepiEq : stepi = nextState` +In that order, it also modifies `hRun` to be of type: + ` = hRun _ sn` +-/ +def initNextStep (whileTac : TSyntax `tactic) : SymM (Expr × Expr) := + withMainContext' do + let goal ← getMainGoal + + -- Add next state to local context + let currentState ← getCurrentState + let nextStateVal := -- `stepi ` + mkApp (mkConst ``stepi) currentState + let (nextStateId, goal) ← do + let name ← getNextStateName + goal.note name nextStateVal mkArmState + let nextState := Expr.fvar nextStateId + + let stepiEq : Expr := -- `stepiEq : stepi = nextState` + let ty := mkEqArmState nextStateVal nextState + mkApp2 (.const ``id [0]) ty <| mkEqReflArmState nextState + let (_, goal) ← do + let name := Name.mkSimple s!"stepi_{← getCurrentStateName}" + goal.note name stepiEq + replaceMainGoal [goal] + + -- Ensure we have one more step to simulate + let runStepsPred ← unfoldRun (fun _ => evalTacticAndTrace whileTac) + + -- Change `hRun` + let hRun ← getHRun + let hRun := -- `run_of_run_succ_of_stepi_eq ` + mkAppN (mkConst 
``run_of_run_succ_of_stepi_eq) #[ + currentState, nextState, ← getFinalState, runStepsPred, + hRun, stepiEq + ] + modifyThe SymContext ({ · with hRun }) + + return (nextState, stepiEq) /-- Break an equality `h_step : s{i+1} = w ... (... (w ... s{i})...)` into an `AxEffects` that characterizes the effects in terms of reads from `s{i+1}`, @@ -166,9 +209,10 @@ add the relevant hypotheses to the local context, and store an `AxEffects` object with the newly added variables in the monad state -/ def explodeStep (hStep : Expr) : SymM Unit := - withMainContext' do + withMainContext' <| + withTraceNode m!"explodeStep {hStep}" (tag := "explodeStep") <| do let c ← getThe SymContext - let mut eff ← AxEffects.fromEq hStep + let mut eff ← AxEffects.fromEq hStep c.effects.stackAlignmentProof? let stateExpr ← getCurrentState /- Assert that the initial state of the obtained `AxEffects` is equal to @@ -182,47 +226,26 @@ def explodeStep (hStep : Expr) : SymM Unit := eff ← eff.withProgramEq c.effects.programProof eff ← eff.withField (← c.effects.getField .ERR).proof - if let some h_sp := c.h_sp? then - let hSp ← SymContext.findFromUserName h_sp - -- let effWithSp? - eff ← match ← eff.withStackAlignment? hSp.toExpr with - | some newEff => pure newEff - | none => do - trace[Tactic.sym] "failed to show stack alignment" - -- FIXME: in future, we'd like to detect when the `sp_aligned` - -- hypothesis is actually necessary, and add the proof obligation - -- on-demand. For now, however, we over-approximate, and say that - -- if the original state was known to be aligned, and something - -- writes to the SP, then we eagerly add the obligation to proof - -- that the result is aligned as well. 
- -- If you don't want this obligation, simply remove the hypothesis - -- that the original state is aligned - let spEff ← eff.getField .SP - let subGoal ← mkFreshMVarId - -- subGoal.setTag <| - let hAligned ← do - let name := Name.mkSimple s!"h_{← getNextStateName}_sp_aligned" - mkFreshExprMVarWithId subGoal (userName := name) <| - mkAppN (mkConst ``Aligned) #[toExpr 64, spEff.value, toExpr 4] - - trace[Tactic.sym] "created subgoal to show alignment:\n{subGoal}" - let subGoal? ← do - let (ctx, simprocs) ← - LNSymSimpContext - (config := {failIfUnchanged := false, decide := true}) - (decls := #[hSp]) - LNSymSimp subGoal ctx simprocs - - if let some subGoal := subGoal? then - trace[Tactic.sym] "subgoal got simplified to:\n{subGoal}" - appendGoals [subGoal] - else - trace[Tactic.sym] "subgoal got closed by simplification" - - let stackAlignmentProof? := some <| - mkAppN (mkConst ``CheckSPAlignment_of_r_sp_aligned) - #[eff.currentState, spEff.value, spEff.proof, hAligned] - pure { eff with stackAlignmentProof? } + if let some hSp := c.effects.stackAlignmentProof? then + withInfoTraceNode m!"discharging side conditions" <| do + for subGoal in eff.sideConditions do + trace[Tactic.sym] "attempting to discharge side-condition:\n {subGoal}" + let subGoal? ← do + let (ctx, simprocs) ← + LNSymSimpContext + (config := {failIfUnchanged := false, decide := true}) + (exprs := #[hSp]) + LNSymSimp subGoal ctx simprocs + + if let some subGoal := subGoal? 
then + trace[Tactic.sym] "subgoal got simplified to:\n{subGoal}" + subGoal.setTag (.mkSimple s!"h_{← getNextStateName}_sp_aligned") + appendGoals [subGoal] + else + trace[Tactic.sym] "subgoal got closed by simplification" + else + appendGoals eff.sideConditions + eff := { eff with sideConditions := [] } -- Add new (non-)effect hyps to the context, and to the aggregation simpset withMainContext' <| do @@ -251,51 +274,42 @@ elab "explode_step" h_step:term " at " state:term : tactic => withMainContext do Symbolically simulate a single step, according the the symbolic simulation context `c`, returning the context for the next step in simulation. -/ def sym1 (whileTac : TSyntax `tactic) : SymM Unit := do + /- `withCurrHeartbeats` sets the initial heartbeats to the current heartbeats, + effectively resetting our heartbeat budget back to the maximum. -/ + withCurrHeartbeats <| do + let stateNumber ← getCurrentStateNumber - let msg := m!"(sym1): simulating step {stateNumber}" - withTraceNode `Tactic.sym (fun _ => pure msg) <| withMainContext' do - withTraceNode `Tactic.sym (fun _ => pure "verbose context") <| do + Sym.withTraceNode m!"(sym1): simulating step {stateNumber}" (tag:="sym1") <| + withMainContext' do + withInfoTraceNode "verbose context" (tag := "infoDump") <| do traceSymContext trace[Tactic.sym] "Goal state:\n {← getMainGoal}" - let stepi_eq := Lean.mkIdent (.mkSimple s!"stepi_{← getCurrentStateName}") - let h_step := Lean.mkIdent (.mkSimple s!"h_step_{stateNumber + 1}") - unfoldRun (fun _ => evalTacticAndTrace whileTac) - -- Add new state to local context - let hRunId := mkIdent <|← getHRunName - let nextStateId := mkIdent <|← getNextStateName - evalTacticAndTrace <|← `(tactic| - init_next_step $hRunId:ident $stepi_eq:ident $nextStateId:ident - ) + let (_sn, stepiEq) ← initNextStep whileTac -- Apply relevant pre-generated `stepi` lemma - withMainContext' <| do - let stepiEq ← SymContext.findFromUserName stepi_eq.getId - stepiTac stepiEq.toExpr h_step.getId + 
let h_step := Lean.mkIdent (.mkSimple s!"h_step_{stateNumber + 1}") + withMainContext' <| + stepiTac stepiEq h_step.getId -- WORKAROUND: eventually we'd like to eagerly simp away `if`s in the -- pre-generation of instruction semantics. For now, though, we keep a -- `simp` here withMainContext' <| do let hStep ← SymContext.findFromUserName h_step.getId - let lctx ← getLCtx - let decls := (← getThe SymContext).h_sp?.bind lctx.findFromUserName? - let decls := decls.toArray - -- If we know SP is aligned, `simp` with that fact - if !decls.isEmpty then - trace[Tactic.sym] "simplifying {hStep.toExpr} \ - with {decls.map (·.toExpr)}" - -- If `decls` is empty, we have no more knowledge than before, so - -- everything that could've been `simp`ed, already should have been - let some goal ← do - let (ctx, simprocs) ← LNSymSimpContext - (config := {decide := false}) (decls := decls) - let goal ← getMainGoal - LNSymSimp goal ctx simprocs hStep.fvarId - | throwError "internal error: simp closed goal unexpectedly" - replaceMainGoal [goal] + -- If we know SP is aligned, `simp` with that fact + if let some hSp := (← getThe AxEffects).stackAlignmentProof? then + let msg := m!"simplifying {hStep.toExpr} with {hSp}" + withTraceNode msg (tag := "simplifyHStep") <| do + let some goal ← do + let (ctx, simprocs) ← LNSymSimpContext + (config := {decide := false}) (exprs := #[hSp]) + let goal ← getMainGoal + LNSymSimp goal ctx simprocs hStep.fvarId + | throwError "internal error: simp closed goal unexpectedly" + replaceMainGoal [goal] else trace[Tactic.sym] "we have no relevant local hypotheses, \ skipping simplification step" @@ -318,44 +332,46 @@ def sym1 (whileTac : TSyntax `tactic) : SymM Unit := do - log a warning and return `m`, if `runSteps? = some m` and `m < n`, or - return `n` unchanged, otherwise -/ def ensureAtMostRunSteps (n : Nat) : SymM Nat := do - let ctx ← getThe SymContext - match ctx.runSteps? 
with - | none => pure n - | some runSteps => - if n ≤ runSteps then - pure n - else - withMainContext <| do - let hRun ← ctx.hRunDecl - logWarning m!"Symbolic simulation is limited to at most {runSteps} \ - steps, because {hRun.toExpr} is of type:\n {hRun.type}" - pure runSteps - return n + withInfoTraceNode "" (tag := "ensureAtMostRunSteps") <| do + let ctx ← getThe SymContext + match ctx.runSteps? with + | none => pure n + | some runSteps => + if n ≤ runSteps then + pure n + else + withMainContext' <| do + let c ← readThe SymContext + logWarning m!"Symbolic simulation is limited to at most {runSteps} \ + steps, because {c.hRun} is of type:\n {← inferType c.hRun}" + pure runSteps + return n /-- Check that the step-thoerem corresponding to the current PC value exists, and throw a user-friendly error, pointing to `#genStepEqTheorems`, if it does not. -/ -def assertStepTheoremsGenerated : SymM Unit := do - let c ← getThe SymContext - let pc := c.pc.toHexWithoutLeadingZeroes - if !c.programInfo.instructions.contains c.pc then - let pcEff ← AxEffects.getFieldM .PC - throwError "\ - Program {c.program} has no instruction at address {c.pc}. - - We inferred this address as the program-counter from {pcEff.proof}, \ - which has type: - {← inferType pcEff.proof}" - - let step_thm := Name.str c.program ("stepi_eq_0x" ++ pc) - try - let _ ← getConstInfo step_thm - catch err => - throwErrorAt err.getRef "{err.toMessageData}\n -Did you remember to generate step theorems with: - #genStepEqTheorems {c.program}" --- TODO: can we make this error ^^ into a `Try this:` suggestion that --- automatically adds the right command just before the theorem? 
+def assertStepTheoremsGenerated : SymM Unit := + withInfoTraceNode "" (tag := "assertStepTheoremsGenerated") <| do + let c ← getThe SymContext + let pc := c.pc.toHexWithoutLeadingZeroes + if !c.programInfo.instructions.contains c.pc then + let pcEff ← AxEffects.getFieldM .PC + throwError "\ + Program {c.program} has no instruction at address {c.pc}. + + We inferred this address as the program-counter from {pcEff.proof}, \ + which has type: + {← inferType pcEff.proof}" + + let step_thm := Name.str c.program ("stepi_eq_0x" ++ pc) + try + let _ ← getConstInfo step_thm + catch err => + throwErrorAt err.getRef "{err.toMessageData}\n + Did you remember to generate step theorems with: + #genStepEqTheorems {c.program}" + -- TODO: can we make this error ^^ into a `Try this:` suggestion that + -- automatically adds the right command just before the theorem? /- used in `sym_n` tactic to specify an initial state -/ syntax sym_at := "at" ident @@ -415,34 +431,37 @@ elab "sym_n" whileTac?:(sym_while)? n:num s:(sym_at)? : tactic => do sym1 whileTac traceHeartbeats "symbolic simulation total" - let c ← getThe SymContext - -- Check if we can substitute the final state - if c.runSteps? = some 0 then - let msg := do - let hRun ← userNameToMessageData c.h_run - pure m!"runSteps := 0, substituting along {hRun}" - withTraceNode `Tactic.sym (fun _ => msg) <| withMainContext' do - let sfEq ← mkEq (← getCurrentState) c.finalState - + withCurrHeartbeats <| + Sym.withTraceNode "Post processing" (tag := "postProccessing") <| do + let c ← getThe SymContext + -- Check if we can substitute the final state + if c.runSteps? 
= some 0 then + let msg := pure m!"runSteps := 0, substituting along {c.hRun}" + withMainContext' <| + withTraceNode `Tactic.sym (fun _ => msg) <| do + let sfEq ← mkEq (← getCurrentState) c.finalState + + let goal ← getMainGoal + trace[Tactic.sym] "original goal:\n{goal}" + let ⟨hEqId, goal⟩ ← do + goal.note `this (← mkEqSymm c.hRun) sfEq + goal.withContext <| do + trace[Tactic.sym] "added {← userNameToMessageData `this} of type \ + {sfEq} in:\n{goal}" + + let goal ← subst goal hEqId + trace[Tactic.sym] "performed subsitutition in:\n{goal}" + replaceMainGoal [goal] + else -- Replace `h_run` in the local context let goal ← getMainGoal - trace[Tactic.sym] "original goal:\n{goal}" - let ⟨hEqId, goal⟩ ← do - let hRun ← SymContext.findFromUserName c.h_run - goal.note `this (← mkEqSymm hRun.toExpr) sfEq - goal.withContext <| do - trace[Tactic.sym] "added {← userNameToMessageData `this} of type \ - {sfEq} in:\n{goal}" - - let goal ← subst goal hEqId - trace[Tactic.sym] "performed subsitutition in:\n{goal}" - - replaceMainGoal [goal] - - -- Rudimentary aggregation: we feed all the axiomatic effect hypotheses - -- added while symbolically evaluating to `simp` - let msg := m!"aggregating (non-)effects" - withTraceNode `Tactic.sym (fun _ => pure msg) <| withMainContext' do - let goal? ← LNSymSimp (← getMainGoal) c.aggregateSimpCtx c.aggregateSimprocs - replaceMainGoal goal?.toList - - traceHeartbeats "final usage" + let res ← goal.replace c.hRunId c.hRun + replaceMainGoal [res.mvarId] + + -- Rudimentary aggregation: we feed all the axiomatic effect hypotheses + -- added while symbolically evaluating to `simp` + withMainContext' <| + withTraceNode m!"aggregating (non-)effects" (tag := "aggregateEffects") <| do + let goal? 
← LNSymSimp (← getMainGoal) c.aggregateSimpCtx c.aggregateSimprocs + replaceMainGoal goal?.toList + + traceHeartbeats "aggregation" diff --git a/Tactics/Sym/AxEffects.lean b/Tactics/Sym/AxEffects.lean index 8c8a6aa5..1caee504 100644 --- a/Tactics/Sym/AxEffects.lean +++ b/Tactics/Sym/AxEffects.lean @@ -8,10 +8,13 @@ import Arm.State import Tactics.Common import Tactics.Attr import Tactics.Simp +import Tactics.Sym.Common +import Tactics.Sym.MemoryEffects import Std.Data.HashMap open Lean Meta +open Sym (withTraceNode withInfoTraceNode) /-- A reflected `ArmState` field, see `AxEffects.fields` for more context -/ structure AxEffects.FieldEffect where @@ -57,17 +60,8 @@ structure AxEffects where where `f₁, ⋯, fₙ` are the keys of `fields` -/ nonEffectProof : Expr - /-- An expression of a (potentially empty) sequence of `write_mem`s - to the initial state, which describes the effects on memory. - See `memoryEffectProof` for more detail -/ - memoryEffect : Expr - /-- An expression that contains the proof of: - ```lean - ∀ n addr, - read_mem_bytes n addr - = read_mem_bytes n addr - ``` -/ - memoryEffectProof : Expr + /-- The memory effects -/ + memoryEffects : MemoryEffects /-- A proof that `.program = .program` -/ programProof : Expr /-- An optional proof of `CheckSPAlignment `. @@ -78,20 +72,28 @@ structure AxEffects where However, if SP is written to, no effort is made to prove alignment of the new value; the field will be set to `none` -/ stackAlignmentProof? : Option Expr + + /-- `sideConditions` are proof obligations that come up during effect + characterization. + + Currently, the only side condition that arises is of the form + `CheckSPAlignment _`, after updating the SP, but this may change + when we add better handling for branches -/ + sideConditions : List MVarId deriving Repr namespace AxEffects -/-! 
## Monadic getters -/ -section Monad +section MonadicGetters variable {m} [Monad m] [MonadReaderOf AxEffects m] def getCurrentState : m Expr := do return (← read).currentState def getInitialState : m Expr := do return (← read).initialState def getNonEffectProof : m Expr := do return (← read).nonEffectProof -def getMemoryEffect : m Expr := do return (← read).memoryEffect -def getMemoryEffectProof : m Expr := do return (← read).memoryEffectProof +def getMemoryEffect : m Expr := do return (← read).memoryEffects.effects +def getMemoryEffectProof : m Expr := do return (← read).memoryEffects.proof def getProgramProof : m Expr := do return (← read).programProof def getStackAlignmentProof? : m (Option Expr) := do @@ -105,13 +107,13 @@ def getCurrentStateName : m Name := do @id (MetaM _) <| do let state ← instantiateMVars state let Expr.fvar id := state.consumeMData - | throwError "error: expected a free variable, found:\n {state} WHHOPS" + | throwError "error: expected a free variable, found:\n {state}" let lctx ← getLCtx let some decl := lctx.find? id | throwError "error: unknown fvar: {state}" return decl.userName -end Monad +end MonadicGetters /-! ## Initial Reflected State -/ @@ -126,21 +128,14 @@ def initial (state : Expr) : AxEffects where -- `fun f => rfl` mkLambda `f .default (mkConst ``StateField) <| mkEqReflArmState <| mkApp2 (mkConst ``r) (.bvar 0) state - memoryEffect := state - memoryEffectProof := - -- `fun n addr => rfl` - mkLambda `n .default (mkConst ``Nat) <| - let bv64 := mkApp (mkConst ``BitVec) (toExpr 64) - mkLambda `addr .default bv64 <| - mkApp2 (.const ``Eq.refl [1]) - (mkApp (mkConst ``BitVec) <| mkNatMul (.bvar 1) (toExpr 8)) - (mkApp3 (mkConst ``read_mem_bytes) (.bvar 1) (.bvar 0) state) + memoryEffects := .initial state programProof := -- `rfl` mkAppN (.const ``Eq.refl [1]) #[ mkConst ``Program, mkApp (mkConst ``ArmState.program) state] stackAlignmentProof? := none + sideConditions := [] /-! 
## ToMessageData -/ @@ -159,15 +154,13 @@ instance : ToMessageData AxEffects where currentState := {eff.currentState}, fields := {eff.fields}, nonEffectProof := {eff.nonEffectProof}, - memoryEffect := {eff.memoryEffect}, - memoryEffectProof := {eff.memoryEffectProof}, + memoryEffects := {eff.memoryEffects}, programProof := {eff.programProof} }" private def traceCurrentState (eff : AxEffects) - (header : MessageData := "current state") : - MetaM Unit := - withTraceNode `Tactic.sym (fun _ => pure header) do + (header : MessageData := "current state") : MetaM Unit := + withTraceNode header <| do trace[Tactic.sym] "{eff}" /-! ## Helpers -/ @@ -199,7 +192,7 @@ private def rewriteType (e eq : Expr) : MetaM Expr := do by constructing an application of `eff.nonEffectProof` -/ partial def mkAppNonEffect (eff : AxEffects) (field : Expr) : MetaM Expr := do let msg := m!"constructing application of non-effects proof" - withTraceNode `Tactic.sym (fun _ => pure msg) <| do + withTraceNode msg (tag := "mkAppNonEffect") <| do trace[Tactic.sym] "nonEffectProof: {eff.nonEffectProof}" let nonEffectProof := mkApp eff.nonEffectProof field @@ -218,8 +211,7 @@ partial def mkAppNonEffect (eff : AxEffects) (field : Expr) : MetaM Expr := do /-- Get the value for a field, if one is stored in `eff.fields`, or assemble an instantiation of the non-effects proof otherwise -/ def getField (eff : AxEffects) (fld : StateField) : MetaM FieldEffect := - let msg := m!"getField {fld}" - withTraceNode `Tactic.sym (fun _ => pure msg) <| do + withTraceNode m!"getField {fld}" (tag := "getField") <| do eff.traceCurrentState if let some val := eff.fields.get? 
fld then @@ -231,7 +223,7 @@ def getField (eff : AxEffects) (fld : StateField) : MetaM FieldEffect := let proof ← eff.mkAppNonEffect (toExpr fld) pure { value, proof } -section Monad +section MonadicGettersAndSetters variable {m} [Monad m] [MonadLiftT MetaM m] variable [MonadReaderOf AxEffects m] in @@ -259,7 +251,7 @@ This is a specialization of `setFieldEffect`. -/ def setErrorProof (proof : Expr) : m Unit := setFieldEffect .ERR { value := mkConst ``StateError.None, proof } -end Monad +end MonadicGettersAndSetters /-! ## Update a Reflected State -/ @@ -270,9 +262,8 @@ and all other fields are updated accordingly. Note that no effort is made to preserve `currentStateEq`; it is set to `none`! -/ private def update_write_mem (eff : AxEffects) (n addr val : Expr) : - MetaM AxEffects := do - trace[Tactic.sym] "adding write of {n} bytes of value {val} \ - to memory address {addr}" + MetaM AxEffects := + Sym.withTraceNode m!"processing: write_mem {n} {addr} {val} …" (tag := "updateWriteMem") <| do -- Update each field let fields ← eff.fields.toList.mapM fun ⟨fld, {value, proof}⟩ => do @@ -290,11 +281,10 @@ private def update_write_mem (eff : AxEffects) (n addr val : Expr) : mkLambdaFVars args proof -- ^^ `fun f ... => Eq.trans (@r_of_write_mem_bytes f ...) ` - -- Update the memory effects proof - let memoryEffectProof := - -- `read_mem_bytes_write_mem_bytes_of_read_mem_eq ...` - mkAppN (mkConst ``read_mem_bytes_write_mem_bytes_of_read_mem_eq) - #[eff.currentState, eff.memoryEffect, eff.memoryEffectProof, n, addr, val] + -- Update the memory effects + let memoryEffects ← + eff.memoryEffects.updateWriteMem eff.currentState n addr val + -- Update the program proof let programProof ← @@ -304,20 +294,23 @@ private def update_write_mem (eff : AxEffects) (n addr val : Expr) : #[eff.currentState, n, addr, val]) eff.programProof + -- Update the stack alignment proof + let stackAlignmentProof? 
:= eff.stackAlignmentProof?.map fun proof => + mkAppN (mkConst ``CheckSPAlignment_write_mem_bytes_of) + #[eff.currentState, n, addr, val, proof] + -- Assemble the result - let addWrite (e : Expr) := - -- `@write_mem_bytes ` - mkApp4 (mkConst ``write_mem_bytes) n addr val e + let currentState := -- `@write_mem_bytes ` + mkApp4 (mkConst ``write_mem_bytes) n addr val eff.currentState let eff := { eff with - currentState := addWrite eff.currentState + currentState fields := .ofList fields nonEffectProof - memoryEffect := addWrite eff.memoryEffect - memoryEffectProof + memoryEffects programProof + stackAlignmentProof? } - withTraceNode `Tactic.sym (fun _ => pure "new state") <| do - trace[Tactic.sym] "{eff}" + eff.traceCurrentState return eff /-- Execute `w ` against the state stored in `eff` @@ -328,8 +321,8 @@ Note that no effort is made to preserve `currentStateEq`; it is set to `none`! -/ private def update_w (eff : AxEffects) (fld val : Expr) : MetaM AxEffects := do + Sym.withTraceNode m!"processing: w {fld} {val} …" (tag := "updateWrite") <| do let rField ← reflectStateField fld - trace[Tactic.sym] "adding write of value {val} to register {rField}" -- Update all other fields let fields ← @@ -385,11 +378,8 @@ private def update_w (eff : AxEffects) (fld val : Expr) : withLocalDeclD name h_neq_type fun h_neq => k (args.push h_neq) h_neq - -- Update the memory effect proof - let memoryEffectProof := - -- `read_mem_bytes_w_of_read_mem_eq ...` - mkAppN (mkConst ``read_mem_bytes_w_of_read_mem_eq) - #[eff.currentState, eff.memoryEffect, eff.memoryEffectProof, fld, val] + -- Update the memory effects + let memoryEffects ← eff.memoryEffects.updateWrite eff.currentState fld val -- Update the program proof let programProof ← @@ -398,14 +388,33 @@ private def update_w (eff : AxEffects) (fld val : Expr) : (mkAppN (mkConst ``w_program) #[fld, val, eff.currentState]) eff.programProof + -- Update the stack alignment proof + let mut sideConditions := eff.sideConditions + let mut 
stackAlignmentProof? := eff.stackAlignmentProof? + if let some proof := stackAlignmentProof? then + if rField ≠ StateField.SP then + let hNeq ← mkDecideProof <| + mkApp3 (.const ``Ne [1]) + (mkConst ``StateField) (toExpr StateField.SP) fld + stackAlignmentProof? := mkAppN (mkConst ``CheckSPAlignment_w_of_ne_sp_of) + #[fld, eff.currentState, val, hNeq, proof] + else + let hAligned ← mkFreshExprMVar (some <| + mkApp3 (mkConst ``Aligned) (toExpr 64) val (toExpr 4) + ) + sideConditions := hAligned.mvarId! :: sideConditions + stackAlignmentProof? := mkAppN (mkConst ``CheckSPAlignment_w_sp_of) + #[val, eff.currentState, hAligned] + -- Assemble the result let eff := { eff with currentState := mkApp3 (mkConst ``w) fld val eff.currentState fields := Std.HashMap.ofList fields nonEffectProof - -- memory effects are unchanged - memoryEffectProof + memoryEffects programProof + stackAlignmentProof? + sideConditions } eff.traceCurrentState "new state" return eff @@ -420,23 +429,33 @@ private def assertIsDefEq (e expected : Expr) : MetaM Unit := do if !(←isDefEq e expected) then throwError "expected:\n {expected}\nbut found:\n {e}" +/-- Given an expression `e : ArmState`, +which is a sequence of `w`/`write_mem`s to `eff.currentState`, +return an `AxEffects` where `e` is the new `currentState`. + +See also `updateWithExpr`, which is a wrapper around `updateWithExprAux` which adds a top-level trace node. +-/ +private partial def updateWithExprAux (eff : AxEffects) (e : Expr) : MetaM AxEffects := do + match_expr e with + | write_mem_bytes n addr val e => + let eff ← eff.updateWithExprAux e + eff.update_write_mem n addr val + + | w field value e => + let eff ← eff.updateWithExprAux e + eff.update_w field value + + | _ => + assertIsDefEq e eff.currentState + return eff + /-- Given an expression `e : ArmState`, which is a sequence of `w`/`write_mem`s to `eff.currentState`, return an `AxEffects` where `e` is the new `currentState`. 
-/ partial def updateWithExpr (eff : AxEffects) (e : Expr) : MetaM AxEffects := do let msg := m!"Updating effects with writes from: {e}" - withTraceNode `Tactic.sym (fun _ => pure msg) <| do match_expr e with - | write_mem_bytes n addr val e => - let eff ← eff.updateWithExpr e - eff.update_write_mem n addr val - - | w field value e => - let eff ← eff.updateWithExpr e - eff.update_w field value - - | _ => - assertIsDefEq e eff.currentState - return eff + withTraceNode msg (tag := "updateWithExpr") <| + updateWithExprAux eff e /-- Given an expression `e : ArmState`, which is a sequence of `w`/`write_mem`s to the some state `s`, @@ -451,62 +470,67 @@ def fromExpr (e : Expr) : MetaM AxEffects := do let eff ← eff.updateWithExpr e return { eff with initialState := ← instantiateMVars eff.initialState} - /-- Given a proof `eq : s = `, set `s` to be the new `currentState`, and update all proofs accordingly -/ def adjustCurrentStateWithEq (eff : AxEffects) (s eq : Expr) : MetaM AxEffects := do - withTraceNode `Tactic.sym (fun _ => pure "adjusting `currenstState`") do - eff.traceCurrentState + Sym.withTraceNode m!"adjustCurrentStateWithEq" (tag := "adjustCurrentStateWithEq") do trace[Tactic.sym] "rewriting along {eq}" + eff.traceCurrentState + assertHasType eq <| mkEqArmState s eff.currentState let eq ← mkEqSymm eq let currentState := s let fields ← eff.fields.toList.mapM fun (field, fieldEff) => do - withTraceNode `Tactic.sym (fun _ => pure m!"rewriting field {field}") do + withTraceNode m!"rewriting field {field}" (tag := "rewriteField") do trace[Tactic.sym] "original proof: {fieldEff.proof}" let proof ← rewriteType fieldEff.proof eq trace[Tactic.sym] "new proof: {proof}" pure (field, {fieldEff with proof}) let fields := .ofList fields - let nonEffectProof ← rewriteType eff.nonEffectProof eq - let memoryEffectProof ← rewriteType eff.memoryEffectProof eq - -- ^^ TODO: what happens if `memoryEffect` is the same as `currentState`? 
- -- Presumably, we would *not* want to encapsulate `memoryEffect` here - let programProof ← rewriteType eff.programProof eq + Sym.withTraceNode m!"rewriting other proofs" (tag := "rewriteMisc") <| do + let nonEffectProof ← rewriteType eff.nonEffectProof eq + let memoryEffects ← eff.memoryEffects.adjustCurrentStateWithEq eq + let programProof ← rewriteType eff.programProof eq + let stackAlignmentProof? ← eff.stackAlignmentProof?.mapM + (rewriteType · eq) - return { eff with - currentState, fields, nonEffectProof, memoryEffectProof, programProof - } + return { eff with + currentState, fields, nonEffectProof, memoryEffects, programProof, + stackAlignmentProof? + } /-- Given a proof `eq : ?s = `, where `?s` and `?s0` are arbitrary `ArmState`s, return an `AxEffect` with the rhs of the equality as the current state, and the (non-)effects updated accordingly -/ def updateWithEq (eff : AxEffects) (eq : Expr) : MetaM AxEffects := - let msg := m!"Building effects with equality: {eq}" - withTraceNode `Tactic.sym (fun _ => pure msg) <| do + withTraceNode m!"Building effects with equality: {eq}" + (tag := "updateWithEq") <| do let s ← mkFreshExprMVar mkArmState let rhs ← mkFreshExprMVar mkArmState assertHasType eq <| mkEqArmState s rhs let eff ← eff.updateWithExpr (← instantiateMVars rhs) let eff ← eff.adjustCurrentStateWithEq s eq - withTraceNode `Tactic.sym (fun _ => pure "new state") do - trace[Tactic.sym] "{eff}" + eff.traceCurrentState "new state" return eff /-- Given a proof `eq : ?s = `, where `?s` and `?s0` are arbitrary `ArmState`s, return an `AxEffect` with `?s0` as the initial state, the rhs of the equality as the current state, -and the (non-)effects updated accordingly -/ -def fromEq (eq : Expr) : MetaM AxEffects := do +and the (non-)effects updated accordingly + +One can optionally pass in a proof that `?s0` has a well-aligned stack pointer. +-/ +def fromEq (eq : Expr) (stackAlignmentProof? 
: Option Expr := none) : + MetaM AxEffects := do let s0 ← mkFreshExprMVar mkArmState - let eff := initial s0 + let eff := { initial s0 with stackAlignmentProof? } let eff ← eff.updateWithEq eq return { eff with initialState := ← instantiateMVars eff.initialState} @@ -534,8 +558,7 @@ Note: throws an error when `initialState = currentState` *and* the field already has a value stored, as the rewrite might produce expressions of unexpected types. -/ def withField (eff : AxEffects) (eq : Expr) : MetaM AxEffects := do - let msg := m!"withField {eq}" - withTraceNode `Tactic.sym (fun _ => pure msg) <| do + withTraceNode m!"withField {eq}" (tag := "withField") <| do eff.traceCurrentState let fieldE ← mkFreshExprMVar (mkConst ``StateField) let value ← mkFreshExprMVar none @@ -570,34 +593,6 @@ def withField (eff : AxEffects) (eq : Expr) : MetaM AxEffects := do let fields := eff.fields.insert field { value, proof } return { eff with fields } -/-- Given a proof of `CheckSPAlignment `, -attempt to transport it to a proof of `CheckSPAlignment ` -and store that proof in `stackAlignmentProof?`. - -Returns `none` if the proof failed to be transported, -i.e., if SP was written to. -/ -def withStackAlignment? (eff : AxEffects) (spAlignment : Expr) : - MetaM (Option AxEffects) := do - let msg := m!"withInitialStackAlignment? {spAlignment}" - withTraceNode `Tactic.sym (fun _ => pure msg) <| do - eff.traceCurrentState - - let { value, proof } ← eff.getField StateField.SP - let expected := - mkApp2 (mkConst ``r) (toExpr <| StateField.SP) eff.initialState - trace[Tactic.sym] "checking whether value:\n {value}\n\ - is syntactically equal to expected value\n {expected}" - if value != expected then - trace[Tactic.sym] "failed to transport proof: - expected value to be {expected}, but found {value}" - return none - - let stackAlignmentProof? 
:= some <| - mkAppN (mkConst ``CheckSPAlignment_of_r_sp_eq) - #[eff.initialState, eff.currentState, proof, spAlignment] - trace[Tactic.sym] "constructed stackAlignmentProof: {stackAlignmentProof?}" - return some { eff with stackAlignmentProof? } - /-! ## Composition -/ /- TODO: write a function that combines two effects `left` and `right`, @@ -621,8 +616,8 @@ NOTE: does not necessarily validate *which* type an expression has, validation will still pass if types are different to those we claim in the docstrings -/ def validate (eff : AxEffects) : MetaM Unit := do - let msg := "validating that the axiomatic effects are well-formed" - withTraceNode `Tactic.sym (fun _ => pure msg) <| do + Sym.withTraceNode "validating that the axiomatic effects are well-formed" + (tag := "validate") <| do eff.traceCurrentState assertHasType eff.initialState mkArmState @@ -632,13 +627,13 @@ def validate (eff : AxEffects) : MetaM Unit := do check fieldEff.value check fieldEff.proof + eff.memoryEffects.validate check eff.nonEffectProof - check eff.memoryEffect - check eff.memoryEffectProof check eff.programProof if let some h := eff.stackAlignmentProof? then check h + /-! 
## Tactic Environment -/ section Tactic open Elab.Tactic @@ -657,8 +652,8 @@ that was just added to the local context -/ def addHypothesesToLContext (eff : AxEffects) (hypPrefix : String := "h_") (mvar : Option MVarId := none) : TacticM AxEffects := - let msg := m!"adding hypotheses to local context" - withTraceNode `Tactic.sym (fun _ => pure msg) do + Sym.withTraceNode m!"adding hypotheses to local context" + (tag := "addHypothesesToLContext") do eff.traceCurrentState let mut goal ← mvar.getDM getMainGoal @@ -683,12 +678,14 @@ def addHypothesesToLContext (eff : AxEffects) (hypPrefix : String := "h_") let nonEffectProof := Expr.fvar nonEffectProof goal := goal' - trace[Tactic.sym] "adding memory effects with {eff.memoryEffectProof}" + trace[Tactic.sym] "adding memory effects with {eff.memoryEffects.proof}" let ⟨memoryEffectProof, goal'⟩ ← goal.withContext do let name := .mkSimple s!"{hypPrefix}memory_effects" - let proof := eff.memoryEffectProof + let proof := eff.memoryEffects.proof replaceOrNote goal name proof - let memoryEffectProof := Expr.fvar memoryEffectProof + let memoryEffects := { eff.memoryEffects with + proof := Expr.fvar memoryEffectProof + } goal := goal' trace[Tactic.sym] "adding program hypothesis with {eff.programProof}" @@ -714,15 +711,15 @@ def addHypothesesToLContext (eff : AxEffects) (hypPrefix : String := "h_") replaceMainGoal [goal] return {eff with - fields, nonEffectProof, memoryEffectProof, programProof, + fields, nonEffectProof, memoryEffects, programProof, stackAlignmentProof? } where replaceOrNote (goal : MVarId) (h : Name) (v : Expr) (t? : Option Expr := none) : MetaM (FVarId × MVarId) := - let msg := m!"adding {h} to the local context" - withTraceNode `Tactic.sym (fun _ => pure msg) <| do + withTraceNode m!"adding {h} to the local context" + (tag := "replaceOrNote") <| do trace[Tactic.sym] "with value {v} and type {t?}" if let some decl := (← getLCtx).findFromUserName? h then let ⟨fvar, goal, _⟩ ← goal.replace decl.fvarId v t? 
@@ -734,8 +731,8 @@ where /-- Return an array of `SimpTheorem`s of the proofs contained in the given `AxEffects` -/ def toSimpTheorems (eff : AxEffects) : MetaM (Array SimpTheorem) := do - let msg := m!"computing SimpTheorems for (non-)effect hypotheses" - withTraceNode `Tactic.sym (fun _ => pure msg) <| do + Sym.withTraceNode m!"computing SimpTheorems for (non-)effect hypotheses" + (tag := "toSimpTheorems") <| do let lctx ← getLCtx let baseName? := if eff.currentState.isFVar then @@ -747,8 +744,7 @@ def toSimpTheorems (eff : AxEffects) : MetaM (Array SimpTheorem) := do let add (thms : Array SimpTheorem) (e : Expr) (name : String) (prio : Nat := 1000) := - let msg := m!"adding {e} with name {name}" - withTraceNode `Tactic.sym (fun _ => pure msg) <| do + withTraceNode m!"adding {e} with name {name}" <| do let origin : Origin := if e.isFVar then .fvar e.fvarId! @@ -769,7 +765,7 @@ def toSimpTheorems (eff : AxEffects) : MetaM (Array SimpTheorem) := do thms ← add thms proof s!"field_{field}" (prio := 1500) thms ← add thms eff.nonEffectProof "nonEffectProof" - thms ← add thms eff.memoryEffectProof "memoryEffectProof" + thms ← add thms eff.memoryEffects.proof "memoryEffectProof" thms ← add thms eff.programProof "programProof" if let some stackAlignmentProof := eff.stackAlignmentProof? then thms ← add thms stackAlignmentProof "stackAlignmentProof" diff --git a/Tactics/Sym/Common.lean b/Tactics/Sym/Common.lean new file mode 100644 index 00000000..bb151ffd --- /dev/null +++ b/Tactics/Sym/Common.lean @@ -0,0 +1,41 @@ +/- +Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Author(s): Alex Keizer +-/ +import Lean + +open Lean + +namespace Sym + +/-! 
## Trace Nodes -/ +section Tracing +variable {α : Type} {m : Type → Type} [Monad m] [MonadTrace m] [MonadLiftT IO m] + [MonadRef m] [AddMessageContext m] [MonadOptions m] {ε : Type} + [MonadAlwaysExcept ε m] [MonadLiftT BaseIO m] + +/-- Add a trace node with the `Tactic.sym` trace class -/ +def withTraceNode (msg : MessageData) (k : m α) + (collapsed : Bool := true) + (tag : String := "") + : m α := do + Lean.withTraceNode `Tactic.sym (fun _ => pure msg) k collapsed tag + +/-- Add a trace node with the `Tactic.sym.info` trace class -/ +def withInfoTraceNode (msg : MessageData) (k : m α) + (collapsed : Bool := true) + (tag : String := "") + : m α := do + Lean.withTraceNode `Tactic.sym.info (fun _ => pure msg) k collapsed tag + +/-- Create a trace note that folds `header` with `(NOTE: can be large)`, +and prints `msg` under such a trace node. +-/ +def traceLargeMsg (header : MessageData) (msg : MessageData) : MetaM Unit := + withTraceNode m!"{header} (NOTE: can be large)" do + trace[Tactic.sym] msg + +end Tracing + +end Sym diff --git a/Tactics/Sym/Context.lean b/Tactics/Sym/Context.lean index 40979b2d..5f43dac9 100644 --- a/Tactics/Sym/Context.lean +++ b/Tactics/Sym/Context.lean @@ -9,6 +9,7 @@ import Lean.Meta import Arm.Exec import Tactics.Common import Tactics.Attr +import Tactics.Sym.Common import Tactics.Sym.ProgramInfo import Tactics.Sym.AxEffects import Tactics.Sym.LCtxSearch @@ -33,6 +34,7 @@ and is likely to be deprecated and removed in the near future. -/ open Lean Meta Elab.Tactic open BitVec +open Sym (withTraceNode withInfoTraceNode) /-- A `SymContext` collects the names of various variables/hypotheses in the local context required for symbolic evaluation -/ @@ -45,15 +47,18 @@ structure SymContext where If `runSteps?` is `some n`, where `n` is a meta-level `Nat`, then we expect that `` in type of `h_run` is the literal `n`. Otherwise, if `runSteps?` is `none`, - then `` is allowed to be anything, even a symbolic value. 
+ then `` is allowed to be anything, including a symbolic value. See also `SymContext.h_run` -/ runSteps? : Option Nat - /-- `h_run` is a local hypothesis of the form - `finalState = run state` + /-- `hRun` is an expression of type + ` = run state` See also `SymContext.runSteps?` -/ - h_run : Name + hRun : Expr + /-- The id of the variable with which `hRun` was initialized -/ + hRunId : FVarId + /-- `programInfo` is the relevant cached `ProgramInfo` -/ programInfo : ProgramInfo @@ -77,9 +82,6 @@ structure SymContext where and we assume that no overflow happens (i.e., `base - x` can never be equal to `base + y`) -/ pc : BitVec 64 - /-- `h_sp?`, if present, is a local hypothesis of the form - `CheckSPAlignment state` -/ - h_sp? : Option Name /-- The `simp` context used for effect aggregation. This collects references to all (non-)effect hypotheses of the intermediate @@ -159,24 +161,41 @@ def program : Name := c.programInfo.name /-- Find the local declaration that corresponds to a given name, or throw an error if no local variable of that name exists -/ -def findFromUserName (name : Name) : MetaM LocalDecl := do - let some decl := (← getLCtx).findFromUserName? name - | throwError "Unknown local variable `{name}`" - return decl - -/-- Find the local declaration that corresponds to `c.h_run`, -or throw an error if no local variable of that name exists -/ -def hRunDecl : MetaM LocalDecl := do - findFromUserName c.h_run +def findFromUserName (name : Name) : MetaM LocalDecl := + withInfoTraceNode m!"[findFromUserName] {name}" <| do + let some decl := (← getLCtx).findFromUserName? 
name + | throwError "Unknown local variable `{name}`" + return decl -section Monad +section MonadicGetters variable {m} [Monad m] [MonadReaderOf SymContext m] def getCurrentStateNumber : m Nat := do return (← read).currentStateNumber -/-- Return the name of the hypothesis - `h_run : = run ` -/ -def getHRunName : m Name := do return (← read).h_run +def getFinalState : m Expr := do return (← read).finalState + +/-- Return an expression of type + ` = run ` -/ +def getHRun : m Expr := do return (← read).hRun + +/-- Return the `Name` of a variable of type + ` = run ` + +This will return the name of `hRun`, if its an fvar. +Otherwise, add a new variable to the local context, and return the new name. +Note that `hRun` is not modified in either case. -/ +def getHRunName [MonadLiftT TacticM m] [MonadLiftT MetaM m] [MonadError m] + [MonadLCtx m] : + m Name := do + let hRun ← getHRun + if let Expr.fvar id := hRun then + let some decl := (← getLCtx).find? id + | throwError "Unknown fvar {Expr.fvar id}" + return decl.userName + else + let ⟨_id, goal⟩ ← (← getMainGoal).note `h_run hRun none + replaceMainGoal [goal] + return `h_run /-- Retrieve the name for the next state @@ -187,7 +206,7 @@ def getNextStateName : m Name := do let c ← read return Name.mkSimple s!"{c.state_prefix}{c.currentStateNumber + 1}" -end Monad +end MonadicGetters end @@ -196,27 +215,17 @@ end /-- Convert a `SymContext` to `MessageData` for tracing. This is not a `ToMessageData` instance because we need access to `MetaM` -/ def toMessageData (c : SymContext) : MetaM MessageData := do - let h_run ← userNameToMessageData c.h_run - let h_sp? ← c.h_sp?.mapM userNameToMessageData - return m!"\{ finalState := {c.finalState}, runSteps? := {c.runSteps?}, - h_run := {h_run}, + hRun := {c.hRun}, program := {c.program}, pc := {c.pc}, - h_sp? 
:= {h_sp?}, state_prefix := {c.state_prefix}, curr_state_number := {c.currentStateNumber}, effects := {c.effects} }" -variable {α : Type} {m : Type → Type} [Monad m] [MonadTrace m] [MonadLiftT IO m] - [MonadRef m] [AddMessageContext m] [MonadOptions m] {ε : Type} - [MonadAlwaysExcept ε m] [MonadLiftT BaseIO m] in -def withSymTraceNode (msg : MessageData) (k : m α) : m α := do - withTraceNode `Tactic.sym (fun _ => pure msg) k - def traceSymContext : SymM Unit := - withTraceNode `Tactic.sym (fun _ => pure m!"SymContext: ") <| do + withTraceNode m!"SymContext: " <| do let m ← (← getThe SymContext).toMessageData trace[Tactic.sym] m @@ -248,7 +257,9 @@ private def initial (state : Expr) : MetaM SymContext := do let finalState ← mkFreshExprMVar mkArmState /- Get the default simp lemmas & simprocs for aggregation -/ let (aggregateSimpCtx, aggregateSimprocs) ← - LNSymSimpContext (config := {decide := true, failIfUnchanged := false}) + LNSymSimpContext + (config := {decide := true, failIfUnchanged := false}) + (simp_attrs := #[`minimal_theory, `bitvec_rules, `state_simp_rules, `memory_rules]) let aggregateSimpCtx := { aggregateSimpCtx with -- Create a new discrtree for effect hypotheses to be added to. -- TODO(@alexkeizer): I put this here, since the previous version kept @@ -259,13 +270,13 @@ private def initial (state : Expr) : MetaM SymContext := do return { finalState runSteps? := none - h_run := `dummyValue + hRunId := ⟨.anonymous⟩ + hRun := ← mkFreshExprMVar none programInfo := { name := `dummyValue instructions := ∅ } pc := 0 - h_sp? := none aggregateSimpCtx, aggregateSimprocs, effects := AxEffects.initial state @@ -329,7 +340,8 @@ protected def searchFor : SearchLCtxForM SymM Unit := do (from {hRun.toExpr} : {hRun.type})" modifyThe SymContext ({ · with - h_run := hRun.userName + hRunId := hRun.fvarId + hRun := hRun.toExpr finalState := ←instantiateMVars c.finalState runSteps? 
}) @@ -400,9 +412,6 @@ protected def searchFor : SearchLCtxForM SymM Unit := do modifyThe AxEffects ({ · with stackAlignmentProof? := some decl.toExpr }) - modifyThe SymContext ({· with - h_sp? := decl.userName - }) ) -- Find `r ?field currentState = ?value` @@ -440,7 +449,7 @@ we create a new subgoal of this type. -/ def fromMainContext (state? : Option Name) : TacticM SymContext := do let msg := m!"Building a `SymContext` from the local context" - withTraceNode `Tactic.sym (fun _ => pure msg) <| withMainContext' do + withTraceNode msg (tag := "fromMainContext") <| withMainContext' do trace[Tactic.Sym] "state? := {state?}" let lctx ← getLCtx @@ -473,18 +482,17 @@ evaluation: * the `currentStateNumber` is incremented -/ def prepareForNextStep : SymM Unit := do - let s ← getNextStateName - let pc ← do - let { value, ..} ← AxEffects.getFieldM .PC - try - reflectBitVecLiteral 64 value - catch err => - trace[Tactic.sym] "failed to reflect PC: {err.toMessageData}" - pure <| (← getThe SymContext).pc + 4 - - modifyThe SymContext (fun c => { c with - pc - h_sp? := c.h_sp?.map (fun _ => .mkSimple s!"h_{s}_sp_aligned") - runSteps? := (· - 1) <$> c.runSteps? - currentStateNumber := c.currentStateNumber + 1 - }) + withInfoTraceNode "prepareForNextStep" (tag := "prepareForNextStep") <| do + let pc ← do + let { value, ..} ← AxEffects.getFieldM .PC + try + reflectBitVecLiteral 64 value + catch err => + trace[Tactic.sym] "failed to reflect PC: {err.toMessageData}" + pure <| (← getThe SymContext).pc + 4 + + modifyThe SymContext (fun c => { c with + pc + runSteps? := (· - 1) <$> c.runSteps? + currentStateNumber := c.currentStateNumber + 1 + }) diff --git a/Tactics/Sym/MemoryEffects.lean b/Tactics/Sym/MemoryEffects.lean new file mode 100644 index 00000000..5b8d3719 --- /dev/null +++ b/Tactics/Sym/MemoryEffects.lean @@ -0,0 +1,121 @@ +/- +Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Released under Apache 2.0 license as described in the file LICENSE. 
+Author(s): Alex Keizer, Siddharth Bhat +-/ + +import Arm.State +import Tactics.Common +import Tactics.Attr +import Tactics.Simp +import Tactics.Sym.Common + +import Std.Data.HashMap + +open Lean Meta + +structure MemoryEffects where + /-- An expression of a (potentially empty) sequence of `write_mem`s + to the initial state, which describes the effects on memory. + See `memoryEffectProof` for more detail -/ + effects : Expr + /-- An expression that contains the proof of: + ```lean + .mem + = .mem + ``` -/ + proof : Expr +deriving Repr + +instance : ToMessageData MemoryEffects where + toMessageData eff := + m!"\ + \{ effects := {eff.effects}, + proof := {eff.proof + }" + +namespace MemoryEffects + +/-! ## Initial Reflected State -/ + +/-- An initial `MemoryEffects`, representing no memory changes to the +initial `state` -/ +def initial (state : Expr) : MemoryEffects where + effects := state + proof := + -- `rfl` + mkEqReflMemory (mkApp (mkConst ``ArmState.mem) state) + +/-- Update the memory effects with a memory write -/ +def updateWriteMem (eff : MemoryEffects) (currentState : Expr) + (n addr val : Expr) : + MetaM MemoryEffects := do + let effects := mkApp4 (mkConst ``write_mem_bytes) n addr val eff.effects + let proof := + -- `mem_write_mem_bytes_of_mem_eq ...` + mkAppN (mkConst ``mem_write_mem_bytes_of_mem_eq) + #[currentState, eff.effects, eff.proof, n, addr, val] + return { effects, proof } + +/-- Update the memory effects with a register write. 
+ +This doesn't change the actual effect, but since the `currentState` has changed, +we need to update proofs -/ +def updateWrite (eff : MemoryEffects) (currentState : Expr) + (fld val : Expr) : + MetaM MemoryEffects := do + let proof := -- `mem_w_of_mem_eq ...` + mkAppN (mkConst ``mem_w_of_mem_eq) + #[currentState, eff.effects, eff.proof, fld, val] + return { eff with proof } + +/-- Transport all proofs along an equality `eq : = s`, +so that `s` is the new `currentState` -/ +def adjustCurrentStateWithEq (eff : MemoryEffects) (eq : Expr) : + MetaM MemoryEffects := do + let proof ← rewriteType eff.proof eq + /- ^^ This looks scary, since it can rewrite the left-hand-side of the proof + if `memoryEffect` is the same as `currentState` (which would be bad!). + However, this cannot ever happen in LNSym: every instruction has to modify + either the PC or the error field, neither of which is incorporated into + the `memoryEffect` and thus, `memoryEffect` never coincides with + `currentState` (assuming we're dealing with instruction semantics, as we + currently do!). -/ + return { eff with proof } + +/-- Convert a `MemoryEffects` into a `MessageData` for logging. -/ +def toMessageData (eff : MemoryEffects) : MetaM MessageData := do + let out := m!"effects: {eff.effects}" + return out + +/-- Trace the current state of `MemoryEffects`. -/ +def traceCurrentState (eff : MemoryEffects) : MetaM Unit := do + Sym.traceLargeMsg "memoryEffects" (← eff.toMessageData) + + + +/-- type check all expressions stored in `eff`, +throwing an error if one is not type-correct. + +In principle, the various `MemoryEffects` definitions should return only +well-formed expressions, making `validate` a no-op. +In practice, however, running `validate` is helpful for catching bugs in those +definitions. Do note that typechecking might be a bit expensive, so we generally +only call `validate` while debugging, not during normal execution. 
+See also the `Tactic.sym.debug` option, which controls whether `validate` is +called for each step of the `sym_n` tactic. + +NOTE: does not necessarily validate *which* type an expression has, +validation will still pass if types are different to those we claim in the +docstrings +-/ +def validate (eff : MemoryEffects) : MetaM Unit := do + let msg := "validating that the axiomatic effects are well-formed" + Sym.withTraceNode msg do + eff.traceCurrentState + check eff.effects + assertHasType eff.effects mkArmState + + check eff.proof + +end MemoryEffects diff --git a/Tests/AES-GCM/GCMGmultV8Program.lean b/Tests/AES-GCM/GCMGmultV8Program.lean index a94e9c9f..1c0c4cf5 100644 --- a/Tests/AES-GCM/GCMGmultV8Program.lean +++ b/Tests/AES-GCM/GCMGmultV8Program.lean @@ -3,8 +3,8 @@ Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. Released under Apache 2.0 license as described in the file LICENSE. Author(s): Yan Peng -/ -import Arm.BitVec import Arm.Exec +import Arm.Cfg.Cfg namespace GCMGmultV8Program @@ -50,4 +50,12 @@ def gcm_gmult_v8_program : Program := (0x7d8868#64, 0xd65f03c0#32) -- ret ] +-- Statically obtain all the GPR/SFP registers that may be affected by this program. +/-- +info: #[RegType.SFP 0x00#5, RegType.SFP 0x01#5, RegType.SFP 0x02#5, RegType.SFP 0x03#5, RegType.SFP 0x11#5, + RegType.SFP 0x12#5, RegType.SFP 0x13#5, RegType.SFP 0x14#5] +-/ +#guard_msgs in +#eval ((Cfg.create gcm_gmult_v8_program).toOption).get!.maybe_modified_regs + end GCMGmultV8Program diff --git a/Tests/SHA2/SHA512ProgramTest.lean b/Tests/SHA2/SHA512ProgramTest.lean index 29c0cb86..5bbad78a 100644 --- a/Tests/SHA2/SHA512ProgramTest.lean +++ b/Tests/SHA2/SHA512ProgramTest.lean @@ -13,15 +13,27 @@ section SHA512ProgramTest open BitVec +-- We get an over-approximation of the GPR/SFP registers that may be modified in +-- a loop iteration. 
/-- -info: #[(0, +info: Except.ok #[RegType.GPR 0x01#5, RegType.GPR 0x02#5, RegType.GPR 0x03#5, RegType.GPR 0x04#5, RegType.SFP 0x00#5, + RegType.SFP 0x01#5, RegType.SFP 0x02#5, RegType.SFP 0x03#5, RegType.SFP 0x04#5, RegType.SFP 0x05#5, + RegType.SFP 0x06#5, RegType.SFP 0x07#5, RegType.SFP 0x10#5, RegType.SFP 0x11#5, RegType.SFP 0x12#5, + RegType.SFP 0x13#5, RegType.SFP 0x14#5, RegType.SFP 0x15#5, RegType.SFP 0x16#5, RegType.SFP 0x17#5, + RegType.SFP 0x18#5, RegType.SFP 0x19#5, RegType.SFP 0x1a#5, RegType.SFP 0x1b#5, RegType.SFP 0x1c#5, + RegType.SFP 0x1d#5] +-/ +#guard_msgs in +#eval (do let cfg ← Cfg.create' 0x126500#64 0x126c90#64 SHA512.program; pure cfg.maybe_modified_regs).mapError toString + +/-- +info: ok: #[(0, { guard := 0x0000000000126c90#64, target := 0x0000000000126500#64, next := 0x0000000000126c94#64 })] -/ #guard_msgs in -#eval (do let cfg ← (Cfg.create SHA512.program) - IO.println s!"{cfg.loops_info}") +#eval do let cfg ← Cfg.create SHA512.program; pure cfg.loops_info -- Initial hash value, with the most-significant word first. def SHA512_H0 : BitVec 512 := diff --git a/Tests/Tactics/CSE.lean b/Tests/Tactics/CSE.lean index bc74a9b2..28b0bc6a 100644 --- a/Tests/Tactics/CSE.lean +++ b/Tests/Tactics/CSE.lean @@ -19,149 +19,18 @@ We want to check that it correctly sees that there are: -/ -set_option trace.Tactic.cse.summary true in /-- warning: declaration uses 'sorry' --- -info: [Tactic.cse.summary] CSE collecting hypotheses: - [Tactic.cse.summary] (x + x + (y + y + (y + y)) = - y + y + (y + y) + (y + y + (y + y)) + (y + y + (y + y))):(Prop) [relevant? ❌️] (unfold for subexpressions...) - [Tactic.cse.summary] (x + x + (y + y + (y + y))):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] (x + x):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] (x):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] Added new expr (...) 
with info ({ occs := 1, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] x - [Tactic.cse.summary] (x):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] updated expr (...) with info ({ occs := 2, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] x - [Tactic.cse.summary] Added new expr (...) with info ({ occs := 1, size := 3 }) (NOTE: can be large) - [Tactic.cse.summary] x + x - [Tactic.cse.summary] (y + y + (y + y)):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] (y + y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] Added new expr (...) with info ({ occs := 1, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] updated expr (...) with info ({ occs := 2, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] Added new expr (...) with info ({ occs := 1, size := 3 }) (NOTE: can be large) - [Tactic.cse.summary] y + y - [Tactic.cse.summary] (y + y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] updated expr (...) with info ({ occs := 3, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] updated expr (...) with info ({ occs := 4, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] updated expr (...) with info ({ occs := 2, size := 3 }) (NOTE: can be large) - [Tactic.cse.summary] y + y - [Tactic.cse.summary] Added new expr (...) with info ({ occs := 1, size := 7 }) (NOTE: can be large) - [Tactic.cse.summary] y + y + (y + y) - [Tactic.cse.summary] Added new expr (...) 
with info ({ occs := 1, size := 11 }) (NOTE: can be large) - [Tactic.cse.summary] x + x + (y + y + (y + y)) - [Tactic.cse.summary] (y + y + (y + y) + (y + y + (y + y)) + - (y + y + (y + y))):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] (y + y + (y + y) + (y + y + (y + y))):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] (y + y + (y + y)):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] (y + y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] updated expr (...) with info ({ occs := 5, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] updated expr (...) with info ({ occs := 6, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] updated expr (...) with info ({ occs := 3, size := 3 }) (NOTE: can be large) - [Tactic.cse.summary] y + y - [Tactic.cse.summary] (y + y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] updated expr (...) with info ({ occs := 7, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] updated expr (...) with info ({ occs := 8, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] updated expr (...) with info ({ occs := 4, size := 3 }) (NOTE: can be large) - [Tactic.cse.summary] y + y - [Tactic.cse.summary] updated expr (...) with info ({ occs := 2, size := 7 }) (NOTE: can be large) - [Tactic.cse.summary] y + y + (y + y) - [Tactic.cse.summary] (y + y + (y + y)):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] (y + y):(Nat) [relevant? 
✅️] (unfold for subexpressions...) - [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] updated expr (...) with info ({ occs := 9, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] updated expr (...) with info ({ occs := 10, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] updated expr (...) with info ({ occs := 5, size := 3 }) (NOTE: can be large) - [Tactic.cse.summary] y + y - [Tactic.cse.summary] (y + y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] updated expr (...) with info ({ occs := 11, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] updated expr (...) with info ({ occs := 12, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] updated expr (...) with info ({ occs := 6, size := 3 }) (NOTE: can be large) - [Tactic.cse.summary] y + y - [Tactic.cse.summary] updated expr (...) with info ({ occs := 3, size := 7 }) (NOTE: can be large) - [Tactic.cse.summary] y + y + (y + y) - [Tactic.cse.summary] Added new expr (...) with info ({ occs := 1, size := 15 }) (NOTE: can be large) - [Tactic.cse.summary] y + y + (y + y) + (y + y + (y + y)) - [Tactic.cse.summary] (y + y + (y + y)):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] (y + y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] updated expr (...) with info ({ occs := 13, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] updated expr (...) 
with info ({ occs := 14, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] updated expr (...) with info ({ occs := 7, size := 3 }) (NOTE: can be large) - [Tactic.cse.summary] y + y - [Tactic.cse.summary] (y + y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] updated expr (...) with info ({ occs := 15, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] updated expr (...) with info ({ occs := 16, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] updated expr (...) with info ({ occs := 8, size := 3 }) (NOTE: can be large) - [Tactic.cse.summary] y + y - [Tactic.cse.summary] updated expr (...) with info ({ occs := 4, size := 7 }) (NOTE: can be large) - [Tactic.cse.summary] y + y + (y + y) - [Tactic.cse.summary] Added new expr (...) with info ({ occs := 1, size := 23 }) (NOTE: can be large) - [Tactic.cse.summary] y + y + (y + y) + (y + y + (y + y)) + (y + y + (y + y)) -[Tactic.cse.summary] ⏭️ CSE eliminiating unprofitable expressions (#expressions:8): - [Tactic.cse.summary] ⏭️ Unprofitable { occs := 16, size := 1 } . (NOTE: can be large) - [Tactic.cse.summary] expr: y - [Tactic.cse.summary] ⏭️ Unprofitable { occs := 2, size := 1 } . (NOTE: can be large) - [Tactic.cse.summary] expr: x - [Tactic.cse.summary] ⏭️ Unprofitable { occs := 1, size := 3 } . (NOTE: can be large) - [Tactic.cse.summary] expr: x + x - [Tactic.cse.summary] ⏭️ Unprofitable { occs := 1, size := 23 } . (NOTE: can be large) - [Tactic.cse.summary] expr: y + y + (y + y) + (y + y + (y + y)) + (y + y + (y + y)) - [Tactic.cse.summary] ⏭️ Unprofitable { occs := 1, size := 11 } . (NOTE: can be large) - [Tactic.cse.summary] expr: x + x + (y + y + (y + y)) - [Tactic.cse.summary] ⏭️ Unprofitable { occs := 1, size := 15 } . 
(NOTE: can be large) - [Tactic.cse.summary] expr: y + y + (y + y) + (y + y + (y + y)) -[Tactic.cse.summary] CSE summary of profitable expressions (#expressions:2): - [Tactic.cse.summary] 1) { occs := 8, size := 3 } (NOTE: can be large) - [Tactic.cse.summary] y + y - [Tactic.cse.summary] 2) { occs := 4, size := 7 } (NOTE: can be large) - [Tactic.cse.summary] y + y + (y + y) -[Tactic.cse.summary] CSE rewriting (#expressions:2): - [Tactic.cse.summary] ⌛ Generalizing hx1: x1 = ... (NOTE: can be large) - [Tactic.cse.summary] y + y + (y + y) - [Tactic.cse.summary] ✅️ succeeded in generalizing hx1. (NOTE: can be large) - [Tactic.cse.summary] x y z x1 : Nat - hx1 : y + y + (y + y) = x1 - ⊢ x + x + x1 = x1 + x1 + x1 - [Tactic.cse.summary] ⌛ Generalizing hx2: x2 = ... (NOTE: can be large) - [Tactic.cse.summary] y + y - [Tactic.cse.summary] ✅️ succeeded in generalizing hx2. (NOTE: can be large) - [Tactic.cse.summary] x y z x1 x2 : Nat hx2 : y + y = x2 hx1 : x2 + x2 = x1 ⊢ x + x + x1 = x1 + x1 + x1 +info: x y z x1 x2 : Nat +hx2 : y + y = x2 +hx1 : x2 + x2 = x1 +⊢ x + x + x1 = x1 + x1 + x1 -/ #guard_msgs in theorem many_subexpr (x y z : Nat) : (x + x) + ((y + y) + (y + y)) = (((y + y) + (y + y)) + ((y + y) + (y + y))) + (((y + y) + (y + y))) := by cse (config := {minOccsToCSE := 2}) + trace_state all_goals sorry @@ -170,59 +39,17 @@ info: [Tactic.cse.summary] CSE collecting hypotheses: In this test case, we try to generalize on `64`, which is a dependent index of the type `BitVec 64`. Therefore, this should fail to generalize. -/ -set_option trace.Tactic.cse.summary true in /-- warning: declaration uses 'sorry' --- -info: [Tactic.cse.summary] CSE collecting hypotheses: - [Tactic.cse.summary] (BitVec.ofNat (y + y) (y + y) = x):(Prop) [relevant? ❌️] (unfold for subexpressions...) - [Tactic.cse.summary] (BitVec.ofNat (y + y) (y + y)):(BitVec (y + y)) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] (y + y):(Nat) [relevant? ✅️] (unfold for subexpressions...) 
- [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] Added new expr (...) with info ({ occs := 1, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] updated expr (...) with info ({ occs := 2, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] Added new expr (...) with info ({ occs := 1, size := 3 }) (NOTE: can be large) - [Tactic.cse.summary] y + y - [Tactic.cse.summary] (y + y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] updated expr (...) with info ({ occs := 3, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] (y):(Nat) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] updated expr (...) with info ({ occs := 4, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] y - [Tactic.cse.summary] updated expr (...) with info ({ occs := 2, size := 3 }) (NOTE: can be large) - [Tactic.cse.summary] y + y - [Tactic.cse.summary] Added new expr (...) with info ({ occs := 1, size := 7 }) (NOTE: can be large) - [Tactic.cse.summary] BitVec.ofNat (y + y) (y + y) - [Tactic.cse.summary] (x):(BitVec (y + (y + 0))) [relevant? ✅️] (unfold for subexpressions...) - [Tactic.cse.summary] Added new expr (...) with info ({ occs := 1, size := 1 }) (NOTE: can be large) - [Tactic.cse.summary] x -[Tactic.cse.summary] ⏭️ CSE eliminiating unprofitable expressions (#expressions:4): - [Tactic.cse.summary] ⏭️ Unprofitable { occs := 4, size := 1 } . (NOTE: can be large) - [Tactic.cse.summary] expr: y - [Tactic.cse.summary] ⏭️ Unprofitable { occs := 1, size := 1 } . (NOTE: can be large) - [Tactic.cse.summary] expr: x - [Tactic.cse.summary] ⏭️ Unprofitable { occs := 1, size := 7 } . 
(NOTE: can be large) - [Tactic.cse.summary] expr: BitVec.ofNat (y + y) (y + y) -[Tactic.cse.summary] CSE summary of profitable expressions (#expressions:1): - [Tactic.cse.summary] 1) { occs := 2, size := 3 } (NOTE: can be large) - [Tactic.cse.summary] y + y -[Tactic.cse.summary] CSE rewriting (#expressions:1): - [Tactic.cse.summary] ⌛ Generalizing hx1: x1 = ... (NOTE: can be large) - [Tactic.cse.summary] y + y - [Tactic.cse.summary] 💥️ failed to generalize hx1 (NOTE: can be large) - [Tactic.cse.summary] tactic 'generalize' failed, result is not type correct - ∀ (x1 : Nat), BitVec.ofNat x1 x1 = x - y : Nat - x : BitVec (y + (y + 0)) - ⊢ BitVec.ofNat (y + y) (y + y) = x +info: y : Nat +x : BitVec (y + (y + 0)) +⊢ BitVec.ofNat (y + y) (y + y) = x -/ #guard_msgs in theorem generalize_failure (x : BitVec (y + (y + 0))) : (BitVec.ofNat (y + y) (y + y)) = x := by cse + trace_state all_goals sorry /- ### Test from SHA -/ @@ -259,96 +86,95 @@ open BitVec sha512_helpers DPSFP SHA2 in /-- warning: declaration uses 'sorry' --- -info: H : 64 > 0 -a b c d e : BitVec 128 +info: a b c d e : BitVec 128 x1 x2 x3 : BitVec 64 x4 : BitVec 128 hx3 : BitVec.extractLsb' 64 64 x4 = x3 x5 x6 x7 x8 x9 : BitVec 64 hx2 : x9 + x3 = x2 x10 x11 : BitVec 128 -hx4 : x10 ||| x11 = x4 +hx4 : x11 ||| x10 = x4 x12 : BitVec 128 -hx11 : x12 <<< 64 = x11 +hx11 : x12 &&& 18446744073709551615#128 = x11 x13 : BitVec 128 -hx10 : x13 &&& 18446744073709551615#128 = x10 +hx10 : x13 <<< 64 = x10 x14 : BitVec 64 hx12 : BitVec.zeroExtend 128 x14 = x12 x15 : BitVec 64 hx13 : BitVec.zeroExtend 128 x15 = x13 x16 x17 : BitVec 64 x18 : BitVec 128 -hx16 : BitVec.extractLsb' 0 64 x18 = x16 -hx17 : BitVec.extractLsb' 64 64 x18 = x17 +hx16 : BitVec.extractLsb' 64 64 x18 = x16 +hx17 : BitVec.extractLsb' 0 64 x18 = x17 x19 x20 : BitVec 64 hx8 : x19 + x20 = x8 x21 : BitVec 64 hx9 : x20 + x21 = x9 -x22 x23 : BitVec 128 -hx18 : x22 ||| x23 = x18 -x24 : BitVec 64 +x22 : BitVec 64 +x23 x24 : BitVec 128 +hx18 : x24 ||| x23 = x18 
x25 : BitVec 128 hx23 : x25 <<< 64 = x23 -x26 : BitVec 128 -hx22 : x26 &&& 18446744073709551615#128 = x22 -x27 x28 : BitVec 64 -hx25 : BitVec.zeroExtend 128 x28 = x25 +x26 : BitVec 64 +x27 : BitVec 128 +hx24 : x27 &&& 18446744073709551615#128 = x24 +x28 : BitVec 64 +hx20 : x28 ^^^ x26 = x20 x29 : BitVec 64 -hx20 : x29 ^^^ x27 = x20 +hx25 : BitVec.zeroExtend 128 x29 = x25 x30 : BitVec 64 -hx26 : BitVec.zeroExtend 128 x30 = x26 +hx27 : BitVec.zeroExtend 128 x30 = x27 x31 : BitVec 64 -hx21 : x24 ^^^ x31 = x21 -x32 x33 : BitVec 64 -hx24 : x33 ^^^ x32 = x24 -x34 x35 : BitVec 64 -hx35 : BitVec.extractLsb' 0 64 a = x35 +hx21 : x22 ^^^ x31 = x21 +x32 x33 x34 : BitVec 64 +hx22 : x34 ^^^ x33 = x22 +x35 : BitVec 64 +hx35 : BitVec.extractLsb' 64 64 a = x35 +hx26 : x32 &&& x35 = x26 x36 : BitVec 64 -hx36 : BitVec.extractLsb' 64 64 a = x36 -hx27 : x34 &&& x36 = x27 +hx36 : BitVec.extractLsb' 64 64 e = x36 +hx6 : x7 + x36 = x6 +hx15 : x16 + x36 = x15 x37 : BitVec 64 -hx37 : BitVec.extractLsb' 0 64 b = x37 -hx1 : x2 + x37 = x1 -hx5 : x37 + x6 = x5 +hx37 : BitVec.extractLsb' 0 64 a = x37 x38 : BitVec 64 -hx38 : BitVec.extractLsb' 0 64 e = x38 -hx15 : x16 + x38 = x15 +hx38 : BitVec.extractLsb' 64 64 d = x38 +hx7 : x8 + x38 = x7 x39 : BitVec 64 -hx39 : BitVec.extractLsb' 64 64 b = x39 -hx31 : x39.rotateRight 41 = x31 -hx32 : x39.rotateRight 18 = x32 -hx33 : x39.rotateRight 14 = x33 -hx34 : ~~~x39 = x34 -hx29 : x39 &&& x35 = x29 +hx39 : BitVec.extractLsb' 0 64 c = x39 x40 : BitVec 64 hx40 : BitVec.extractLsb' 64 64 c = x40 hx19 : x40 + x21 = x19 +hx29 : x40 + x38 = x29 x41 : BitVec 64 -hx41 : BitVec.extractLsb' 64 64 d = x41 -hx7 : x8 + x41 = x7 -hx28 : x40 + x41 = x28 +hx41 : BitVec.extractLsb' 0 64 b = x41 +hx1 : x2 + x41 = x1 +hx5 : x41 + x6 = x5 x42 : BitVec 64 -hx42 : BitVec.extractLsb' 64 64 e = x42 -hx6 : x7 + x42 = x6 -hx14 : x17 + x42 = x14 +hx42 : BitVec.extractLsb' 64 64 b = x42 +hx31 : x42.rotateRight 41 = x31 +hx32 : ~~~x42 = x32 +hx33 : x42.rotateRight 18 = x33 +hx34 : 
x42.rotateRight 14 = x34 +hx28 : x42 &&& x37 = x28 x43 : BitVec 64 -hx43 : BitVec.extractLsb' 0 64 d = x43 +hx43 : BitVec.extractLsb' 0 64 e = x43 +hx14 : x17 + x43 = x14 x44 : BitVec 64 -hx44 : BitVec.extractLsb' 0 64 c = x44 -hx30 : x44 + x43 = x30 +hx44 : BitVec.extractLsb' 0 64 d = x44 +hx30 : x39 + x44 = x30 ⊢ x2 ++ - ((x1 &&& x39 ^^^ ~~~x1 &&& x35) + (x1.rotateRight 14 ^^^ x1.rotateRight 18 ^^^ x1.rotateRight 41) + + ((x1 &&& x42 ^^^ ~~~x1 &&& x37) + (x1.rotateRight 14 ^^^ x1.rotateRight 18 ^^^ x1.rotateRight 41) + BitVec.extractLsb' 0 64 x4) = x6 ++ - (x44 + (x5.rotateRight 14 ^^^ x5.rotateRight 18 ^^^ x5.rotateRight 41) + (x5 &&& x39 ^^^ ~~~x5 &&& x35) + x43 + - x38) + (x39 + (x5.rotateRight 14 ^^^ x5.rotateRight 18 ^^^ x5.rotateRight 41) + (x5 &&& x42 ^^^ ~~~x5 &&& x37) + x44 + + x43) -/ - #guard_msgs in theorem sha512h_rule_1 (a b c d e : BitVec 128) : let elements := 2 let esize := 64 - let inner_sum := (binary_vector_op_aux 0 elements esize BitVec.add c d (BitVec.zero 128) H) - let outer_sum := (binary_vector_op_aux 0 elements esize BitVec.add inner_sum e (BitVec.zero 128) H) + let inner_sum := (binary_vector_op_aux 0 elements esize BitVec.add c d (BitVec.zero 128)) + let outer_sum := (binary_vector_op_aux 0 elements esize BitVec.add inner_sum e (BitVec.zero 128)) let a0 := extractLsb' 0 64 a let a1 := extractLsb' 64 64 a let b0 := extractLsb' 0 64 b @@ -381,16 +207,15 @@ open BitVec sha512_helpers DPSFP SHA2 in /-- warning: declaration uses 'sorry' --- -info: h1 h2 : 64 > 0 -a b c d e : BitVec 128 +info: a b c d e : BitVec 128 x1 x2 x3 : BitVec 64 x4 : BitVec 128 hx3 : BitVec.extractLsb' 64 64 x4 = x3 x5 : BitVec 64 x6 x7 : BitVec 128 -hx4 : x7 ||| x6 = x4 +hx6 : x7 <<< 64 = x6 x8 : BitVec 128 -hx6 : x8 <<< 64 = x6 +hx4 : x8 ||| x6 = x4 x9 : BitVec 64 hx8 : BitVec.zeroExtend 128 x9 = x8 x10 : BitVec 64 @@ -403,72 +228,71 @@ x16 : BitVec 256 hx15 : BitVec.extractLsb' 64 128 x16 = x15 x17 x18 : BitVec 64 hx2 : x18 + x3 = x2 -x19 : BitVec 128 -hx16 : x19 
++ x19 = x16 -x20 x21 : BitVec 64 -hx17 : x20 + x21 = x17 +x19 : BitVec 64 +x20 : BitVec 128 +hx16 : x20 ++ x20 = x16 +x21 : BitVec 64 +hx17 : x19 + x21 = x17 x22 : BitVec 64 hx18 : x21 + x22 = x18 x23 : BitVec 64 -x24 : BitVec 128 -x25 : BitVec 64 -x26 : BitVec 128 -hx19 : x26 ||| x24 = x19 +x24 x25 : BitVec 128 +hx24 : x25 <<< 64 = x24 +x26 : BitVec 64 x27 : BitVec 128 -hx24 : x27 <<< 64 = x24 +hx20 : x27 ||| x24 = x20 x28 : BitVec 64 -hx26 : BitVec.zeroExtend 128 x28 = x26 +hx25 : BitVec.zeroExtend 128 x28 = x25 x29 : BitVec 64 -hx21 : x29 ^^^ x25 = x21 +hx27 : BitVec.zeroExtend 128 x29 = x27 x30 : BitVec 64 -hx27 : BitVec.zeroExtend 128 x30 = x27 -x31 x32 x33 : BitVec 64 -hx22 : x23 ^^^ x33 = x22 -x34 : BitVec 64 -hx23 : x34 ^^^ x31 = x23 +hx21 : x30 ^^^ x26 = x21 +x31 x32 : BitVec 64 +hx22 : x23 ^^^ x32 = x22 +x33 x34 : BitVec 64 +hx23 : x34 ^^^ x33 = x23 x35 : BitVec 64 -hx35 : BitVec.extractLsb' 64 64 e = x35 +hx35 : BitVec.extractLsb' 64 64 b = x35 +hx31 : ~~~x35 = x31 +hx32 : x35.rotateRight 41 = x32 +hx33 : x35.rotateRight 18 = x33 +hx34 : x35.rotateRight 14 = x34 x36 : BitVec 64 -hx36 : BitVec.extractLsb' 64 64 b = x36 -hx31 : x36.rotateRight 18 = x31 -hx32 : ~~~x36 = x32 -hx33 : x36.rotateRight 41 = x33 -hx34 : x36.rotateRight 14 = x34 +hx36 : BitVec.extractLsb' 64 64 c = x36 +hx10 : x36 + x12 = x10 +hx19 : x36 + x22 = x19 x37 : BitVec 64 hx37 : BitVec.extractLsb' 0 64 c = x37 -hx10 : x37 + x13 = x10 +hx9 : x37 + x13 = x9 x38 : BitVec 64 -hx38 : BitVec.extractLsb' 0 64 d = x38 -hx14 : x17 + x38 = x14 +hx38 : BitVec.extractLsb' 0 64 e = x38 +hx11 : x14 + x38 = x11 x39 : BitVec 64 -hx39 : BitVec.extractLsb' 64 64 c = x39 -hx9 : x39 + x12 = x9 -hx20 : x39 + x22 = x20 +hx39 : BitVec.extractLsb' 0 64 d = x39 +hx14 : x17 + x39 = x14 +hx29 : x39 + x38 = x29 x40 : BitVec 64 -hx40 : BitVec.extractLsb' 0 64 e = x40 -hx11 : x14 + x40 = x11 -hx28 : x38 + x40 = x28 +hx40 : BitVec.extractLsb' 64 64 a = x40 +hx26 : x31 &&& x40 = x26 x41 : BitVec 64 -hx41 : 
BitVec.extractLsb' 0 64 b = x41 -hx1 : x2 + x41 = x1 -hx5 : x41 + x11 = x5 +hx41 : BitVec.extractLsb' 64 64 e = x41 x42 : BitVec 64 hx42 : BitVec.extractLsb' 64 64 d = x42 -hx30 : x42 + x35 = x30 +hx28 : x42 + x41 = x28 x43 : BitVec 64 hx43 : BitVec.extractLsb' 0 64 a = x43 -hx29 : x36 &&& x43 = x29 +hx30 : x35 &&& x43 = x30 x44 : BitVec 64 -hx44 : BitVec.extractLsb' 64 64 a = x44 -hx25 : x32 &&& x44 = x25 +hx44 : BitVec.extractLsb' 0 64 b = x44 +hx1 : x2 + x44 = x1 +hx5 : x44 + x11 = x5 ⊢ x2 ++ - ((x1 &&& x36 ^^^ ~~~x1 &&& x43) + (x1.rotateRight 14 ^^^ x1.rotateRight 18 ^^^ x1.rotateRight 41) + + ((x1 &&& x35 ^^^ ~~~x1 &&& x43) + (x1.rotateRight 14 ^^^ x1.rotateRight 18 ^^^ x1.rotateRight 41) + BitVec.extractLsb' 0 64 x4) = x11 ++ - (x37 + (x5.rotateRight 14 ^^^ x5.rotateRight 18 ^^^ x5.rotateRight 41) + (x5 &&& x36 ^^^ ~~~x5 &&& x43) + x42 + - x35) + (x37 + (x5.rotateRight 14 ^^^ x5.rotateRight 18 ^^^ x5.rotateRight 41) + (x5 &&& x35 ^^^ ~~~x5 &&& x43) + x42 + + x41) -/ - #guard_msgs in theorem sha512h_rule_2 (a b c d e : BitVec 128) : let a0 := extractLsb' 0 64 a let a1 := extractLsb' 64 64 a @@ -480,12 +304,12 @@ hx25 : x32 &&& x44 = x25 let d1 := extractLsb' 64 64 d let e0 := extractLsb' 0 64 e let e1 := extractLsb' 64 64 e - let inner_sum := binary_vector_op_aux 0 2 64 BitVec.add d e (BitVec.zero 128) h1 + let inner_sum := binary_vector_op_aux 0 2 64 BitVec.add d e (BitVec.zero 128) let concat := inner_sum ++ inner_sum let operand := extractLsb' 64 128 concat let hi64_spec := compression_update_t1 b1 a0 a1 c1 d0 e0 let lo64_spec := compression_update_t1 (b0 + hi64_spec) b1 a0 c0 d1 e1 - sha512h a b (binary_vector_op_aux 0 2 64 BitVec.add c operand (BitVec.zero 128) h2) = + sha512h a b (binary_vector_op_aux 0 2 64 BitVec.add c operand (BitVec.zero 128)) = hi64_spec ++ lo64_spec := by repeat (unfold binary_vector_op_aux; simp) repeat (unfold BitVec.partInstall; simp) diff --git a/lake-manifest.json b/lake-manifest.json index 35f79314..f8269aa7 100644 --- 
a/lake-manifest.json +++ b/lake-manifest.json @@ -15,7 +15,7 @@ "type": "git", "subDir": null, "scope": "", - "rev": "3ae17cd758a7cb3b71ac76f75ae453a227e915fe", + "rev": "18c69c9a08747865949c4b7e28dc4913531ab98e", "name": "ELFSage", "manifestFile": "lake-manifest.json", "inputRev": "main", @@ -35,7 +35,7 @@ "type": "git", "subDir": null, "scope": "", - "rev": "7afce91e4fcee25c1ed06dca8d71b82bed396776", + "rev": "6d2e06515f1ed1f74208d5a1da3a9cc26c60a7a0", "name": "UnicodeBasic", "manifestFile": "lake-manifest.json", "inputRev": "main", @@ -45,7 +45,7 @@ "type": "git", "subDir": null, "scope": "", - "rev": "0a294fe9bf23b396c5cc955054c50b9b652ec5ad", + "rev": "85e1e7143dd4cfa2b551826c27867bada60858e8", "name": "BibtexQuery", "manifestFile": "lake-manifest.json", "inputRev": "master", @@ -55,7 +55,7 @@ "type": "git", "subDir": null, "scope": "", - "rev": "1b0072fea2aa6a0ef8ef8b506ec5223b184cb4d0", + "rev": "ccb4e97ffb7ad0f9b1852e9669d5e2922f984175", "name": "«doc-gen4»", "manifestFile": "lake-manifest.json", "inputRev": "main", diff --git a/lakefile.lean b/lakefile.lean index 7175ad63..70d5df23 100644 --- a/lakefile.lean +++ b/lakefile.lean @@ -33,6 +33,17 @@ lean_lib «Doc» where -- add library configuration options here lean_lib «Benchmarks» where + /- We want `lake build Benchmarks` to build all dependencies needed to run + benchmarks, but not run the actual benchmarks themselves. + To ensure we build all dependencies, we build the actual benchmark files, + but to prevent the benchmarks from running, we set the `benchmark.runs` option + to `0`. + + Note that regular options (without the `weak` prefix) are checked to exist, + but not all files will have imported the `benchmark.runs` option, so we have + to set the option with the `weak` prefix. 
As a result, the option will be + silently ignored when compiling files where the option hasn't been defined -/ + leanOptions := #[⟨`weak.benchmark.runs, (0 : Nat)⟩] -- add library configuration options here @[default_target] diff --git a/lean-toolchain b/lean-toolchain index 018f6702..143740cb 100644 --- a/lean-toolchain +++ b/lean-toolchain @@ -1 +1 @@ -leanprover/lean4:nightly-2024-09-10 \ No newline at end of file +leanprover/lean4:nightly-2024-10-07 diff --git a/scripts/benchmark.sh b/scripts/benchmark.sh new file mode 100755 index 00000000..717892bf --- /dev/null +++ b/scripts/benchmark.sh @@ -0,0 +1,39 @@ +#!/bin/bash +# Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Released under Apache 2.0 license as described in the file LICENSE. +# Author(s): Alex Keizer + +# Helper script to get benchmarking data +# USAGE: +# ./benchmark.sh Benchmark/file1.lean Benchmark/file2.lean ... +# +# The script will first build the `Benchmarks` target, which builds the +# dependencies but does not run the benchmarks themselves, and then +# invokes lean directly on each of the files passed as arguments, with +# - the `benchmark.runs` option set to 5, so that each benchmark is run 5 +# times +# - the stdout of the build logged in +# `data/benchmarks/[timestamp]_[commit hash]/[filename]` +# +# NOTE: if you pass a file which is not part of the Benchmarks target, then +# you *have* to make sure its dependencies are built before invoking this +# script + +LAKE=lake +BENCH="$LAKE env lean -Dweak.benchmark.runs=5" +OUT="data/benchmarks" + +timestamp=$(date +"%Y-%m-%d_%H%M%S") +rev=$(git rev-parse --short HEAD) +echo "HEAD is on $rev" +out="$OUT/${timestamp}_${rev}" +mkdir -p "$out" + +$LAKE build Benchmarks +for file in "$@"; do + echo + echo + $file + echo + base="$(basename "$file" ".lean")" + $BENCH $file | tee "$out/$base" +done diff --git a/scripts/profile.sh b/scripts/profile.sh new file mode 100755 index 00000000..9be0994d --- /dev/null +++ 
b/scripts/profile.sh @@ -0,0 +1,40 @@ +#!/bin/bash +# Copyright (c) 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Released under Apache 2.0 license as described in the file LICENSE. +# Author(s): Alex Keizer + +# Helper script to get profiling data +# USAGE: +# ./profile.sh Benchmark/file1.lean Benchmark/file2.lean ... +# +# The script will first build the `Benchmarks` target, which builds the +# dependencies but does not run the benchmarks themselves, and then +# invokes lean directly on each of the files passed as arguments, with +# - the `profiler` option set to true +# - the `trace.profiler.output` option set to +# `data/profiles/[timestamp]_[commit hash]/[filename].json`, and +# - the stdout of the build logged in +# `data/profiles/[timestamp]_[commit hash]/[filename].log` +# +# NOTE: if you pass a file which is not part of the Benchmarks target, then +# you *have* to make sure its dependencies are built before invoking this +# script + +LAKE=lake +PROF="$LAKE env lean -Dprofiler=true" +OUT="data/profiles" + +timestamp=$(date +"%Y-%m-%d_%H%M%S") +rev=$(git rev-parse --short HEAD) +echo "HEAD is on $rev" +out="$OUT/${timestamp}_${rev}" +mkdir -p "$out" + +$LAKE build Benchmarks +for file in "$@"; do + echo + echo + $file + echo + base="$(basename "$file" ".lean")" + $PROF -Dtrace.profiler.output="$out/$base.json" "$file" | tee "$out/$base.log" +done