--- a/Admin/Windows/launch4j/isabelle.xml Wed Feb 23 08:43:44 2022 +0100
+++ b/Admin/Windows/launch4j/isabelle.xml Wed Mar 23 10:54:22 2022 +0100
@@ -15,7 +15,7 @@
<manifest></manifest>
<icon>{ICON}</icon>
<classPath>
- <mainClass>isabelle.jedit.Main</mainClass>
+ <mainClass>isabelle.jedit.JEdit_Main</mainClass>
{CLASSPATH}
</classPath>
<singleInstance>
--- a/etc/build.props Wed Feb 23 08:43:44 2022 +0100
+++ b/etc/build.props Wed Mar 23 10:54:22 2022 +0100
@@ -1,6 +1,6 @@
title = Isabelle/Scala
module = $ISABELLE_HOME/lib/classes/isabelle.jar
-main = isabelle.jedit.Main
+main = isabelle.jedit.JEdit_Main
resources = \
lib/services/java.nio.charset.spi.CharsetProvider:META-INF/services/ \
lib/logo/isabelle_transparent-32.gif:isabelle/ \
@@ -219,11 +219,10 @@
src/Tools/VSCode/src/lsp.scala \
src/Tools/VSCode/src/preview_panel.scala \
src/Tools/VSCode/src/state_panel.scala \
- src/Tools/VSCode/src/textmate_grammar.scala \
+ src/Tools/VSCode/src/vscode_main.scala \
src/Tools/VSCode/src/vscode_model.scala \
src/Tools/VSCode/src/vscode_rendering.scala \
src/Tools/VSCode/src/vscode_resources.scala \
- src/Tools/VSCode/src/vscode_setup.scala \
src/Tools/VSCode/src/vscode_spell_checker.scala \
src/Tools/jEdit/src/active.scala \
src/Tools/jEdit/src/base_plugin.scala \
@@ -247,6 +246,7 @@
src/Tools/jEdit/src/jedit_bibtex.scala \
src/Tools/jEdit/src/jedit_editor.scala \
src/Tools/jEdit/src/jedit_lib.scala \
+ src/Tools/jEdit/src/jedit_main.scala \
src/Tools/jEdit/src/jedit_options.scala \
src/Tools/jEdit/src/jedit_plugins.scala \
src/Tools/jEdit/src/jedit_rendering.scala \
@@ -254,7 +254,6 @@
src/Tools/jEdit/src/jedit_sessions.scala \
src/Tools/jEdit/src/jedit_spell_checker.scala \
src/Tools/jEdit/src/keymap_merge.scala \
- src/Tools/jEdit/src/main.scala \
src/Tools/jEdit/src/main_plugin.scala \
src/Tools/jEdit/src/monitor_dockable.scala \
src/Tools/jEdit/src/output_dockable.scala \
--- a/lib/Tools/vscode Wed Feb 23 08:43:44 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,16 +0,0 @@
-#!/usr/bin/env bash
-#
-# Author: Makarius
-#
-# DESCRIPTION: run Isabelle/VSCode (requires "vscodium-X.YY.Z" component)
-
-isabelle vscode_setup || exit "$?"
-
-export ISABELLE_VSCODIUM_APP="$(platform_path "$ISABELLE_VSCODIUM_RESOURCES/vscodium")"
-
-ELECTRON_RUN_AS_NODE=1 "$ISABELLE_VSCODIUM_ELECTRON" \
- "$(platform_path "$ISABELLE_VSCODIUM_RESOURCES/vscodium/out/cli.js")" \
- --ms-enable-electron-run-as-node --locale en-US \
- --user-data-dir "$(platform_path "$ISABELLE_VSCODE_SETTINGS"/user-data)" \
- --extensions-dir "$(platform_path "$ISABELLE_VSCODE_SETTINGS"/extensions)" \
- "$@"
--- a/src/Doc/JEdit/JEdit.thy Wed Feb 23 08:43:44 2022 +0100
+++ b/src/Doc/JEdit/JEdit.thy Wed Mar 23 10:54:22 2022 +0100
@@ -263,7 +263,7 @@
session \<^verbatim>\<open>ROOT\<close> entry in the editor to facilitate editing of the main
session. The \<^verbatim>\<open>-A\<close> option specifies an alternative ancestor session for
option \<^verbatim>\<open>-R\<close>: this allows to restructure the hierarchy of session images on
- the spot.
+ the spot. Options \<^verbatim>\<open>-R\<close> and \<^verbatim>\<open>-l\<close> are mutually exclusive.
The \<^verbatim>\<open>-i\<close> option includes additional sessions into the name-space of
theories: multiple occurrences are possible.
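+
+  For example, a command line such as
+  @{verbatim [display] \<open>isabelle jedit -A HOL -R HOL-Library\<close>}
+  would edit session \<^verbatim>\<open>HOL-Library\<close> while using \<^verbatim>\<open>HOL\<close> as explicit
+  ancestor image; the session names here are merely illustrative.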
--- a/src/Doc/System/Environment.thy Wed Feb 23 08:43:44 2022 +0100
+++ b/src/Doc/System/Environment.thy Wed Mar 23 10:54:22 2022 +0100
@@ -459,7 +459,7 @@
text \<open>
The subsequent example creates a raw Java process on the command-line and
invokes the main Isabelle application entry point:
- @{verbatim [display] \<open>isabelle_java -Djava.awt.headless=false isabelle.jedit.Main\<close>}
+ @{verbatim [display] \<open>isabelle_java -Djava.awt.headless=false isabelle.jedit.JEdit_Main\<close>}
\<close>
--- a/src/HOL/SMT.thy Wed Feb 23 08:43:44 2022 +0100
+++ b/src/HOL/SMT.thy Wed Mar 23 10:54:22 2022 +0100
@@ -623,10 +623,11 @@
ML_file \<open>Tools/SMT/z3_isar.ML\<close>
ML_file \<open>Tools/SMT/smt_solver.ML\<close>
ML_file \<open>Tools/SMT/cvc4_interface.ML\<close>
+ML_file \<open>Tools/SMT/lethe_proof.ML\<close>
+ML_file \<open>Tools/SMT/lethe_isar.ML\<close>
+ML_file \<open>Tools/SMT/lethe_proof_parse.ML\<close>
ML_file \<open>Tools/SMT/cvc4_proof_parse.ML\<close>
ML_file \<open>Tools/SMT/verit_proof.ML\<close>
-ML_file \<open>Tools/SMT/verit_isar.ML\<close>
-ML_file \<open>Tools/SMT/verit_proof_parse.ML\<close>
ML_file \<open>Tools/SMT/conj_disj_perm.ML\<close>
ML_file \<open>Tools/SMT/smt_replay_methods.ML\<close>
ML_file \<open>Tools/SMT/smt_replay.ML\<close>
@@ -635,6 +636,7 @@
ML_file \<open>Tools/SMT/z3_replay_rules.ML\<close>
ML_file \<open>Tools/SMT/z3_replay_methods.ML\<close>
ML_file \<open>Tools/SMT/z3_replay.ML\<close>
+ML_file \<open>Tools/SMT/lethe_replay_methods.ML\<close>
ML_file \<open>Tools/SMT/verit_replay_methods.ML\<close>
ML_file \<open>Tools/SMT/verit_replay.ML\<close>
ML_file \<open>Tools/SMT/smt_systems.ML\<close>
@@ -888,7 +890,6 @@
"(if P then \<not> Q else R) \<or> \<not> P \<or> Q"
"(if P then Q else \<not> R) \<or> P \<or> R"
by auto
-
hide_type (open) symb_list pattern
hide_const (open) Symb_Nil Symb_Cons trigger pat nopat fun_app z3div z3mod
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Tools/SMT/lethe_isar.ML Wed Mar 23 10:54:22 2022 +0100
@@ -0,0 +1,60 @@
+(* Title: HOL/Tools/SMT/lethe_isar.ML
+ Author: Mathias Fleury, TU Muenchen
+ Author: Jasmin Blanchette, TU Muenchen
+
+Lethe proofs as generic ATP proofs for Isar proof reconstruction.
+*)
+
+signature LETHE_ISAR =
+sig
+ type ('a, 'b) atp_step = ('a, 'b) ATP_Proof.atp_step
+ val atp_proof_of_veriT_proof: Proof.context -> term list -> thm list -> term list -> term ->
+ (string * term) list -> int list -> int -> (int * string) list -> Lethe_Proof.lethe_step list ->
+ (term, string) ATP_Proof.atp_step list
+end;
+
+structure Lethe_Isar: LETHE_ISAR =
+struct
+
+open ATP_Util
+open ATP_Problem
+open ATP_Proof
+open ATP_Proof_Reconstruct
+open SMTLIB_Interface
+open SMTLIB_Isar
+open Lethe_Proof
+
+fun atp_proof_of_veriT_proof ctxt ll_defs rewrite_rules hyp_ts concl_t fact_helper_ts prem_ids
+ conjecture_id fact_helper_ids =
+ let
+ fun steps_of (Lethe_Proof.Lethe_Step {id, rule, prems, concl, ...}) =
+ let
+ val concl' = postprocess_step_conclusion ctxt rewrite_rules ll_defs concl
+ fun standard_step role = ((id, []), role, concl', rule, map (rpair []) prems)
+ in
+ if rule = input_rule then
+ let
+ val (_, id_num) = SMTLIB_Interface.role_and_index_of_assert_name id
+ val ss = the_list (AList.lookup (op =) fact_helper_ids id_num)
+ in
+ (case distinguish_conjecture_and_hypothesis ss id_num conjecture_id prem_ids
+ fact_helper_ts hyp_ts concl_t of
+ NONE => []
+ | SOME (role0, concl00) =>
+ let
+ val name0 = (id ^ "a", ss)
+ val concl0 = unskolemize_names ctxt concl00
+ in
+ [(name0, role0, concl0, rule, []),
+ ((id, []), Plain, concl', rewrite_rule,
+ name0 :: normalizing_prems ctxt concl0)]
+ end)
+ end
+ else
+ [standard_step (if null prems then Lemma else Plain)]
+ end
+ in
+ maps steps_of
+ end
+
+end;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Tools/SMT/lethe_proof.ML Wed Mar 23 10:54:22 2022 +0100
@@ -0,0 +1,785 @@
+(* Title: HOL/Tools/SMT/lethe_proof.ML
+ Author: Mathias Fleury, ENS Rennes
+ Author: Sascha Boehme, TU Muenchen
+
+Lethe proofs: parsing and abstract syntax tree.
+*)
+
+signature LETHE_PROOF =
+sig
+ (*proofs*)
+ datatype lethe_step = Lethe_Step of {
+ id: string,
+ rule: string,
+ prems: string list,
+ proof_ctxt: term list,
+ concl: term,
+ fixes: string list}
+
+ datatype lethe_replay_node = Lethe_Replay_Node of {
+ id: string,
+ rule: string,
+ args: term list,
+ prems: string list,
+ proof_ctxt: term list,
+ concl: term,
+ bounds: (string * typ) list,
+ declarations: (string * term) list,
+ insts: term Symtab.table,
+ subproof: (string * typ) list * term list * term list * lethe_replay_node list}
+
+ val mk_replay_node: string -> string -> term list -> string list -> term list ->
+ term -> (string * typ) list -> term Symtab.table -> (string * term) list ->
+ (string * typ) list * term list * term list * lethe_replay_node list -> lethe_replay_node
+
+ (*proof parser*)
+ val parse: typ Symtab.table -> term Symtab.table -> string list ->
+ Proof.context -> lethe_step list * Proof.context
+ val parse_replay: typ Symtab.table -> term Symtab.table -> string list ->
+ Proof.context -> lethe_replay_node list * Proof.context
+
+ val step_prefix : string
+ val input_rule: string
+ val keep_app_symbols: string -> bool
+ val keep_raw_lifting: string -> bool
+ val normalized_input_rule: string
+ val la_generic_rule : string
+ val rewrite_rule : string
+ val simp_arith_rule : string
+ val lethe_deep_skolemize_rule : string
+ val lethe_def : string
+ val subproof_rule : string
+ val local_input_rule : string
+ val not_not_rule : string
+ val contract_rule : string
+ val ite_intro_rule : string
+ val eq_congruent_rule : string
+ val eq_congruent_pred_rule : string
+ val skolemization_steps : string list
+ val theory_resolution2_rule: string
+ val equiv_pos2_rule: string
+ val th_resolution_rule: string
+
+ val is_skolemization: string -> bool
+ val is_skolemization_step: lethe_replay_node -> bool
+
+ val number_of_steps: lethe_replay_node list -> int
+
+end;
+
+structure Lethe_Proof: LETHE_PROOF =
+struct
+
+open SMTLIB_Proof
+
+datatype raw_lethe_node = Raw_Lethe_Node of {
+ id: string,
+ rule: string,
+ args: SMTLIB.tree,
+ prems: string list,
+ concl: SMTLIB.tree,
+ declarations: (string * SMTLIB.tree) list,
+ subproof: raw_lethe_node list}
+
+fun mk_raw_node id rule args prems declarations concl subproof =
+ Raw_Lethe_Node {id = id, rule = rule, args = args, prems = prems, declarations = declarations,
+ concl = concl, subproof = subproof}
+
+datatype lethe_node = Lethe_Node of {
+ id: string,
+ rule: string,
+ prems: string list,
+ proof_ctxt: term list,
+ concl: term}
+
+fun mk_node id rule prems proof_ctxt concl =
+ Lethe_Node {id = id, rule = rule, prems = prems, proof_ctxt = proof_ctxt, concl = concl}
+
+datatype lethe_replay_node = Lethe_Replay_Node of {
+ id: string,
+ rule: string,
+ args: term list,
+ prems: string list,
+ proof_ctxt: term list,
+ concl: term,
+ bounds: (string * typ) list,
+ insts: term Symtab.table,
+ declarations: (string * term) list,
+ subproof: (string * typ) list * term list * term list * lethe_replay_node list}
+
+fun mk_replay_node id rule args prems proof_ctxt concl bounds insts declarations subproof =
+ Lethe_Replay_Node {id = id, rule = rule, args = args, prems = prems, proof_ctxt = proof_ctxt,
+ concl = concl, bounds = bounds, insts = insts, declarations = declarations,
+ subproof = subproof}
+
+datatype lethe_step = Lethe_Step of {
+ id: string,
+ rule: string,
+ prems: string list,
+ proof_ctxt: term list,
+ concl: term,
+ fixes: string list}
+
+fun mk_step id rule prems proof_ctxt concl fixes =
+ Lethe_Step {id = id, rule = rule, prems = prems, proof_ctxt = proof_ctxt, concl = concl,
+ fixes = fixes}
+
+val step_prefix = ".c"
+val input_rule = "input"
+val la_generic_rule = "la_generic"
+val normalized_input_rule = "__normalized_input" (*arbitrary*)
+val rewrite_rule = "__rewrite" (*arbitrary*)
+val subproof_rule = "subproof"
+val local_input_rule = "__local_input" (*arbitrary*)
+val simp_arith_rule = "simp_arith"
+val lethe_def = "__skolem_definition" (*arbitrary*)
+val not_not_rule = "not_not"
+val contract_rule = "contraction"
+val eq_congruent_pred_rule = "eq_congruent_pred"
+val eq_congruent_rule = "eq_congruent"
+val ite_intro_rule = "ite_intro"
+val default_skolem_rule = "sko_forall" (*arbitrary, but must be one of the skolems*)
+val theory_resolution2_rule = "__theory_resolution2" (*arbitrary*)
+val equiv_pos2_rule = "equiv_pos2"
+val th_resolution_rule = "th_resolution"
+
+val skolemization_steps = ["sko_forall", "sko_ex"]
+val is_skolemization = member (op =) skolemization_steps
+val keep_app_symbols = member (op =) [eq_congruent_pred_rule, eq_congruent_rule, ite_intro_rule]
+val keep_raw_lifting = member (op =) [eq_congruent_pred_rule, eq_congruent_rule, ite_intro_rule]
+val is_SH_trivial = member (op =) [not_not_rule, contract_rule]
+
+fun is_skolemization_step (Lethe_Replay_Node {id, ...}) = is_skolemization id
+
+(* Even the Lethe developers do not know if the following rule can still appear in proofs: *)
+val lethe_deep_skolemize_rule = "deep_skolemize"
+
+fun number_of_steps [] = 0
+ | number_of_steps ((Lethe_Replay_Node {subproof = (_, _, _, subproof), ...}) :: pf) =
+ 1 + number_of_steps subproof + number_of_steps pf
+
+(* proof parser *)
+
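+(*Translate an SMT-LIB tree into an Isabelle term, returning also the names of any variables that
+  had to be introduced fresh during the translation.*)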
+fun node_of p cx =
+ ([], cx)
+ ||>> `(with_fresh_names (term_of p))
+ |>> snd
+
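+(*Rename every free variable (and abstraction binder) whose name has old_name as a prefix to
+  new_name; this is used below to disambiguate variables that are rebound on the left-hand side
+  of an equivalence.*)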
+fun synctactic_var_subst old_name new_name (u $ v) =
+ (synctactic_var_subst old_name new_name u $ synctactic_var_subst old_name new_name v)
+ | synctactic_var_subst old_name new_name (Abs (v, T, u)) =
+ Abs (if String.isPrefix old_name v then new_name else v, T,
+ synctactic_var_subst old_name new_name u)
+ | synctactic_var_subst old_name new_name (Free (v, T)) =
+ if String.isPrefix old_name v then Free (new_name, T) else Free (v, T)
+ | synctactic_var_subst _ _ t = t
+
+fun synctatic_rew_in_lhs_subst old_name new_name (Const(\<^const_name>\<open>HOL.eq\<close>, T) $ t1 $ t2) =
+ Const(\<^const_name>\<open>HOL.eq\<close>, T) $ synctactic_var_subst old_name new_name t1 $ t2
+ | synctatic_rew_in_lhs_subst old_name new_name (Const(\<^const_name>\<open>Trueprop\<close>, T) $ t1) =
+ Const(\<^const_name>\<open>Trueprop\<close>, T) $ (synctatic_rew_in_lhs_subst old_name new_name t1)
+ | synctatic_rew_in_lhs_subst _ _ t = t
+
+fun add_bound_variables_to_ctxt cx =
+ fold (update_binding o
+ (fn (s, SOME typ) => (s, Term (Free (s, type_of cx typ)))))
+
+local
+ fun extract_symbols bds =
+ bds
+ |> map (fn (SMTLIB.S [SMTLIB.Sym "=", SMTLIB.Sym x, SMTLIB.Sym y], typ) => [([x, y], typ)]
+ | t => raise (Fail ("match error " ^ @{make_string} t)))
+ |> flat
+
+ (* onepoint can bind a variable to another variable or to a constant *)
+ fun extract_qnt_symbols cx bds =
+ bds
+ |> map (fn (SMTLIB.S [SMTLIB.Sym "=", SMTLIB.Sym x, SMTLIB.Sym y], typ) =>
+ (case node_of (SMTLIB.Sym y) cx of
+ ((_, []), _) => [([x], typ)]
+ | _ => [([x, y], typ)])
+ | (SMTLIB.S (SMTLIB.Sym "=" :: SMTLIB.Sym x :: _), typ) => [([x], typ)]
+ | t => raise (Fail ("match error " ^ @{make_string} t)))
+ |> flat
+
+ fun extract_symbols_map bds =
+ bds
+ |> map (fn (SMTLIB.S [SMTLIB.Sym "=", SMTLIB.Sym x, _], typ) => [([x], typ)])
+ |> flat
+in
+
+fun declared_csts _ "__skolem_definition" [(SMTLIB.S [SMTLIB.Sym x, typ, _], _)] = [(x, typ)]
+ | declared_csts _ "__skolem_definition" t = raise (Fail ("unrecognized skolem_definition " ^ @{make_string} t))
+ | declared_csts _ _ _ = []
+
+fun skolems_introduced_by_rule (SMTLIB.S bds) =
+ fold (fn (SMTLIB.S [SMTLIB.Sym "=", _, SMTLIB.Sym y]) => curry (op ::) y) bds []
+
+(*FIXME there is probably a way to use the information given by onepoint*)
+fun bound_vars_by_rule _ "bind" (bds) = extract_symbols bds
+ | bound_vars_by_rule cx "onepoint" bds = extract_qnt_symbols cx bds
+ | bound_vars_by_rule _ "sko_forall" bds = extract_symbols_map bds
+ | bound_vars_by_rule _ "sko_ex" bds = extract_symbols_map bds
+ | bound_vars_by_rule _ "__skolem_definition" [(SMTLIB.S [SMTLIB.Sym x, typ, _], _)] = [([x], SOME typ)]
+ | bound_vars_by_rule _ "__skolem_definition" [(SMTLIB.S [_, SMTLIB.Sym x, _], _)] = [([x], NONE)]
+ | bound_vars_by_rule _ _ _ = []
+
+(* Lethe adds "?" before some variables. *)
+fun remove_all_qm (SMTLIB.Sym v :: l) =
+ SMTLIB.Sym (perhaps (try (unprefix "?")) v) :: remove_all_qm l
+ | remove_all_qm (SMTLIB.S l :: l') = SMTLIB.S (remove_all_qm l) :: remove_all_qm l'
+ | remove_all_qm (SMTLIB.Key v :: l) = SMTLIB.Key v :: remove_all_qm l
+ | remove_all_qm (v :: l) = v :: remove_all_qm l
+ | remove_all_qm [] = []
+
+fun remove_all_qm2 (SMTLIB.Sym v) = SMTLIB.Sym (perhaps (try (unprefix "?")) v)
+ | remove_all_qm2 (SMTLIB.S l) = SMTLIB.S (remove_all_qm l)
+ | remove_all_qm2 (SMTLIB.Key v) = SMTLIB.Key v
+ | remove_all_qm2 v = v
+
+end
+
+datatype step_kind = ASSUME | ANCHOR | NO_STEP | NORMAL_STEP | SKOLEM
+
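+(*The concrete proof syntax handled below is roughly the following, where all names are merely
+  illustrative:
+    (assume h1 <term>)
+    (step t1 (cl <term> ...) :rule <rule> :premises (t0 ...) :args <args>)
+    (anchor :step t2 :args <args>)  ...subproof...  (step t2 ...)
+    (define-fun sk (...) <type> <term>)
+*)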
+fun parse_raw_proof_steps (limit : string option) (ls : SMTLIB.tree list) (cx : name_bindings) :
+ (raw_lethe_node list * SMTLIB.tree list * name_bindings) =
+ let
+ fun rotate_pair (a, (b, c)) = ((a, b), c)
+ fun step_kind [] = (NO_STEP, SMTLIB.S [], [])
+ | step_kind ((p as SMTLIB.S (SMTLIB.Sym "anchor" :: _)) :: l) = (ANCHOR, p, l)
+ | step_kind ((p as SMTLIB.S (SMTLIB.Sym "assume" :: _)) :: l) = (ASSUME, p, l)
+ | step_kind ((p as SMTLIB.S (SMTLIB.Sym "step" :: _)) :: l) = (NORMAL_STEP, p, l)
+ | step_kind ((p as SMTLIB.S (SMTLIB.Sym "define-fun" :: _)) :: l) = (SKOLEM, p, l)
+ | step_kind p = raise (Fail ("step_kind unrec: " ^ @{make_string} p))
+ fun parse_skolem (SMTLIB.S [SMTLIB.Sym "define-fun", SMTLIB.Sym id, _, typ,
+ SMTLIB.S (SMTLIB.Sym "!" :: t :: [SMTLIB.Key _, SMTLIB.Sym name])]) cx =
+ (*replace the name binding by the constant instead of the full term in order to reduce
+ the size of the generated terms and therefore the reconstruction time*)
+ let val (l, cx) = (fst oo SMTLIB_Proof.extract_and_update_name_bindings) t cx
+ |> apsnd (SMTLIB_Proof.update_name_binding (name, SMTLIB.Sym id))
+ in
+ (mk_raw_node (id ^ lethe_def) lethe_def (SMTLIB.S [SMTLIB.Sym id, typ, l]) [] []
+ (SMTLIB.S [SMTLIB.Sym "=", SMTLIB.Sym id, l]) [], cx)
+ end
+ | parse_skolem (SMTLIB.S [SMTLIB.Sym "define-fun", SMTLIB.Sym id, _, typ, SMTLIB.S l]) cx =
+ let val (l, cx) = (fst oo SMTLIB_Proof.extract_and_update_name_bindings) (SMTLIB.S l ) cx
+ in
+ (mk_raw_node (id ^ lethe_def) lethe_def (SMTLIB.S [SMTLIB.Sym id, typ, l]) [] []
+ (SMTLIB.S [SMTLIB.Sym "=", SMTLIB.Sym id, l]) [], cx)
+ end
+ | parse_skolem t _ = raise Fail ("unrecognized Lethe proof " ^ \<^make_string> t)
+ fun get_id_cx (SMTLIB.S ((SMTLIB.Sym _) :: (SMTLIB.Sym id) :: l), cx) = (id, (l, cx))
+ | get_id_cx t = raise Fail ("unrecognized Lethe proof " ^ \<^make_string> t)
+ fun get_id (SMTLIB.S ((SMTLIB.Sym _) :: (SMTLIB.Sym id) :: l)) = (id, l)
+ | get_id t = raise Fail ("unrecognized Lethe proof " ^ \<^make_string> t)
+ fun parse_source (SMTLIB.Key "premises" :: SMTLIB.S source ::l, cx) =
+ (SOME (map (fn (SMTLIB.Sym id) => id) source), (l, cx))
+ | parse_source (l, cx) = (NONE, (l, cx))
+ fun parse_rule (SMTLIB.Key "rule" :: SMTLIB.Sym r :: l, cx) = (r, (l, cx))
+ | parse_rule t = raise Fail ("unrecognized Lethe proof " ^ \<^make_string> t)
+ fun parse_anchor_step (SMTLIB.S (SMTLIB.Sym "anchor" :: SMTLIB.Key "step" :: SMTLIB.Sym r :: l), cx) = (r, (l, cx))
+ | parse_anchor_step t = raise Fail ("unrecognized Lethe proof " ^ \<^make_string> t)
+ fun parse_args (SMTLIB.Key "args" :: args :: l, cx) =
+ let val ((args, cx), _) = SMTLIB_Proof.extract_and_update_name_bindings args cx
+ in (args, (l, cx)) end
+ | parse_args (l, cx) = (SMTLIB.S [], (l, cx))
+ fun parse_and_clausify_conclusion (SMTLIB.S (SMTLIB.Sym "cl" :: []) :: l, cx) =
+ (SMTLIB.Sym "false", (l, cx))
+ | parse_and_clausify_conclusion (SMTLIB.S (SMTLIB.Sym "cl" :: concl) :: l, cx) =
+ let val (concl, cx) = fold_map (fst oo SMTLIB_Proof.extract_and_update_name_bindings) concl cx
+ in (SMTLIB.S (SMTLIB.Sym "or" :: concl), (l, cx)) end
+ | parse_and_clausify_conclusion t = raise Fail ("unrecognized Lethe proof " ^ \<^make_string> t)
+ val parse_normal_step =
+ get_id_cx
+ ##> parse_and_clausify_conclusion
+ #> rotate_pair
+ ##> parse_rule
+ #> rotate_pair
+ ##> parse_source
+ #> rotate_pair
+ ##> parse_args
+ #> rotate_pair
+
+ fun to_raw_node subproof ((((id, concl), rule), prems), args) =
+ mk_raw_node id rule args (the_default [] prems) [] concl subproof
+ fun at_discharge NONE _ = false
+ | at_discharge (SOME id) p = p |> get_id |> fst |> (fn id2 => id = id2)
+ in
+ case step_kind ls of
+ (NO_STEP, _, _) => ([],[], cx)
+ | (NORMAL_STEP, p, l) =>
+ if at_discharge limit p then ([], ls, cx) else
+ let
+ (*ignores content of "discharge": Isabelle is keeping track of it via the context*)
+ val (s, (_, cx)) = (p, cx)
+ |> parse_normal_step
+ |>> (to_raw_node [])
+ val (rp, rl, cx) = parse_raw_proof_steps limit l cx
+ in (s :: rp, rl, cx) end
+ | (ASSUME, p, l) =>
+ let
+ val (id, t :: []) = p
+ |> get_id
+ val ((t, cx), _) = SMTLIB_Proof.extract_and_update_name_bindings t cx
+ val s = mk_raw_node id input_rule (SMTLIB.S []) [] [] t []
+ val (rp, rl, cx) = parse_raw_proof_steps limit l cx
+ in (s :: rp, rl, cx) end
+ | (ANCHOR, p, l) =>
+ let
+ val (anchor_id, (anchor_args, (_, cx))) = (p, cx) |> (parse_anchor_step ##> parse_args)
+ val (subproof, discharge_step :: remaining_proof, cx) = parse_raw_proof_steps (SOME anchor_id) l cx
+ val (curss, (_, cx)) = parse_normal_step (discharge_step, cx)
+ val s = to_raw_node subproof (fst curss, anchor_args)
+ val (rp, rl, cx) = parse_raw_proof_steps limit remaining_proof cx
+ in (s :: rp, rl, cx) end
+ | (SKOLEM, p, l) =>
+ let
+ val (s, cx) = parse_skolem p cx
+ val (rp, rl, cx) = parse_raw_proof_steps limit l cx
+ in (s :: rp, rl, cx) end
+ end
+
+fun proof_ctxt_of_rule "bind" t = t
+ | proof_ctxt_of_rule "sko_forall" t = t
+ | proof_ctxt_of_rule "sko_ex" t = t
+ | proof_ctxt_of_rule "let" t = t
+ | proof_ctxt_of_rule "onepoint" t = t
+ | proof_ctxt_of_rule _ _ = []
+
+fun args_of_rule "bind" t = t
+ | args_of_rule "la_generic" t = t
+ | args_of_rule _ _ = []
+
+fun insts_of_forall_inst "forall_inst" t = map (fn SMTLIB.S [_, SMTLIB.Sym x, a] => (x, a)) t
+ | insts_of_forall_inst _ _ = []
+
+fun id_of_last_step prems =
+ if null prems then []
+ else
+ let val Lethe_Replay_Node {id, ...} = List.last prems in [id] end
+
+fun extract_assumptions_from_subproof subproof =
+ let fun extract_assumptions_from_subproof (Lethe_Replay_Node {rule, concl, ...}) assms =
+ if rule = local_input_rule then concl :: assms else assms
+ in
+ fold extract_assumptions_from_subproof subproof []
+ end
+
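+(*"input" steps whose id encodes an assertion index stem from the original problem; all other
+  "input" steps are assumptions local to a subproof.*)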
+fun normalized_rule_name id rule =
+ (case (rule = input_rule, can SMTLIB_Interface.role_and_index_of_assert_name id) of
+ (true, true) => normalized_input_rule
+ | (true, _) => local_input_rule
+ | _ => rule)
+
+fun is_assm_repetition id rule =
+ rule = input_rule andalso can SMTLIB_Interface.role_and_index_of_assert_name id
+
+fun extract_skolem ([SMTLIB.Sym var, typ, choice]) = (var, typ, choice)
+ | extract_skolem t = raise Fail ("fail to parse type" ^ @{make_string} t)
+
+(* The preprocessing takes care of:
+ 1. unfolding the shared terms
+     2. extracting the declarations of skolems to make sure that they are not unfolded
+*)
+fun preprocess compress step =
+ let
+ fun expand_assms cs =
+ map (fn t => case AList.lookup (op =) cs t of NONE => t | SOME a => a)
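+    (*a lone binding "(x T)" in a bind or onepoint step is normalized to the trivial form "(= (x T) x)"*)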
+ fun expand_lonely_arguments (args as SMTLIB.S [SMTLIB.Sym "=", _, _]) = [args]
+ | expand_lonely_arguments (x as SMTLIB.S [SMTLIB.Sym var, _]) = [SMTLIB.S [SMTLIB.Sym "=", x, SMTLIB.Sym var]]
+
+ fun preprocess (Raw_Lethe_Node {id, rule, args, prems, concl, subproof, ...}) (cx, remap_assms) =
+ let
+ val (skolem_names, stripped_args) = args
+ |> (fn SMTLIB.S args => args)
+ |> map
+ (fn SMTLIB.S [SMTLIB.Key "=", x, y] => SMTLIB.S [SMTLIB.Sym "=", x, y]
+ | x => x)
+ |> (rule = "bind" orelse rule = "onepoint") ? flat o (map expand_lonely_arguments)
+ |> `(if rule = lethe_def then single o extract_skolem else K [])
+ ||> SMTLIB.S
+
+ val (subproof, (cx, _)) = fold_map preprocess subproof (cx, remap_assms) |> apfst flat
+ val remap_assms = (if rule = "or" then (id, hd prems) :: remap_assms else remap_assms)
+ (* declare variables in the context *)
+ val declarations =
+ if rule = lethe_def
+ then skolem_names |> map (fn (name, _, choice) => (name, choice))
+ else []
+ in
+ if compress andalso rule = "or"
+ then ([], (cx, remap_assms))
+ else ([Raw_Lethe_Node {id = id, rule = rule, args = stripped_args,
+ prems = expand_assms remap_assms prems, declarations = declarations, concl = concl, subproof = subproof}],
+ (cx, remap_assms))
+ end
+ in preprocess step end
+
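+(*Partition a list into the elements satisfying f and the remaining ones, keeping the original
+  order in both parts.*)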
+fun filter_split _ [] = ([], [])
+ | filter_split f (a :: xs) =
+ (if f a then apfst (curry op :: a) else apsnd (curry op :: a)) (filter_split f xs)
+
+fun extract_types_of_args (SMTLIB.S [var, typ, t as SMTLIB.S [SMTLIB.Sym "choice", _, _]]) =
+ (SMTLIB.S [var, typ, t], SOME typ)
+ |> single
+ | extract_types_of_args (SMTLIB.S t) =
+ let
+ fun extract_types_of_arg (SMTLIB.S [eq, SMTLIB.S [var, typ], t]) = (SMTLIB.S [eq, var, t], SOME typ)
+ | extract_types_of_arg t = (t, NONE)
+ in
+ t
+ |> map extract_types_of_arg
+ end
+
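+(*Collect the ids of the skolem definitions introduced by skolemization steps, including those of
+  nested subproofs.*)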
+fun collect_skolem_defs (Raw_Lethe_Node {rule, subproof = subproof, args, ...}) =
+ (if is_skolemization rule then map (fn id => id ^ lethe_def) (skolems_introduced_by_rule args) else []) @
+ flat (map collect_skolem_defs subproof)
+
+(*The postprocessing does:
+ 1. translate the terms to Isabelle syntax, taking care of free variables
+ 2. remove the ambiguity in the proof terms:
+ x \<leadsto> y |- x = x
+ means y = x. To remove ambiguity, we use the fact that y is a free variable and replace the term
+ by:
+ xy \<leadsto> y |- xy = x.
+     This now has no ambiguity and we can safely move the "xy \<leadsto> y" to the proof
+ assumptions.
+*)
+fun postprocess_proof compress ctxt step cx =
+ let
+ fun postprocess (Raw_Lethe_Node {id, rule, args, prems, declarations, concl, subproof}) (cx, rew) =
+ let
+ val _ = (SMT_Config.verit_msg ctxt) (fn () => @{print} ("id =", id, "concl =", concl))
+
+ val args = extract_types_of_args args
+ val globally_bound_vars = declared_csts cx rule args
+ val cx = fold (update_binding o (fn (s, typ) => (s, Term (Free (s, type_of cx typ)))))
+ globally_bound_vars cx
+
+ (*find rebound variables specific to the LHS of the equivalence symbol*)
+ val bound_vars = bound_vars_by_rule cx rule args
+ val bound_vars_no_typ = map fst bound_vars
+ val rhs_vars =
+ fold (fn [t', t] => t <> t' ? (curry (op ::) t) | _ => fn x => x) bound_vars_no_typ []
+
+ fun not_already_bound cx t = SMTLIB_Proof.lookup_binding cx t = None andalso
+ not (member (op =) rhs_vars t)
+ val (shadowing_vars, rebound_lhs_vars) = bound_vars
+ |> filter_split (fn ([t, _], typ) => not_already_bound cx t | _ => true)
+ |>> map (apfst (hd))
+ |>> (fn vars => vars @ flat (map (fn ([_, t], typ) => [(t, typ)] | _ => []) bound_vars))
+ val subproof_rew = fold (fn [t, t'] => curry (op ::) (t, t ^ t'))
+ (map fst rebound_lhs_vars) rew
+ val subproof_rewriter = fold (fn (t, t') => synctatic_rew_in_lhs_subst t t')
+ subproof_rew
+
+ val ((concl, bounds), cx') = node_of concl cx
+
+ val extra_lhs_vars = map (fn ([a,b], typ) => (a, a^b, typ)) rebound_lhs_vars
+ val old_lhs_vars = map (fn (a, _, typ) => (a, typ)) extra_lhs_vars
+ val new_lhs_vars = map (fn (_, newvar, typ) => (newvar, typ)) extra_lhs_vars
+
+ (* postprocess conclusion *)
+ val concl = SMTLIB_Isar.unskolemize_names ctxt (subproof_rewriter concl)
+
+ val _ = (SMT_Config.verit_msg ctxt) (fn () => \<^print> ("id =", id, "concl =", concl))
+ val _ = (SMT_Config.verit_msg ctxt) (fn () => \<^print> ("id =", id, "cx' =", cx',
+ "bound_vars =", bound_vars))
+
+ val bound_tvars =
+ map (fn (s, SOME typ) => (s, type_of cx typ))
+ (shadowing_vars @ new_lhs_vars)
+ val subproof_cx =
+ add_bound_variables_to_ctxt cx (shadowing_vars @ new_lhs_vars) cx
+
+ fun could_unify (Bound i, Bound j) = i = j
+ | could_unify (Var v, Var v') = v = v'
+ | could_unify (Free v, Free v') = v = v'
+ | could_unify (Const (v, ty), Const (v', ty')) = v = v' andalso ty = ty'
+ | could_unify (Abs (_, ty, bdy), Abs (_, ty', bdy')) = ty = ty' andalso could_unify (bdy, bdy')
+ | could_unify (u $ v, u' $ v') = could_unify (u, u') andalso could_unify (v, v')
+ | could_unify _ = false
+ fun is_alpha_renaming t =
+ t
+ |> HOLogic.dest_Trueprop
+ |> HOLogic.dest_eq
+ |> could_unify
+ handle TERM _ => false
+ val alpha_conversion = rule = "bind" andalso is_alpha_renaming concl
+
+ val can_remove_subproof =
+ compress andalso (is_skolemization rule orelse alpha_conversion)
+ val (fixed_subproof : lethe_replay_node list, _) =
+ fold_map postprocess (if can_remove_subproof then [] else subproof)
+ (subproof_cx, subproof_rew)
+
+ val unsk_and_rewrite = SMTLIB_Isar.unskolemize_names ctxt o subproof_rewriter
+
+ (* postprocess assms *)
+ val stripped_args = map fst args
+ val sanitized_args = proof_ctxt_of_rule rule stripped_args
+
+ val arg_cx = add_bound_variables_to_ctxt cx (shadowing_vars @ old_lhs_vars) subproof_cx
+ val (termified_args, _) = fold_map node_of sanitized_args arg_cx |> apfst (map fst)
+ val normalized_args = map unsk_and_rewrite termified_args
+
+ val subproof_assms = proof_ctxt_of_rule rule normalized_args
+
+ (* postprocess arguments *)
+ val rule_args = args_of_rule rule stripped_args
+ val (termified_args, _) = fold_map term_of rule_args subproof_cx
+ val normalized_args = map unsk_and_rewrite termified_args
+ val rule_args = map subproof_rewriter normalized_args
+
+ val raw_insts = insts_of_forall_inst rule stripped_args
+ fun termify_term (x, t) cx = let val (t, cx) = term_of t cx in ((x, t), cx) end
+ val (termified_args, _) = fold_map termify_term raw_insts subproof_cx
+ val insts = Symtab.empty
+ |> fold (fn (x, t) => fn insts => Symtab.update_new (x, t) insts) termified_args
+ |> Symtab.map (K unsk_and_rewrite)
+
+ (* declarations *)
+ val (declarations, _) = fold_map termify_term declarations cx
+ |> apfst (map (apsnd unsk_and_rewrite))
+
+ (* fix step *)
+ val _ = if bounds <> [] then raise (Fail "found dangling variable in concl") else ()
+
+ val skolem_defs = (if is_skolemization rule
+ then map (fn id => id ^ lethe_def) (skolems_introduced_by_rule (SMTLIB.S (map fst args))) else [])
+ val skolems_of_subproof = (if is_skolemization rule
+ then flat (map collect_skolem_defs subproof) else [])
+ val fixed_prems =
+ prems @ (if is_assm_repetition id rule then [id] else []) @
+ skolem_defs @ skolems_of_subproof @ (id_of_last_step fixed_subproof)
+
+ (* fix subproof *)
+ val normalized_rule = normalized_rule_name id rule
+ |> (if compress andalso alpha_conversion then K "refl" else I)
+
+ val extra_assms2 =
+ (if rule = subproof_rule then extract_assumptions_from_subproof fixed_subproof else [])
+
+ val step = mk_replay_node id normalized_rule rule_args fixed_prems subproof_assms concl
+ [] insts declarations (bound_tvars, subproof_assms, extra_assms2, fixed_subproof)
+
+ in
+ (step, (cx', rew))
+ end
+ in
+ postprocess step (cx, [])
+ |> (fn (step, (cx, _)) => (step, cx))
+ end
+
+fun combine_proof_steps ((step1 : lethe_replay_node) :: step2 :: steps) =
+ let
+ val (Lethe_Replay_Node {id = id1, rule = rule1, args = args1, prems = prems1,
+ proof_ctxt = proof_ctxt1, concl = concl1, bounds = bounds1, insts = insts1,
+ declarations = declarations1,
+ subproof = (bound_sub1, assms_sub1, assms_extra1, subproof1)}) = step1
+ val (Lethe_Replay_Node {id = id2, rule = rule2, args = args2, prems = prems2,
+ proof_ctxt = proof_ctxt2, concl = concl2, bounds = bounds2, insts = insts2,
+ declarations = declarations2,
+ subproof = (bound_sub2, assms_sub2, assms_extra2, subproof2)}) = step2
+ val goals1 =
+ (case concl1 of
+ _ $ (Const (\<^const_name>\<open>HOL.disj\<close>, _) $ _ $
+ (Const (\<^const_name>\<open>HOL.disj\<close>, _) $ (Const (\<^const_name>\<open>HOL.Not\<close>, _) $a) $ b)) => [a,b]
+ | _ => [])
+ val goal2 = (case concl2 of _ $ a => a)
+ in
+ if rule1 = equiv_pos2_rule andalso rule2 = th_resolution_rule andalso member (op =) prems2 id1
+ andalso member (op =) goals1 goal2
+ then
+ mk_replay_node id2 theory_resolution2_rule args2 (filter_out (curry (op =) id1) prems2)
+ proof_ctxt2 concl2 bounds2 insts2 declarations2
+ (bound_sub2, assms_sub2, assms_extra2, combine_proof_steps subproof2) ::
+ combine_proof_steps steps
+ else
+ mk_replay_node id1 rule1 args1 prems1
+ proof_ctxt1 concl1 bounds1 insts1 declarations1
+ (bound_sub1, assms_sub1, assms_extra1, combine_proof_steps subproof1) ::
+ combine_proof_steps (step2 :: steps)
+ end
+ | combine_proof_steps steps = steps
+
+
+val linearize_proof =
+ let
+ fun map_node_concl f (Lethe_Node {id, rule, prems, proof_ctxt, concl}) =
+ mk_node id rule prems proof_ctxt (f concl)
+ fun linearize (Lethe_Replay_Node {id = id, rule = rule, args = _, prems = prems,
+ proof_ctxt = proof_ctxt, concl = concl, bounds = bounds, insts = _, declarations = _,
+ subproof = (bounds', assms, inputs, subproof)}) =
+ let
+ val bounds = distinct (op =) bounds
+ val bounds' = distinct (op =) bounds'
+ fun mk_prop_of_term concl =
+ concl |> fastype_of concl = \<^typ>\<open>bool\<close> ? curry (op $) \<^term>\<open>Trueprop\<close>
+ fun remove_assumption_id assumption_id prems =
+ filter_out (curry (op =) assumption_id) prems
+ fun add_assumption assumption concl =
+ \<^Const>\<open>Pure.imp for \<open>mk_prop_of_term assumption\<close> \<open>mk_prop_of_term concl\<close>\<close>
+ fun inline_assumption assumption assumption_id
+ (Lethe_Node {id, rule, prems, proof_ctxt, concl}) =
+ mk_node id rule (remove_assumption_id assumption_id prems) proof_ctxt
+ (add_assumption assumption concl)
+ fun find_input_steps_and_inline [] = []
+ | find_input_steps_and_inline
+ (Lethe_Node {id = id', rule, prems, concl, ...} :: steps) =
+ if rule = input_rule then
+ find_input_steps_and_inline (map (inline_assumption concl id') steps)
+ else
+ mk_node (id') rule prems [] concl :: find_input_steps_and_inline steps
+
+ fun free_bounds bounds (concl) =
+ fold (fn (var, typ) => fn t => Logic.all (Free (var, typ)) t) bounds concl
+ val subproof = subproof
+ |> flat o map linearize
+ |> map (map_node_concl (fold add_assumption (assms @ inputs)))
+ |> map (map_node_concl (free_bounds (bounds @ bounds')))
+ |> find_input_steps_and_inline
+ val concl = free_bounds bounds concl
+ in
+ subproof @ [mk_node id rule prems proof_ctxt concl]
+ end
+ in linearize end
+
+fun rule_of (Lethe_Replay_Node {rule,...}) = rule
+fun subproof_of (Lethe_Replay_Node {subproof = (_, _, _, subproof),...}) = subproof
+
+
+(* Massage Skolems for Sledgehammer.
+
+We have to make sure that there is an "arrow" in the graph for skolemization steps.
+
+
+A. The normal easy case
+
+This function detects the steps of the form
+ P \<longleftrightarrow> Q :skolemization
+ Q :resolution with P
+and replaces them by
+  Q :skolemization
+throwing away the step "P \<longleftrightarrow> Q" completely. This loses a lot of information, but it does not
+matter too much for Sledgehammer.
+
+
+B. Skolems in subproofs
+Supporting this is more or less hopeless as long as the Isar reconstruction of Sledgehammer
+does not support more features like definitions. Lethe is able to generate proofs with skolemization
+happening in subproofs inside the formula.
+ (assume "A \<or> P"
+ ...
+ P \<longleftrightarrow> Q :skolemization in the subproof
+ ...)
+ hence A \<or> P \<longrightarrow> A \<or> Q :lemma
+ ...
+ R :something with some rule
+and replaces them by
+  R :skolemization with some rule
+without any subproof.
+*)
+fun remove_skolem_definitions_proof steps =
+ let
+ fun replace_equivalent_by_imp (judgement $ ((Const(\<^const_name>\<open>HOL.eq\<close>, typ) $ arg1) $ arg2)) =
+ judgement $ ((Const(\<^const_name>\<open>HOL.implies\<close>, typ) $ arg1) $ arg2)
+ | replace_equivalent_by_imp a = a (*This case is probably wrong*)
+ fun remove_skolem_definitions (Lethe_Replay_Node {id = id, rule = rule, args = args,
+ prems = prems,
+ proof_ctxt = proof_ctxt, concl = concl, bounds = bounds, insts = insts,
+ declarations = declarations,
+ subproof = (vars, assms', extra_assms', subproof)}) (prems_to_remove, skolems) =
+ let
+ val prems = prems
+ |> filter_out (member (op =) prems_to_remove)
+ val trivial_step = is_SH_trivial rule
+ fun has_skolem_substep st NONE = if is_skolemization (rule_of st) then SOME (rule_of st)
+ else fold has_skolem_substep (subproof_of st) NONE
+ | has_skolem_substep _ a = a
+ val promote_to_skolem = exists (fn t => member (op =) skolems t) prems
+ val promote_from_assms = fold has_skolem_substep subproof NONE <> NONE
+ val promote_step = promote_to_skolem orelse promote_from_assms
+ val skolem_step_to_skip = is_skolemization rule orelse
+ (promote_from_assms andalso length prems > 1)
+ val is_skolem = is_skolemization rule orelse promote_step
+ val prems = prems
+ |> filter_out (fn t => member (op =) skolems t)
+ |> is_skolem ? filter_out (String.isPrefix id)
+ val rule = (if promote_step then default_skolem_rule else rule)
+ val subproof = subproof
+ |> (is_skolem ? K []) (*subproofs of skolemization steps are useless for SH*)
+ |> map (fst o (fn st => remove_skolem_definitions st (prems_to_remove, skolems)))
+ (*no new definitions in subproofs*)
+ |> flat
+ val concl = concl
+ |> is_skolem ? replace_equivalent_by_imp
+ val step = (if skolem_step_to_skip orelse rule = lethe_def orelse trivial_step then []
+ else mk_replay_node id rule args prems proof_ctxt concl bounds insts declarations
+ (vars, assms', extra_assms', subproof)
+ |> single)
+ val defs = (if rule = lethe_def orelse trivial_step then id :: prems_to_remove
+ else prems_to_remove)
+ val skolems = (if skolem_step_to_skip then id :: skolems else skolems)
+ in
+ (step, (defs, skolems))
+ end
+ in
+ fold_map remove_skolem_definitions steps ([], [])
+ |> fst
+ |> flat
+ end
+
+local
+ (*TODO useful?*)
+ fun remove_pattern (SMTLIB.S (SMTLIB.Sym "!" :: t :: [SMTLIB.Key _, SMTLIB.S _])) = t
+ | remove_pattern (SMTLIB.S xs) = SMTLIB.S (map remove_pattern xs)
+ | remove_pattern p = p
+
+ fun import_proof_and_post_process typs funs lines ctxt =
+ let
+ val compress = SMT_Config.compress_verit_proofs ctxt
+ val smtlib_lines_without_qm =
+ lines
+ |> map single
+ |> map SMTLIB.parse
+ |> map remove_all_qm2
+ |> map remove_pattern
+ val (raw_steps, _, _) =
+ parse_raw_proof_steps NONE smtlib_lines_without_qm SMTLIB_Proof.empty_name_binding
+
+ fun process step (cx, cx') =
+ let fun postprocess step (cx, cx') =
+ let val (step, cx) = postprocess_proof compress ctxt step cx
+ in (step, (cx, cx')) end
+ in uncurry (fold_map postprocess) (preprocess compress step (cx, cx')) end
+ val step =
+ (empty_context ctxt typs funs, [])
+ |> fold_map process raw_steps
+ |> (fn (steps, (cx, _)) => (flat steps, cx))
+ |> compress? apfst combine_proof_steps
+ in step end
+in
+
+fun parse typs funs lines ctxt =
+ let
+ val (u, env) = import_proof_and_post_process typs funs lines ctxt
+ val t = u
+ |> remove_skolem_definitions_proof
+ |> flat o (map linearize_proof)
+ fun node_to_step (Lethe_Node {id, rule, prems, concl, ...}) =
+ mk_step id rule prems [] concl []
+ in
+ (map node_to_step t, ctxt_of env)
+ end
+
+fun parse_replay typs funs lines ctxt =
+ let
+ val (u, env) = import_proof_and_post_process typs funs lines ctxt
+ val _ = (SMT_Config.verit_msg ctxt) (fn () => \<^print> u)
+ in
+ (u, ctxt_of env)
+ end
+end
+
+end;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Tools/SMT/lethe_proof_parse.ML Wed Mar 23 10:54:22 2022 +0100
@@ -0,0 +1,76 @@
+(* Title: HOL/Tools/SMT/lethe_proof_parse.ML
+ Author: Mathias Fleury, TU Muenchen
+ Author: Jasmin Blanchette, TU Muenchen
+
+Lethe proof parsing.
+*)
+
+signature LETHE_PROOF_PARSE =
+sig
+ type ('a, 'b) atp_step = ('a, 'b) ATP_Proof.atp_step
+ val parse_proof: SMT_Translate.replay_data ->
+ ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list ->
+ SMT_Solver.parsed_proof
+end;
+
+structure Lethe_Proof_Parse: LETHE_PROOF_PARSE =
+struct
+
+open ATP_Util
+open ATP_Problem
+open ATP_Proof
+open ATP_Proof_Reconstruct
+open Lethe_Isar
+open Lethe_Proof
+
+fun parse_proof
+ ({context = ctxt, typs, terms, ll_defs, rewrite_rules, assms} : SMT_Translate.replay_data)
+ xfacts prems concl output =
+ let
+ val num_ll_defs = length ll_defs
+
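+    (*assertion indices are shifted by the number of lambda-lifting definitions, which occupy the
+      first assertion ids*)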
+ val id_of_index = Integer.add num_ll_defs
+ val index_of_id = Integer.add (~ num_ll_defs)
+
+ fun step_of_assume j (_, th) =
+ Lethe_Proof.Lethe_Step {id = SMTLIB_Interface.assert_name_of_role_and_index SMT_Util.Axiom (id_of_index j),
+ rule = input_rule, prems = [], proof_ctxt = [], concl = Thm.prop_of th, fixes = []}
+
+ val (actual_steps, _) = Lethe_Proof.parse typs terms output ctxt
+ val used_assert_ids =
+ map_filter (try (snd o SMTLIB_Interface.role_and_index_of_assert_name)) output
+ val used_assm_js =
+ map_filter (fn id => let val i = index_of_id id in if i >= 0 then SOME i else NONE end)
+ used_assert_ids
+ val used_assms = map (nth assms) used_assm_js
+ val assm_steps = map2 step_of_assume used_assm_js used_assms
+ val steps = assm_steps @ actual_steps
+
+ val conjecture_i = 0
+ val prems_i = conjecture_i + 1
+ val num_prems = length prems
+ val facts_i = prems_i + num_prems
+ val num_facts = length xfacts
+ val helpers_i = facts_i + num_facts
+
+ val conjecture_id = id_of_index conjecture_i
+ val prem_ids = map id_of_index (prems_i upto prems_i + num_prems - 1)
+ val fact_ids' =
+ map_filter (fn j =>
+ let val (i, _) = nth assms j in
+ try (apsnd (nth xfacts)) (id_of_index j, i - facts_i)
+ end) used_assm_js
+ val helper_ids' = filter (fn (i, _) => i >= helpers_i) used_assms
+
+ val fact_helper_ts =
+ map (fn (_, th) => (ATP_Util.short_thm_name ctxt th, Thm.prop_of th)) helper_ids' @
+ map (fn (_, ((s, _), th)) => (s, Thm.prop_of th)) fact_ids'
+ val fact_helper_ids' =
+ map (apsnd (ATP_Util.short_thm_name ctxt)) helper_ids' @ map (apsnd (fst o fst)) fact_ids'
+ in
+ {outcome = NONE, fact_ids = SOME fact_ids',
+ atp_proof = fn () => atp_proof_of_veriT_proof ctxt ll_defs rewrite_rules prems concl
+ fact_helper_ts prem_ids conjecture_id fact_helper_ids' steps}
+ end
+
+end;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Tools/SMT/lethe_replay_methods.ML Wed Mar 23 10:54:22 2022 +0100
@@ -0,0 +1,1191 @@
+(* Title: HOL/Tools/SMT/lethe_replay_methods.ML
+ Author: Mathias Fleury, MPII, JKU, University Freiburg
+
+Proof methods for replaying Lethe proofs, as produced e.g. by veriT.
+*)
+
+signature LETHE_REPLAY_METHODS =
+sig
+
+
+ datatype verit_rule =
+ False | True |
+
+  (*input: a repeated (normalized) assumption or an assumption local to a subproof*)
+ Normalized_Input | Local_Input |
+ (*Subproof:*)
+ Subproof |
+ (*Conjunction:*)
+ And | Not_And | And_Pos | And_Neg |
+  (*Disjunction:*)
+ Or | Or_Pos | Not_Or | Or_Neg |
+  (*Implication:*)
+ Implies | Implies_Neg1 | Implies_Neg2 | Implies_Pos | Not_Implies1 | Not_Implies2 |
+ (*Equivalence:*)
+ Equiv_neg1 | Equiv_pos1 | Equiv_pos2 | Equiv_neg2 | Not_Equiv1 | Not_Equiv2 | Equiv1 | Equiv2 |
+ (*If-then-else:*)
+ ITE_Pos1 | ITE_Pos2 | ITE_Neg1 | ITE_Neg2 | Not_ITE1 | Not_ITE2 | ITE_Intro | ITE1 | ITE2 |
+ (*Equality:*)
+ Eq_Congruent | Eq_Reflexive | Eq_Transitive | Eq_Congruent_Pred | Trans | Refl | Cong |
+ (*Arithmetics:*)
+ LA_Disequality | LA_Generic | LA_Tautology | LIA_Generic | LA_Totality | LA_RW_Eq |
+ NLA_Generic |
+ (*Quantifiers:*)
+ Forall_Inst | Qnt_Rm_Unused | Qnt_Join | Onepoint | Bind | Skolem_Forall | Skolem_Ex |
+ (*Resolution:*)
+ Theory_Resolution | Resolution |
+  (*Temporary rules that the veriT developers want to remove:*)
+ AC_Simp |
+ Bfun_Elim |
+ Qnt_CNF |
+ (*Used to introduce skolem constants*)
+ Definition |
+ (*Former cong rules*)
+ Bool_Simplify | Or_Simplify | Not_Simplify | And_Simplify | Equiv_Simplify |
+ Implies_Simplify | Connective_Def | Minus_Simplify | Comp_Simplify |
+ Eq_Simplify | ITE_Simplify | Sum_Simplify | Prod_Simplify | All_Simplify |
+ Qnt_Simplify | Not_Not | Tautological_Clause | Duplicate_Literals |
+ (*Unsupported rule*)
+ Unsupported |
+ (*For compression*)
+ Theory_Resolution2 |
+ (*Extended rules*)
+ Symm | Not_Symm | Reordering | Tmp_Quantifier_Simplify
+
+ val requires_subproof_assms : string list -> string -> bool
+ val requires_local_input: string list -> string -> bool
+
+ val string_of_verit_rule: verit_rule -> string
+
+ type lethe_tac_args = Proof.context -> thm list -> term list -> term -> thm
+ type lethe_tac = Proof.context -> thm list -> term -> thm
+ type lethe_tac2 = Proof.context -> thm list -> term list -> term Symtab.table -> term -> thm
+ val bind: lethe_tac_args
+ val and_rule: lethe_tac
+ val and_neg_rule: lethe_tac
+ val and_pos: lethe_tac
+ val rewrite_and_simplify: lethe_tac
+ val rewrite_bool_simplify: lethe_tac
+ val rewrite_comp_simplify: lethe_tac
+ val rewrite_minus_simplify: lethe_tac
+ val rewrite_not_simplify: lethe_tac
+ val rewrite_eq_simplify: lethe_tac
+ val rewrite_equiv_simplify: lethe_tac
+ val rewrite_implies_simplify: lethe_tac
+ val rewrite_or_simplify: lethe_tac
+ val cong: lethe_tac
+ val rewrite_connective_def: lethe_tac
+ val duplicate_literals: lethe_tac
+ val eq_congruent: lethe_tac
+ val eq_congruent_pred: lethe_tac
+ val eq_reflexive: lethe_tac
+ val eq_transitive: lethe_tac
+ val equiv1: lethe_tac
+ val equiv2: lethe_tac
+ val equiv_neg1: lethe_tac
+ val equiv_neg2: lethe_tac
+ val equiv_pos1: lethe_tac
+ val equiv_pos2: lethe_tac
+ val false_rule: lethe_tac
+ val forall_inst: lethe_tac2
+ val implies_rules: lethe_tac
+ val implies_neg1: lethe_tac
+ val implies_neg2: lethe_tac
+ val implies_pos: lethe_tac
+ val ite1: lethe_tac
+ val ite2: lethe_tac
+ val ite_intro: lethe_tac
+ val ite_neg1: lethe_tac
+ val ite_neg2: lethe_tac
+ val ite_pos1: lethe_tac
+ val ite_pos2: lethe_tac
+ val rewrite_ite_simplify: lethe_tac
+ val la_disequality: lethe_tac
+ val la_generic: lethe_tac_args
+ val la_rw_eq: lethe_tac
+ val lia_generic: lethe_tac
+ val refl: lethe_tac
+ val normalized_input: lethe_tac
+ val not_and_rule: lethe_tac
+ val not_equiv1: lethe_tac
+ val not_equiv2: lethe_tac
+ val not_implies1: lethe_tac
+ val not_implies2: lethe_tac
+ val not_ite1: lethe_tac
+ val not_ite2: lethe_tac
+ val not_not: lethe_tac
+ val not_or_rule: lethe_tac
+ val or: lethe_tac
+ val or_neg_rule: lethe_tac
+ val or_pos_rule: lethe_tac
+ val theory_resolution2: lethe_tac
+ val prod_simplify: lethe_tac
+ val qnt_join: lethe_tac
+ val qnt_rm_unused: lethe_tac
+ val onepoint: lethe_tac
+ val qnt_simplify: lethe_tac
+ val all_simplify: lethe_tac
+ val unit_res: lethe_tac
+ val skolem_ex: lethe_tac
+ val skolem_forall: lethe_tac
+ val subproof: lethe_tac
+ val sum_simplify: lethe_tac
+ val tautological_clause: lethe_tac
+ val tmp_AC_rule: lethe_tac
+ val bfun_elim: lethe_tac
+ val qnt_cnf: lethe_tac
+ val trans: lethe_tac
+ val symm: lethe_tac
+ val not_symm: lethe_tac
+ val reordering: lethe_tac
+
+(*
+ val : lethe_tac
+*)
+ val REPEAT_CHANGED: ('a -> tactic) -> 'a -> tactic
+ val TRY': ('a -> tactic) -> 'a -> tactic
+
+end;
+
+
+structure Lethe_Replay_Methods: LETHE_REPLAY_METHODS =
+struct
+
+type lethe_tac_args = Proof.context -> thm list -> term list -> term -> thm
+type lethe_tac = Proof.context -> thm list -> term -> thm
+type lethe_tac2 = Proof.context -> thm list -> term list -> term Symtab.table -> term -> thm
+
+(*Some general comments on the proof format:
+ 1. Double negations are not always removed. This means for example that the equivalence rules
+ cannot assume that the double negations have already been removed. Therefore, we match the
+ term, instantiate the theorem, then use simp (to remove double negations), and finally use
+ assumption.
+ 2. The reconstruction for rule forall_inst is buggy and tends to be very fragile, because the rule
+ is doing much more that is supposed to do. Moreover types can make trivial goals (for the
+     is doing much more than it is supposed to do. Moreover, types can make trivial goals (for the
+ 3. Duplicate literals are sometimes removed, mostly by the SAT solver.
+
+ Rules unsupported on purpose:
+ * Distinct_Elim, XOR, let (we don't need them).
+    * deep_skolemize (because it is not clear if veriT still generates proofs using it).
+*)
+
+
+datatype verit_rule =
+ False | True |
+
+  (*input: a repeated (normalized) assumption or an assumption local to a subproof*)
+ Normalized_Input | Local_Input |
+ (*Subproof:*)
+ Subproof |
+ (*Conjunction:*)
+ And | Not_And | And_Pos | And_Neg |
+  (*Disjunction:*)
+ Or | Or_Pos | Not_Or | Or_Neg |
+  (*Implication:*)
+ Implies | Implies_Neg1 | Implies_Neg2 | Implies_Pos | Not_Implies1 | Not_Implies2 |
+ (*Equivalence:*)
+ Equiv_neg1 | Equiv_pos1 | Equiv_pos2 | Equiv_neg2 | Not_Equiv1 | Not_Equiv2 | Equiv1 | Equiv2 |
+ (*If-then-else:*)
+ ITE_Pos1 | ITE_Pos2 | ITE_Neg1 | ITE_Neg2 | Not_ITE1 | Not_ITE2 | ITE_Intro | ITE1 | ITE2 |
+ (*Equality:*)
+ Eq_Congruent | Eq_Reflexive | Eq_Transitive | Eq_Congruent_Pred | Trans | Refl | Cong |
+ (*Arithmetics:*)
+ LA_Disequality | LA_Generic | LA_Tautology | LIA_Generic | LA_Totality | LA_RW_Eq |
+ NLA_Generic |
+ (*Quantifiers:*)
+ Forall_Inst | Qnt_Rm_Unused | Qnt_Join | Onepoint | Bind | Skolem_Forall | Skolem_Ex |
+ (*Resolution:*)
+ Theory_Resolution | Resolution |
+  (*Temporary rules that the veriT developers want to remove:*)
+ AC_Simp |
+ Bfun_Elim |
+ Qnt_CNF |
+ (*Used to introduce skolem constants*)
+ Definition |
+ (*Former cong rules*)
+ Bool_Simplify | Or_Simplify | Not_Simplify | And_Simplify | Equiv_Simplify |
+ Implies_Simplify | Connective_Def | Minus_Simplify | Comp_Simplify |
+ Eq_Simplify | ITE_Simplify | Sum_Simplify | Prod_Simplify | All_Simplify |
+ Qnt_Simplify | Not_Not | Tautological_Clause | Duplicate_Literals |
+ (*Unsupported rule*)
+ Unsupported |
+ (*For compression*)
+ Theory_Resolution2 |
+ (*Extended rules*)
+ Symm | Not_Symm | Reordering | Tmp_Quantifier_Simplify
+
+fun string_of_verit_rule Bind = "Bind"
+ | string_of_verit_rule Cong = "Cong"
+ | string_of_verit_rule Refl = "Refl"
+ | string_of_verit_rule Equiv1 = "Equiv1"
+ | string_of_verit_rule Equiv2 = "Equiv2"
+ | string_of_verit_rule Equiv_pos1 = "Equiv_pos1"
+ | string_of_verit_rule Equiv_pos2 = "Equiv_pos2"
+ | string_of_verit_rule Equiv_neg1 = "Equiv_neg1"
+ | string_of_verit_rule Equiv_neg2 = "Equiv_neg2"
+ | string_of_verit_rule Skolem_Forall = "Skolem_Forall"
+ | string_of_verit_rule Skolem_Ex = "Skolem_Ex"
+ | string_of_verit_rule Eq_Reflexive = "Eq_Reflexive"
+ | string_of_verit_rule Theory_Resolution = "Theory_Resolution"
+ | string_of_verit_rule Theory_Resolution2 = "Theory_Resolution2"
+ | string_of_verit_rule Forall_Inst = "forall_inst"
+ | string_of_verit_rule Or = "Or"
+ | string_of_verit_rule Not_Or = "Not_Or"
+ | string_of_verit_rule Resolution = "Resolution"
+ | string_of_verit_rule Eq_Congruent = "eq_congruent"
+ | string_of_verit_rule Trans = "trans"
+ | string_of_verit_rule False = "false"
+ | string_of_verit_rule And = "and"
+ | string_of_verit_rule And_Pos = "and_pos"
+ | string_of_verit_rule Not_And = "not_and"
+ | string_of_verit_rule And_Neg = "and_neg"
+ | string_of_verit_rule Or_Pos = "or_pos"
+ | string_of_verit_rule Or_Neg = "or_neg"
+ | string_of_verit_rule AC_Simp = "ac_simp"
+ | string_of_verit_rule Not_Equiv1 = "not_equiv1"
+ | string_of_verit_rule Not_Equiv2 = "not_equiv2"
+ | string_of_verit_rule Not_Implies1 = "not_implies1"
+ | string_of_verit_rule Not_Implies2 = "not_implies2"
+ | string_of_verit_rule Implies_Neg1 = "implies_neg1"
+ | string_of_verit_rule Implies_Neg2 = "implies_neg2"
+ | string_of_verit_rule Implies = "implies"
+ | string_of_verit_rule Bfun_Elim = "bfun_elim"
+ | string_of_verit_rule ITE1 = "ite1"
+ | string_of_verit_rule ITE2 = "ite2"
+ | string_of_verit_rule Not_ITE1 = "not_ite1"
+ | string_of_verit_rule Not_ITE2 = "not_ite2"
+ | string_of_verit_rule ITE_Pos1 = "ite_pos1"
+ | string_of_verit_rule ITE_Pos2 = "ite_pos2"
+ | string_of_verit_rule ITE_Neg1 = "ite_neg1"
+ | string_of_verit_rule ITE_Neg2 = "ite_neg2"
+ | string_of_verit_rule ITE_Intro = "ite_intro"
+ | string_of_verit_rule LA_Disequality = "la_disequality"
+ | string_of_verit_rule LA_Generic = "la_generic"
+ | string_of_verit_rule LIA_Generic = "lia_generic"
+ | string_of_verit_rule LA_Tautology = "la_tautology"
+ | string_of_verit_rule LA_RW_Eq = "la_rw_eq"
+ | string_of_verit_rule LA_Totality = "LA_Totality"
+ | string_of_verit_rule NLA_Generic = "nla_generic"
+ | string_of_verit_rule Eq_Transitive = "eq_transitive"
+ | string_of_verit_rule Qnt_Rm_Unused = "qnt_remove_unused"
+ | string_of_verit_rule Onepoint = "onepoint"
+ | string_of_verit_rule Qnt_Join = "qnt_join"
+ | string_of_verit_rule Eq_Congruent_Pred = "eq_congruent_pred"
+ | string_of_verit_rule Normalized_Input = Lethe_Proof.normalized_input_rule
+ | string_of_verit_rule Local_Input = Lethe_Proof.local_input_rule
+ | string_of_verit_rule Subproof = "subproof"
+ | string_of_verit_rule Bool_Simplify = "bool_simplify"
+ | string_of_verit_rule Equiv_Simplify = "equiv_simplify"
+ | string_of_verit_rule Eq_Simplify = "eq_simplify"
+ | string_of_verit_rule Not_Simplify = "not_simplify"
+ | string_of_verit_rule And_Simplify = "and_simplify"
+ | string_of_verit_rule Or_Simplify = "or_simplify"
+ | string_of_verit_rule ITE_Simplify = "ite_simplify"
+ | string_of_verit_rule Implies_Simplify = "implies_simplify"
+ | string_of_verit_rule Connective_Def = "connective_def"
+ | string_of_verit_rule Minus_Simplify = "minus_simplify"
+ | string_of_verit_rule Sum_Simplify = "sum_simplify"
+ | string_of_verit_rule Prod_Simplify = "prod_simplify"
+ | string_of_verit_rule All_Simplify = "all_simplify"
+ | string_of_verit_rule Comp_Simplify = "comp_simplify"
+ | string_of_verit_rule Qnt_Simplify = "qnt_simplify"
+ | string_of_verit_rule Symm = "symm"
+ | string_of_verit_rule Not_Symm = "not_symm"
+ | string_of_verit_rule Reordering = "reordering"
+ | string_of_verit_rule Not_Not = Lethe_Proof.not_not_rule
+ | string_of_verit_rule Tautological_Clause = "tautology"
+ | string_of_verit_rule Duplicate_Literals = Lethe_Proof.contract_rule
+ | string_of_verit_rule Qnt_CNF = "qnt_cnf"
+ | string_of_verit_rule r = "Unknown rule: " ^ \<^make_string> r
+
+fun replay_error ctxt msg rule thms t =
+ SMT_Replay_Methods.replay_error ctxt msg (string_of_verit_rule rule) thms t
+
+(* utility function *)
+
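+(*Rewrite exhaustively with the given equations, first in the assumptions and then in the
+  conclusion of the first subgoal.*)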
+fun eqsubst_all ctxt thms =
+ K ((REPEAT o HEADGOAL) (EqSubst.eqsubst_asm_tac ctxt [0] thms))
+ THEN' K ((REPEAT o HEADGOAL) (EqSubst.eqsubst_tac ctxt [0] thms))
+
+fun simplify_tac ctxt thms =
+ ctxt
+ |> empty_simpset
+ |> put_simpset HOL_basic_ss
+ |> (fn ctxt => ctxt addsimps @{thms simp_thms} addsimps thms)
+ |> Simplifier.asm_full_simp_tac
+
+(* sko_forall requires the assumptions to be able to prove the equivalence in case of double
+skolemization. See comment below. *)
+fun requires_subproof_assms _ t =
+ member (op =) ["refl", "sko_forall", "sko_ex", "cong"] t
+
+fun requires_local_input _ t =
+ member (op =) [Lethe_Proof.local_input_rule] t
+
+(*A weaker form of simplification: less powerful, but also less likely to loop.*)
+fun partial_simplify_tac ctxt thms =
+ ctxt
+ |> empty_simpset
+ |> put_simpset HOL_basic_ss
+ |> (fn ctxt => ctxt addsimps @{thms simp_thms} addsimps thms)
+ |> Simplifier.full_simp_tac
+
+val try_provers = SMT_Replay_Methods.try_provers "verit"
+
+fun TRY' tac = fn i => TRY (tac i)
+fun REPEAT' tac = fn i => REPEAT (tac i)
+fun REPEAT_CHANGED tac = fn i => REPEAT (CHANGED (tac i))
+
+
+(* Bind *)
+
+(*The bind rule is non-obvious due to the handling of quantifiers:
+ "\<And>x y a. x = y ==> (\<forall>b. P a b x) \<longleftrightarrow> (\<forall>b. P' a b y)"
+ ------------------------------------------------------
+ \<forall>a. (\<forall>b x. P a b x) \<longleftrightarrow> (\<forall>b y. P' a b y)
+is a valid application.*)
+
+val bind_thms =
+ [@{lemma \<open>(\<And>x x'. x = x' \<Longrightarrow> P x = Q x') \<Longrightarrow> (\<forall>x. P x) = (\<forall>y. Q y)\<close> by blast},
+ @{lemma \<open>(\<And>x x'. x = x' \<Longrightarrow> P x = Q x') \<Longrightarrow> (\<exists>x. P x) = (\<exists>y. Q y)\<close> by blast},
+ @{lemma \<open>(\<And>x x'. x = x' \<Longrightarrow> P x = Q x') \<Longrightarrow> (\<exists>x. P x = Q x)\<close> by blast},
+ @{lemma \<open>(\<And>x x'. x = x' \<Longrightarrow> P x = Q x') \<Longrightarrow> (\<forall>x. P x = Q x)\<close> by blast}]
+
+val bind_thms_same_name =
+ [@{lemma \<open>(\<And>x. P x = Q x) \<Longrightarrow> (\<forall>x. P x) = (\<forall>y. Q y)\<close> by blast},
+ @{lemma \<open>(\<And>x. P x = Q x) \<Longrightarrow> (\<exists>x. P x) = (\<exists>y. Q y)\<close> by blast},
+ @{lemma \<open>(\<And>x. P x = Q x) \<Longrightarrow> (\<exists>x. P x = Q x)\<close> by blast},
+ @{lemma \<open>(\<And>x. P x = Q x) \<Longrightarrow> (\<forall>x. P x = Q x)\<close> by blast}]
+
+fun extract_quantified_names_q (_ $ Abs (name, _, t)) =
+ apfst (curry (op ::) name) (extract_quantified_names_q t)
+ | extract_quantified_names_q t = ([], t)
+
+fun extract_forall_quantified_names_q (Const(\<^const_name>\<open>HOL.All\<close>, _) $ Abs (name, ty, t)) =
+ (name, ty) :: (extract_forall_quantified_names_q t)
+ | extract_forall_quantified_names_q _ = []
+
+fun extract_all_forall_quantified_names_q (Const(\<^const_name>\<open>HOL.All\<close>, _) $ Abs (name, _, t)) =
+ name :: (extract_all_forall_quantified_names_q t)
+ | extract_all_forall_quantified_names_q (t $ u) =
+ extract_all_forall_quantified_names_q t @ extract_all_forall_quantified_names_q u
+ | extract_all_forall_quantified_names_q _ = []
+
+val extract_quantified_names_ba =
+ SMT_Replay_Methods.dest_prop
+ #> extract_quantified_names_q
+ ##> HOLogic.dest_eq
+ ##> fst
+ ##> extract_quantified_names_q
+ ##> fst
+
+val extract_quantified_names =
+ extract_quantified_names_ba
+ #> (op @)
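+
+(*For illustration: on the conclusion \<forall>a. (\<forall>b x. P a b x) \<longleftrightarrow> (\<forall>b y. P' a b y) from the comment
+above, extract_quantified_names_ba yields (["a"], ["b", "x"]) -- the outer quantifiers of the
+whole equivalence and those of its left-hand side -- and extract_quantified_names concatenates
+them to ["a", "b", "x"].*)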
+
+val extract_all_forall_quantified_names =
+ SMT_Replay_Methods.dest_prop
+ #> HOLogic.dest_eq
+ #> fst
+ #> extract_all_forall_quantified_names_q
+
+
+fun extract_all_exists_quantified_names_q (Const(\<^const_name>\<open>HOL.Ex\<close>, _) $ Abs (name, _, t)) =
+ name :: (extract_all_exists_quantified_names_q t)
+ | extract_all_exists_quantified_names_q (t $ u) =
+ extract_all_exists_quantified_names_q t @ extract_all_exists_quantified_names_q u
+ | extract_all_exists_quantified_names_q _ = []
+
+val extract_all_exists_quantified_names =
+ SMT_Replay_Methods.dest_prop
+ #> HOLogic.dest_eq
+ #> fst
+ #> extract_all_exists_quantified_names_q
+
+
+val extract_bind_names =
+ HOLogic.dest_eq
+ #> apply2 (fn (Free (name, _)) => name)
+
+fun combine_quant ctxt ((n1, n2) :: bounds) (n1' :: formula) =
+ TRY' (if n1 = n1'
+ then if n1 <> n2
+ then
+ resolve_tac ctxt bind_thms
+ THEN' TRY' (resolve_tac ctxt [@{thm refl}])
+ THEN' combine_quant ctxt bounds formula
+ else resolve_tac ctxt bind_thms_same_name THEN' combine_quant ctxt bounds formula
+ else resolve_tac ctxt @{thms allI} THEN' combine_quant ctxt ((n1, n2) :: bounds) formula)
+ | combine_quant _ _ _ = K all_tac
+
+fun bind_quants ctxt args t =
+ combine_quant ctxt (map extract_bind_names args) (extract_quantified_names t)
+
+fun generalize_prems_q [] prems = prems
+ | generalize_prems_q (_ :: quants) prems =
+ generalize_prems_q quants (@{thm spec} OF [prems])
+
+fun generalize_prems t = generalize_prems_q (fst (extract_quantified_names_ba t))
+
+fun bind ctxt [prems] args t =
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ bind_quants ctxt args t
+ THEN' TRY' (SOLVED' (resolve_tac ctxt [generalize_prems t prems]
+ THEN_ALL_NEW TRY' (assume_tac ctxt ORELSE' resolve_tac ctxt @{thms refl}))))
+ | bind ctxt thms _ t = replay_error ctxt "invalid bind application" Bind thms t
+
+
+(* Congruence/Refl *)
+
+fun cong ctxt thms = try_provers ctxt
+ (string_of_verit_rule Cong) [
+ ("basic", SMT_Replay_Methods.cong_basic ctxt thms),
+ ("unfolding then reflexivity", SMT_Replay_Methods.cong_unfolding_trivial ctxt thms),
+ ("unfolding then auto", SMT_Replay_Methods.cong_unfolding_first ctxt thms),
+ ("full", SMT_Replay_Methods.cong_full ctxt thms)] thms
+
+fun refl ctxt thm t =
+ (case find_first (fn thm => t = Thm.full_prop_of thm) thm of
+ SOME thm => thm
+ | NONE =>
+ (case try (fn t => SMT_Replay_Methods.match_instantiate ctxt t @{thm refl}) t of
+ NONE => cong ctxt thm t
+ | SOME thm => thm))
+
+(* Instantiation *)
+
+local
+fun dropWhile _ [] = []
+ | dropWhile f (x :: xs) = if f x then dropWhile f xs else x :: xs
+in
+
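+(*forall_inst replays an instantiation step whose conclusion has the shape
+\<not>(\<forall>x1 ... xn. P) \<or> P[t1/x1, ..., tn/xn]: the goal is first turned into an implication via
+disj_not1/iffD2/impI, the focused premise \<forall>x1 ... xn. P is specialised with the recorded
+instantiations (repeated use of spec), and the remaining goal is closed by assumption or by a
+light rewrite with eq_commute.*)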
+fun forall_inst ctxt _ _ insts t =
+ let
+ fun instantiate_and_solve i ({context = ctxt, prems = [prem], ...}: Subgoal.focus) =
+ let
+ val t = Thm.prop_of prem
+ val quants = t
+ |> SMT_Replay_Methods.dest_prop
+ |> extract_forall_quantified_names_q
+ val insts = map (Symtab.lookup insts o fst) (rev quants)
+ |> dropWhile (curry (op =) NONE)
+ |> rev
+ fun option_map _ NONE = NONE
+ | option_map f (SOME a) = SOME (f a)
+ fun instantiate_with inst prem =
+ Drule.infer_instantiate' ctxt [NONE, inst] @{thm spec} OF [prem]
+ val inst_thm =
+ fold instantiate_with
+ (map (option_map (Thm.cterm_of ctxt)) insts)
+ prem
+ in
+ (Method.insert_tac ctxt [inst_thm]
+ THEN' TRY' (fn i => assume_tac ctxt i)
+ THEN' TRY' (partial_simplify_tac ctxt @{thms eq_commute})) i
+ end
+ | instantiate_and_solve _ ({context = ctxt, prems = thms, ...}: Subgoal.focus) =
+ replay_error ctxt "invalid application" Forall_Inst thms t
+ in
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ match_tac ctxt [@{thm disj_not1} RSN (1, @{thm iffD2}) OF [@{thm impI}]]
+ THEN' (fn i => Subgoal.FOCUS (instantiate_and_solve i) ctxt i))
+ end
+end
+
+
+(* Or *)
+
+fun or _ (thm :: _) _ = thm
+ | or ctxt thms t = replay_error ctxt "invalid or application" Or thms t
+
+
+(* Implication *)
+
+val implies_pos_thm =
+ [@{lemma \<open>\<not>(A \<longrightarrow> B) \<or> \<not>A \<or> B\<close> by blast},
+ @{lemma \<open>\<not>(\<not>A \<longrightarrow> B) \<or> A \<or> B\<close> by blast}]
+
+fun implies_pos ctxt _ t = SMT_Replay_Methods.prove ctxt t (fn _ =>
+ resolve_tac ctxt implies_pos_thm)
+
+(* Skolemization *)
+
+fun extract_rewrite_rule_assumption _ thms =
+ let
+ fun is_rewrite_rule thm =
+ (case Thm.prop_of thm of
+ \<^term>\<open>Trueprop\<close> $ (Const(\<^const_name>\<open>HOL.eq\<close>, _) $ _ $ Free(_, _)) => true
+ | _ => false)
+ fun is_context_rule thm =
+ (case Thm.prop_of thm of
+ \<^term>\<open>Trueprop\<close> $ (Const(\<^const_name>\<open>HOL.eq\<close>, _) $ Free(_, _) $ Free(_, _)) => true
+ | _ => false)
+ val ctxt_eq =
+ thms
+ |> filter is_context_rule
+ val rew =
+ thms
+ |> filter_out is_context_rule
+ |> filter is_rewrite_rule
+ in
+ (ctxt_eq, rew)
+ end
+
+local
+ fun rewrite_all_skolems thm_indirect ctxt (SOME thm :: thms) =
+ EqSubst.eqsubst_tac ctxt [0] [thm_indirect OF [thm]]
+ THEN' (partial_simplify_tac ctxt (@{thms eq_commute}))
+ THEN' rewrite_all_skolems thm_indirect ctxt thms
+ | rewrite_all_skolems thm_indirect ctxt (NONE :: thms) = rewrite_all_skolems thm_indirect ctxt thms
+ | rewrite_all_skolems _ _ [] = K (all_tac)
+
+ fun extract_var_name (thm :: thms) =
+ let val name = Thm.concl_of thm
+ |> SMT_Replay_Methods.dest_prop
+ |> HOLogic.dest_eq
+ |> fst
+ |> (fn Const(_,_) $ Abs(name, _, _) => [(name,@{thm sym} OF [thm])]
+ | _ => [])
+ in name :: extract_var_name thms end
+ | extract_var_name [] = []
+
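+(*Common driver for the sko_ex/sko_forall rules: the context equations are unfolded first; then
+either all choice definitions can be folded away at once via thm1 (a *_indirect lemma), or the
+Skolem constants are rewritten one by one via thm2 (a *_indirect2 lemma) in the variable order
+extracted from the goal (ordered_definitions), followed by a cleanup with eq_commute.*)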
+fun skolem_tac extractor thm1 thm2 ctxt thms t =
+ let
+ val (ctxt_eq, ts) = extract_rewrite_rule_assumption ctxt thms
+ fun ordered_definitions () =
+ let
+ val var_order = extractor t
+ val thm_names_with_var = extract_var_name ts |> flat
+ in map (AList.lookup (op =) thm_names_with_var) var_order end
+
+ in
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ K (unfold_tac ctxt ctxt_eq)
+ THEN' ((SOLVED' (K (unfold_tac ctxt (map (fn thm => thm1 OF [@{thm sym} OF [thm]]) ts))))
+ ORELSE'
+ (rewrite_all_skolems thm2 ctxt (ordered_definitions ())
+ THEN' partial_simplify_tac ctxt @{thms eq_commute})))
+ end
+in
+
+val skolem_forall =
+ skolem_tac extract_all_forall_quantified_names @{thm verit_sko_forall_indirect}
+ @{thm verit_sko_forall_indirect2}
+
+val skolem_ex =
+ skolem_tac extract_all_exists_quantified_names @{thm verit_sko_ex_indirect}
+ @{thm verit_sko_ex_indirect2}
+
+end
+
+fun eq_reflexive ctxt _ t = SMT_Replay_Methods.match_instantiate ctxt t @{thm refl}
+
+local
+ fun not_not_prove ctxt _ =
+ K (unfold_tac ctxt @{thms not_not})
+ THEN' match_tac ctxt @{thms verit_or_simplify_1}
+
+ fun duplicate_literals_prove ctxt prems =
+ Method.insert_tac ctxt prems
+ THEN' match_tac ctxt @{thms ccontr}
+ THEN' K (unfold_tac ctxt @{thms de_Morgan_disj not_not})
+ THEN' TRY o CHANGED_PROP o REPEAT_ALL_NEW (ematch_tac ctxt @{thms conjE})
+ THEN' CHANGED_PROP o REPEAT_ALL_NEW (ematch_tac ctxt @{thms disjE})
+ THEN' REPEAT' (ematch_tac ctxt @{thms notE} THEN' TRY' (assume_tac ctxt))
+
+ fun tautological_clause_prove ctxt _ =
+ match_tac ctxt @{thms verit_or_neg}
+ THEN' K (unfold_tac ctxt @{thms not_not disj_assoc[symmetric]})
+ THEN' TRY' (match_tac ctxt @{thms notE} THEN_ALL_NEW assume_tac ctxt)
+
+ val theory_resolution2_lemma = @{lemma \<open>a \<Longrightarrow> a = b \<Longrightarrow> b\<close> by blast}
+
+in
+
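+(*prove_abstract brings the premises and the goal into eta-long form, abstracts their
+propositional structure with the given abstracter, proves the abstracted goal with tac via
+SMT_Replay_Methods.prove_abstract, and transfers the result back along the eta-expansion
+using verit_Pure_trans.*)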
+fun prove_abstract abstracter tac ctxt thms t =
+ let
+ val thms = map (Conv.fconv_rule Thm.eta_long_conversion) thms
+ val t' = Thm.eta_long_conversion (Object_Logic.dest_judgment ctxt (Thm.cterm_of ctxt t))
+ val (_, t2) = Logic.dest_equals (Thm.prop_of t')
+ val thm =
+ SMT_Replay_Methods.prove_abstract ctxt thms t2 tac (
+ fold_map (abstracter o SMT_Replay_Methods.dest_thm) thms ##>>
+ abstracter (SMT_Replay_Methods.dest_prop t2))
+ in
+ @{thm verit_Pure_trans} OF [t', thm]
+ end
+
+val not_not = prove_abstract SMT_Replay_Methods.abstract_bool_shallow not_not_prove
+
+val tautological_clause =
+ prove_abstract SMT_Replay_Methods.abstract_bool_shallow tautological_clause_prove
+
+val duplicate_literals =
+ prove_abstract SMT_Replay_Methods.abstract_bool_shallow duplicate_literals_prove
+
+val unit_res = prove_abstract SMT_Replay_Methods.abstract_bool_shallow SMT_Replay_Methods.prop_tac
+
+(*match_instantiate does not work*)
+fun theory_resolution2 ctxt prems t =
+ SMT_Replay_Methods.prove ctxt t (fn _ => match_tac ctxt [theory_resolution2_lemma OF prems])
+
+end
+
+
+fun normalized_input ctxt prems t = SMT_Replay_Methods.prove ctxt t (fn _ =>
+ Method.insert_tac ctxt prems
+ THEN' TRY' (K (unfold_tac ctxt @{thms SMT.trigger_def}))
+ THEN' TRY' (partial_simplify_tac ctxt @{thms eq_commute}))
+
+val false_rule_thm = @{lemma \<open>\<not>False\<close> by blast}
+
+fun false_rule ctxt _ t = SMT_Replay_Methods.match_instantiate ctxt t false_rule_thm
+
+
+(* Transitivity *)
+
+val trans_bool_thm =
+ @{lemma \<open>P = Q \<Longrightarrow> Q \<Longrightarrow> P\<close> by blast}
+
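+(*combine_thms chains the premises pairwise: e.g. from a = b and c = b, the second theorem is
+flipped with sym and composed with trans to obtain a = c; premises that are not equations fall
+back to trans_bool_thm. The final result is flipped once more if its orientation does not match
+the goal.*)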
+fun trans ctxt thms t =
+ let
+ val prop_of = HOLogic.dest_Trueprop o Thm.full_prop_of
+ fun combine_thms [thm1, thm2] =
+ (case (prop_of thm1, prop_of thm2) of
+ ((Const(\<^const_name>\<open>HOL.eq\<close>, _) $ t1 $ t2),
+ (Const(\<^const_name>\<open>HOL.eq\<close>, _) $ t3 $ t4)) =>
+ if t2 aconv t3 then thm1 RSN (1, thm2 RSN (2, @{thm trans}))
+ else if t2 aconv t4 then thm1 RSN ((1, ((thm2 RS sym)) RSN (2, @{thm trans})))
+ else if t1 aconv t4 then thm2 RSN (1, thm1 RSN (2, @{thm trans}))
+ else raise Fail "invalid trans theorem"
+ | _ => trans_bool_thm OF [thm1, thm2])
+ | combine_thms (thm1 :: thm2 :: thms) =
+ combine_thms (combine_thms [thm1, thm2] :: thms)
+ | combine_thms thms = replay_error ctxt "invalid trans application" Trans thms t
+ val t' = Thm.eta_long_conversion (Object_Logic.dest_judgment ctxt (Thm.cterm_of ctxt t))
+ val (_, t2) = Logic.dest_equals (Thm.prop_of t')
+ val thms = map (Conv.fconv_rule Thm.eta_long_conversion) thms
+ val trans_thm = combine_thms thms
+ in
+ (case (prop_of trans_thm, t2) of
+ ((Const(\<^const_name>\<open>HOL.eq\<close>, _) $ t1 $ _),
+ (Const(\<^const_name>\<open>HOL.eq\<close>, _) $ t3 $ _)) =>
+ if t1 aconv t3 then trans_thm else trans_thm RS sym
+ | _ => trans_thm (*to be on the safe side*))
+ end
+
+
+fun tmp_AC_rule ctxt thms t =
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ Method.insert_tac ctxt thms
+ THEN' REPEAT_ALL_NEW (partial_simplify_tac ctxt @{thms eq_commute}
+ THEN' TRY' (simplify_tac ctxt @{thms ac_simps conj_ac})
+ THEN' TRY' (Classical.fast_tac ctxt)))
+
+
+(* And/Or *)
+
+local
+ fun not_and_rule_prove ctxt prems =
+ Method.insert_tac ctxt prems
+ THEN' K (unfold_tac ctxt @{thms de_Morgan_conj})
+ THEN_ALL_NEW TRY' (assume_tac ctxt)
+
+ fun or_pos_prove ctxt _ =
+ K (unfold_tac ctxt @{thms de_Morgan_disj not_not})
+ THEN' match_tac ctxt @{thms verit_and_pos}
+ THEN' K (unfold_tac ctxt @{thms de_Morgan_conj de_Morgan_disj not_not})
+ THEN' TRY' (assume_tac ctxt)
+
+ fun not_or_rule_prove ctxt prems =
+ Method.insert_tac ctxt prems
+ THEN' K (unfold_tac ctxt @{thms de_Morgan_disj not_not})
+ THEN' TRY' (REPEAT' (match_tac ctxt @{thms conjI}))
+ THEN_ALL_NEW ((REPEAT' (ematch_tac ctxt @{thms conjE}))
+ THEN' TRY' (assume_tac ctxt))
+
+ fun and_rule_prove ctxt prems =
+ Method.insert_tac ctxt prems
+ THEN' (fn i => REPEAT (dresolve_tac ctxt @{thms conjE} i THEN assume_tac ctxt (i+1)))
+ THEN' TRY' (assume_tac ctxt)
+
+ fun and_neg_rule_prove ctxt _ =
+ match_tac ctxt @{thms verit_and_neg}
+ THEN' K (unfold_tac ctxt @{thms de_Morgan_conj not_not})
+ THEN' TRY' (assume_tac ctxt)
+
+ fun prover tac = prove_abstract SMT_Replay_Methods.abstract_prop tac
+
+in
+
+val and_rule = prover and_rule_prove
+
+val not_and_rule = prover not_and_rule_prove
+
+val not_or_rule = prover not_or_rule_prove
+
+val or_pos_rule = prover or_pos_prove
+
+val and_neg_rule = prover and_neg_rule_prove
+
+val or_neg_rule = prove_abstract SMT_Replay_Methods.abstract_bool (fn ctxt => fn _ =>
+ resolve_tac ctxt @{thms verit_or_neg}
+ THEN' K (unfold_tac ctxt @{thms not_not})
+ THEN_ALL_NEW
+ (REPEAT'
+ (SOLVED' (match_tac ctxt @{thms disjI1} THEN_ALL_NEW assume_tac ctxt)
+ ORELSE' (match_tac ctxt @{thms disjI2} THEN' TRY' (assume_tac ctxt)))))
+
+fun and_pos ctxt _ t =
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ REPEAT_CHANGED (resolve_tac ctxt @{thms verit_and_pos})
+ THEN' TRY' (assume_tac ctxt))
+
+end
+
+
+(* Equivalence Transformation *)
+
+local
+ fun prove_equiv equiv_thm ctxt [thm] t = SMT_Replay_Methods.prove ctxt t (fn _ =>
+ Method.insert_tac ctxt [equiv_thm OF [thm]]
+ THEN' TRY' (assume_tac ctxt))
+ | prove_equiv _ ctxt thms t = replay_error ctxt "invalid application in some equiv rule" Unsupported thms t
+in
+
+val not_equiv1 = prove_equiv @{lemma \<open>\<not>(A \<longleftrightarrow> B) \<Longrightarrow> A \<or> B\<close> by blast}
+val not_equiv2 = prove_equiv @{lemma \<open>\<not>(A \<longleftrightarrow> B) \<Longrightarrow> \<not>A \<or> \<not>B\<close> by blast}
+val equiv1 = prove_equiv @{lemma \<open>(A \<longleftrightarrow> B) \<Longrightarrow> \<not>A \<or> B\<close> by blast}
+val equiv2 = prove_equiv @{lemma \<open>(A \<longleftrightarrow> B) \<Longrightarrow> A \<or> \<not>B\<close> by blast}
+val not_implies1 = prove_equiv @{lemma \<open>\<not>(A \<longrightarrow> B) \<Longrightarrow> A\<close> by blast}
+val not_implies2 = prove_equiv @{lemma \<open>\<not>(A \<longrightarrow>B) \<Longrightarrow> \<not>B\<close> by blast}
+
+end
+
+
+(* Equivalence Lemma *)
+(*equiv_pos2 is very important for performance. We have tried various tactics, including
+a specialisation of SMT_Replay_Methods.match_instantiate, but there never was a measurable
+and consistent gain.*)
+local
+ fun prove_equiv_pos_neg2 thm ctxt _ t =
+ SMT_Replay_Methods.match_instantiate ctxt t thm
+in
+
+val equiv_pos1_thm = @{lemma \<open>\<not>(a \<longleftrightarrow> b) \<or> a \<or> \<not>b\<close> by blast+}
+val equiv_pos1 = prove_equiv_pos_neg2 equiv_pos1_thm
+
+val equiv_pos2_thm = @{lemma \<open>\<And>a b. (a \<noteq> b) \<or> \<not>a \<or> b\<close> by blast+}
+val equiv_pos2 = prove_equiv_pos_neg2 equiv_pos2_thm
+
+val equiv_neg1_thm = @{lemma \<open>(a \<longleftrightarrow> b) \<or> \<not>a \<or> \<not>b\<close> by blast+}
+val equiv_neg1 = prove_equiv_pos_neg2 equiv_neg1_thm
+
+val equiv_neg2_thm = @{lemma \<open>(a \<longleftrightarrow> b) \<or> a \<or> b\<close> by blast}
+val equiv_neg2 = prove_equiv_pos_neg2 equiv_neg2_thm
+
+end
+
+
+local
+ fun implies_pos_neg_term ctxt thm (\<^term>\<open>Trueprop\<close> $
+ (\<^term>\<open>HOL.disj\<close> $ (\<^term>\<open>HOL.implies\<close> $ a $ b) $ _)) =
+ Drule.infer_instantiate' ctxt (map (SOME o Thm.cterm_of ctxt) [a, b]) thm
+ | implies_pos_neg_term ctxt thm t =
+ replay_error ctxt "invalid application in Implies" Unsupported [thm] t
+
+ fun prove_implies_pos_neg thm ctxt _ t =
+ let val thm = implies_pos_neg_term ctxt thm t
+ in
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ Method.insert_tac ctxt [thm]
+ THEN' TRY' (assume_tac ctxt))
+ end
+in
+
+val implies_neg1_thm = @{lemma \<open>(a \<longrightarrow> b) \<or> a\<close> by blast}
+val implies_neg1 = prove_implies_pos_neg implies_neg1_thm
+
+val implies_neg2_thm = @{lemma \<open>(a \<longrightarrow> b) \<or> \<not>b\<close> by blast}
+val implies_neg2 = prove_implies_pos_neg implies_neg2_thm
+
+val implies_thm = @{lemma \<open>(a \<longrightarrow> b) \<Longrightarrow> \<not>a \<or> b\<close> by blast}
+fun implies_rules ctxt prems t = SMT_Replay_Methods.prove ctxt t (fn _ =>
+ Method.insert_tac ctxt [implies_thm OF prems]
+ THEN' TRY' (assume_tac ctxt))
+
+end
+
+
+(* BFun *)
+
+local
+ val bfun_theorems = @{thms verit_bfun_elim}
+in
+
+fun bfun_elim ctxt prems t = SMT_Replay_Methods.prove ctxt t (fn _ =>
+ Method.insert_tac ctxt prems
+ THEN' TRY' (eqsubst_all ctxt bfun_theorems)
+ THEN' TRY' (simplify_tac ctxt @{thms eq_commute all_conj_distrib ex_disj_distrib}))
+
+end
+
+
+(* If-Then-Else *)
+
+local
+ fun prove_ite ite_thm thm ctxt =
+ resolve_tac ctxt [ite_thm OF [thm]]
+ THEN' TRY' (assume_tac ctxt)
+in
+
+val ite_pos1_thm =
+ @{lemma \<open>\<not>(if x then P else Q) \<or> x \<or> Q\<close> by auto}
+
+fun ite_pos1 ctxt _ t = SMT_Replay_Methods.prove ctxt t (fn _ =>
+ resolve_tac ctxt [ite_pos1_thm])
+
+val ite_pos2_thms =
+ @{lemma \<open>\<not>(if x then P else Q) \<or> \<not>x \<or> P\<close> \<open>\<not>(if \<not>x then P else Q) \<or> x \<or> P\<close> by auto}
+
+fun ite_pos2 ctxt _ t = SMT_Replay_Methods.prove ctxt t (fn _ => resolve_tac ctxt ite_pos2_thms)
+
+val ite_neg1_thms =
+ @{lemma \<open>(if x then P else Q) \<or> x \<or> \<not>Q\<close> \<open>(if x then P else \<not>Q) \<or> x \<or> Q\<close> by auto}
+
+fun ite_neg1 ctxt _ t = SMT_Replay_Methods.prove ctxt t (fn _ => resolve_tac ctxt ite_neg1_thms)
+
+val ite_neg2_thms =
+ @{lemma \<open>(if x then P else Q) \<or> \<not>x \<or> \<not>P\<close> \<open>(if \<not>x then P else Q) \<or> x \<or> \<not>P\<close>
+ \<open>(if x then \<not>P else Q) \<or> \<not>x \<or> P\<close> \<open>(if \<not>x then \<not>P else Q) \<or> x \<or> P\<close>
+ by auto}
+
+fun ite_neg2 ctxt _ t = SMT_Replay_Methods.prove ctxt t (fn _ => resolve_tac ctxt ite_neg2_thms)
+
+val ite1_thm =
+ @{lemma \<open>(if x then P else Q) \<Longrightarrow> x \<or> Q\<close> by (auto split: if_splits) }
+
+fun ite1 ctxt [thm] t = SMT_Replay_Methods.prove ctxt t (fn _ => prove_ite ite1_thm thm ctxt)
+
+val ite2_thm =
+ @{lemma \<open>(if x then P else Q) \<Longrightarrow> \<not>x \<or> P\<close> by (auto split: if_splits) }
+
+fun ite2 ctxt [thm] t = SMT_Replay_Methods.prove ctxt t (fn _ => prove_ite ite2_thm thm ctxt)
+
+val not_ite1_thm =
+ @{lemma \<open>\<not>(if x then P else Q) \<Longrightarrow> x \<or> \<not>Q\<close> by (auto split: if_splits) }
+
+fun not_ite1 ctxt [thm] t = SMT_Replay_Methods.prove ctxt t (fn _ => prove_ite not_ite1_thm thm ctxt)
+
+val not_ite2_thm =
+ @{lemma \<open>\<not>(if x then P else Q) \<Longrightarrow> \<not>x \<or> \<not>P\<close> by (auto split: if_splits) }
+
+fun not_ite2 ctxt [thm] t = SMT_Replay_Methods.prove ctxt t (fn _ => prove_ite not_ite2_thm thm ctxt)
+
+(*ite_intro can introduce new terms that look in the proof exactly the same as the old ones, but
+differ internally, hence the possible reordering.*)
+fun ite_intro ctxt _ t =
+ let
+ fun simplify_ite ctxt thms =
+ ctxt
+ |> empty_simpset
+ |> put_simpset HOL_basic_ss
+ |> (fn ctxt => ctxt addsimps thms @ @{thms if_True if_False refl simp_thms if_cancel})
+ |> Raw_Simplifier.add_eqcong @{thm verit_ite_if_cong}
+ |> Simplifier.full_simp_tac
+ in
+ SMT_Replay_Methods.prove ctxt t (fn _ => simplify_ite ctxt []
+ THEN' TRY' (simplify_ite ctxt @{thms eq_commute}))
+ end
+end
+
+
+(* Quantifiers *)
+
+local
+ val rm_unused = @{lemma \<open>(\<forall>x. P) = P\<close> \<open>(\<exists>x. P) = P\<close> by blast+}
+
+ fun qnt_cnf_tac ctxt =
+ simplify_tac ctxt @{thms de_Morgan_conj de_Morgan_disj imp_conv_disj
+ iff_conv_conj_imp if_bool_eq_disj ex_simps all_simps ex_disj_distrib
+ verit_connective_def(3) all_conj_distrib}
+ THEN' TRY' (Blast.blast_tac ctxt)
+in
+fun qnt_rm_unused ctxt _ t = SMT_Replay_Methods.prove ctxt t (fn _ =>
+ K (unfold_tac ctxt rm_unused))
+
+fun onepoint ctxt prems t = SMT_Replay_Methods.prove ctxt t (fn _ =>
+ Method.insert_tac ctxt prems
+ THEN' Simplifier.full_simp_tac (put_simpset HOL_basic_ss (empty_simpset ctxt)
+ addsimps @{thms HOL.simp_thms HOL.all_simps}
+ addsimprocs [@{simproc HOL.defined_All}, @{simproc HOL.defined_Ex}])
+ THEN' TRY' (Blast.blast_tac ctxt)
+ THEN' TRY' (Metis_Tactic.metis_tac [] ATP_Problem_Generate.combsN ctxt []))
+
+fun qnt_join ctxt _ t = SMT_Replay_Methods.prove ctxt t Classical.fast_tac
+
+fun qnt_cnf ctxt _ t = SMT_Replay_Methods.prove ctxt t qnt_cnf_tac
+
+fun qnt_simplify ctxt _ t = SMT_Replay_Methods.prove ctxt t (fn _ =>
+ partial_simplify_tac ctxt [])
+
+end
+
+(* Equality *)
+
+fun eq_transitive ctxt _ t = SMT_Replay_Methods.prove ctxt t (fn _ =>
+ REPEAT_CHANGED (resolve_tac ctxt [@{thm disj_not1} RSN (1, @{thm iffD2}) OF @{thms impI}])
+ THEN' REPEAT' (resolve_tac ctxt @{thms impI})
+ THEN' REPEAT' (eresolve_tac ctxt @{thms conjI})
+ THEN' REPEAT' (fn i => dresolve_tac ctxt @{thms verit_eq_transitive} i THEN assume_tac ctxt (i+1))
+ THEN' resolve_tac ctxt @{thms refl})
+
+local
+ fun find_rew rews t t' =
+ (case AList.lookup (op =) rews (t, t') of
+ SOME thm => SOME (thm COMP @{thm symmetric})
+ | NONE =>
+ (case AList.lookup (op =) rews (t', t) of
+ SOME thm => SOME thm
+ | NONE => NONE))
+
+ fun eq_pred_conv rews t ctxt ctrm =
+ (case find_rew rews t (Thm.term_of ctrm) of
+ SOME thm => Conv.rewr_conv thm ctrm
+ | NONE =>
+ (case t of
+ f $ arg =>
+ (Conv.fun_conv (eq_pred_conv rews f ctxt) then_conv
+ Conv.arg_conv (eq_pred_conv rews arg ctxt)) ctrm
+ | Abs (_, _, f) => Conv.abs_conv (eq_pred_conv rews f o snd) ctxt ctrm
+ | _ => Conv.all_conv ctrm))
+
+ fun eq_pred_rewrite_tac ({context = ctxt, prems, concl, ...}: Subgoal.focus) =
+ let
+ val rews = prems
+ |> map_filter (try (apfst (HOLogic.dest_eq o Thm.term_of o Object_Logic.dest_judgment ctxt o
+ Thm.cconcl_of) o `(fn x => x)))
+ |> map (apsnd (fn x => @{thm eq_reflection} OF [x]))
+ fun conv_left conv = Conv.arg_conv (Conv.arg_conv conv)
+ fun conv_left_neg conv = Conv.arg_conv (Conv.arg_conv (Conv.arg_conv conv))
+ val (t1, conv_term) =
+ (case Thm.term_of (Object_Logic.dest_judgment ctxt concl) of
+ Const(_, _) $ (Const(\<^const_name>\<open>HOL.Not\<close>, _) $ t1) $ _ => (t1, conv_left)
+ | Const(_, _) $ t1 $ (Const(\<^const_name>\<open>HOL.Not\<close>, _) $ _) => (t1, conv_left_neg)
+ | Const(_, _) $ t1 $ _ => (t1, conv_left)
+ | t1 => (t1, conv_left))
+ fun throwing_CONVERSION cv i st = Seq.single (Conv.gconv_rule cv i st)
+ in
+ throwing_CONVERSION (conv_term (eq_pred_conv rews t1 ctxt))
+ end
+in
+
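+(*eq_congruent_pred (and eq_congruent below) replays clauses of the shape
+\<not>(t1 = u1) \<or> ... \<or> \<not>(tn = un) \<or> L, where the last literal L follows from the equations by
+congruence: the negated equations are turned into premises, the focused conversion above rewrites
+the ti into the ui inside L, and the goal is closed by refl, excluded_middle, or assumption.*)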
+fun eq_congruent_pred ctxt _ t =
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ REPEAT' (resolve_tac ctxt [@{thm disj_not1[of \<open>_ = _\<close>]} RSN (1, @{thm iffD2}) OF @{thms impI}])
+ THEN' (fn i => Subgoal.FOCUS (fn focus => eq_pred_rewrite_tac focus i) ctxt i)
+ THEN' (resolve_tac ctxt @{thms refl excluded_middle excluded_middle[of \<open>\<not>_\<close>, unfolded not_not]}
+ ORELSE' assume_tac ctxt))
+
+val eq_congruent = eq_congruent_pred
+
+end
+
+
+(* Subproof *)
+
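+(*subproof discharges the clause generated for a closed subproof, given exactly one premise
+(roughly A1 \<Longrightarrow> ... \<Longrightarrow> An \<Longrightarrow> C for a concluding clause \<not>A1 \<or> ... \<or> \<not>An \<or> C): the clause is turned
+into an implication literal by literal, the premise is resolved against the remaining conclusion,
+and the new subgoals are closed by assumption, with blast as a fallback.*)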
+fun subproof ctxt [prem] t =
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ (REPEAT' (resolve_tac ctxt [@{thm disj_not1} RSN (1, @{thm iffD2}) OF [@{thm impI}],
+ @{thm disj_not1[of \<open>\<not>_\<close>, unfolded not_not]} RSN (1, @{thm iffD2}) OF [@{thm impI}]])
+ THEN' resolve_tac ctxt [prem]
+ THEN_ALL_NEW assume_tac ctxt
+ THEN' TRY' (assume_tac ctxt))
+ ORELSE' TRY' (Method.insert_tac ctxt [prem] THEN' Blast.blast_tac ctxt))
+ | subproof ctxt prems t =
+ replay_error ctxt "invalid application, only one assumption expected" Subproof prems t
+
+
+(* Simplifying Steps *)
+
+(* The reconstruction is a bit tricky: at first we rewrite only with the rules that are passed as
+arguments. Then we simply use simp_thms to reconstruct all the proofs (these theorems cover all
+the simplifications below).
+*)
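+(*Concretely: rewrite_only_thms rewrites with exactly the given rules, rewrite_thms additionally
+uses simp_thms and the imp_cong congruence, and the chain_rewrite_* combinators try the
+restricted set first before falling back to the extended one. The auto_tac calls are a temporary
+crutch for coarse-grained cvc5 steps, as the TODO comments below note.*)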
+local
+ fun rewrite_only_thms ctxt thms =
+ ctxt
+ |> empty_simpset
+ |> put_simpset HOL_basic_ss
+ |> (fn ctxt => ctxt addsimps thms)
+ |> Simplifier.full_simp_tac
+ fun rewrite_only_thms_tmp ctxt thms =
+ rewrite_only_thms ctxt thms
+ THEN' TRY' (K (Clasimp.auto_tac ctxt)) (*TODO: TEMP until cvc5 produces fine grained simplification steps *)
+
+ fun rewrite_thms ctxt thms =
+ ctxt
+ |> empty_simpset
+ |> put_simpset HOL_basic_ss
+ |> Raw_Simplifier.add_eqcong @{thm eq_reflection[OF imp_cong]}
+ |> (fn ctxt => ctxt addsimps thms addsimps @{thms simp_thms})
+ |> Simplifier.full_simp_tac
+
+ fun chain_rewrite_thms ctxt thms =
+ TRY' (rewrite_only_thms ctxt thms)
+ THEN' TRY' (rewrite_thms ctxt thms)
+ THEN' TRY' (K (Clasimp.auto_tac ctxt)) (*TODO: TEMP until cvc5 produces fine grained simplification steps *)
+
+ fun chain_rewrite_two_step_with_ac_simps ctxt thms1 thms2 =
+ TRY' (rewrite_only_thms ctxt thms1)
+ THEN' TRY' (rewrite_thms ctxt thms2)
+ THEN' TRY' (K (Clasimp.auto_tac ctxt)) (*TODO: TEMP until cvc5 produces fine grained simplification steps *)
+
+ fun chain_rewrite_thms_two_step ctxt thms1 thms2 thms3 thms4 =
+ chain_rewrite_two_step_with_ac_simps ctxt thms1 thms2
+ THEN' TRY' (chain_rewrite_two_step_with_ac_simps ctxt thms3 thms4)
+
+ val imp_refl = @{lemma \<open>(P \<longrightarrow> P) = True\<close> by blast}
+
+in
+fun rewrite_bool_simplify ctxt _ t =
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ (chain_rewrite_thms ctxt @{thms verit_bool_simplify}))
+
+fun rewrite_and_simplify ctxt _ t =
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ (chain_rewrite_two_step_with_ac_simps ctxt @{thms verit_and_simplify verit_and_simplify1}
+ @{thms verit_and_simplify}))
+
+fun rewrite_or_simplify ctxt _ t =
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ (chain_rewrite_two_step_with_ac_simps ctxt @{thms verit_or_simplify verit_or_simplify_1}
+ @{thms verit_or_simplify})
+ THEN' TRY' (REPEAT' (match_tac ctxt @{thms verit_and_pos}) THEN' assume_tac ctxt))
+
+fun rewrite_not_simplify ctxt _ t =
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ (rewrite_only_thms_tmp ctxt @{thms verit_not_simplify}))
+
+fun rewrite_equiv_simplify ctxt _ t =
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ (rewrite_only_thms_tmp ctxt @{thms verit_equiv_simplify}))
+
+fun rewrite_eq_simplify ctxt _ t =
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ (chain_rewrite_two_step_with_ac_simps ctxt
+ @{thms verit_eq_simplify}
+ (Named_Theorems.get ctxt @{named_theorems ac_simps})))
+
+fun rewrite_implies_simplify ctxt _ t =
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ (rewrite_only_thms_tmp ctxt @{thms verit_implies_simplify}))
+
+(* It is more efficient to first fold the implication symbols.
+ That is however not enough when the symbols appear within
+ expressions, hence we also unfold implications in the
+ other direction. *)
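+(*A hedged illustration (assuming verit_connective_def contains defining equations such as
+(P \<longrightarrow> Q) = (\<not>P \<or> Q)): the pass using verit_connective_def[symmetric], imp_refl and not_not folds
+\<not>P \<or> Q back into P \<longrightarrow> Q, and the second pass unfolds the definitions again in the other
+direction when folding alone does not close the goal.*)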
+fun rewrite_connective_def ctxt _ t =
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ chain_rewrite_thms_two_step ctxt
+ (imp_refl :: @{thms not_not verit_connective_def[symmetric]})
+ (@{thms verit_connective_def[symmetric]})
+ (imp_refl :: @{thms not_not verit_connective_def})
+ (@{thms verit_connective_def}))
+
+fun rewrite_minus_simplify ctxt _ t =
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ chain_rewrite_two_step_with_ac_simps ctxt
+ @{thms arith_simps verit_minus_simplify}
+ (Named_Theorems.get ctxt @{named_theorems ac_simps} @
+ @{thms numerals arith_simps arith_special
+ numeral_class.numeral.numeral_One}))
+
+fun rewrite_comp_simplify ctxt _ t =
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ chain_rewrite_thms ctxt @{thms verit_comp_simplify})
+
+fun rewrite_ite_simplify ctxt _ t =
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ (rewrite_only_thms_tmp ctxt @{thms verit_ite_simplify}))
+end
+
+
+(* Simplifications *)
+
+local
+ fun simplify_arith ctxt thms = partial_simplify_tac ctxt
+ (thms @ Named_Theorems.get ctxt @{named_theorems ac_simps} @ @{thms arith_simps})
+in
+
+fun sum_simplify ctxt _ t =
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ simplify_arith ctxt @{thms verit_sum_simplify arith_special}
+ THEN' TRY' (K (Clasimp.auto_tac ctxt))) (*TODO: TEMP until cvc5 produces fine grained simplification steps *)
+
+fun prod_simplify ctxt _ t =
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ simplify_arith ctxt @{thms verit_prod_simplify}
+ THEN' TRY' (K (Clasimp.auto_tac ctxt))) (*TODO: TEMP until cvc5 produces fine grained simplification steps *)
+end
+
+fun all_simplify ctxt _ t =
+ SMT_Replay_Methods.prove ctxt t (fn _ =>
+ TRY' (K (Clasimp.auto_tac ctxt))) (*TODO: TEMP until cvc5 produces fine grained simplification steps *)
+
+(* Arithmetics *)
+local
+
+val la_rw_eq_thm = @{lemma \<open>(a :: nat) = b \<or> (a \<le> b) \<or> (a \<ge> b)\<close> by auto}
+in
+fun la_rw_eq ctxt _ t = SMT_Replay_Methods.match_instantiate ctxt t la_rw_eq_thm
+
+fun la_generic_prove args ctxt _ = SMT_Replay_Arith.la_farkas args ctxt
+
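+(*la_generic receives the coefficients of the linear combination as args from the proof (checked
+Farkas-style by SMT_Replay_Arith.la_farkas); the arithmetic atoms of the goal are abstracted
+shallowly before the check.*)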
+fun la_generic ctxt _ args =
+ prove_abstract (SMT_Replay_Methods.abstract_arith_shallow ctxt) (la_generic_prove args) ctxt []
+
+fun lia_generic ctxt _ t =
+ SMT_Replay_Methods.prove_arith_rewrite SMT_Replay_Methods.abstract_neq ctxt t
+
+fun la_disequality ctxt _ t =
+ SMT_Replay_Methods.match_instantiate ctxt t @{thm verit_la_disequality}
+
+end
+
+
+(* Symm and Not_Symm *)
+
+local
+ fun prove_symm symm_thm ctxt [thm] t = SMT_Replay_Methods.prove ctxt t (fn _ =>
+ Method.insert_tac ctxt [symm_thm OF [thm]]
+ THEN' TRY' (assume_tac ctxt))
+ | prove_symm _ ctxt thms t = replay_error ctxt "invalid application in some symm rule" Unsupported thms t
+in
+ val symm_thm =
+ @{lemma \<open>(B = A) \<Longrightarrow> A = B\<close> by blast }
+ val symm = prove_symm symm_thm
+
+ val not_symm_thm =
+ @{lemma \<open>\<not>(B = A) \<Longrightarrow> \<not>(A = B)\<close> by blast }
+ val not_symm = prove_symm not_symm_thm
+end
+
+(* Reordering *)
+local
+ fun prove_reordering ctxt [thm] t = SMT_Replay_Methods.prove ctxt t (fn _ => (
+ Method.insert_tac ctxt [thm]
+ THEN' match_tac ctxt @{thms ccontr}
+ THEN' K (unfold_tac ctxt @{thms de_Morgan_disj})
+ THEN' TRY o CHANGED_PROP o REPEAT_ALL_NEW (ematch_tac ctxt @{thms conjE})
+ THEN' CHANGED_PROP o REPEAT_ALL_NEW (ematch_tac ctxt @{thms disjE})
+ THEN' REPEAT' (ematch_tac ctxt @{thms notE} THEN' TRY' (assume_tac ctxt))
+ ))
+ | prove_reordering ctxt thms t =
+ replay_error ctxt "invalid application in some reordering rule" Unsupported thms t
+in
+ val reordering = prove_reordering
+end
+
+end;
--- a/src/HOL/Tools/SMT/smt_systems.ML Wed Feb 23 08:43:44 2022 +0100
+++ b/src/HOL/Tools/SMT/smt_systems.ML Wed Mar 23 10:54:22 2022 +0100
@@ -148,7 +148,7 @@
((256, mepoN), []),
((32, meshN), [])],
outcome = on_first_non_unsupported_line (outcome_of "unsat" "sat" "unknown" "Time limit exceeded"),
- parse_proof = SOME (K VeriT_Proof_Parse.parse_proof),
+ parse_proof = SOME (K Lethe_Proof_Parse.parse_proof),
replay = SOME Verit_Replay.replay }
end
--- a/src/HOL/Tools/SMT/verit_replay.ML Wed Feb 23 08:43:44 2022 +0100
+++ b/src/HOL/Tools/SMT/verit_replay.ML Wed Mar 23 10:54:22 2022 +0100
@@ -137,9 +137,9 @@
val args = map (Term.subst_free concl_tranformation o subst_only_free global_transformation) args
val insts = Symtab.map (K (Term.subst_free concl_tranformation o subst_only_free global_transformation)) insts
val proof_prems =
- if Verit_Replay_Methods.requires_subproof_assms prems rule then proof_prems else []
+ if Lethe_Replay_Methods.requires_subproof_assms prems rule then proof_prems else []
val local_inputs =
- if Verit_Replay_Methods.requires_local_input prems rule then input @ inputs else []
+ if Lethe_Replay_Methods.requires_local_input prems rule then input @ inputs else []
val replay = Timing.timing (replay_thm Verit_Replay_Methods.method_for rewrite_rules ll_defs
ctxt assumed [] (proof_prems @ local_inputs) (nthms @ nthms') concl_tranformation
global_transformation args insts)
--- a/src/HOL/Tools/SMT/verit_replay_methods.ML Wed Feb 23 08:43:44 2022 +0100
+++ b/src/HOL/Tools/SMT/verit_replay_methods.ML Wed Mar 23 10:54:22 2022 +0100
@@ -10,9 +10,6 @@
val method_for: string -> Proof.context -> thm list -> term list -> term Symtab.table ->
(string * term) list -> term -> thm
- val requires_subproof_assms : string list -> string -> bool
- val requires_local_input: string list -> string -> bool
- val eq_congruent_pred: Proof.context -> 'a -> term -> thm
val discharge: Proof.context -> thm list -> term -> thm
end;
@@ -20,63 +17,7 @@
structure Verit_Replay_Methods: VERIT_REPLAY_METHODS =
struct
-(*Some general comments on the proof format:
- 1. Double negations are not always removed. This means for example that the equivalence rules
- cannot assume that the double negations have already been removed. Therefore, we match the
- term, instantiate the theorem, then use simp (to remove double negations), and finally use
- assumption.
- 2. The reconstruction for rule forall_inst is buggy and tends to be very fragile, because the rule
- is doing much more that is supposed to do. Moreover types can make trivial goals (for the
- boolean structure) impossible to prove.
- 3. Duplicate literals are sometimes removed, mostly by the SAT solver.
-
- Rules unsupported on purpose:
- * Distinct_Elim, XOR, let (we don't need them).
- * deep_skolemize (because it is not clear if verit still generates using it).
-*)
-
-datatype verit_rule =
- False | True |
-
- (*input: a repeated (normalized) assumption of assumption of in a subproof*)
- Normalized_Input | Local_Input |
- (*Subproof:*)
- Subproof |
- (*Conjunction:*)
- And | Not_And | And_Pos | And_Neg |
- (*Disjunction""*)
- Or | Or_Pos | Not_Or | Or_Neg |
- (*Disjunction:*)
- Implies | Implies_Neg1 | Implies_Neg2 | Implies_Pos | Not_Implies1 | Not_Implies2 |
- (*Equivalence:*)
- Equiv_neg1 | Equiv_pos1 | Equiv_pos2 | Equiv_neg2 | Not_Equiv1 | Not_Equiv2 | Equiv1 | Equiv2 |
- (*If-then-else:*)
- ITE_Pos1 | ITE_Pos2 | ITE_Neg1 | ITE_Neg2 | Not_ITE1 | Not_ITE2 | ITE_Intro | ITE1 | ITE2 |
- (*Equality:*)
- Eq_Congruent | Eq_Reflexive | Eq_Transitive | Eq_Congruent_Pred | Trans | Refl | Cong |
- (*Arithmetics:*)
- LA_Disequality | LA_Generic | LA_Tautology | LIA_Generic | LA_Totality | LA_RW_Eq |
- NLA_Generic |
- (*Quantifiers:*)
- Forall_Inst | Qnt_Rm_Unused | Qnt_Join | Onepoint | Bind | Skolem_Forall | Skolem_Ex |
- (*Resolution:*)
- Theory_Resolution | Resolution |
- (*Temporary rules, that the verit developpers want to remove:*)
- AC_Simp |
- Bfun_Elim |
- Qnt_CNF |
- (*Used to introduce skolem constants*)
- Definition |
- (*Former cong rules*)
- Bool_Simplify | Or_Simplify | Not_Simplify | And_Simplify | Equiv_Simplify |
- Implies_Simplify | Connective_Def | Minus_Simplify | Comp_Simplify |
- Eq_Simplify | ITE_Simplify | Sum_Simplify | Prod_Simplify |
- Qnt_Simplify | Not_Not | Tautological_Clause | Duplicate_Literals |
- (*Unsupported rule*)
- Unsupported |
- (*For compression*)
- Theory_Resolution2
-
+open Lethe_Replay_Methods
fun verit_rule_of "bind" = Bind
| verit_rule_of "cong" = Cong
@@ -149,944 +90,19 @@
| verit_rule_of "tautology" = Tautological_Clause
| verit_rule_of "qnt_cnf" = Qnt_CNF
| verit_rule_of r =
- if r = Verit_Proof.normalized_input_rule then Normalized_Input
- else if r = Verit_Proof.local_input_rule then Local_Input
- else if r = Verit_Proof.veriT_def then Definition
- else if r = Verit_Proof.not_not_rule then Not_Not
- else if r = Verit_Proof.contract_rule then Duplicate_Literals
- else if r = Verit_Proof.ite_intro_rule then ITE_Intro
- else if r = Verit_Proof.eq_congruent_rule then Eq_Congruent
- else if r = Verit_Proof.eq_congruent_pred_rule then Eq_Congruent_Pred
- else if r = Verit_Proof.theory_resolution2_rule then Theory_Resolution2
- else if r = Verit_Proof.th_resolution_rule then Theory_Resolution
- else if r = Verit_Proof.equiv_pos2_rule then Equiv_pos2
+ if r = Lethe_Proof.normalized_input_rule then Normalized_Input
+ else if r = Lethe_Proof.local_input_rule then Local_Input
+ else if r = Lethe_Proof.lethe_def then Definition
+ else if r = Lethe_Proof.not_not_rule then Not_Not
+ else if r = Lethe_Proof.contract_rule then Duplicate_Literals
+ else if r = Lethe_Proof.ite_intro_rule then ITE_Intro
+ else if r = Lethe_Proof.eq_congruent_rule then Eq_Congruent
+ else if r = Lethe_Proof.eq_congruent_pred_rule then Eq_Congruent_Pred
+ else if r = Lethe_Proof.theory_resolution2_rule then Theory_Resolution2
+ else if r = Lethe_Proof.th_resolution_rule then Theory_Resolution
+ else if r = Lethe_Proof.equiv_pos2_rule then Equiv_pos2
else (@{print} ("Unsupport rule", r); Unsupported)
-fun string_of_verit_rule Bind = "Bind"
- | string_of_verit_rule Cong = "Cong"
- | string_of_verit_rule Refl = "Refl"
- | string_of_verit_rule Equiv1 = "Equiv1"
- | string_of_verit_rule Equiv2 = "Equiv2"
- | string_of_verit_rule Equiv_pos1 = "Equiv_pos1"
- | string_of_verit_rule Equiv_pos2 = "Equiv_pos2"
- | string_of_verit_rule Equiv_neg1 = "Equiv_neg1"
- | string_of_verit_rule Equiv_neg2 = "Equiv_neg2"
- | string_of_verit_rule Skolem_Forall = "Skolem_Forall"
- | string_of_verit_rule Skolem_Ex = "Skolem_Ex"
- | string_of_verit_rule Eq_Reflexive = "Eq_Reflexive"
- | string_of_verit_rule Theory_Resolution = "Theory_Resolution"
- | string_of_verit_rule Theory_Resolution2 = "Theory_Resolution2"
- | string_of_verit_rule Forall_Inst = "forall_inst"
- | string_of_verit_rule Or = "Or"
- | string_of_verit_rule Not_Or = "Not_Or"
- | string_of_verit_rule Resolution = "Resolution"
- | string_of_verit_rule Eq_Congruent = "eq_congruent"
- | string_of_verit_rule Trans = "trans"
- | string_of_verit_rule False = "false"
- | string_of_verit_rule And = "and"
- | string_of_verit_rule And_Pos = "and_pos"
- | string_of_verit_rule Not_And = "not_and"
- | string_of_verit_rule And_Neg = "and_neg"
- | string_of_verit_rule Or_Pos = "or_pos"
- | string_of_verit_rule Or_Neg = "or_neg"
- | string_of_verit_rule AC_Simp = "ac_simp"
- | string_of_verit_rule Not_Equiv1 = "not_equiv1"
- | string_of_verit_rule Not_Equiv2 = "not_equiv2"
- | string_of_verit_rule Not_Implies1 = "not_implies1"
- | string_of_verit_rule Not_Implies2 = "not_implies2"
- | string_of_verit_rule Implies_Neg1 = "implies_neg1"
- | string_of_verit_rule Implies_Neg2 = "implies_neg2"
- | string_of_verit_rule Implies = "implies"
- | string_of_verit_rule Bfun_Elim = "bfun_elim"
- | string_of_verit_rule ITE1 = "ite1"
- | string_of_verit_rule ITE2 = "ite2"
- | string_of_verit_rule Not_ITE1 = "not_ite1"
- | string_of_verit_rule Not_ITE2 = "not_ite2"
- | string_of_verit_rule ITE_Pos1 = "ite_pos1"
- | string_of_verit_rule ITE_Pos2 = "ite_pos2"
- | string_of_verit_rule ITE_Neg1 = "ite_neg1"
- | string_of_verit_rule ITE_Neg2 = "ite_neg2"
- | string_of_verit_rule ITE_Intro = "ite_intro"
- | string_of_verit_rule LA_Disequality = "la_disequality"
- | string_of_verit_rule LA_Generic = "la_generic"
- | string_of_verit_rule LIA_Generic = "lia_generic"
- | string_of_verit_rule LA_Tautology = "la_tautology"
- | string_of_verit_rule LA_RW_Eq = "la_rw_eq"
- | string_of_verit_rule LA_Totality = "LA_Totality"
- | string_of_verit_rule NLA_Generic = "nla_generic"
- | string_of_verit_rule Eq_Transitive = "eq_transitive"
- | string_of_verit_rule Qnt_Rm_Unused = "qnt_remove_unused"
- | string_of_verit_rule Onepoint = "onepoint"
- | string_of_verit_rule Qnt_Join = "qnt_join"
- | string_of_verit_rule Eq_Congruent_Pred = "eq_congruent_pred"
- | string_of_verit_rule Normalized_Input = Verit_Proof.normalized_input_rule
- | string_of_verit_rule Local_Input = Verit_Proof.local_input_rule
- | string_of_verit_rule Subproof = "subproof"
- | string_of_verit_rule Bool_Simplify = "bool_simplify"
- | string_of_verit_rule Equiv_Simplify = "equiv_simplify"
- | string_of_verit_rule Eq_Simplify = "eq_simplify"
- | string_of_verit_rule Not_Simplify = "not_simplify"
- | string_of_verit_rule And_Simplify = "and_simplify"
- | string_of_verit_rule Or_Simplify = "or_simplify"
- | string_of_verit_rule ITE_Simplify = "ite_simplify"
- | string_of_verit_rule Implies_Simplify = "implies_simplify"
- | string_of_verit_rule Connective_Def = "connective_def"
- | string_of_verit_rule Minus_Simplify = "minus_simplify"
- | string_of_verit_rule Sum_Simplify = "sum_simplify"
- | string_of_verit_rule Prod_Simplify = "prod_simplify"
- | string_of_verit_rule Comp_Simplify = "comp_simplify"
- | string_of_verit_rule Qnt_Simplify = "qnt_simplify"
- | string_of_verit_rule Not_Not = Verit_Proof.not_not_rule
- | string_of_verit_rule Tautological_Clause = "tautology"
- | string_of_verit_rule Duplicate_Literals = Verit_Proof.contract_rule
- | string_of_verit_rule Qnt_CNF = "qnt_cnf"
- | string_of_verit_rule r = "Unknown rule: " ^ \<^make_string> r
-
-fun replay_error ctxt msg rule thms t =
- SMT_Replay_Methods.replay_error ctxt msg (string_of_verit_rule rule) thms t
-
-
-(* utility function *)
-
-fun eqsubst_all ctxt thms =
- K ((REPEAT o HEADGOAL) (EqSubst.eqsubst_asm_tac ctxt [0] thms))
- THEN' K ((REPEAT o HEADGOAL) (EqSubst.eqsubst_tac ctxt [0] thms))
-
-fun simplify_tac ctxt thms =
- ctxt
- |> empty_simpset
- |> put_simpset HOL_basic_ss
- |> (fn ctxt => ctxt addsimps @{thms simp_thms} addsimps thms)
- |> Simplifier.asm_full_simp_tac
-
-(* sko_forall requires the assumptions to be able to prove the equivalence in case of double
-skolemization. See comment below. *)
-fun requires_subproof_assms _ t =
- member (op =) ["refl", "sko_forall", "sko_ex", "cong"] t
-
-fun requires_local_input _ t =
- member (op =) [Verit_Proof.local_input_rule] t
-
-(*This is a weaker simplification form. It is weaker, but is also less likely to loop*)
-fun partial_simplify_tac ctxt thms =
- ctxt
- |> empty_simpset
- |> put_simpset HOL_basic_ss
- |> (fn ctxt => ctxt addsimps @{thms simp_thms} addsimps thms)
- |> Simplifier.full_simp_tac
-
-val try_provers = SMT_Replay_Methods.try_provers "verit"
-
-fun TRY' tac = fn i => TRY (tac i)
-fun REPEAT' tac = fn i => REPEAT (tac i)
-fun REPEAT_CHANGED tac = fn i => REPEAT (CHANGED (tac i))
-
-
-(* Bind *)
-
-(*The bind rule is non-obvious due to the handling of quantifiers:
- "\<And>x y a. x = y ==> (\<forall>b. P a b x) \<longleftrightarrow> (\<forall>b. P' a b y)"
- ------------------------------------------------------
- \<forall>a. (\<forall>b x. P a b x) \<longleftrightarrow> (\<forall>b y. P' a b y)
-is a valid application.*)
-
-val bind_thms =
- [@{lemma \<open>(\<And>x x'. x = x' \<Longrightarrow> P x = Q x') \<Longrightarrow> (\<forall>x. P x) = (\<forall>y. Q y)\<close> by blast},
- @{lemma \<open>(\<And>x x'. x = x' \<Longrightarrow> P x = Q x') \<Longrightarrow> (\<exists>x. P x) = (\<exists>y. Q y)\<close> by blast},
- @{lemma \<open>(\<And>x x'. x = x' \<Longrightarrow> P x = Q x') \<Longrightarrow> (\<exists>x. P x = Q x)\<close> by blast},
- @{lemma \<open>(\<And>x x'. x = x' \<Longrightarrow> P x = Q x') \<Longrightarrow> (\<forall>x. P x = Q x)\<close> by blast}]
-
-val bind_thms_same_name =
- [@{lemma \<open>(\<And>x. P x = Q x) \<Longrightarrow> (\<forall>x. P x) = (\<forall>y. Q y)\<close> by blast},
- @{lemma \<open>(\<And>x. P x = Q x) \<Longrightarrow> (\<exists>x. P x) = (\<exists>y. Q y)\<close> by blast},
- @{lemma \<open>(\<And>x. P x = Q x) \<Longrightarrow> (\<exists>x. P x = Q x)\<close> by blast},
- @{lemma \<open>(\<And>x. P x = Q x) \<Longrightarrow> (\<forall>x. P x = Q x)\<close> by blast}]
-
-fun extract_quantified_names_q (_ $ Abs (name, _, t)) =
- apfst (curry (op ::) name) (extract_quantified_names_q t)
- | extract_quantified_names_q t = ([], t)
-
-fun extract_forall_quantified_names_q (Const(\<^const_name>\<open>HOL.All\<close>, _) $ Abs (name, ty, t)) =
- (name, ty) :: (extract_forall_quantified_names_q t)
- | extract_forall_quantified_names_q _ = []
-
-fun extract_all_forall_quantified_names_q (Const(\<^const_name>\<open>HOL.All\<close>, _) $ Abs (name, _, t)) =
- name :: (extract_all_forall_quantified_names_q t)
- | extract_all_forall_quantified_names_q (t $ u) =
- extract_all_forall_quantified_names_q t @ extract_all_forall_quantified_names_q u
- | extract_all_forall_quantified_names_q _ = []
-
-val extract_quantified_names_ba =
- SMT_Replay_Methods.dest_prop
- #> extract_quantified_names_q
- ##> HOLogic.dest_eq
- ##> fst
- ##> extract_quantified_names_q
- ##> fst
-
-val extract_quantified_names =
- extract_quantified_names_ba
- #> (op @)
-
-val extract_all_forall_quantified_names =
- SMT_Replay_Methods.dest_prop
- #> HOLogic.dest_eq
- #> fst
- #> extract_all_forall_quantified_names_q
-
-
-fun extract_all_exists_quantified_names_q (Const(\<^const_name>\<open>HOL.Ex\<close>, _) $ Abs (name, _, t)) =
- name :: (extract_all_exists_quantified_names_q t)
- | extract_all_exists_quantified_names_q (t $ u) =
- extract_all_exists_quantified_names_q t @ extract_all_exists_quantified_names_q u
- | extract_all_exists_quantified_names_q _ = []
-
-val extract_all_exists_quantified_names =
- SMT_Replay_Methods.dest_prop
- #> HOLogic.dest_eq
- #> fst
- #> extract_all_exists_quantified_names_q
-
-
-val extract_bind_names =
- HOLogic.dest_eq
- #> apply2 (fn (Free (name, _)) => name)
-
-fun combine_quant ctxt ((n1, n2) :: bounds) (n1' :: formula) =
- TRY' (if n1 = n1'
- then if n1 <> n2
- then
- resolve_tac ctxt bind_thms
- THEN' TRY'(resolve_tac ctxt [@{thm refl}])
- THEN' combine_quant ctxt bounds formula
- else resolve_tac ctxt bind_thms_same_name THEN' combine_quant ctxt bounds formula
- else resolve_tac ctxt @{thms allI} THEN' combine_quant ctxt ((n1, n2) :: bounds) formula)
- | combine_quant _ _ _ = K all_tac
-
-fun bind_quants ctxt args t =
- combine_quant ctxt (map extract_bind_names args) (extract_quantified_names t)
-
-fun generalize_prems_q [] prems = prems
- | generalize_prems_q (_ :: quants) prems =
- generalize_prems_q quants (@{thm spec} OF [prems])
-
-fun generalize_prems t = generalize_prems_q (fst (extract_quantified_names_ba t))
-
-fun bind ctxt [prems] args t =
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- bind_quants ctxt args t
- THEN' TRY' (SOLVED' (resolve_tac ctxt [generalize_prems t prems]
- THEN_ALL_NEW TRY' (assume_tac ctxt ORELSE' resolve_tac ctxt @{thms refl}))))
- | bind ctxt thms _ t = replay_error ctxt "invalid bind application" Bind thms t
-
-
-(* Congruence/Refl *)
-
-fun cong ctxt thms = try_provers ctxt
- (string_of_verit_rule Cong) [
- ("basic", SMT_Replay_Methods.cong_basic ctxt thms),
- ("unfolding then reflexivity", SMT_Replay_Methods.cong_unfolding_trivial ctxt thms),
- ("unfolding then auto", SMT_Replay_Methods.cong_unfolding_first ctxt thms),
- ("full", SMT_Replay_Methods.cong_full ctxt thms)] thms
-
-fun refl ctxt thm t =
- (case find_first (fn thm => t = Thm.full_prop_of thm) thm of
- SOME thm => thm
- | NONE =>
- (case try (fn t => SMT_Replay_Methods.match_instantiate ctxt t @{thm refl}) t of
- NONE => cong ctxt thm t
- | SOME thm => thm))
-
-
-(* Instantiation *)
-
-local
-fun dropWhile _ [] = []
- | dropWhile f (x :: xs) = if f x then dropWhile f xs else x :: xs
-in
-
-fun forall_inst ctxt _ _ insts t =
- let
- fun instantiate_and_solve i ({context = ctxt, prems = [prem], ...}: Subgoal.focus) =
- let
- val t = Thm.prop_of prem
- val quants = t
- |> SMT_Replay_Methods.dest_prop
- |> extract_forall_quantified_names_q
- val insts = map (Symtab.lookup insts o fst) (rev quants)
- |> dropWhile (curry (op =) NONE)
- |> rev
- fun option_map _ NONE = NONE
- | option_map f (SOME a) = SOME (f a)
- fun instantiate_with inst prem =
- Drule.infer_instantiate' ctxt [NONE, inst] @{thm spec} OF [prem]
- val inst_thm =
- fold instantiate_with
- (map (option_map (Thm.cterm_of ctxt)) insts)
- prem
- in
- (Method.insert_tac ctxt [inst_thm]
- THEN' TRY' (fn i => assume_tac ctxt i)
- THEN' TRY' (partial_simplify_tac ctxt @{thms eq_commute})) i
- end
- | instantiate_and_solve _ ({context = ctxt, prems = thms, ...}: Subgoal.focus) =
- replay_error ctxt "invalid application" Forall_Inst thms t
- in
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- match_tac ctxt [@{thm disj_not1} RSN (1, @{thm iffD2}) OF [@{thm impI}]]
- THEN' (fn i => Subgoal.FOCUS (instantiate_and_solve i) ctxt i))
- end
-end
-
-
-(* Or *)
-
-fun or _ (thm :: _) _ = thm
- | or ctxt thms t = replay_error ctxt "invalid bind application" Or thms t
-
-
-(* Implication *)
-
-val implies_pos_thm =
- [@{lemma \<open>\<not>(A \<longrightarrow> B) \<or> \<not>A \<or> B\<close> by blast},
- @{lemma \<open>\<not>(\<not>A \<longrightarrow> B) \<or> A \<or> B\<close> by blast}]
-
-fun implies_pos ctxt _ t = SMT_Replay_Methods.prove ctxt t (fn _ =>
- resolve_tac ctxt implies_pos_thm)
-
-fun extract_rewrite_rule_assumption _ thms =
- let
- fun is_rewrite_rule thm =
- (case Thm.prop_of thm of
- \<^term>\<open>Trueprop\<close> $ (Const(\<^const_name>\<open>HOL.eq\<close>, _) $ _ $ Free(_, _)) => true
- | _ => false)
- fun is_context_rule thm =
- (case Thm.prop_of thm of
- \<^term>\<open>Trueprop\<close> $ (Const(\<^const_name>\<open>HOL.eq\<close>, _) $ Free(_, _) $ Free(_, _)) => true
- | _ => false)
- val ctxt_eq =
- thms
- |> filter is_context_rule
- val rew =
- thms
- |> filter_out is_context_rule
- |> filter is_rewrite_rule
- in
- (ctxt_eq, rew)
- end
-
-local
- fun rewrite_all_skolems thm_indirect ctxt (SOME thm :: thms) =
- EqSubst.eqsubst_tac ctxt [0] [thm_indirect OF [thm]]
- THEN' (partial_simplify_tac ctxt (@{thms eq_commute}))
- THEN' rewrite_all_skolems thm_indirect ctxt thms
- | rewrite_all_skolems thm_indirect ctxt (NONE :: thms) = rewrite_all_skolems thm_indirect ctxt thms
- | rewrite_all_skolems _ _ [] = K (all_tac)
-
- fun extract_var_name (thm :: thms) =
- let val name = Thm.concl_of thm
- |> SMT_Replay_Methods.dest_prop
- |> HOLogic.dest_eq
- |> fst
- |> (fn Const(_,_) $ Abs(name, _, _) => [(name,@{thm sym} OF [thm])]
- | _ => [])
- in name :: extract_var_name thms end
- | extract_var_name [] = []
-
-fun skolem_tac extractor thm1 thm2 ctxt thms t =
- let
- val (ctxt_eq, ts) = extract_rewrite_rule_assumption ctxt thms
- fun ordered_definitions () =
- let
- val var_order = extractor t
- val thm_names_with_var = extract_var_name ts |> flat
- in map (AList.lookup (op =) thm_names_with_var) var_order end
-
- in
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- K (unfold_tac ctxt ctxt_eq)
- THEN' ((SOLVED' (K (unfold_tac ctxt (map (fn thm => thm1 OF [@{thm sym} OF [thm]]) ts))))
- ORELSE'
- (rewrite_all_skolems thm2 ctxt (ordered_definitions ())
- THEN' partial_simplify_tac ctxt @{thms eq_commute})))
- end
-in
-
-val skolem_forall =
- skolem_tac extract_all_forall_quantified_names @{thm verit_sko_forall_indirect}
- @{thm verit_sko_forall_indirect2}
-
-val skolem_ex =
- skolem_tac extract_all_exists_quantified_names @{thm verit_sko_ex_indirect}
- @{thm verit_sko_ex_indirect2}
-
-end
-
-fun eq_reflexive ctxt _ t = SMT_Replay_Methods.match_instantiate ctxt t @{thm refl}
-
-local
- fun not_not_prove ctxt _ =
- K (unfold_tac ctxt @{thms not_not})
- THEN' match_tac ctxt @{thms verit_or_simplify_1}
-
- fun duplicate_literals_prove ctxt prems =
- Method.insert_tac ctxt prems
- THEN' match_tac ctxt @{thms ccontr}
- THEN' K (unfold_tac ctxt @{thms de_Morgan_disj not_not})
- THEN' TRY o CHANGED_PROP o REPEAT_ALL_NEW (ematch_tac ctxt @{thms conjE})
- THEN' CHANGED_PROP o REPEAT_ALL_NEW (ematch_tac ctxt @{thms disjE})
- THEN' REPEAT' (ematch_tac ctxt @{thms notE} THEN' TRY' (assume_tac ctxt))
-
- fun tautological_clause_prove ctxt _ =
- match_tac ctxt @{thms verit_or_neg}
- THEN' K (unfold_tac ctxt @{thms not_not disj_assoc[symmetric]})
- THEN' TRY' (match_tac ctxt @{thms notE} THEN_ALL_NEW assume_tac ctxt)
-
- val theory_resolution2_lemma = @{lemma \<open>a \<Longrightarrow> a = b \<Longrightarrow> b\<close> by blast}
-in
-
-fun prove_abstract abstracter tac ctxt thms t =
- let
- val thms = map (Conv.fconv_rule Thm.eta_long_conversion) thms
- val t' = Thm.eta_long_conversion (Object_Logic.dest_judgment ctxt (Thm.cterm_of ctxt t))
- val (_, t2) = Logic.dest_equals (Thm.prop_of t')
- val thm =
- SMT_Replay_Methods.prove_abstract ctxt thms t2 tac (
- fold_map (abstracter o SMT_Replay_Methods.dest_thm) thms ##>>
- abstracter (SMT_Replay_Methods.dest_prop t2))
- in
- @{thm verit_Pure_trans} OF [t', thm]
- end
-
-val not_not = prove_abstract SMT_Replay_Methods.abstract_bool_shallow not_not_prove
-
-val tautological_clause =
- prove_abstract SMT_Replay_Methods.abstract_bool_shallow tautological_clause_prove
-
-val duplicate_literals =
- prove_abstract SMT_Replay_Methods.abstract_bool_shallow duplicate_literals_prove
-
-val unit_res = prove_abstract SMT_Replay_Methods.abstract_bool_shallow SMT_Replay_Methods.prop_tac
-
-(*match_instantiate does not work*)
-fun theory_resolution2 ctxt prems t =
- SMT_Replay_Methods.prove ctxt t (fn _ => match_tac ctxt [theory_resolution2_lemma OF prems])
-
-end
-
-
-fun normalized_input ctxt prems t = SMT_Replay_Methods.prove ctxt t (fn _ =>
- Method.insert_tac ctxt prems
- THEN' TRY' (K (unfold_tac ctxt @{thms SMT.trigger_def}))
- THEN' TRY' (partial_simplify_tac ctxt @{thms eq_commute}))
-
-val false_rule_thm = @{lemma \<open>\<not>False\<close> by blast}
-
-fun false_rule ctxt _ t = SMT_Replay_Methods.match_instantiate ctxt t false_rule_thm
-
-
-(* Transitivity *)
-
-val trans_bool_thm =
- @{lemma \<open>P = Q \<Longrightarrow> Q \<Longrightarrow> P\<close> by blast}
-
-fun trans ctxt thms t =
- let
- val prop_of = HOLogic.dest_Trueprop o Thm.full_prop_of
- fun combine_thms [thm1, thm2] =
- (case (prop_of thm1, prop_of thm2) of
- ((Const(\<^const_name>\<open>HOL.eq\<close>, _) $ t1 $ t2),
- (Const(\<^const_name>\<open>HOL.eq\<close>, _) $ t3 $ t4)) =>
- if t2 aconv t3 then thm1 RSN (1, thm2 RSN (2, @{thm trans}))
- else if t2 aconv t4 then thm1 RSN ((1, ((thm2 RS sym)) RSN (2, @{thm trans})))
- else if t1 aconv t4 then thm2 RSN (1, thm1 RSN (2, @{thm trans}))
- else raise Fail "invalid trans theorem"
- | _ => trans_bool_thm OF [thm1, thm2])
- | combine_thms (thm1 :: thm2 :: thms) =
- combine_thms (combine_thms [thm1, thm2] :: thms)
- | combine_thms thms = replay_error ctxt "invalid bind application" Trans thms t
- val t' = Thm.eta_long_conversion (Object_Logic.dest_judgment ctxt (Thm.cterm_of ctxt t))
- val (_, t2) = Logic.dest_equals (Thm.prop_of t')
- val thms = map (Conv.fconv_rule Thm.eta_long_conversion) thms
- val trans_thm = combine_thms thms
- in
- (case (prop_of trans_thm, t2) of
- ((Const(\<^const_name>\<open>HOL.eq\<close>, _) $ t1 $ _),
- (Const(\<^const_name>\<open>HOL.eq\<close>, _) $ t3 $ _)) =>
- if t1 aconv t3 then trans_thm else trans_thm RS sym
- | _ => trans_thm (*to be on the safe side*))
- end
-
-
-fun tmp_AC_rule ctxt thms t =
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- Method.insert_tac ctxt thms
- THEN' REPEAT_ALL_NEW (partial_simplify_tac ctxt @{thms eq_commute}
- THEN' TRY' (simplify_tac ctxt @{thms ac_simps conj_ac})
- THEN' TRY' (Classical.fast_tac ctxt)))
-
-
-(* And/Or *)
-
-local
- fun not_and_rule_prove ctxt prems =
- Method.insert_tac ctxt prems
- THEN' K (unfold_tac ctxt @{thms de_Morgan_conj})
- THEN_ALL_NEW TRY' (assume_tac ctxt)
-
- fun or_pos_prove ctxt _ =
- K (unfold_tac ctxt @{thms de_Morgan_disj not_not})
- THEN' match_tac ctxt @{thms verit_and_pos}
- THEN' K (unfold_tac ctxt @{thms de_Morgan_conj de_Morgan_disj not_not})
- THEN' TRY' (assume_tac ctxt)
-
- fun not_or_rule_prove ctxt prems =
- Method.insert_tac ctxt prems
- THEN' K (unfold_tac ctxt @{thms de_Morgan_disj not_not})
- THEN' TRY' (REPEAT' (match_tac ctxt @{thms conjI}))
- THEN_ALL_NEW ((REPEAT' (ematch_tac ctxt @{thms conjE}))
- THEN' TRY' (assume_tac ctxt))
-
- fun and_rule_prove ctxt prems =
- Method.insert_tac ctxt prems
- THEN' (fn i => REPEAT (dresolve_tac ctxt @{thms conjE} i THEN assume_tac ctxt (i+1)))
- THEN' TRY' (assume_tac ctxt)
-
- fun and_neg_rule_prove ctxt _ =
- match_tac ctxt @{thms verit_and_neg}
- THEN' K (unfold_tac ctxt @{thms de_Morgan_conj not_not})
- THEN' TRY' (assume_tac ctxt)
-
- fun prover tac = prove_abstract SMT_Replay_Methods.abstract_prop tac
-
-in
-
-val and_rule = prover and_rule_prove
-
-val not_and_rule = prover not_and_rule_prove
-
-val not_or_rule = prover not_or_rule_prove
-
-val or_pos_rule = prover or_pos_prove
-
-val and_neg_rule = prover and_neg_rule_prove
-
-val or_neg_rule = prove_abstract SMT_Replay_Methods.abstract_bool (fn ctxt => fn _ =>
- resolve_tac ctxt @{thms verit_or_neg}
- THEN' K (unfold_tac ctxt @{thms not_not})
- THEN_ALL_NEW
- (REPEAT'
- (SOLVED' (match_tac ctxt @{thms disjI1} THEN_ALL_NEW assume_tac ctxt)
- ORELSE' (match_tac ctxt @{thms disjI2} THEN' TRY' (assume_tac ctxt)))))
-
-fun and_pos ctxt _ t =
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- REPEAT_CHANGED (resolve_tac ctxt @{thms verit_and_pos})
- THEN' TRY' (assume_tac ctxt))
-
-end
-
-
-(* Equivalence Transformation *)
-
-local
- fun prove_equiv equiv_thm ctxt [thm] t = SMT_Replay_Methods.prove ctxt t (fn _ =>
- Method.insert_tac ctxt [equiv_thm OF [thm]]
- THEN' TRY' (assume_tac ctxt))
- | prove_equiv _ ctxt thms t = replay_error ctxt "invalid application in some equiv rule" Unsupported thms t
-in
-
-val not_equiv1 = prove_equiv @{lemma \<open>\<not>(A \<longleftrightarrow> B) \<Longrightarrow> A \<or> B\<close> by blast}
-val not_equiv2 = prove_equiv @{lemma \<open>\<not>(A \<longleftrightarrow> B) \<Longrightarrow> \<not>A \<or> \<not>B\<close> by blast}
-val equiv1 = prove_equiv @{lemma \<open>(A \<longleftrightarrow> B) \<Longrightarrow> \<not>A \<or> B\<close> by blast}
-val equiv2 = prove_equiv @{lemma \<open>(A \<longleftrightarrow> B) \<Longrightarrow> A \<or> \<not>B\<close> by blast}
-val not_implies1 = prove_equiv @{lemma \<open>\<not>(A \<longrightarrow> B) \<Longrightarrow> A\<close> by blast}
-val not_implies2 = prove_equiv @{lemma \<open>\<not>(A \<longrightarrow>B) \<Longrightarrow> \<not>B\<close> by blast}
-
-end
-
-
-(* Equivalence Lemma *)
-(*equiv_pos2 is very important for performance. We have tried various tactics, including
-a specialisation of SMT_Replay_Methods.match_instantiate, but there never was a measurable
-and consistent gain.*)
-local
- fun prove_equiv_pos_neg2 thm ctxt _ t =
- SMT_Replay_Methods.match_instantiate ctxt t thm
-in
-
-val equiv_pos1_thm = @{lemma \<open>\<not>(a \<longleftrightarrow> b) \<or> a \<or> \<not>b\<close> by blast+}
-val equiv_pos1 = prove_equiv_pos_neg2 equiv_pos1_thm
-
-val equiv_pos2_thm = @{lemma \<open>\<And>a b. (a \<noteq> b) \<or> \<not>a \<or> b\<close> by blast+}
-val equiv_pos2 = prove_equiv_pos_neg2 equiv_pos2_thm
-
-val equiv_neg1_thm = @{lemma \<open>(a \<longleftrightarrow> b) \<or> \<not>a \<or> \<not>b\<close> by blast+}
-val equiv_neg1 = prove_equiv_pos_neg2 equiv_neg1_thm
-
-val equiv_neg2_thm = @{lemma \<open>(a \<longleftrightarrow> b) \<or> a \<or> b\<close> by blast}
-val equiv_neg2 = prove_equiv_pos_neg2 equiv_neg2_thm
-
-end
-
-
-local
- fun implies_pos_neg_term ctxt thm (\<^term>\<open>Trueprop\<close> $
- (\<^term>\<open>HOL.disj\<close> $ (\<^term>\<open>HOL.implies\<close> $ a $ b) $ _)) =
- Drule.infer_instantiate' ctxt (map (SOME o Thm.cterm_of ctxt) [a, b]) thm
- | implies_pos_neg_term ctxt thm t =
- replay_error ctxt "invalid application in Implies" Unsupported [thm] t
-
- fun prove_implies_pos_neg thm ctxt _ t =
- let val thm = implies_pos_neg_term ctxt thm t
- in
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- Method.insert_tac ctxt [thm]
- THEN' TRY' (assume_tac ctxt))
- end
-in
-
-val implies_neg1_thm = @{lemma \<open>(a \<longrightarrow> b) \<or> a\<close> by blast}
-val implies_neg1 = prove_implies_pos_neg implies_neg1_thm
-
-val implies_neg2_thm = @{lemma \<open>(a \<longrightarrow> b) \<or> \<not>b\<close> by blast}
-val implies_neg2 = prove_implies_pos_neg implies_neg2_thm
-
-val implies_thm = @{lemma \<open>(a \<longrightarrow> b) \<Longrightarrow> \<not>a \<or> b\<close> by blast}
-fun implies_rules ctxt prems t = SMT_Replay_Methods.prove ctxt t (fn _ =>
- Method.insert_tac ctxt [implies_thm OF prems]
- THEN' TRY' (assume_tac ctxt))
-
-end
-
-
-(* BFun *)
-
-local
- val bfun_theorems = @{thms verit_bfun_elim}
-in
-
-fun bfun_elim ctxt prems t = SMT_Replay_Methods.prove ctxt t (fn _ =>
- Method.insert_tac ctxt prems
- THEN' TRY' (eqsubst_all ctxt bfun_theorems)
- THEN' TRY' (simplify_tac ctxt @{thms eq_commute all_conj_distrib ex_disj_distrib}))
-
-end
-
-
-(* If-Then-Else *)
-
-local
- fun prove_ite ite_thm thm ctxt =
- resolve_tac ctxt [ite_thm OF [thm]]
- THEN' K (unfold_tac ctxt @{thms not_not})
- THEN' TRY' (assume_tac ctxt)
-in
-
-val ite_pos1_thm =
- @{lemma \<open>\<not>(if x then P else Q) \<or> x \<or> Q\<close> by auto}
-
-fun ite_pos1 ctxt _ t = SMT_Replay_Methods.prove ctxt t (fn _ =>
- resolve_tac ctxt [ite_pos1_thm])
-
-val ite_pos2_thms =
- @{lemma \<open>\<not>(if x then P else Q) \<or> \<not>x \<or> P\<close> \<open>\<not>(if \<not>x then P else Q) \<or> x \<or> P\<close> by auto}
-
-fun ite_pos2 ctxt _ t = SMT_Replay_Methods.prove ctxt t (fn _ => resolve_tac ctxt ite_pos2_thms)
-
-val ite_neg1_thms =
- @{lemma \<open>(if x then P else Q) \<or> x \<or> \<not>Q\<close> \<open>(if x then P else \<not>Q) \<or> x \<or> Q\<close> by auto}
-
-fun ite_neg1 ctxt _ t = SMT_Replay_Methods.prove ctxt t (fn _ => resolve_tac ctxt ite_neg1_thms)
-
-val ite_neg2_thms =
- @{lemma \<open>(if x then P else Q) \<or> \<not>x \<or> \<not>P\<close> \<open>(if \<not>x then P else Q) \<or> x \<or> \<not>P\<close>
- \<open>(if x then \<not>P else Q) \<or> \<not>x \<or> P\<close> \<open>(if \<not>x then \<not>P else Q) \<or> x \<or> P\<close>
- by auto}
-
-fun ite_neg2 ctxt _ t = SMT_Replay_Methods.prove ctxt t (fn _ => resolve_tac ctxt ite_neg2_thms)
-
-val ite1_thm =
- @{lemma \<open>(if x then P else Q) \<Longrightarrow> x \<or> Q\<close> by (auto split: if_splits) }
-
-fun ite1 ctxt [thm] t = SMT_Replay_Methods.prove ctxt t (fn _ => prove_ite ite1_thm thm ctxt)
-
-val ite2_thm =
- @{lemma \<open>(if x then P else Q) \<Longrightarrow> \<not>x \<or> P\<close> by (auto split: if_splits) }
-
-fun ite2 ctxt [thm] t = SMT_Replay_Methods.prove ctxt t (fn _ => prove_ite ite2_thm thm ctxt)
-
-val not_ite1_thm =
- @{lemma \<open>\<not>(if x then P else Q) \<Longrightarrow> x \<or> \<not>Q\<close> by (auto split: if_splits) }
-
-fun not_ite1 ctxt [thm] t = SMT_Replay_Methods.prove ctxt t (fn _ => prove_ite not_ite1_thm thm ctxt)
-
-val not_ite2_thm =
- @{lemma \<open>\<not>(if x then P else Q) \<Longrightarrow> \<not>x \<or> \<not>P\<close> by (auto split: if_splits) }
-
-fun not_ite2 ctxt [thm] t = SMT_Replay_Methods.prove ctxt t (fn _ => prove_ite not_ite2_thm thm ctxt)
-
-(*ite_intro can introduce new terms that appear in the proof exactly the same as the old ones, but
-are not the same internally, hence the possible reordering.*)
-fun ite_intro ctxt _ t =
- let
- fun simplify_ite ctxt thms =
- ctxt
- |> empty_simpset
- |> put_simpset HOL_basic_ss
- |> (fn ctxt => ctxt addsimps thms @ @{thms if_True if_False refl simp_thms if_cancel})
- |> Raw_Simplifier.add_eqcong @{thm verit_ite_if_cong}
- |> Simplifier.full_simp_tac
- in
- SMT_Replay_Methods.prove ctxt t (fn _ => simplify_ite ctxt []
- THEN' TRY' (simplify_ite ctxt @{thms eq_commute}))
- end
-end
-
-
-(* Quantifiers *)
-
-local
- val rm_unused = @{lemma \<open>(\<forall>x. P) = P\<close> \<open>(\<exists>x. P) = P\<close> by blast+}
-
- fun qnt_cnf_tac ctxt =
- simplify_tac ctxt @{thms de_Morgan_conj de_Morgan_disj imp_conv_disj
- iff_conv_conj_imp if_bool_eq_disj ex_simps all_simps ex_disj_distrib
- verit_connective_def(3) all_conj_distrib}
- THEN' TRY' (Blast.blast_tac ctxt)
-in
-fun qnt_rm_unused ctxt _ t = SMT_Replay_Methods.prove ctxt t (fn _ =>
- K (unfold_tac ctxt rm_unused))
-
-fun onepoint ctxt prems t = SMT_Replay_Methods.prove ctxt t (fn _ =>
- Method.insert_tac ctxt prems
- THEN' Simplifier.full_simp_tac (put_simpset HOL_basic_ss (empty_simpset ctxt)
- addsimps @{thms HOL.simp_thms HOL.all_simps}
- addsimprocs [@{simproc HOL.defined_All}, @{simproc HOL.defined_Ex}])
- THEN' TRY' (Blast.blast_tac ctxt)
- THEN' TRY' (Metis_Tactic.metis_tac [] ATP_Problem_Generate.combsN ctxt []))
-
-fun qnt_join ctxt _ t = SMT_Replay_Methods.prove ctxt t Classical.fast_tac
-
-fun qnt_cnf ctxt _ t = SMT_Replay_Methods.prove ctxt t qnt_cnf_tac
-
-fun qnt_simplify ctxt _ t = SMT_Replay_Methods.prove ctxt t (fn _ =>
- partial_simplify_tac ctxt [])
-
-end
-
-(* Equality *)
-
-fun eq_transitive ctxt _ t = SMT_Replay_Methods.prove ctxt t (fn _ =>
- REPEAT_CHANGED (resolve_tac ctxt [@{thm disj_not1} RSN (1, @{thm iffD2}) OF @{thms impI}])
- THEN' REPEAT' (resolve_tac ctxt @{thms impI})
- THEN' REPEAT' (eresolve_tac ctxt @{thms conjI})
- THEN' REPEAT' (fn i => dresolve_tac ctxt @{thms verit_eq_transitive} i THEN assume_tac ctxt (i+1))
- THEN' resolve_tac ctxt @{thms refl})
-
-local
- fun find_rew rews t t' =
- (case AList.lookup (op =) rews (t, t') of
- SOME thm => SOME (thm COMP @{thm symmetric})
- | NONE =>
- (case AList.lookup (op =) rews (t', t) of
- SOME thm => SOME thm
- | NONE => NONE))
-
- fun eq_pred_conv rews t ctxt ctrm =
- (case find_rew rews t (Thm.term_of ctrm) of
- SOME thm => Conv.rewr_conv thm ctrm
- | NONE =>
- (case t of
- f $ arg =>
- (Conv.fun_conv (eq_pred_conv rews f ctxt) then_conv
- Conv.arg_conv (eq_pred_conv rews arg ctxt)) ctrm
- | Abs (_, _, f) => Conv.abs_conv (eq_pred_conv rews f o snd) ctxt ctrm
- | _ => Conv.all_conv ctrm))
-
- fun eq_pred_rewrite_tac ({context = ctxt, prems, concl, ...}: Subgoal.focus) =
- let
- val rews = prems
- |> map_filter (try (apfst (HOLogic.dest_eq o Thm.term_of o Object_Logic.dest_judgment ctxt o
- Thm.cconcl_of) o `(fn x => x)))
- |> map (apsnd (fn x => @{thm eq_reflection} OF [x]))
- fun conv_left conv = Conv.arg_conv (Conv.arg_conv conv)
- fun conv_left_neg conv = Conv.arg_conv (Conv.arg_conv (Conv.arg_conv conv))
- val (t1, conv_term) =
- (case Thm.term_of (Object_Logic.dest_judgment ctxt concl) of
- Const(_, _) $ (Const(\<^const_name>\<open>HOL.Not\<close>, _) $ t1) $ _ => (t1, conv_left)
- | Const(_, _) $ t1 $ (Const(\<^const_name>\<open>HOL.Not\<close>, _) $ _) => (t1, conv_left_neg)
- | Const(_, _) $ t1 $ _ => (t1, conv_left)
- | t1 => (t1, conv_left))
-
- fun throwing_CONVERSION cv i st = Seq.single (Conv.gconv_rule cv i st)
- in
- throwing_CONVERSION (conv_term (eq_pred_conv rews t1 ctxt))
- end
-in
-
-fun eq_congruent_pred ctxt _ t =
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- REPEAT' (resolve_tac ctxt [@{thm disj_not1[of \<open>_ = _\<close>]} RSN (1, @{thm iffD2}) OF @{thms impI}])
- THEN' (fn i => Subgoal.FOCUS (fn focus => eq_pred_rewrite_tac focus i) ctxt i)
- THEN' (resolve_tac ctxt @{thms refl excluded_middle excluded_middle[of \<open>\<not>_\<close>, unfolded not_not]}
- ORELSE' assume_tac ctxt))
-
-val eq_congruent = eq_congruent_pred
-
-end
-
-
-(* Subproof *)
-
-fun subproof ctxt [prem] t =
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- (resolve_tac ctxt [@{thm disj_not1} RSN (1, @{thm iffD2}) OF [@{thm impI}],
- @{thm disj_not1[of \<open>\<not>_\<close>, unfolded not_not]} RSN (1, @{thm iffD2}) OF [@{thm impI}]]
- THEN' resolve_tac ctxt [prem]
- THEN_ALL_NEW assume_tac ctxt
- THEN' TRY' (assume_tac ctxt))
- ORELSE' TRY' (Method.insert_tac ctxt [prem] THEN' Blast.blast_tac ctxt))
- | subproof ctxt prems t =
- replay_error ctxt "invalid application, only one assumption expected" Subproof prems t
-
-
-(* Simplifying Steps *)
-
-(* The reconstruction is a bit tricky: at first we rewrite only with the rules that are
-passed as arguments. Then we simply use simp_thms to reconstruct all the proofs (and these theorems
-cover all the simplifications below).
-*)
-local
- fun rewrite_only_thms ctxt thms =
- ctxt
- |> empty_simpset
- |> put_simpset HOL_basic_ss
- |> (fn ctxt => ctxt addsimps thms)
- |> Simplifier.full_simp_tac
- fun rewrite_thms ctxt thms =
- ctxt
- |> empty_simpset
- |> put_simpset HOL_basic_ss
- |> Raw_Simplifier.add_eqcong @{thm eq_reflection[OF imp_cong]}
- |> (fn ctxt => ctxt addsimps thms addsimps @{thms simp_thms})
- |> Simplifier.full_simp_tac
-
- fun chain_rewrite_thms ctxt thms =
- TRY' (rewrite_only_thms ctxt thms)
- THEN' TRY' (rewrite_thms ctxt thms)
-
- fun chain_rewrite_two_step_with_ac_simps ctxt thms1 thms2 =
- TRY' (rewrite_only_thms ctxt thms1)
- THEN' TRY' (rewrite_thms ctxt thms2)
-
- fun chain_rewrite_thms_two_step ctxt thms1 thms2 thms3 thms4 =
- chain_rewrite_two_step_with_ac_simps ctxt thms1 thms2
- THEN' TRY' (chain_rewrite_two_step_with_ac_simps ctxt thms3 thms4)
-
- val imp_refl = @{lemma \<open>(P \<longrightarrow> P) = True\<close> by blast}
-
-in
-fun rewrite_bool_simplify ctxt _ t =
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- (chain_rewrite_thms ctxt @{thms verit_bool_simplify}))
-
-fun rewrite_and_simplify ctxt _ t =
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- (chain_rewrite_two_step_with_ac_simps ctxt @{thms verit_and_simplify verit_and_simplify1}
- @{thms verit_and_simplify}))
-
-fun rewrite_or_simplify ctxt _ t =
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- (chain_rewrite_two_step_with_ac_simps ctxt @{thms verit_or_simplify verit_or_simplify_1}
- @{thms verit_or_simplify})
- THEN' TRY' (REPEAT' (match_tac ctxt @{thms verit_and_pos}) THEN' assume_tac ctxt))
-
-fun rewrite_not_simplify ctxt _ t =
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- (rewrite_only_thms ctxt @{thms verit_not_simplify}))
-
-fun rewrite_equiv_simplify ctxt _ t =
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- (rewrite_only_thms ctxt @{thms verit_equiv_simplify}))
-
-fun rewrite_eq_simplify ctxt _ t =
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- (chain_rewrite_two_step_with_ac_simps ctxt
- @{thms verit_eq_simplify}
- (Named_Theorems.get ctxt @{named_theorems ac_simps})))
-
-fun rewrite_implies_simplify ctxt _ t =
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- (rewrite_only_thms ctxt @{thms verit_implies_simplify}))
-
-(* It is more efficient to first fold the implication symbols.
- That is, however, not enough when symbols appear within
- expressions, hence we also unfold implications in the
- other direction. *)
-fun rewrite_connective_def ctxt _ t =
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- chain_rewrite_thms_two_step ctxt
- (imp_refl :: @{thms not_not verit_connective_def[symmetric]})
- (@{thms verit_connective_def[symmetric]})
- (imp_refl :: @{thms not_not verit_connective_def})
- (@{thms verit_connective_def}))
-
-fun rewrite_minus_simplify ctxt _ t =
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- chain_rewrite_two_step_with_ac_simps ctxt
- @{thms arith_simps verit_minus_simplify}
- (Named_Theorems.get ctxt @{named_theorems ac_simps} @
- @{thms numerals arith_simps arith_special
- numeral_class.numeral.numeral_One}))
-
-fun rewrite_comp_simplify ctxt _ t =
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- chain_rewrite_thms ctxt @{thms verit_comp_simplify})
-
-fun rewrite_ite_simplify ctxt _ t =
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- (rewrite_only_thms ctxt @{thms verit_ite_simplify}))
-end
-
-
-(* Simplifications *)
-
-local
- fun simplify_arith ctxt thms = partial_simplify_tac ctxt
- (thms @ Named_Theorems.get ctxt @{named_theorems ac_simps} @ @{thms arith_simps})
-in
-
-fun sum_simplify ctxt _ t =
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- simplify_arith ctxt @{thms verit_sum_simplify arith_special})
-
-fun prod_simplify ctxt _ t =
- SMT_Replay_Methods.prove ctxt t (fn _ =>
- simplify_arith ctxt @{thms verit_prod_simplify})
-
-end
-
-
-(* Arithmetics *)
-local
-
-val la_rw_eq_thm = @{lemma \<open>(a :: nat) = b \<or> (a \<le> b) \<or> (a \<ge> b)\<close> by auto}
-in
-fun la_rw_eq ctxt _ t = SMT_Replay_Methods.match_instantiate ctxt t la_rw_eq_thm
-
-fun la_generic_prove args ctxt _ = SMT_Replay_Arith.la_farkas args ctxt
-
-fun la_generic ctxt _ args =
- prove_abstract (SMT_Replay_Methods.abstract_arith_shallow ctxt) (la_generic_prove args) ctxt []
-
-fun lia_generic ctxt _ t =
- SMT_Replay_Methods.prove_arith_rewrite SMT_Replay_Methods.abstract_neq ctxt t
-
-fun la_disequality ctxt _ t =
- SMT_Replay_Methods.match_instantiate ctxt t @{thm verit_la_disequality}
-
-end
(* Combining all together *)
--- a/src/Pure/Admin/build_release.scala Wed Feb 23 08:43:44 2022 +0100
+++ b/src/Pure/Admin/build_release.scala Wed Mar 23 10:54:22 2022 +0100
@@ -316,7 +316,7 @@
-classpath """" + classpath.map(p => "$ISABELLE_HOME/" + p.implode).mkString(":") + """" \
"-splash:$ISABELLE_HOME/lib/logo/isabelle.gif" \
""" + (if (dock_icon) """"-Xdock:icon=$ISABELLE_HOME/lib/logo/isabelle_transparent-128.png" \
-""" else "") + """isabelle.jedit.Main "$@"
+""" else "") + """isabelle.jedit.JEdit_Main "$@"
"""
val script_path = isabelle_target + Path.explode("lib/scripts/Isabelle_app")
File.write(script_path, script)
--- a/src/Pure/ROOT.scala Wed Feb 23 08:43:44 2022 +0100
+++ b/src/Pure/ROOT.scala Wed Mar 23 10:54:22 2022 +0100
@@ -18,6 +18,7 @@
val quote = Library.quote _
val commas = Library.commas _
val commas_quote = Library.commas_quote _
+ val proper_bool = Library.proper_bool _
val proper_string = Library.proper_string _
def proper_list[A](list: List[A]): Option[List[A]] = Library.proper_list(list)
}
--- a/src/Pure/System/isabelle_tool.scala Wed Feb 23 08:43:44 2022 +0100
+++ b/src/Pure/System/isabelle_tool.scala Wed Mar 23 10:54:22 2022 +0100
@@ -210,7 +210,6 @@
Update_Then.isabelle_tool,
Update_Theorems.isabelle_tool,
isabelle.mirabelle.Mirabelle.isabelle_tool,
- isabelle.vscode.TextMate_Grammar.isabelle_tool,
isabelle.vscode.Language_Server.isabelle_tool)
class Admin_Tools extends Isabelle_Scala_Tools(
@@ -237,4 +236,4 @@
isabelle.vscode.Build_VSCode.isabelle_tool,
isabelle.vscode.Build_VSCodium.isabelle_tool1,
isabelle.vscode.Build_VSCodium.isabelle_tool2,
- isabelle.vscode.VSCode_Setup.isabelle_tool)
+ isabelle.vscode.VSCode_Main.isabelle_tool)
--- a/src/Pure/Thy/sessions.scala Wed Feb 23 08:43:44 2022 +0100
+++ b/src/Pure/Thy/sessions.scala Wed Mar 23 10:54:22 2022 +0100
@@ -1013,7 +1013,7 @@
def is_session_dir(dir: Path): Boolean =
(dir + ROOT).is_file || (dir + ROOTS).is_file
- private def check_session_dir(dir: Path): Path =
+ def check_session_dir(dir: Path): Path =
if (is_session_dir(dir)) File.pwd() + dir.expand
else error("Bad session root directory (missing ROOT or ROOTS): " + dir.expand.toString)
--- a/src/Pure/library.scala Wed Feb 23 08:43:44 2022 +0100
+++ b/src/Pure/library.scala Wed Mar 23 10:54:22 2022 +0100
@@ -280,6 +280,9 @@
/* proper values */
+ def proper_bool(b: Boolean): Option[Boolean] =
+ if (!b) None else Some(b)
+
def proper_string(s: String): Option[String] =
if (s == null || s == "") None else Some(s)
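
The new Library.proper_bool follows the same pattern as proper_string: default values map to None so that callers can simply drop them when serializing. A minimal self-contained sketch of the intended usage; the "optional" helper below is a simplified stand-in for the JSON encoding used by VSCode_Main.run_cli further down, not the actual Isabelle/Scala API:

  object Proper_Bool_Example {
    // mirrors the new Library.proper_bool
    def proper_bool(b: Boolean): Option[Boolean] = if (!b) None else Some(b)

    // simplified stand-in for an optional JSON entry: None is dropped entirely
    def optional(entry: (String, Option[Any])): Map[String, Any] =
      entry match {
        case (name, Some(x)) => Map(name -> x)
        case (_, None) => Map.empty
      }

    def main(args: Array[String]): Unit = {
      println(optional("verbose" -> proper_bool(false)))  // Map() -- default value omitted
      println(optional("verbose" -> proper_bool(true)))   // Map(verbose -> true)
    }
  }
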
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Tools/VSCode/extension/MANIFEST Wed Mar 23 10:54:22 2022 +0100
@@ -0,0 +1,39 @@
+isabelle-language.json
+isabelle-ml-grammar.json
+isabelle-ml-language.json
+isabelle.png
+isabelle_vscode.png
+media/main.js
+media/Preview_inverse.svg
+media/PreviewOnRightPane_16x_dark.svg
+media/PreviewOnRightPane_16x.svg
+media/Preview.svg
+media/ViewSource_inverse.svg
+media/ViewSource.svg
+media/vscode.css
+package.json
+README.md
+src/abbreviations.ts
+src/completion.ts
+src/decorations.ts
+src/extension.ts
+src/file.ts
+src/library.ts
+src/lsp.ts
+src/output_view.ts
+src/platform.ts
+src/prefix_tree.ts
+src/preview_panel.ts
+src/script_decorations.ts
+src/state_panel.ts
+src/symbol.ts
+src/vscode_lib.ts
+test/extension.test.ts
+test/index.ts
+tsconfig.json
+.vscodeignore
+.vscode/launch.json
+.vscode/settings.json
+.vscode/tasks.json
+yarn.lock
+.yarnrc
--- a/src/Tools/VSCode/extension/package.json Wed Feb 23 08:43:44 2022 +0100
+++ b/src/Tools/VSCode/extension/package.json Wed Mar 23 10:54:22 2022 +0100
@@ -193,14 +193,6 @@
"configuration": {
"title": "Isabelle",
"properties": {
- "isabelle.args": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "default": [],
- "description": "Command-line arguments for isabelle vscode_server process."
- },
"isabelle.replacement": {
"type": "string",
"default": "non-alphanumeric",
--- a/src/Tools/VSCode/extension/src/extension.ts Wed Feb 23 08:43:44 2022 +0100
+++ b/src/Tools/VSCode/extension/src/extension.ts Wed Mar 23 10:54:22 2022 +0100
@@ -25,6 +25,63 @@
let last_caret_update: lsp.Caret_Update = {}
+
+/* command-line arguments from "isabelle vscode" */
+
+interface Args
+{
+ options?: string[],
+ logic?: string,
+ logic_ancestor?: string,
+ logic_requirements?: boolean,
+ session_dirs?: string[],
+ include_sessions?: string[],
+ modes?: string[],
+ log_file?: string,
+ verbose?: boolean
+}
+
+function print_value(x: any): string
+{
+ return typeof(x) === "string" ? x : JSON.stringify(x)
+}
+
+function isabelle_options(args: Args): string[]
+{
+ var result: string[] = []
+ function add(s: string) { result.push(s) }
+ function add_value(opt: string, slot: string)
+ {
+ const x = args[slot]
+ if (x) { add(opt); add(print_value(x)) }
+ }
+ function add_values(opt: string, slot: string)
+ {
+ const xs: any[] = args[slot]
+ if (xs) {
+ for (const x of xs) { add(opt); add(print_value(x)) }
+ }
+ }
+
+ add("-o"); add("vscode_unicode_symbols")
+ add("-o"); add("vscode_pide_extensions")
+ add_values("-o", "options")
+
+ add_value("-A", "logic_ancestor")
+ if (args.logic) { add_value(args.logic_requirements ? "-R" : "-l", "logic") }
+
+ add_values("-d", "session_dirs")
+ add_values("-i", "include_sessions")
+ add_values("-m", "modes")
+ add_value("-L", "log_file")
+ if (args.verbose) { add("-v") }
+
+ return result
+}
+
+
+/* activate extension */
+
export async function activate(context: ExtensionContext)
{
/* server */
@@ -32,16 +89,15 @@
try {
const isabelle_home = library.getenv_strict("ISABELLE_HOME")
const isabelle_tool = isabelle_home + "/bin/isabelle"
- const isabelle_args =
- ["-o", "vscode_unicode_symbols", "-o", "vscode_pide_extensions"]
- .concat(vscode_lib.get_configuration<Array<string>>("args"))
+ const args = JSON.parse(library.getenv("ISABELLE_VSCODIUM_ARGS") || "{}")
+ const server_opts = isabelle_options(args)
const server_options: ServerOptions =
platform.is_windows() ?
{ command: file.cygwin_bash(),
- args: ["-l", isabelle_tool, "vscode_server"].concat(isabelle_args) } :
+ args: ["-l", isabelle_tool, "vscode_server"].concat(server_opts) } :
{ command: isabelle_tool,
- args: ["vscode_server"].concat(isabelle_args) }
+ args: ["vscode_server"].concat(server_opts) }
const language_client_options: LanguageClientOptions = {
documentSelector: [
--- a/src/Tools/VSCode/extension/vsc-extension-quickstart.md Wed Feb 23 08:43:44 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,33 +0,0 @@
-# Welcome to your first VS Code Extension
-
-## What's in the folder
-* This folder contains all of the files necessary for your extension
-* `package.json` - this is the manifest file in which you declare your extension and command.
-The sample plugin registers a command and defines its title and command name. With this information
-VS Code can show the command in the command palette. It doesn’t yet need to load the plugin.
-* `src/extension.ts` - this is the main file where you will provide the implementation of your command.
-The file exports one function, `activate`, which is called the very first time your extension is
-activated (in this case by executing the command). Inside the `activate` function we call `registerCommand`.
-We pass the function containing the implementation of the command as the second parameter to
-`registerCommand`.
-
-## Get up and running straight away
-* press `F5` to open a new window with your extension loaded
-* run your command from the command palette by pressing (`Ctrl+Shift+P` or `Cmd+Shift+P` on Mac) and typing `Hello World`
-* set breakpoints in your code inside `src/extension.ts` to debug your extension
-* find output from your extension in the debug console
-
-## Make changes
-* you can relaunch the extension from the debug toolbar after changing code in `src/extension.ts`
-* you can also reload (`Ctrl+R` or `Cmd+R` on Mac) the VS Code window with your extension to load your changes
-
-## Explore the API
-* you can open the full set of our API when you open the file `node_modules/vscode/vscode.d.ts`
-
-## Run tests
-* open the debug viewlet (`Ctrl+Shift+D` or `Cmd+Shift+D` on Mac) and from the launch configuration dropdown pick `Launch Tests`
-* press `F5` to run the tests in a new window with your extension loaded
-* see the output of the test result in the debug console
-* make changes to `test/extension.test.ts` or create new test files inside the `test` folder
- * by convention, the test runner will only consider files matching the name pattern `**.test.ts`
- * you can create folders inside the `test` folder to structure your tests any way you want
\ No newline at end of file
--- a/src/Tools/VSCode/src/build_vscode_extension.scala Wed Feb 23 08:43:44 2022 +0100
+++ b/src/Tools/VSCode/src/build_vscode_extension.scala Wed Mar 23 10:54:22 2022 +0100
@@ -12,49 +12,177 @@
object Build_VSCode
{
- val extension_dir = Path.explode("$ISABELLE_VSCODE_HOME/extension")
+ /* build grammar */
+ def default_logic: String = Isabelle_System.getenv("ISABELLE_LOGIC")
- /* grammar */
-
- def build_grammar(options: Options, progress: Progress = new Progress): Unit =
+ def build_grammar(options: Options, build_dir: Path,
+ logic: String = default_logic,
+ dirs: List[Path] = Nil,
+ progress: Progress = new Progress): Unit =
{
- val logic = TextMate_Grammar.default_logic
- val keywords = Sessions.base_info(options, logic).check.base.overall_syntax.keywords
+ val keywords =
+ Sessions.base_info(options, logic, dirs = dirs).check.base.overall_syntax.keywords
+
+ val output_path = build_dir + Path.explode("isabelle-grammar.json")
+ progress.echo(output_path.expand.implode)
+
+ val (minor_keywords, operators) =
+ keywords.minor.iterator.toList.partition(Symbol.is_ascii_identifier)
+
+ def major_keywords(pred: String => Boolean): List[String] =
+ (for {
+ k <- keywords.major.iterator
+ kind <- keywords.kinds.get(k)
+ if pred(kind)
+ } yield k).toList
+
+ val keywords1 =
+ major_keywords(k => k != Keyword.THY_END && k != Keyword.PRF_ASM && k != Keyword.PRF_ASM_GOAL)
+ val keywords2 = minor_keywords ::: major_keywords(Set(Keyword.THY_END))
+ val keywords3 = major_keywords(Set(Keyword.PRF_ASM, Keyword.PRF_ASM_GOAL))
+
+ def grouped_names(as: List[String]): String =
+ JSON.Format("\\b(" + as.sorted.map(Library.escape_regex).mkString("|") + ")\\b")
- val output_path = extension_dir + Path.explode(TextMate_Grammar.default_output(logic))
- progress.echo(output_path.expand.implode)
- File.write_backup(output_path, TextMate_Grammar.generate(keywords))
+ File.write_backup(output_path, """{
+ "name": "Isabelle",
+ "scopeName": "source.isabelle",
+ "fileTypes": ["thy"],
+ "uuid": """ + JSON.Format(UUID.random().toString) + """,
+ "repository": {
+ "comment": {
+ "patterns": [
+ {
+ "name": "comment.block.isabelle",
+ "begin": "\\(\\*",
+ "patterns": [{ "include": "#comment" }],
+ "end": "\\*\\)"
+ }
+ ]
+ },
+ "cartouche": {
+ "patterns": [
+ {
+ "name": "string.quoted.other.multiline.isabelle",
+ "begin": "(?:\\\\<open>|‹)",
+ "patterns": [{ "include": "#cartouche" }],
+ "end": "(?:\\\\<close>|›)"
+ }
+ ]
+ }
+ },
+ "patterns": [
+ {
+ "include": "#comment"
+ },
+ {
+ "include": "#cartouche"
+ },
+ {
+ "name": "keyword.control.isabelle",
+ "match": """ + grouped_names(keywords1) + """
+ },
+ {
+ "name": "keyword.other.unit.isabelle",
+ "match": """ + grouped_names(keywords2) + """
+ },
+ {
+ "name": "keyword.operator.isabelle",
+ "match": """ + grouped_names(operators) + """
+ },
+ {
+ "name": "entity.name.type.isabelle",
+ "match": """ + grouped_names(keywords3) + """
+ },
+ {
+ "name": "constant.numeric.isabelle",
+ "match": "\\b\\d*\\.?\\d+\\b"
+ },
+ {
+ "name": "string.quoted.double.isabelle",
+ "begin": "\"",
+ "patterns": [
+ {
+ "name": "constant.character.escape.isabelle",
+ "match": """ + JSON.Format("""\\[\"]|\\\d\d\d""") + """
+ }
+ ],
+ "end": "\""
+ },
+ {
+ "name": "string.quoted.backtick.isabelle",
+ "begin": "`",
+ "patterns": [
+ {
+ "name": "constant.character.escape.isabelle",
+ "match": """ + JSON.Format("""\\[\`]|\\\d\d\d""") + """
+ }
+ ],
+ "end": "`"
+ },
+ {
+ "name": "string.quoted.verbatim.isabelle",
+ "begin": """ + JSON.Format("""\{\*""") + """,
+ "patterns": [
+ { "match": """ + JSON.Format("""[^*]+|\*(?!\})""") + """ }
+ ],
+ "end": """ + JSON.Format("""\*\}""") + """
+ }
+ ]
+}
+""")
}
/* extension */
+ lazy val extension_dir = Path.explode("$ISABELLE_VSCODE_HOME/extension")
+
def uninstall_extension(progress: Progress = new Progress): Unit =
- progress.bash("isabelle vscode --uninstall-extension Isabelle.isabelle").check
+ VSCode_Main.run_cli(
+ List("--uninstall-extension", "Isabelle.isabelle"), progress = progress).check
- def install_extension(vsix_path: Path, progress: Progress = new Progress): Unit =
- progress.bash("isabelle vscode --install-extension " +
- File.bash_platform_path(vsix_path)).check
+ def install_extension(vsix: File.Content, progress: Progress = new Progress): Unit =
+ {
+ Isabelle_System.with_tmp_dir("tmp")(tmp_dir =>
+ {
+ vsix.write(tmp_dir)
+ VSCode_Main.run_cli(
+ List("--install-extension", File.platform_path(tmp_dir + vsix.path)), progress = progress).check
+ })
+ }
- def build_extension(progress: Progress = new Progress): Path =
+ def build_extension(options: Options,
+ logic: String = default_logic,
+ dirs: List[Path] = Nil,
+ progress: Progress = new Progress): File.Content =
{
Isabelle_System.require_command("node")
Isabelle_System.require_command("yarn")
Isabelle_System.require_command("vsce")
- val output_path = extension_dir + Path.explode("out")
- Isabelle_System.rm_tree(output_path)
- Isabelle_System.make_directory(output_path)
- progress.echo(output_path.expand.implode)
+ Isabelle_System.with_tmp_dir("build")(build_dir =>
+ {
+ for {
+ line <- split_lines(File.read(extension_dir + Path.explode("MANIFEST")))
+ if line.nonEmpty
+ } {
+ val path = Path.explode(line)
+ Isabelle_System.copy_file(extension_dir + path,
+ Isabelle_System.make_directory(build_dir + path.dir))
+ }
- val result =
- progress.bash("yarn && vsce package", cwd = extension_dir.file, echo = true).check
+ build_grammar(options, build_dir, logic = logic, dirs = dirs, progress = progress)
- val Pattern = """.*Packaged:.*(isabelle-.*\.vsix).*""".r
- result.out_lines.collectFirst(
- { case Pattern(vsix_name) => extension_dir + Path.basic(vsix_name) })
- .getOrElse(error("Failed to guess resulting .vsix file name"))
+ val result =
+ progress.bash("yarn && vsce package", cwd = build_dir.file, echo = true).check
+ val Pattern = """.*Packaged:.*(isabelle-.*\.vsix).*""".r
+ val path =
+ result.out_lines.collectFirst({ case Pattern(name) => Path.explode(name) })
+ .getOrElse(error("Failed to guess resulting .vsix file name"))
+ File.Content(path, Bytes.read(build_dir + path))
+ })
}
@@ -64,6 +192,8 @@
Isabelle_Tool("build_vscode_extension", "build Isabelle/VSCode extension module",
Scala_Project.here, args =>
{
+ var dirs: List[Path] = Nil
+ var logic = default_logic
var install = false
var uninstall = false
@@ -73,12 +203,15 @@
Options are:
-I install resulting extension
-U uninstall extension (no build)
+ -d DIR include session directory
+ -l NAME logic session name (default ISABELLE_LOGIC=""" + quote(default_logic) + """)
-Build Isabelle/VSCode extension module in directory
-""" + extension_dir.expand + """
+Build Isabelle/VSCode extension module (vsix).
""",
"I" -> (_ => install = true),
- "U" -> (_ => uninstall = true))
+ "U" -> (_ => uninstall = true),
+ "d:" -> (arg => dirs = dirs ::: List(Path.explode(arg))),
+ "l:" -> (arg => logic = arg))
val more_args = getopts(args)
if (more_args.nonEmpty) getopts.usage()
@@ -86,13 +219,11 @@
val options = Options.init()
val progress = new Console_Progress()
- if (uninstall) {
- uninstall_extension(progress = progress)
- }
+ if (uninstall) uninstall_extension(progress = progress)
else {
- build_grammar(options, progress = progress)
- val path = build_extension(progress = progress)
- if (install) install_extension(path, progress = progress)
+ val vsix = build_extension(options, logic = logic, dirs = dirs, progress = progress)
+ vsix.write(extension_dir)
+ if (install) install_extension(vsix, progress = progress)
}
})
}
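
The MANIFEST-driven copy in build_extension replaces the former in-place build under the extension directory. Roughly, the copy step amounts to the following stand-alone sketch, written with plain java.nio instead of the Isabelle_System/File operations used above (object and parameter names are made up for illustration):

  import java.nio.file.{Files, Paths, StandardCopyOption}
  import scala.jdk.CollectionConverters._

  object Manifest_Copy_Sketch {
    // copy every non-empty MANIFEST line from src_dir into build_dir,
    // creating parent directories on demand
    def copy_manifest(src_dir: java.nio.file.Path, build_dir: java.nio.file.Path): Unit =
      for (line <- Files.readAllLines(src_dir.resolve("MANIFEST")).asScala if line.nonEmpty) {
        val target = build_dir.resolve(line)
        Files.createDirectories(target.getParent)
        Files.copy(src_dir.resolve(line), target, StandardCopyOption.REPLACE_EXISTING)
      }

    def main(args: Array[String]): Unit =
      copy_manifest(Paths.get(args(0)), Paths.get(args(1)))
  }
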
--- a/src/Tools/VSCode/src/language_server.scala Wed Feb 23 08:43:44 2022 +0100
+++ b/src/Tools/VSCode/src/language_server.scala Wed Mar 23 10:54:22 2022 +0100
@@ -39,6 +39,7 @@
var include_sessions: List[String] = Nil
var logic = default_logic
var modes: List[String] = Nil
+ var no_build = false
var options = Options.init()
var verbose = false
@@ -53,6 +54,7 @@
-i NAME include session in name-space of theories
-l NAME logic session name (default ISABELLE_LOGIC=""" + quote(default_logic) + """)
-m MODE add print mode for output
+ -n no build of session image on startup
-o OPTION override Isabelle system OPTION (via NAME=VAL or NAME)
-v verbose logging
@@ -65,6 +67,7 @@
"i:" -> (arg => include_sessions = include_sessions ::: List(arg)),
"l:" -> (arg => logic = arg),
"m:" -> (arg => modes = arg :: modes),
+ "n" -> (_ => no_build = true),
"o:" -> (arg => options = options + arg),
"v" -> (_ => verbose = true))
@@ -76,7 +79,8 @@
val server =
new Language_Server(channel, options, session_name = logic, session_dirs = dirs,
include_sessions = include_sessions, session_ancestor = logic_ancestor,
- session_requirements = logic_requirements, modes = modes, log = log)
+ session_requirements = logic_requirements, session_no_build = no_build,
+ modes = modes, log = log)
// prevent spurious garbage on the main protocol channel
val orig_out = System.out
@@ -103,6 +107,7 @@
session_dirs: List[Path] = Nil,
session_ancestor: Option[String] = None,
session_requirements: Boolean = false,
+ session_no_build: Boolean = false,
modes: List[String] = Nil,
log: Logger = No_Logger)
{
@@ -274,7 +279,7 @@
selection = Sessions.Selection.session(base_info.session), build_heap = true,
no_build = no_build, dirs = session_dirs, infos = base_info.infos)
- if (!build(no_build = true).ok) {
+ if (!session_no_build && !build(no_build = true).ok) {
val start_msg = "Build started for Isabelle/" + base_info.session + " ..."
val fail_msg = "Session build failed -- prover process remains inactive!"
--- a/src/Tools/VSCode/src/textmate_grammar.scala Wed Feb 23 08:43:44 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,167 +0,0 @@
-/* Title: Tools/VSCode/src/textmate_grammar.scala
- Author: Makarius
-
-Generate static TextMate grammar for VSCode editor.
-*/
-
-package isabelle.vscode
-
-
-import isabelle._
-
-
-object TextMate_Grammar
-{
- /* generate grammar */
-
- lazy val default_logic = Isabelle_System.getenv("ISABELLE_LOGIC")
-
- def default_output(logic: String = ""): String =
- if (logic == "" || logic == default_logic) "isabelle-grammar.json"
- else "isabelle-" + logic + "-grammar.json"
-
- def generate(keywords: Keyword.Keywords): JSON.S =
- {
- val (minor_keywords, operators) =
- keywords.minor.iterator.toList.partition(Symbol.is_ascii_identifier)
-
- def major_keywords(pred: String => Boolean): List[String] =
- (for {
- k <- keywords.major.iterator
- kind <- keywords.kinds.get(k)
- if pred(kind)
- } yield k).toList
-
- val keywords1 =
- major_keywords(k => k != Keyword.THY_END && k != Keyword.PRF_ASM && k != Keyword.PRF_ASM_GOAL)
- val keywords2 = minor_keywords ::: major_keywords(Set(Keyword.THY_END))
- val keywords3 = major_keywords(Set(Keyword.PRF_ASM, Keyword.PRF_ASM_GOAL))
-
-
- def grouped_names(as: List[String]): String =
- JSON.Format("\\b(" + as.sorted.map(Library.escape_regex).mkString("|") + ")\\b")
-
- """{
- "name": "Isabelle",
- "scopeName": "source.isabelle",
- "fileTypes": ["thy"],
- "uuid": """ + JSON.Format(UUID.random().toString) + """,
- "repository": {
- "comment": {
- "patterns": [
- {
- "name": "comment.block.isabelle",
- "begin": "\\(\\*",
- "patterns": [{ "include": "#comment" }],
- "end": "\\*\\)"
- }
- ]
- },
- "cartouche": {
- "patterns": [
- {
- "name": "string.quoted.other.multiline.isabelle",
- "begin": "(?:\\\\<open>|‹)",
- "patterns": [{ "include": "#cartouche" }],
- "end": "(?:\\\\<close>|›)"
- }
- ]
- }
- },
- "patterns": [
- {
- "include": "#comment"
- },
- {
- "include": "#cartouche"
- },
- {
- "name": "keyword.control.isabelle",
- "match": """ + grouped_names(keywords1) + """
- },
- {
- "name": "keyword.other.unit.isabelle",
- "match": """ + grouped_names(keywords2) + """
- },
- {
- "name": "keyword.operator.isabelle",
- "match": """ + grouped_names(operators) + """
- },
- {
- "name": "entity.name.type.isabelle",
- "match": """ + grouped_names(keywords3) + """
- },
- {
- "name": "constant.numeric.isabelle",
- "match": "\\b\\d*\\.?\\d+\\b"
- },
- {
- "name": "string.quoted.double.isabelle",
- "begin": "\"",
- "patterns": [
- {
- "name": "constant.character.escape.isabelle",
- "match": """ + JSON.Format("""\\[\"]|\\\d\d\d""") + """
- }
- ],
- "end": "\""
- },
- {
- "name": "string.quoted.backtick.isabelle",
- "begin": "`",
- "patterns": [
- {
- "name": "constant.character.escape.isabelle",
- "match": """ + JSON.Format("""\\[\`]|\\\d\d\d""") + """
- }
- ],
- "end": "`"
- },
- {
- "name": "string.quoted.verbatim.isabelle",
- "begin": """ + JSON.Format("""\{\*""") + """,
- "patterns": [
- { "match": """ + JSON.Format("""[^*]+|\*(?!\})""") + """ }
- ],
- "end": """ + JSON.Format("""\*\}""") + """
- }
- ]
-}
-"""
- }
-
-
- /* Isabelle tool wrapper */
-
- val isabelle_tool = Isabelle_Tool("vscode_grammar",
- "generate static TextMate grammar for VSCode editor", Scala_Project.here, args =>
- {
- var dirs: List[Path] = Nil
- var logic = default_logic
- var output: Option[Path] = None
-
- val getopts = Getopts("""
-Usage: isabelle vscode_grammar [OPTIONS]
-
- Options are:
- -d DIR include session directory
- -l NAME logic session name (default ISABELLE_LOGIC=""" + quote(default_logic) + """)
- -o FILE output file name (default """ + default_output() + """)
-
- Generate static TextMate grammar for VSCode editor.
-""",
- "d:" -> (arg => dirs = dirs ::: List(Path.explode(arg))),
- "l:" -> (arg => logic = arg),
- "o:" -> (arg => output = Some(Path.explode(arg))))
-
- val more_args = getopts(args)
- if (more_args.nonEmpty) getopts.usage()
-
- val keywords =
- Sessions.base_info(Options.init(), logic, dirs = dirs).check.base.overall_syntax.keywords
- val output_path = output getOrElse Path.explode(default_output(logic))
-
- Output.writeln(output_path.implode)
- File.write_backup(output_path, generate(keywords))
- })
-}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Tools/VSCode/src/vscode_main.scala Wed Mar 23 10:54:22 2022 +0100
@@ -0,0 +1,174 @@
+/* Title: Tools/VSCode/src/vscode_main.scala
+ Author: Makarius
+
+Main application entry point for Isabelle/VSCode.
+*/
+
+package isabelle.vscode
+
+
+import isabelle._
+
+
+object VSCode_Main
+{
+ /* vscodium command-line interface */
+
+ private def platform_path(s: String): String = File.platform_path(Path.explode(s))
+
+ def server_log_path: Path =
+ Path.explode("$ISABELLE_VSCODE_SETTINGS/server.log").expand
+
+ def run_cli(args: List[String],
+ environment: Iterable[(String, String)] = Nil,
+ options: List[String] = Nil,
+ logic: String = "",
+ logic_ancestor: String = "",
+ logic_requirements: Boolean = false,
+ session_dirs: List[Path] = Nil,
+ include_sessions: List[String] = Nil,
+ modes: List[String] = Nil,
+ no_build: Boolean = false,
+ server_log: Boolean = false,
+ verbose: Boolean = false,
+ background: Boolean = false,
+ progress: Progress = new Progress): Process_Result =
+ {
+ val args_json =
+ JSON.optional("options" -> proper_list(options)) ++
+ JSON.optional("logic" -> proper_string(logic)) ++
+ JSON.optional("logic_ancestor" -> proper_string(logic_ancestor)) ++
+ JSON.optional("logic_requirements" -> proper_bool(logic_requirements)) ++
+ JSON.optional("session_dirs" ->
+ proper_list(session_dirs.map(dir => Sessions.check_session_dir(dir).absolute.implode))) ++
+ JSON.optional("include_sessions" -> proper_list(include_sessions)) ++
+ JSON.optional("modes" -> proper_list(modes)) ++
+ JSON.optional("no_build" -> proper_bool(no_build)) ++
+ JSON.optional("log_file" ->
+ (if (server_log) Some(server_log_path.absolute.implode) else None)) ++
+ JSON.optional("verbose" -> proper_bool(verbose))
+
+ val env = new java.util.HashMap(Isabelle_System.settings())
+ for ((a, b) <- environment) env.put(a, b)
+ env.put("ISABELLE_VSCODIUM_ARGS", JSON.Format(args_json))
+ env.put("ISABELLE_VSCODIUM_APP", platform_path("$ISABELLE_VSCODIUM_RESOURCES/vscodium"))
+ env.put("ELECTRON_RUN_AS_NODE", "1")
+
+ val electron = Isabelle_System.getenv("ISABELLE_VSCODIUM_ELECTRON")
+ if (electron.isEmpty) {
+ error("""Undefined $ISABELLE_VSCODIUM_ELECTRON: missing "vscodium" component""")
+ }
+ val args0 =
+ List(platform_path("$ISABELLE_VSCODIUM_RESOURCES/vscodium/out/cli.js"),
+ "--ms-enable-electron-run-as-node", "--locale", "en-US",
+ "--user-data-dir", platform_path("$ISABELLE_VSCODE_SETTINGS/user-data"),
+ "--extensions-dir", platform_path("$ISABELLE_VSCODE_SETTINGS/extensions"))
+ val script =
+ Bash.strings(electron :: args0 ::: args) +
+ (if (background) " > /dev/null 2> /dev/null &" else "")
+
+ progress.bash(script, env = env, echo = true)
+ }
+
+
+ /* settings */
+
+ def settings_path: Path =
+ Path.explode("$ISABELLE_VSCODE_SETTINGS/user-data/User/settings.json")
+
+ private val default_settings = """ {
+ "editor.fontFamily": "'Isabelle DejaVu Sans Mono'",
+ "editor.fontSize": 18,
+ "editor.lineNumbers": "off",
+ "editor.renderIndentGuides": false,
+ "editor.rulers": [80, 100],
+ "editor.unicodeHighlight.ambiguousCharacters": false,
+ "extensions.autoCheckUpdates": false,
+ "extensions.autoUpdate": false,
+ "terminal.integrated.fontFamily": "monospace",
+ "update.mode": "none"
+ }
+"""
+
+ def init_settings(): Unit =
+ {
+ if (!settings_path.is_file) {
+ Isabelle_System.make_directory(settings_path.dir)
+ File.write(settings_path, default_settings)
+ }
+ }
+
+
+ /* Isabelle tool wrapper */
+
+ val isabelle_tool =
+ Isabelle_Tool("vscode", "Isabelle/VSCode interface wrapper", Scala_Project.here, args =>
+ {
+ var logic_ancestor = ""
+ var console = false
+ var server_log = false
+ var logic_requirements = false
+ var session_dirs = List.empty[Path]
+ var include_sessions = List.empty[String]
+ var logic = ""
+ var modes = List.empty[String]
+ var no_build = false
+ var options = List.empty[String]
+ var verbose = false
+
+ def add_option(opt: String): Unit = options = options ::: List(opt)
+
+ val getopts = Getopts("""
+Usage: isabelle vscode [OPTIONS] [-- VSCODE_OPTIONS ...]
+
+ -A NAME ancestor session for option -R (default: parent)
+ -C run as foreground process, with console output
+ -L enable language server log to file:
+ """ + server_log_path.implode + """
+ -R NAME build image with requirements from other sessions
+ -d DIR include session directory
+ -i NAME include session in name-space of theories
+ -l NAME logic session name
+ -m MODE add print mode for output
+ -n no build of session image on startup
+ -o OPTION override Isabelle system OPTION (via NAME=VAL or NAME)
+ -p CMD ML process command prefix (process policy)
+ -s system build mode for session image (system_heaps=true)
+ -u user build mode for session image (system_heaps=false)
+ -v verbose logging of language server
+
+ Start Isabelle/VSCode application, with automatic configuration of
+ user settings.
+
+ The following initial settings are provided for a fresh installation:
+""" + default_settings,
+ "A:" -> (arg => logic_ancestor = arg),
+ "C" -> (_ => console = true),
+ "L" -> (_ => server_log = true),
+ "R:" -> (arg => { logic = arg; logic_requirements = true }),
+ "d:" -> (arg => session_dirs = session_dirs ::: List(Path.explode(arg))),
+ "i:" -> (arg => include_sessions = include_sessions ::: List(arg)),
+ "l:" -> (arg => { logic = arg; logic_requirements = false }),
+ "m:" -> (arg => modes = modes ::: List(arg)),
+ "n" -> (_ => no_build = true),
+ "o:" -> add_option,
+ "p:" -> (arg => add_option("ML_process_policy=" + arg)),
+ "s" -> (_ => add_option("system_heaps=true")),
+ "u" -> (_ => add_option("system_heaps=false")),
+ "v" -> (_ => verbose = true))
+
+ val more_args = getopts(args)
+
+ init_settings()
+
+ val (background, progress) =
+ if (console) (false, new Console_Progress)
+ else { run_cli(List("--version")).check; (true, new Progress) }
+
+ run_cli(more_args, options = options, logic = logic, logic_ancestor = logic_ancestor,
+ logic_requirements = logic_requirements, session_dirs = session_dirs,
+ include_sessions = include_sessions, modes = modes, no_build = no_build,
+ server_log = server_log, verbose = verbose, background = background,
+ progress = progress).check
+ })
+}
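
For orientation, a rough sketch of the expected hand-over from the new tool wrapper to the extension: a typical invocation maps to run_cli arguments and from there to an ISABELLE_VSCODIUM_ARGS value along the following lines (illustrative only; the key order of the serialized JSON is not fixed):

  object VSCode_Args_Example {
    def main(args: Array[String]): Unit = {
      // "isabelle vscode -R HOL-Analysis -n -v" should roughly correspond to
      //   VSCode_Main.run_cli(Nil, logic = "HOL-Analysis", logic_requirements = true,
      //     no_build = true, verbose = true)
      // which hands the extension an environment entry such as
      println("""ISABELLE_VSCODIUM_ARGS={"logic":"HOL-Analysis","logic_requirements":true,"no_build":true,"verbose":true}""")
    }
  }
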
--- a/src/Tools/VSCode/src/vscode_setup.scala Wed Feb 23 08:43:44 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,66 +0,0 @@
-/* Title: Tools/VSCode/src/vscode_setup.scala
- Author: Makarius
-
-Provide user configuration for Isabelle/VSCode.
-*/
-
-package isabelle.vscode
-
-
-import isabelle._
-
-
-object VSCode_Setup
-{
- /* vscode setup */
-
- def vscode_settings_user: Path =
- Path.explode("$ISABELLE_VSCODE_SETTINGS/user-data/User/settings.json")
-
- private val init_settings = """ {
- "editor.fontFamily": "'Isabelle DejaVu Sans Mono'",
- "editor.fontSize": 18,
- "editor.lineNumbers": "off",
- "editor.renderIndentGuides": false,
- "editor.rulers": [80, 100],
- "editor.unicodeHighlight.ambiguousCharacters": false,
- "extensions.autoCheckUpdates": false,
- "extensions.autoUpdate": false,
- "terminal.integrated.fontFamily": "monospace",
- "update.mode": "none"
- }
-"""
-
- def vscode_setup(): Unit =
- {
- if (Isabelle_System.getenv("ISABELLE_VSCODIUM_ELECTRON").isEmpty) {
- error("""Undefined $ISABELLE_VSCODIUM_ELECTRON: missing "vscodium" component""")
- }
-
- if (!vscode_settings_user.is_file) {
- Isabelle_System.make_directory(vscode_settings_user.dir)
- File.write(vscode_settings_user, init_settings)
- }
- }
-
-
- /* Isabelle tool wrapper */
-
- val isabelle_tool =
- Isabelle_Tool("vscode_setup", "provide user configuration for Isabelle/VSCode",
- Scala_Project.here, args =>
- {
- val getopts = Getopts("""
-Usage: vscode_setup
-
- Provide user configuration for Isabelle/VSCode.
-
- The following initial settings are provided for a fresh installation:
-""" + init_settings)
-
- val more_args = getopts(args)
- if (more_args.nonEmpty) getopts.usage()
-
- vscode_setup()
- })
-}
--- a/src/Tools/jEdit/lib/Tools/jedit Wed Feb 23 08:43:44 2022 +0100
+++ b/src/Tools/jEdit/lib/Tools/jedit Wed Mar 23 10:54:22 2022 +0100
@@ -106,6 +106,7 @@
;;
l)
JEDIT_LOGIC="$OPTARG"
+ JEDIT_LOGIC_REQUIREMENTS="false"
;;
m)
if [ -z "$JEDIT_PRINT_MODE" ]; then
@@ -164,5 +165,5 @@
JEDIT_INCLUDE_SESSIONS JEDIT_PRINT_MODE JEDIT_NO_BUILD JEDIT_BUILD_MODE
export JEDIT_ML_PROCESS_POLICY="$ML_PROCESS_POLICY"
exec isabelle java -splash:"$(platform_path "$ISABELLE_HOME/lib/logo/isabelle.gif")" \
- "${JAVA_ARGS[@]}" isabelle.jedit.Main "${ARGS[@]}"
+ "${JAVA_ARGS[@]}" isabelle.jedit.JEdit_Main "${ARGS[@]}"
fi
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Tools/jEdit/src/jedit_main.scala Wed Mar 23 10:54:22 2022 +0100
@@ -0,0 +1,142 @@
+/* Title: src/Tools/jEdit/src/jedit_main.scala
+ Author: Makarius
+
+Main application entry point for Isabelle/jEdit.
+*/
+
+package isabelle.jedit
+
+
+import isabelle._
+
+import org.gjt.sp.jedit.{MiscUtilities, jEdit}
+
+
+object JEdit_Main
+{
+ /* main entry point */
+
+ def main(args: Array[String]): Unit =
+ {
+ if (args.nonEmpty && args(0) == "-init") {
+ Isabelle_System.init()
+ }
+ else {
+ val start =
+ {
+ try {
+ Isabelle_System.init()
+ Isabelle_Fonts.init()
+
+ GUI.init_lafs()
+
+
+ /* ROOTS template */
+
+ {
+ val roots = Path.explode("$ISABELLE_HOME_USER/ROOTS")
+ if (!roots.is_file) File.write(roots, """# Additional session root directories
+#
+# * each line contains one directory entry in Isabelle path notation, e.g.
+#
+# $ISABELLE_HOME/../AFP/thys
+#
+# for a copy of AFP put side-by-side to the Isabelle distribution
+#
+# * Isabelle/jEdit provides formal markup for C-hover-click and completion
+#
+# * lines starting with "#" are stripped
+#
+# * changes require restart of the Isabelle application
+#
+#:mode=text:encoding=UTF-8:
+
+#$ISABELLE_HOME/../AFP/thys
+""")
+ }
+
+
+ /* settings directory */
+
+ val settings_dir = Path.explode("$JEDIT_SETTINGS")
+
+ val properties = settings_dir + Path.explode("properties")
+ if (properties.is_file) {
+ val props1 = split_lines(File.read(properties))
+ val props2 = props1.filterNot(_.startsWith("plugin-blacklist.isabelle_jedit"))
+ if (props1 != props2) File.write(properties, cat_lines(props2))
+ }
+
+ Isabelle_System.make_directory(settings_dir + Path.explode("DockableWindowManager"))
+
+ if (!(settings_dir + Path.explode("perspective.xml")).is_file) {
+ File.write(settings_dir + Path.explode("DockableWindowManager/perspective-view0.xml"),
+ """<DOCKING LEFT="isabelle-documentation" TOP="" RIGHT="isabelle-theories" BOTTOM="" LEFT_POS="250" TOP_POS="0" RIGHT_POS="250" BOTTOM_POS="250" />""")
+ File.write(settings_dir + Path.explode("perspective.xml"),
+ XML.header +
+"""<!DOCTYPE PERSPECTIVE SYSTEM "perspective.dtd">
+<PERSPECTIVE>
+<VIEW PLAIN="FALSE">
+<GEOMETRY X="0" Y="35" WIDTH="1200" HEIGHT="850" EXT_STATE="0" />
+</VIEW>
+</PERSPECTIVE>""")
+ }
+
+ for (plugin <- Scala_Project.plugins) { plugin.context().build() }
+
+
+ /* args */
+
+ val jedit_settings =
+ "-settings=" + File.platform_path(Path.explode("$JEDIT_SETTINGS"))
+
+ val jedit_server =
+ System.getProperty("isabelle.jedit_server") match {
+ case null | "" => "-noserver"
+ case name => "-server=" + name
+ }
+
+ val jedit_options =
+ Isabelle_System.getenv_strict("JEDIT_OPTIONS").split(" +")
+
+ val more_args =
+ {
+ args.toList.dropWhile(arg => arg.startsWith("-") && arg != "--") match {
+ case Nil | List("--") =>
+ args ++ Array(File.platform_path(Path.explode("$USER_HOME/Scratch.thy")))
+ case List(":") => args.slice(0, args.length - 1)
+ case _ => args
+ }
+ }
+
+
+ /* environment */
+
+ for (name <- List("ISABELLE_HOME", "ISABELLE_HOME_USER", "JEDIT_HOME", "JEDIT_SETTINGS")) {
+ MiscUtilities.putenv(name, File.platform_path(Isabelle_System.getenv(name)))
+ }
+ MiscUtilities.putenv("ISABELLE_ROOT", null)
+
+
+ /* properties */
+
+ System.setProperty("jedit.home", File.platform_path(Path.explode("$JEDIT_HOME")))
+ System.setProperty("scala.home", File.platform_path(Path.explode("$SCALA_HOME")))
+ System.setProperty("scala.color", "false")
+
+
+ /* main startup */
+
+ () => jEdit.main(Array(jedit_settings, jedit_server) ++ jedit_options ++ more_args)
+ }
+ catch {
+ case exn: Throwable =>
+ GUI.init_laf()
+ GUI.dialog(null, "Isabelle", GUI.scrollable_text(Exn.message(exn)))
+ sys.exit(Process_Result.RC.failure)
+ }
+ }
+ start()
+ }
+ }
+}
--- a/src/Tools/jEdit/src/main.scala Wed Feb 23 08:43:44 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,142 +0,0 @@
-/* Title: src/Tools/jEdit/src/main.scala
- Author: Makarius
-
-Main Isabelle application entry point.
-*/
-
-package isabelle.jedit
-
-
-import isabelle._
-
-import org.gjt.sp.jedit.{MiscUtilities, jEdit}
-
-
-object Main
-{
- /* main entry point */
-
- def main(args: Array[String]): Unit =
- {
- if (args.nonEmpty && args(0) == "-init") {
- Isabelle_System.init()
- }
- else {
- val start =
- {
- try {
- Isabelle_System.init()
- Isabelle_Fonts.init()
-
- GUI.init_lafs()
-
-
- /* ROOTS template */
-
- {
- val roots = Path.explode("$ISABELLE_HOME_USER/ROOTS")
- if (!roots.is_file) File.write(roots, """# Additional session root directories
-#
-# * each line contains one directory entry in Isabelle path notation, e.g.
-#
-# $ISABELLE_HOME/../AFP/thys
-#
-# for a copy of AFP put side-by-side to the Isabelle distribution
-#
-# * Isabelle/jEdit provides formal markup for C-hover-click and completion
-#
-# * lines starting with "#" are stripped
-#
-# * changes require restart of the Isabelle application
-#
-#:mode=text:encoding=UTF-8:
-
-#$ISABELLE_HOME/../AFP/thys
-""")
- }
-
-
- /* settings directory */
-
- val settings_dir = Path.explode("$JEDIT_SETTINGS")
-
- val properties = settings_dir + Path.explode("properties")
- if (properties.is_file) {
- val props1 = split_lines(File.read(properties))
- val props2 = props1.filterNot(_.startsWith("plugin-blacklist.isabelle_jedit"))
- if (props1 != props2) File.write(properties, cat_lines(props2))
- }
-
- Isabelle_System.make_directory(settings_dir + Path.explode("DockableWindowManager"))
-
- if (!(settings_dir + Path.explode("perspective.xml")).is_file) {
- File.write(settings_dir + Path.explode("DockableWindowManager/perspective-view0.xml"),
- """<DOCKING LEFT="isabelle-documentation" TOP="" RIGHT="isabelle-theories" BOTTOM="" LEFT_POS="250" TOP_POS="0" RIGHT_POS="250" BOTTOM_POS="250" />""")
- File.write(settings_dir + Path.explode("perspective.xml"),
- XML.header +
-"""<!DOCTYPE PERSPECTIVE SYSTEM "perspective.dtd">
-<PERSPECTIVE>
-<VIEW PLAIN="FALSE">
-<GEOMETRY X="0" Y="35" WIDTH="1200" HEIGHT="850" EXT_STATE="0" />
-</VIEW>
-</PERSPECTIVE>""")
- }
-
- for (plugin <- Scala_Project.plugins) { plugin.context().build() }
-
-
- /* args */
-
- val jedit_settings =
- "-settings=" + File.platform_path(Path.explode("$JEDIT_SETTINGS"))
-
- val jedit_server =
- System.getProperty("isabelle.jedit_server") match {
- case null | "" => "-noserver"
- case name => "-server=" + name
- }
-
- val jedit_options =
- Isabelle_System.getenv_strict("JEDIT_OPTIONS").split(" +")
-
- val more_args =
- {
- args.toList.dropWhile(arg => arg.startsWith("-") && arg != "--") match {
- case Nil | List("--") =>
- args ++ Array(File.platform_path(Path.explode("$USER_HOME/Scratch.thy")))
- case List(":") => args.slice(0, args.length - 1)
- case _ => args
- }
- }
-
-
- /* environment */
-
- for (name <- List("ISABELLE_HOME", "ISABELLE_HOME_USER", "JEDIT_HOME", "JEDIT_SETTINGS")) {
- MiscUtilities.putenv(name, File.platform_path(Isabelle_System.getenv(name)))
- }
- MiscUtilities.putenv("ISABELLE_ROOT", null)
-
-
- /* properties */
-
- System.setProperty("jedit.home", File.platform_path(Path.explode("$JEDIT_HOME")))
- System.setProperty("scala.home", File.platform_path(Path.explode("$SCALA_HOME")))
- System.setProperty("scala.color", "false")
-
-
- /* main startup */
-
- () => jEdit.main(Array(jedit_settings, jedit_server) ++ jedit_options ++ more_args)
- }
- catch {
- case exn: Throwable =>
- GUI.init_laf()
- GUI.dialog(null, "Isabelle", GUI.scrollable_text(Exn.message(exn)))
- sys.exit(Process_Result.RC.failure)
- }
- }
- start()
- }
- }
-}