author     paulson
date       Wed, 15 Feb 2023 12:48:53 +0000
changeset  77274  05ad117ee3fb
parent     77269  bc43f86c9598 (diff)
parent     77273  f82317de6f28 (current diff)
child      77275  386b1b33785c
--- a/NEWS Wed Feb 15 12:46:12 2023 +0000 +++ b/NEWS Wed Feb 15 12:48:53 2023 +0000 @@ -43,6 +43,8 @@ *** HOL *** +* Map.map_of and lemmas moved to List. + * Theory "HOL.Euclidean_Division" renamed to "HOL.Euclidean_Rings"; "euclidean division" typically denotes a particular division on integers. Minor INCOMPATIBILITY. @@ -224,6 +226,15 @@ * Mirabelle: - Added session to output directory structure. Minor INCOMPATIBILITY. +* Metis: + - Made clausifier more robust in the face of nested lambdas. + Minor INCOMPATIBILITY. + +* Sledgehammer: + - Added refutational mode to find likely unprovable conjectures. It is + enabled by default in addition to the usual proving mode and can be + enabled or disabled using the 'refute' option. + *** ML ***
--- a/src/Doc/Main/Main_Doc.thy Wed Feb 15 12:46:12 2023 +0000 +++ b/src/Doc/Main/Main_Doc.thy Wed Feb 15 12:48:53 2023 +0000 @@ -543,6 +543,7 @@ \<^const>\<open>List.list_all2\<close> & \<^typeof>\<open>List.list_all2\<close>\\ \<^const>\<open>List.list_update\<close> & \<^typeof>\<open>List.list_update\<close>\\ \<^const>\<open>List.map\<close> & \<^typeof>\<open>List.map\<close>\\ +\<^const>\<open>List.map_of\<close> & \<^typeof>\<open>List.map_of\<close>\\ \<^const>\<open>List.measures\<close> & @{term_type_only List.measures "('a\<Rightarrow>nat)list\<Rightarrow>('a*'a)set"}\\ \<^const>\<open>List.nth\<close> & \<^typeof>\<open>List.nth\<close>\\ \<^const>\<open>List.nths\<close> & \<^typeof>\<open>List.nths\<close>\\ @@ -585,32 +586,31 @@ qualifier \<open>q\<^sub>i\<close> is either a generator \mbox{\<open>pat \<leftarrow> e\<close>} or a guard, i.e.\ boolean expression. -\section*{Map} - -Maps model partial functions and are often used as finite tables. However, -the domain of a map may be infinite. - -\begin{tabular}{@ {} l @ {~::~} l @ {}} -\<^const>\<open>Map.empty\<close> & \<^typeof>\<open>Map.empty\<close>\\ -\<^const>\<open>Map.map_add\<close> & \<^typeof>\<open>Map.map_add\<close>\\ -\<^const>\<open>Map.map_comp\<close> & \<^typeof>\<open>Map.map_comp\<close>\\ -\<^const>\<open>Map.restrict_map\<close> & @{term_type_only Map.restrict_map "('a\<Rightarrow>'b option)\<Rightarrow>'a set\<Rightarrow>('a\<Rightarrow>'b option)"}\\ -\<^const>\<open>Map.dom\<close> & @{term_type_only Map.dom "('a\<Rightarrow>'b option)\<Rightarrow>'a set"}\\ -\<^const>\<open>Map.ran\<close> & @{term_type_only Map.ran "('a\<Rightarrow>'b option)\<Rightarrow>'b set"}\\ -\<^const>\<open>Map.map_le\<close> & \<^typeof>\<open>Map.map_le\<close>\\ -\<^const>\<open>Map.map_of\<close> & \<^typeof>\<open>Map.map_of\<close>\\ -\<^const>\<open>Map.map_upds\<close> & \<^typeof>\<open>Map.map_upds\<close>\\ -\end{tabular} - -\subsubsection*{Syntax} - -\begin{tabular}{@ {} l @ {\quad$\equiv$\quad} l @ {}} -\<^term>\<open>Map.empty\<close> & @{term[source] "\<lambda>_. None"}\\ -\<^term>\<open>m(x:=Some y)\<close> & @{term[source]"m(x:=Some y)"}\\ -\<open>m(x\<^sub>1\<mapsto>y\<^sub>1,\<dots>,x\<^sub>n\<mapsto>y\<^sub>n)\<close> & @{text[source]"m(x\<^sub>1\<mapsto>y\<^sub>1)\<dots>(x\<^sub>n\<mapsto>y\<^sub>n)"}\\ -\<open>[x\<^sub>1\<mapsto>y\<^sub>1,\<dots>,x\<^sub>n\<mapsto>y\<^sub>n]\<close> & @{text[source]"Map.empty(x\<^sub>1\<mapsto>y\<^sub>1,\<dots>,x\<^sub>n\<mapsto>y\<^sub>n)"}\\ -\<^term>\<open>map_upds m xs ys\<close> & @{term[source]"map_upds m xs ys"}\\ -\end{tabular} +%\section*{Map} +% +%Maps model partial functions and are often used as finite tables. However, +%the domain of a map may be infinite. 
+% +%\begin{tabular}{@ {} l @ {~::~} l @ {}} +%\<const>\<open>Map.empty\<close> & \<typeof>\<open>Map.empty\<close>\\ +%\<const>\<open>Map.map_add\<close> & \<typeof>\<open>Map.map_add\<close>\\ +%\<const>\<open>Map.map_comp\<close> & \<typeof>\<open>Map.map_comp\<close>\\ +%\<const>\<open>Map.restrict_map\<close> & @ {term_type_only Map.restrict_map "('a\<Rightarrow>'b option)\<Rightarrow>'a set\<Rightarrow>('a\<Rightarrow>'b option)"}\\ +%\<const>\<open>Map.dom\<close> & @{term_type_only Map.dom "('a\<Rightarrow>'b option)\<Rightarrow>'a set"}\\ +%\<const>\<open>Map.ran\<close> & @{term_type_only Map.ran "('a\<Rightarrow>'b option)\<Rightarrow>'b set"}\\ +%\<const>\<open>Map.map_le\<close> & \<typeof>\<open>Map.map_le\<close>\\ +%\<const>\<open>Map.map_upds\<close> & \<typeof>\<open>Map.map_upds\<close>\\ +%\end{tabular} +% +%\subsubsection*{Syntax} +% +%\begin{tabular}{@ {} l @ {\quad$\equiv$\quad} l @ {}} +%\<term>\<open>Map.empty\<close> & @ {term[source] "\<lambda>_. None"}\\ +%\<term>\<open>m(x:=Some y)\<close> & @ {term[source]"m(x:=Some y)"}\\ +%m(x1\<mapsto>y1,\<dots>,xn\<mapsto>yn) & @ {text[source]"m(x1\<mapsto>y1)\<dots>(xn\<mapsto>yn)"}\\ +%[x1\<mapsto>y1,\<dots>,xn\<mapsto>yn] & @ {text[source]"Map.empty(x1\<mapsto>y1,\<dots>,xn\<mapsto>yn)"}\\ +%\<term>\<open>map_upds m xs ys\<close> & @ {term[source]"map_upds m xs ys"}\\ +%\end{tabular} \section*{Infix operators in Main} % \<^theory>\<open>Main\<close>
--- a/src/Doc/Sledgehammer/document/root.tex Wed Feb 15 12:46:12 2023 +0000 +++ b/src/Doc/Sledgehammer/document/root.tex Wed Feb 15 12:48:53 2023 +0000 @@ -99,9 +99,10 @@ \label{introduction} Sledgehammer is a tool that applies automatic theorem provers (ATPs) -and satisfiability-modulo-theories (SMT) solvers on the current goal.% -\footnote{The distinction between ATPs and SMT solvers is convenient but mostly -historical.} +and satisfiability-modulo-theories (SMT) solvers on the current goal, mostly +to find proofs but also to find inconsistencies.% +\footnote{The distinction between ATPs and SMT solvers is mostly +historical but convenient.} % The supported ATPs include agsyHOL \cite{agsyHOL}, Alt-Ergo \cite{alt-ergo}, E \cite{schulz-2019}, iProver \cite{korovin-2009}, LEO-II \cite{leo2}, Leo-III @@ -113,9 +114,9 @@ \cite{bouton-et-al-2009}, and Z3 \cite{de-moura-2008}. These are always run locally. -The problem passed to the external provers (or solvers) consists of your current -goal together with a heuristic selection of hundreds of facts (theorems) from the -current theory context, filtered by relevance. +The problem passed to the automatic provers (or solvers) consists of your +current goal together with a heuristic selection of hundreds of facts (theorems) +from the current theory context, filtered by relevance. The result of a successful proof search is some source text that typically reconstructs the proof within Isabelle. For ATPs, the reconstructed proof @@ -123,6 +124,10 @@ integrates the Metis ATP in Isabelle/HOL with explicit inferences going through the kernel. Thus its results are correct by construction. +Sometimes the automatic provers might detect that the goal is inconsistent with +the background facts---or even that the background facts are inconsistent +regardless of the goal. + For Isabelle/jEdit users, Sledgehammer provides an automatic mode that can be enabled via the ``Auto Sledgehammer'' option under ``Plugins > Plugin Options > Isabelle > General.'' In this mode, a reduced version of Sledgehammer is run on @@ -162,9 +167,9 @@ already include properly set up executables for CVC4, cvc5, E, SPASS, Vampire, veriT, Z3, and Zipperposition ready to use. -\item[\labelitemi] Alternatively, you can download the Isabelle-aware CVC4, E, -SPASS, Vampire, veriT, Z3, and Zipperposition binary packages from \download. -Extract the archives, then add a line to your +\item[\labelitemi] Alternatively, you can download the Isabelle-aware CVC4, +cvc5, E, SPASS, Vampire, veriT, Z3, and Zipperposition binary packages from +\download. Extract the archives, then add a line to your +\texttt{\$ISABELLE\_HOME\_USER\slash etc\slash components}% \footnote{The variable \texttt{\$ISABELLE\_HOME\_USER} is set by Isabelle at startup. Its value can be retrieved by executing \texttt{isabelle} @@ -225,8 +230,8 @@ \postw Instead of issuing the \textbf{sledgehammer} command, you can also use the -Sledgehammer panel in Isabelle/jEdit. Sledgehammer might produce something like -the following output after a few seconds: +Sledgehammer panel in Isabelle/jEdit. Either way, Sledgehammer will produce +something like the following output after a few seconds: \prew \slshape @@ -238,7 +243,7 @@ cvc4: Try this: \textbf{by} \textit{simp} (0.4 ms) \\ z3: Try this: \textbf{by} \textit{simp} (0.5 ms) \\ vampire: Try this: \textbf{by} \textit{simp} (0.3 ms) \\ -QED +Done \postw Sledgehammer ran CVC4, E, Vampire, Z3, and possibly other provers in parallel.
@@ -260,6 +265,23 @@ readable and also faster than \textit{metis} or \textit{smt} one-line proofs. This feature is experimental. +For goals that are inconsistent with the background theory (and hence unprovable +unless the theory is itself inconsistent), such as + +\prew +\textbf{lemma} ``$\mathit{length}\; (\mathit{zip}\; \mathit{xs}\; \mathit{ys}) = \mathit{length}\; \mathit{xs}$'' \\ +\textbf{sledgehammer} +\postw + +Sledgehammer's output might look as follows: + +\prew +\slshape +vampire found an inconsistency\ldots \\ +vampire: The goal is inconsistent with these facts: append\_Cons, nth\_append\_length, self\_append\_conv2, zip\_eq\_Nil\_iff \\ +Done +\postw + \section{Hints} \label{hints} @@ -791,6 +813,16 @@ \opnodefault{prover}{string} Alias for \textit{provers}. +\opsmart{check\_consistent}{dont\_check\_consistent} +Specifies whether Sledgehammer should look for inconsistencies or for proofs. By +default, it looks for both proofs and inconsistencies. + +An inconsistency indicates that the goal, if it were added as an axiom, would +contradict some specific background facts, suggesting a likely issue with the +goal. Sometimes the inconsistency involves only the background theory; this +might happen, for example, if an axiom is used or if a lemma was introduced +with \textbf{sorry}. + \optrue{minimize}{dont\_minimize} Specifies whether the proof minimization tool should be invoked automatically after proof search.
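To make the new option concrete, a minimal usage sketch (illustrative only; the command names come from the documentation above, but the exact semantics of the three-valued option are assumed from its description, and the prover output will vary):

    lemma "length (zip xs ys) = length xs"
      sledgehammer                            (* default: look for both proofs and inconsistencies *)
      sledgehammer [check_consistent = true]  (* assumed: look for inconsistencies only *)
      sledgehammer [dont_check_consistent]    (* assumed: look for proofs only *)
      oops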
--- a/src/HOL/Data_Structures/Trie_Map.thy Wed Feb 15 12:46:12 2023 +0000 +++ b/src/HOL/Data_Structures/Trie_Map.thy Wed Feb 15 12:48:53 2023 +0000 @@ -6,14 +6,31 @@ Trie_Fun begin -text \<open>An implementation of tries based on maps implemented by red-black trees. -Works for any kind of search tree.\<close> +text \<open>An implementation of tries for an arbitrary alphabet \<open>'a\<close> where +the mapping from an element of type \<open>'a\<close> to the sub-trie is implemented by a binary search tree. +Although this implementation uses maps implemented by red-black trees, it works for any +implementation of maps. + +This is an implementation of ``ternary search trees'' by Bentley and Sedgewick +[SODA 1997, Dr. Dobbs 1998]. The name derives from the fact that a node in the BST can now +be drawn to have 3 children, where the middle child is the sub-trie that the node maps +its key to. Hence the name \<open>trie3\<close>. + +Example from @{url "https://en.wikipedia.org/wiki/Ternary_search_tree#Description"}: -text \<open>Implementation of map:\<close> + c + / | \ + a u h + | | | \ + t. t e. u + / / | / | + s. p. e. i. s. -type_synonym 'a mapi = "'a rbt" +Characters with a dot are final. +Thus the tree represents the set of strings "cute", "cup", "at", "as", "he", "us" and "i". +\<close> -datatype 'a trie_map = Nd bool "('a * 'a trie_map) mapi" +datatype 'a trie3 = Nd3 bool "('a * 'a trie3) rbt" text \<open>In principle one should be able to given an implementation of tries once and for all for any map implementation and not just for a specific one (RBT) as done here. @@ -21,89 +38,90 @@ However, the development below works verbatim for any map implementation, eg \<open>Tree_Map\<close>, and not just \<open>RBT_Map\<close>, except for the termination lemma \<open>lookup_size\<close>.\<close> + term size_tree lemma lookup_size[termination_simp]: - fixes t :: "('a::linorder * 'a trie_map) rbt" + fixes t :: "('a::linorder * 'a trie3) rbt" shows "lookup t a = Some b \<Longrightarrow> size b < Suc (size_tree (\<lambda>ab.
Suc(Suc (size (snd(fst ab))))) t)" apply(induction t a rule: lookup.induct) apply(auto split: if_splits) done -definition empty :: "'a trie_map" where -[simp]: "empty = Nd False Leaf" +definition empty3 :: "'a trie3" where +[simp]: "empty3 = Nd3 False Leaf" -fun isin :: "('a::linorder) trie_map \<Rightarrow> 'a list \<Rightarrow> bool" where -"isin (Nd b m) [] = b" | -"isin (Nd b m) (x # xs) = (case lookup m x of None \<Rightarrow> False | Some t \<Rightarrow> isin t xs)" +fun isin3 :: "('a::linorder) trie3 \<Rightarrow> 'a list \<Rightarrow> bool" where +"isin3 (Nd3 b m) [] = b" | +"isin3 (Nd3 b m) (x # xs) = (case lookup m x of None \<Rightarrow> False | Some t \<Rightarrow> isin3 t xs)" -fun insert :: "('a::linorder) list \<Rightarrow> 'a trie_map \<Rightarrow> 'a trie_map" where -"insert [] (Nd b m) = Nd True m" | -"insert (x#xs) (Nd b m) = - Nd b (update x (insert xs (case lookup m x of None \<Rightarrow> empty | Some t \<Rightarrow> t)) m)" +fun insert3 :: "('a::linorder) list \<Rightarrow> 'a trie3 \<Rightarrow> 'a trie3" where +"insert3 [] (Nd3 b m) = Nd3 True m" | +"insert3 (x#xs) (Nd3 b m) = + Nd3 b (update x (insert3 xs (case lookup m x of None \<Rightarrow> empty3 | Some t \<Rightarrow> t)) m)" -fun delete :: "('a::linorder) list \<Rightarrow> 'a trie_map \<Rightarrow> 'a trie_map" where -"delete [] (Nd b m) = Nd False m" | -"delete (x#xs) (Nd b m) = Nd b +fun delete3 :: "('a::linorder) list \<Rightarrow> 'a trie3 \<Rightarrow> 'a trie3" where +"delete3 [] (Nd3 b m) = Nd3 False m" | +"delete3 (x#xs) (Nd3 b m) = Nd3 b (case lookup m x of None \<Rightarrow> m | - Some t \<Rightarrow> update x (delete xs t) m)" + Some t \<Rightarrow> update x (delete3 xs t) m)" subsection "Correctness" -text \<open>Proof by stepwise refinement. First abstract to type @{typ "'a trie"}.\<close> +text \<open>Proof by stepwise refinement. First abs3tract to type @{typ "'a trie"}.\<close> -fun abs :: "'a::linorder trie_map \<Rightarrow> 'a trie" where -"abs (Nd b t) = Trie_Fun.Nd b (\<lambda>a. map_option abs (lookup t a))" +fun abs3 :: "'a::linorder trie3 \<Rightarrow> 'a trie" where +"abs3 (Nd3 b t) = Nd b (\<lambda>a. map_option abs3 (lookup t a))" -fun invar :: "('a::linorder)trie_map \<Rightarrow> bool" where -"invar (Nd b m) = (M.invar m \<and> (\<forall>a t. lookup m a = Some t \<longrightarrow> invar t))" +fun invar3 :: "('a::linorder)trie3 \<Rightarrow> bool" where +"invar3 (Nd3 b m) = (M.invar m \<and> (\<forall>a t. 
lookup m a = Some t \<longrightarrow> invar3 t))" -lemma isin_abs: "isin t xs = Trie_Fun.isin (abs t) xs" -apply(induction t xs rule: isin.induct) +lemma isin_abs3: "isin3 t xs = isin (abs3 t) xs" +apply(induction t xs rule: isin3.induct) apply(auto split: option.split) done -lemma abs_insert: "invar t \<Longrightarrow> abs(insert xs t) = Trie_Fun.insert xs (abs t)" -apply(induction xs t rule: insert.induct) +lemma abs3_insert3: "invar3 t \<Longrightarrow> abs3(insert3 xs t) = insert xs (abs3 t)" +apply(induction xs t rule: insert3.induct) apply(auto simp: M.map_specs RBT_Set.empty_def[symmetric] split: option.split) done -lemma abs_delete: "invar t \<Longrightarrow> abs(delete xs t) = Trie_Fun.delete xs (abs t)" -apply(induction xs t rule: delete.induct) +lemma abs3_delete3: "invar3 t \<Longrightarrow> abs3(delete3 xs t) = delete xs (abs3 t)" +apply(induction xs t rule: delete3.induct) apply(auto simp: M.map_specs split: option.split) done -lemma invar_insert: "invar t \<Longrightarrow> invar (insert xs t)" -apply(induction xs t rule: insert.induct) +lemma invar3_insert3: "invar3 t \<Longrightarrow> invar3 (insert3 xs t)" +apply(induction xs t rule: insert3.induct) apply(auto simp: M.map_specs RBT_Set.empty_def[symmetric] split: option.split) done -lemma invar_delete: "invar t \<Longrightarrow> invar (delete xs t)" -apply(induction xs t rule: delete.induct) +lemma invar3_delete3: "invar3 t \<Longrightarrow> invar3 (delete3 xs t)" +apply(induction xs t rule: delete3.induct) apply(auto simp: M.map_specs split: option.split) done text \<open>Overall correctness w.r.t. the \<open>Set\<close> ADT:\<close> interpretation S2: Set -where empty = empty and isin = isin and insert = insert and delete = delete -and set = "set o abs" and invar = invar +where empty = empty3 and isin = isin3 and insert = insert3 and delete = delete3 +and set = "set o abs3" and invar = invar3 proof (standard, goal_cases) case 1 show ?case by (simp add: isin_case split: list.split) next - case 2 thus ?case by (simp add: isin_abs) + case 2 thus ?case by (simp add: isin_abs3) next - case 3 thus ?case by (simp add: set_insert abs_insert del: set_def) + case 3 thus ?case by (simp add: set_insert abs3_insert3 del: set_def) next - case 4 thus ?case by (simp add: set_delete abs_delete del: set_def) + case 4 thus ?case by (simp add: set_delete abs3_delete3 del: set_def) next case 5 thus ?case by (simp add: M.map_specs RBT_Set.empty_def[symmetric]) next - case 6 thus ?case by (simp add: invar_insert) + case 6 thus ?case by (simp add: invar3_insert3) next - case 7 thus ?case by (simp add: invar_delete) + case 7 thus ?case by (simp add: invar3_delete3) qed
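As a quick sanity check of the renamed trie3 interface, a hypothetical usage sketch (not part of the changeset; the name demo3 is invented, and it assumes the underlying RBT operations are executable, as plain fun definitions normally are):

    definition demo3 :: "char trie3" where
      "demo3 = insert3 ''as'' (insert3 ''at'' empty3)"

    lemma "isin3 demo3 ''as'' \<and> \<not> isin3 demo3 ''cup''"
      by eval  (* assumes code generation works for lookup/update on rbt *)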
--- a/src/HOL/List.thy Wed Feb 15 12:46:12 2023 +0000 +++ b/src/HOL/List.thy Wed Feb 15 12:48:53 2023 +0000 @@ -243,6 +243,11 @@ abbreviation length :: "'a list \<Rightarrow> nat" where "length \<equiv> size" +primrec map_of :: "('a \<times> 'b) list \<Rightarrow> 'a \<Rightarrow> 'b option" +where + "map_of [] = (\<lambda>x. None)" +| "map_of (p # ps) = (map_of ps)(fst p := Some(snd p))" + definition enumerate :: "nat \<Rightarrow> 'a list \<Rightarrow> (nat \<times> 'a) list" where enumerate_eq_zip: "enumerate n xs = zip [n..<n + length xs] xs" @@ -4681,6 +4686,126 @@ by (subst foldr_fold [symmetric]) simp_all +subsubsection \<open>\<^const>\<open>map_of\<close>\<close> + +lemma map_of_eq_None_iff: + "(map_of xys x = None) = (x \<notin> fst ` (set xys))" +by (induct xys) simp_all + +lemma map_of_eq_Some_iff [simp]: + "distinct(map fst xys) \<Longrightarrow> (map_of xys x = Some y) = ((x,y) \<in> set xys)" +proof (induct xys) + case (Cons xy xys) + then show ?case + by (cases xy) (auto simp flip: map_of_eq_None_iff) +qed auto + +lemma Some_eq_map_of_iff [simp]: + "distinct(map fst xys) \<Longrightarrow> (Some y = map_of xys x) = ((x,y) \<in> set xys)" +by (auto simp del: map_of_eq_Some_iff simp: map_of_eq_Some_iff [symmetric]) + +lemma map_of_is_SomeI [simp]: + "\<lbrakk>distinct(map fst xys); (x,y) \<in> set xys\<rbrakk> \<Longrightarrow> map_of xys x = Some y" + by simp + +lemma map_of_zip_is_None [simp]: + "length xs = length ys \<Longrightarrow> (map_of (zip xs ys) x = None) = (x \<notin> set xs)" +by (induct rule: list_induct2) simp_all + +lemma map_of_zip_is_Some: + assumes "length xs = length ys" + shows "x \<in> set xs \<longleftrightarrow> (\<exists>y. map_of (zip xs ys) x = Some y)" +using assms by (induct rule: list_induct2) simp_all + +lemma map_of_zip_upd: + fixes x :: 'a and xs :: "'a list" and ys zs :: "'b list" + assumes "length ys = length xs" + and "length zs = length xs" + and "x \<notin> set xs" + and "(map_of (zip xs ys))(x := Some y) = (map_of (zip xs zs))(x := Some z)" + shows "map_of (zip xs ys) = map_of (zip xs zs)" +proof + fix x' :: 'a + show "map_of (zip xs ys) x' = map_of (zip xs zs) x'" + proof (cases "x = x'") + case True + from assms True map_of_zip_is_None [of xs ys x'] + have "map_of (zip xs ys) x' = None" by simp + moreover from assms True map_of_zip_is_None [of xs zs x'] + have "map_of (zip xs zs) x' = None" by simp + ultimately show ?thesis by simp + next + case False from assms + have "((map_of (zip xs ys))(x := Some y)) x' = ((map_of (zip xs zs))(x := Some z)) x'" by auto + with False show ?thesis by simp + qed +qed + +lemma map_of_zip_inject: + assumes "length ys = length xs" + and "length zs = length xs" + and dist: "distinct xs" + and map_of: "map_of (zip xs ys) = map_of (zip xs zs)" + shows "ys = zs" + using assms(1) assms(2)[symmetric] + using dist map_of +proof (induct ys xs zs rule: list_induct3) + case Nil show ?case by simp +next + case (Cons y ys x xs z zs) + from \<open>map_of (zip (x#xs) (y#ys)) = map_of (zip (x#xs) (z#zs))\<close> + have map_of: "(map_of (zip xs ys))(x := Some y) = (map_of (zip xs zs))(x := Some z)" by simp + from Cons have "length ys = length xs" and "length zs = length xs" + and "x \<notin> set xs" by simp_all + then have "map_of (zip xs ys) = map_of (zip xs zs)" using map_of by (rule map_of_zip_upd) + with Cons.hyps \<open>distinct (x # xs)\<close> have "ys = zs" by simp + moreover have "y = z" using fun_upd_eqD[OF map_of] by simp + ultimately show ?case by simp +qed + +lemma map_of_zip_nth: + assumes "length xs = 
length ys" + assumes "distinct xs" + assumes "i < length ys" + shows "map_of (zip xs ys) (xs ! i) = Some (ys ! i)" +using assms proof (induct arbitrary: i rule: list_induct2) + case Nil + then show ?case by simp +next + case (Cons x xs y ys) + then show ?case + using less_Suc_eq_0_disj by auto +qed + +lemma map_of_zip_map: + "map_of (zip xs (map f xs)) = (\<lambda>x. if x \<in> set xs then Some (f x) else None)" + by (induct xs) (simp_all add: fun_eq_iff) + +lemma map_of_SomeD: "map_of xs k = Some y \<Longrightarrow> (k, y) \<in> set xs" + by (induct xs) (auto split: if_splits) + +lemma map_of_mapk_SomeI: + "inj f \<Longrightarrow> map_of t k = Some x \<Longrightarrow> + map_of (map (case_prod (\<lambda>k. Pair (f k))) t) (f k) = Some x" +by (induct t) (auto simp: inj_eq) + +lemma weak_map_of_SomeI: "(k, x) \<in> set l \<Longrightarrow> \<exists>x. map_of l k = Some x" +by (induct l) auto + +lemma map_of_filter_in: + "map_of xs k = Some z \<Longrightarrow> P k z \<Longrightarrow> map_of (filter (case_prod P) xs) k = Some z" +by (induct xs) auto + +lemma map_of_map: + "map_of (map (\<lambda>(k, v). (k, f v)) xs) = map_option f \<circ> map_of xs" + by (induct xs) (auto simp: fun_eq_iff) + +lemma map_of_Cons_code [code]: + "map_of [] k = None" + "map_of ((l, v) # ps) k = (if l = k then Some v else map_of ps k)" + by simp_all + + subsubsection \<open>\<^const>\<open>enumerate\<close>\<close> lemma enumerate_simps [simp, code]:
--- a/src/HOL/Map.thy Wed Feb 15 12:46:12 2023 +0000 +++ b/src/HOL/Map.thy Wed Feb 15 12:48:53 2023 +0000 @@ -70,21 +70,11 @@ "_Map (_Maplets ms1 ms2)" \<leftharpoondown> "_MapUpd (_Map ms1) ms2" "_Maplets ms1 (_Maplets ms2 ms3)" \<leftharpoondown> "_Maplets (_Maplets ms1 ms2) ms3" -primrec map_of :: "('a \<times> 'b) list \<Rightarrow> 'a \<rightharpoonup> 'b" -where - "map_of [] = empty" -| "map_of (p # ps) = (map_of ps)(fst p \<mapsto> snd p)" - definition map_upds :: "('a \<rightharpoonup> 'b) \<Rightarrow> 'a list \<Rightarrow> 'b list \<Rightarrow> 'a \<rightharpoonup> 'b" where "map_upds m xs ys = m ++ map_of (rev (zip xs ys))" translations "_MapUpd m (_maplets x y)" \<rightleftharpoons> "CONST map_upds m x y" -lemma map_of_Cons_code [code]: - "map_of [] k = None" - "map_of ((l, v) # ps) k = (if l = k then Some v else map_of ps k)" - by simp_all - subsection \<open>@{term [source] empty}\<close> @@ -142,99 +132,6 @@ "empty = map_of xys \<longleftrightarrow> xys = []" by(subst eq_commute) simp -lemma map_of_eq_None_iff: - "(map_of xys x = None) = (x \<notin> fst ` (set xys))" -by (induct xys) simp_all - -lemma map_of_eq_Some_iff [simp]: - "distinct(map fst xys) \<Longrightarrow> (map_of xys x = Some y) = ((x,y) \<in> set xys)" -proof (induct xys) - case (Cons xy xys) - then show ?case - by (cases xy) (auto simp flip: map_of_eq_None_iff) -qed auto - -lemma Some_eq_map_of_iff [simp]: - "distinct(map fst xys) \<Longrightarrow> (Some y = map_of xys x) = ((x,y) \<in> set xys)" -by (auto simp del: map_of_eq_Some_iff simp: map_of_eq_Some_iff [symmetric]) - -lemma map_of_is_SomeI [simp]: - "\<lbrakk>distinct(map fst xys); (x,y) \<in> set xys\<rbrakk> \<Longrightarrow> map_of xys x = Some y" - by simp - -lemma map_of_zip_is_None [simp]: - "length xs = length ys \<Longrightarrow> (map_of (zip xs ys) x = None) = (x \<notin> set xs)" -by (induct rule: list_induct2) simp_all - -lemma map_of_zip_is_Some: - assumes "length xs = length ys" - shows "x \<in> set xs \<longleftrightarrow> (\<exists>y. 
map_of (zip xs ys) x = Some y)" -using assms by (induct rule: list_induct2) simp_all - -lemma map_of_zip_upd: - fixes x :: 'a and xs :: "'a list" and ys zs :: "'b list" - assumes "length ys = length xs" - and "length zs = length xs" - and "x \<notin> set xs" - and "map_of (zip xs ys)(x \<mapsto> y) = map_of (zip xs zs)(x \<mapsto> z)" - shows "map_of (zip xs ys) = map_of (zip xs zs)" -proof - fix x' :: 'a - show "map_of (zip xs ys) x' = map_of (zip xs zs) x'" - proof (cases "x = x'") - case True - from assms True map_of_zip_is_None [of xs ys x'] - have "map_of (zip xs ys) x' = None" by simp - moreover from assms True map_of_zip_is_None [of xs zs x'] - have "map_of (zip xs zs) x' = None" by simp - ultimately show ?thesis by simp - next - case False from assms - have "(map_of (zip xs ys)(x \<mapsto> y)) x' = (map_of (zip xs zs)(x \<mapsto> z)) x'" by auto - with False show ?thesis by simp - qed -qed - -lemma map_of_zip_inject: - assumes "length ys = length xs" - and "length zs = length xs" - and dist: "distinct xs" - and map_of: "map_of (zip xs ys) = map_of (zip xs zs)" - shows "ys = zs" - using assms(1) assms(2)[symmetric] - using dist map_of -proof (induct ys xs zs rule: list_induct3) - case Nil show ?case by simp -next - case (Cons y ys x xs z zs) - from \<open>map_of (zip (x#xs) (y#ys)) = map_of (zip (x#xs) (z#zs))\<close> - have map_of: "map_of (zip xs ys)(x \<mapsto> y) = map_of (zip xs zs)(x \<mapsto> z)" by simp - from Cons have "length ys = length xs" and "length zs = length xs" - and "x \<notin> set xs" by simp_all - then have "map_of (zip xs ys) = map_of (zip xs zs)" using map_of by (rule map_of_zip_upd) - with Cons.hyps \<open>distinct (x # xs)\<close> have "ys = zs" by simp - moreover from map_of have "y = z" by (rule map_upd_eqD1) - ultimately show ?case by simp -qed - -lemma map_of_zip_nth: - assumes "length xs = length ys" - assumes "distinct xs" - assumes "i < length ys" - shows "map_of (zip xs ys) (xs ! i) = Some (ys ! i)" -using assms proof (induct arbitrary: i rule: list_induct2) - case Nil - then show ?case by simp -next - case (Cons x xs y ys) - then show ?case - using less_Suc_eq_0_disj by auto -qed - -lemma map_of_zip_map: - "map_of (zip xs (map f xs)) = (\<lambda>x. if x \<in> set xs then Some (f x) else None)" - by (induct xs) (simp_all add: fun_eq_iff) - lemma finite_range_map_of: "finite (range (map_of xys))" proof (induct xys) case (Cons a xys) @@ -242,25 +139,6 @@ using finite_range_updI by fastforce qed auto -lemma map_of_SomeD: "map_of xs k = Some y \<Longrightarrow> (k, y) \<in> set xs" - by (induct xs) (auto split: if_splits) - -lemma map_of_mapk_SomeI: - "inj f \<Longrightarrow> map_of t k = Some x \<Longrightarrow> - map_of (map (case_prod (\<lambda>k. Pair (f k))) t) (f k) = Some x" -by (induct t) (auto simp: inj_eq) - -lemma weak_map_of_SomeI: "(k, x) \<in> set l \<Longrightarrow> \<exists>x. map_of l k = Some x" -by (induct l) auto - -lemma map_of_filter_in: - "map_of xs k = Some z \<Longrightarrow> P k z \<Longrightarrow> map_of (filter (case_prod P) xs) k = Some z" -by (induct xs) auto - -lemma map_of_map: - "map_of (map (\<lambda>(k, v). (k, f v)) xs) = map_option f \<circ> map_of xs" - by (induct xs) (auto simp: fun_eq_iff) - lemma dom_map_option: "dom (\<lambda>k. map_option (f k) (m k)) = dom m" by (simp add: dom_def)
--- a/src/HOL/Metis.thy Wed Feb 15 12:46:12 2023 +0000 +++ b/src/HOL/Metis.thy Wed Feb 15 12:48:53 2023 +0000 @@ -27,7 +27,8 @@ lemma not_atomize_select: "(\<not> A \<Longrightarrow> select False) \<equiv> Trueprop A" unfolding select_def by (rule not_atomize) -lemma select_FalseI: "False \<Longrightarrow> select False" by simp +lemma select_FalseI: "False \<Longrightarrow> select False" +by simp definition lambda :: "'a \<Rightarrow> 'a" where "lambda = (\<lambda>x. x)"
--- a/src/HOL/Tools/Meson/meson_clausify.ML Wed Feb 15 12:46:12 2023 +0000 +++ b/src/HOL/Tools/Meson/meson_clausify.ML Wed Feb 15 12:48:53 2023 +0000 @@ -54,8 +54,21 @@ let val T = fastype_of t in \<^Const>\<open>Meson.skolem T for t\<close> end +fun eta_expand_All_Ex_arg ((cst as (Const _)) $ Abs (s, T, t')) = + cst $ Abs (s, T, eta_expand_All_Ex_arg t') + | eta_expand_All_Ex_arg ((cst as (Const (cst_s, cst_T))) $ t') = + if member (op =) [\<^const_name>\<open>All\<close>, \<^const_name>\<open>Ex\<close>] cst_s then + cst $ Abs (Name.uu, domain_type (domain_type cst_T), + incr_boundvars 1 (eta_expand_All_Ex_arg t') $ Bound 0) + else + cst $ eta_expand_All_Ex_arg t' + | eta_expand_All_Ex_arg (Abs (s, T, t')) = Abs (s, T, eta_expand_All_Ex_arg t') + | eta_expand_All_Ex_arg (t1 $ t2) = + eta_expand_All_Ex_arg t1 $ eta_expand_All_Ex_arg t2 + | eta_expand_All_Ex_arg t = t + fun beta_eta_in_abs_body (Abs (s, T, t')) = Abs (s, T, beta_eta_in_abs_body t') - | beta_eta_in_abs_body t = Envir.beta_eta_contract t + | beta_eta_in_abs_body t = eta_expand_All_Ex_arg (Envir.beta_eta_contract t) (*Traverse a theorem, accumulating Skolem function definitions.*) fun old_skolem_defs th =
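The intended effect of the new eta_expand_All_Ex_arg, as far as the code above suggests, is roughly the following (a sketch of the assumed behaviour, written as an SML comment, not taken from the sources):

    (* Arguments of All/Ex that are not already lambda-abstractions are
       eta-expanded, so the clausifier always sees an explicit binder:

         All p              ~>  All (fn x => p x)
         Ex (f o g)         ~>  Ex (fn x => (f o g) x)
         All (fn x => p x)      unchanged (only its body is traversed)
    *)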
--- a/src/HOL/Tools/SMT/smt_solver.ML Wed Feb 15 12:46:12 2023 +0000 +++ b/src/HOL/Tools/SMT/smt_solver.ML Wed Feb 15 12:48:53 2023 +0000 @@ -21,7 +21,7 @@ command: unit -> string list, options: Proof.context -> string list, smt_options: (string * string) list, - good_slices: ((int * int * string) * string list) list, + good_slices: ((int * bool * bool * int * string) * string list) list, outcome: string -> string list -> outcome * string list, parse_proof: (Proof.context -> SMT_Translate.replay_data -> ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> @@ -30,7 +30,8 @@ (*registry*) val add_solver: solver_config -> theory -> theory - val good_slices: Proof.context -> string -> ((int * int * string) * string list) list + val good_slices: Proof.context -> string -> + ((int * bool * bool * int * string) * string list) list (*filter*) val smt_filter: Proof.context -> thm -> ((string * ATP_Problem_Generate.stature) * thm) list -> @@ -155,7 +156,7 @@ command: unit -> string list, options: Proof.context -> string list, smt_options: (string * string) list, - good_slices: ((int * int * string) * string list) list, + good_slices: ((int * bool * bool * int * string) * string list) list, outcome: string -> string list -> outcome * string list, parse_proof: (Proof.context -> SMT_Translate.replay_data -> ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> @@ -180,7 +181,7 @@ type solver_info = { command: unit -> string list, smt_options: (string * string) list, - good_slices: ((int * int * string) * string list) list, + good_slices: ((int * bool * bool * int * string) * string list) list, parse_proof: Proof.context -> SMT_Translate.replay_data -> ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> parsed_proof,
--- a/src/HOL/Tools/SMT/smt_systems.ML Wed Feb 15 12:46:12 2023 +0000 +++ b/src/HOL/Tools/SMT/smt_systems.ML Wed Feb 15 12:48:53 2023 +0000 @@ -104,13 +104,13 @@ smt_options = [(":produce-unsat-cores", "true")], good_slices = (* FUDGE *) - [((2, 512, meshN), ["--full-saturate-quant", "--inst-when=full-last-call", "--inst-no-entail", "--term-db-mode=relevant", "--multi-trigger-linear"]), - ((2, 64, meshN), ["--decision=internal", "--simplification=none", "--full-saturate-quant"]), - ((2, 1024, meshN), ["--relevant-triggers", "--full-saturate-quant"]), - ((2, 256, mepoN), ["--trigger-sel=max", "--full-saturate-quant"]), - ((2, 32, meshN), ["--term-db-mode=relevant", "--full-saturate-quant"]), - ((2, 128, meshN), ["--no-e-matching", "--full-saturate-quant"]), - ((2, 256, meshN), ["--finite-model-find", "--fmf-inst-engine"])], + [((2, false, false, 512, meshN), ["--full-saturate-quant", "--inst-when=full-last-call", "--inst-no-entail", "--term-db-mode=relevant", "--multi-trigger-linear"]), + ((2, false, false, 64, meshN), ["--decision=internal", "--simplification=none", "--full-saturate-quant"]), + ((2, false, true, 256, mepoN), ["--trigger-sel=max", "--full-saturate-quant"]), + ((2, false, false, 1024, meshN), ["--relevant-triggers", "--full-saturate-quant"]), + ((2, false, false, 32, meshN), ["--term-db-mode=relevant", "--full-saturate-quant"]), + ((2, false, false, 128, meshN), ["--no-e-matching", "--full-saturate-quant"]), + ((2, false, false, 256, meshN), ["--finite-model-find", "--fmf-inst-engine"])], outcome = on_first_line (outcome_of "unsat" "sat" "unknown" "timeout"), parse_proof = SOME (K CVC_Proof_Parse.parse_proof), replay = NONE } @@ -124,13 +124,13 @@ smt_options = [(":produce-unsat-cores", "true")], good_slices = (* FUDGE *) - [((2, 512, meshN), ["--full-saturate-quant", "--inst-when=full-last-call", "--inst-no-entail", "--term-db-mode=relevant", "--multi-trigger-linear"]), - ((2, 64, meshN), ["--decision=internal", "--simplification=none", "--full-saturate-quant"]), - ((2, 1024, meshN), ["--relevant-triggers", "--full-saturate-quant"]), - ((2, 256, mepoN), ["--trigger-sel=max", "--full-saturate-quant"]), - ((2, 32, meshN), ["--term-db-mode=relevant", "--full-saturate-quant"]), - ((2, 128, meshN), ["--no-e-matching", "--full-saturate-quant"]), - ((2, 256, meshN), ["--finite-model-find", "--fmf-inst-engine"])], + [((2, false, false, 512, meshN), ["--full-saturate-quant", "--inst-when=full-last-call", "--inst-no-entail", "--term-db-mode=relevant", "--multi-trigger-linear"]), + ((2, false, false, 64, meshN), ["--decision=internal", "--simplification=none", "--full-saturate-quant"]), + ((2, false, true, 256, mepoN), ["--trigger-sel=max", "--full-saturate-quant"]), + ((2, false, false, 1024, meshN), ["--relevant-triggers", "--full-saturate-quant"]), + ((2, false, false, 32, meshN), ["--term-db-mode=relevant", "--full-saturate-quant"]), + ((2, false, false, 128, meshN), ["--no-e-matching", "--full-saturate-quant"]), + ((2, false, false, 256, meshN), ["--finite-model-find", "--fmf-inst-engine"])], outcome = on_first_line (outcome_of "unsat" "sat" "unknown" "timeout"), parse_proof = SOME (K CVC_Proof_Parse.parse_proof), replay = NONE } @@ -169,12 +169,12 @@ smt_options = [(":produce-proofs", "true")], good_slices = (* FUDGE *) - [((2, 1024, meshN), []), - ((2, 512, mashN), []), - ((2, 64, meshN), []), - ((2, 128, meshN), []), - ((2, 256, mepoN), []), - ((2, 32, meshN), [])], + [((2, false, false, 1024, meshN), []), + ((2, false, false, 512, mashN), []), + ((2, false, true, 128, meshN), 
[]), + ((2, false, false, 64, meshN), []), + ((2, false, false, 256, mepoN), []), + ((2, false, false, 32, meshN), [])], outcome = on_first_non_unsupported_line (outcome_of "unsat" "sat" "unknown" "Time limit exceeded"), parse_proof = SOME (K Lethe_Proof_Parse.parse_proof), replay = SOME Verit_Replay.replay } @@ -210,12 +210,12 @@ smt_options = [(":produce-proofs", "true")], good_slices = (* FUDGE *) - [((2, 1024, meshN), []), - ((2, 512, mepoN), []), - ((2, 64, meshN), []), - ((2, 256, meshN), []), - ((2, 128, mashN), []), - ((2, 32, meshN), [])], + [((2, false, false, 1024, meshN), []), + ((2, false, false, 512, mepoN), []), + ((2, false, false, 64, meshN), []), + ((2, false, true, 256, meshN), []), + ((2, false, false, 128, mashN), []), + ((2, false, false, 32, meshN), [])], outcome = on_first_line (outcome_of "unsat" "sat" "unknown" "timeout"), parse_proof = SOME Z3_Replay.parse_proof, replay = SOME Z3_Replay.replay } @@ -250,7 +250,7 @@ Theory.setup (Method.setup \<^binding>\<open>smt\<close> (Scan.lift parse_smt_options -- Attrib.thms >> (METHOD oo smt_method)) - "Call to the SMT solvers veriT or z3") + "Call to the SMT solver veriT or z3") (* overall setup *)
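For orientation, the widened slice tuples above follow the scheme documented in sledgehammer_atp_systems.ML further down: (slice size, abduce, check_consistent, number of facts, fact filter). For example, one of the cvc5 slices above reads:

    (* slice size 2, no abduction, consistency checking enabled,
       256 facts selected with the MePo filter, plus solver options *)
    ((2, false, true, 256, mepoN), ["--trigger-sel=max", "--full-saturate-quant"])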
--- a/src/HOL/Tools/Sledgehammer/sledgehammer.ML Wed Feb 15 12:46:12 2023 +0000 +++ b/src/HOL/Tools/Sledgehammer/sledgehammer.ML Wed Feb 15 12:48:53 2023 +0000 @@ -70,6 +70,12 @@ | alternative _ NONE (y as SOME _) = y | alternative _ NONE NONE = NONE +fun varify_nonfixed_terms_global nonfixeds tm = tm + |> Same.commit (Term_Subst.map_aterms_same + (fn Free (x, T) => if member (op =) nonfixeds x then Var ((x, 0), T) else raise Same.SAME + | Var (xi, _) => raise TERM (Logic.bad_schematic xi, [tm]) + | _ => raise Same.SAME)) + fun max_outcome outcomes = let val some = find_first (fn (SH_Some _, _) => true | _ => false) outcomes @@ -148,17 +154,20 @@ fun launch_prover (params as {verbose, spy, slices, timeout, ...}) mode learn (problem as {state, subgoal, factss, ...} : prover_problem) - (slice as ((slice_size, num_facts, fact_filter), _)) name = + (slice as ((slice_size, abduce, check_consistent, num_facts, fact_filter), _)) name = let val ctxt = Proof.context_of state - val _ = spying spy (fn () => (state, subgoal, name, "Launched")) + val _ = spying spy (fn () => (state, subgoal, name, + "Launched" ^ (if abduce then " (abduce)" else "") ^ + (if check_consistent then " (check_consistent)" else ""))) val _ = if verbose then writeln (name ^ " with " ^ string_of_int num_facts ^ " " ^ fact_filter ^ " fact" ^ plural_s num_facts ^ " for " ^ string_of_time (slice_timeout slice_size slices timeout) ^ - "...") + (if abduce then " (abduce)" else "") ^ + (if check_consistent then " (check_consistent)" else "") ^ "...") else () @@ -168,7 +177,8 @@ |> filter_used_facts false used_facts |> map (fn ((name, _), j) => name ^ "@" ^ string_of_int j) |> commas - |> prefix ("Facts in " ^ name ^ " proof: ") + |> prefix ("Facts in " ^ name ^ " " ^ + (if check_consistent then "inconsistency" else "proof") ^ ": ") |> writeln fun spying_str_of_res ({outcome = NONE, used_facts, used_from, ...} : prover_result) = @@ -196,8 +206,8 @@ |> AList.group (op =) |> map (fn (indices, fact_filters) => commas fact_filters ^ ": " ^ indices) in - "Success: Found proof with " ^ string_of_int num_used_facts ^ " fact" ^ - plural_s num_used_facts ^ + "Success: Found " ^ (if check_consistent then "inconsistency" else "proof") ^ " with " ^ + string_of_int num_used_facts ^ " fact" ^ plural_s num_used_facts ^ (if num_used_facts = 0 then "" else ": " ^ commas filter_infos) end | spying_str_of_res {outcome = SOME failure, ...} = @@ -233,6 +243,21 @@ (output, output_message) end +fun analyze_prover_result_for_consistency (result as {outcome, used_facts, ...} : prover_result) = + if outcome = SOME ATP_Proof.TimedOut then + (SH_Timeout, K "") + else if is_some outcome then + (SH_None, K "") + else + (SH_Some (result, []), fn () => + (if member (op = o apsnd fst) used_facts sledgehammer_goal_as_fact then + (case map fst (filter_out (equal sledgehammer_goal_as_fact o fst) used_facts) of + [] => "The goal is inconsistent" + | facts => "The goal is inconsistent with these facts: " ^ commas facts) + else + "The following facts are inconsistent: " ^ + commas (map fst used_facts))) + fun check_expected_outcome ctxt prover_name expect outcome = let val outcome_code = short_string_of_sledgehammer_outcome outcome @@ -248,22 +273,48 @@ if exists (fn (_, (Played _, _)) => true | _ => false) preplay_results then () else - error ("Unexpected outcome: the external prover found a some proof but preplay failed") + error ("Unexpected outcome: the external prover found a proof but preplay failed") | ("unknown", SH_Unknown) => () | ("timeout", SH_Timeout) => () | 
("none", SH_None) => () | _ => error ("Unexpected outcome: " ^ quote outcome_code)) end -fun launch_prover_and_preplay (params as {debug, timeout, expect, ...}) mode writeln_result learn - (problem as {state, subgoal, ...}) slice prover_name = +fun launch_prover_and_preplay (params as {debug, timeout, expect, ...}) mode found_something + writeln_result learn (problem as {state, subgoal, ...}) + (slice as ((_, _, check_consistent, _, _), _)) prover_name = let val ctxt = Proof.context_of state val hard_timeout = Time.scale 5.0 timeout + fun flip_problem {comment, state, goal, subgoal, factss = factss, ...} = + let + val thy = Proof_Context.theory_of ctxt + val assms = Assumption.all_assms_of ctxt + val assm_ts = map Thm.term_of assms + val subgoal_t = Logic.get_goal (Thm.prop_of goal) subgoal + val polymorphic_subgoal_t = (Logic.list_implies (assm_ts, subgoal_t)) + |> Logic.varify_global + val nonfixeds = + subtract (op =) (fold Term.add_free_names assm_ts []) (Term.add_free_names subgoal_t []) + val monomorphic_subgoal_t = subgoal_t + |> varify_nonfixed_terms_global nonfixeds + val subgoal_thms = map (Skip_Proof.make_thm thy) + [polymorphic_subgoal_t, monomorphic_subgoal_t] + val new_facts = + map (fn thm => (((sledgehammer_goal_as_fact, (Assum, General)), thm))) subgoal_thms + in + {comment = comment, state = state, goal = Thm.trivial @{cprop False}, subgoal = 1, + subgoal_count = 1, factss = map (apsnd (append new_facts)) factss, + found_something = found_something "an inconsistency"} + end + + val problem = problem |> check_consistent ? flip_problem + fun really_go () = launch_prover params mode learn problem slice prover_name - |> preplay_prover_result params state subgoal + |> (if check_consistent then analyze_prover_result_for_consistency else + preplay_prover_result params state subgoal) fun go () = if debug then @@ -330,13 +381,15 @@ end fun prover_slices_of_schedule ctxt factss - ({max_facts, fact_filter, type_enc, lam_trans, uncurried_aliases, ...} : params) schedule = + ({abduce, check_consistent, max_facts, fact_filter, type_enc, lam_trans, uncurried_aliases, + ...} : params) + schedule = let fun triplicate_slices original = let val shift = - map (apfst (fn (slice_size, num_facts, fact_filter) => - (slice_size, num_facts, + map (apfst (fn (slice_size, abduce, check_consistent, num_facts, fact_filter) => + (slice_size, abduce, check_consistent, num_facts, if fact_filter = mashN then mepoN else if fact_filter = mepoN then meshN else mashN))) @@ -353,14 +406,17 @@ the_default uncurried_aliases0 uncurried_aliases, extra_extra0) | adjust_extra (extra as SMT_Slice _) = extra - fun adjust_slice max_slice_size ((slice_size0, num_facts0, fact_filter0), extra) = + fun adjust_slice max_slice_size + ((slice_size0, abduce0, check_consistent0, num_facts0, fact_filter0), extra) = let val slice_size = Int.min (max_slice_size, slice_size0) + val abduce = abduce |> the_default abduce0 + val check_consistent = check_consistent |> the_default check_consistent0 val fact_filter = fact_filter |> the_default fact_filter0 val max_facts = max_facts |> the_default num_facts0 val num_facts = Int.min (max_facts, length (facts_of_filter fact_filter factss)) in - ((slice_size, num_facts, fact_filter), adjust_extra extra) + ((slice_size, abduce, check_consistent, num_facts, fact_filter), adjust_extra extra) end val provers = distinct (op =) schedule @@ -378,7 +434,7 @@ SOME (slice0 :: slices) => let val prover_slices' = AList.update (op =) (prover, slices) prover_slices - val slice as ((slice_size, _, _), _) = 
+ val slice as ((slice_size, _, _, _, _), _) = adjust_slice ((slices_left + max_threads - 1) div max_threads) slice0 in (prover, slice) :: translate_schedule prover_slices' (slices_left - slice_size) schedule @@ -389,8 +445,8 @@ |> distinct (op =) end -fun run_sledgehammer (params as {verbose, spy, provers, induction_rules, max_facts, max_proofs, - slices, ...}) +fun run_sledgehammer (params as {verbose, spy, provers, check_consistent, induction_rules, + max_facts, max_proofs, slices, ...}) mode writeln_result i (fact_override as {only, ...}) state = if null provers then error "No prover is set" @@ -402,12 +458,13 @@ val _ = Proof.assert_backward state val print = if mode = Normal andalso is_none writeln_result then writeln else K () - val found_proofs = Synchronized.var "found_proofs" 0 + val found_proofs_and_inconsistencies = Synchronized.var "found_proofs_and_inconsistencies" 0 - fun found_proof prover_name = + fun found_something a_proof_or_inconsistency prover_name = if mode = Normal then - (Synchronized.change found_proofs (fn n => n + 1); - (the_default writeln writeln_result) (prover_name ^ " found a proof...")) + (Synchronized.change found_proofs_and_inconsistencies (fn n => n + 1); + (the_default writeln writeln_result) (prover_name ^ " found " ^ + a_proof_or_inconsistency ^ "...")) else () @@ -437,7 +494,8 @@ SOME n => n | NONE => fold (fn prover => - fold (fn ((_, n, _), _) => Integer.max n) (get_slices ctxt prover)) + fold (fn ((_, _, _, max_facts, _), _) => Integer.max max_facts) + (get_slices ctxt prover)) provers 0) * 51 div 50 (* some slack to account for filtering of induction facts below *) @@ -463,9 +521,9 @@ val factss = get_factss provers val problem = {comment = "", state = state, goal = goal, subgoal = i, subgoal_count = n, - factss = factss, found_proof = found_proof} + factss = factss, found_something = found_something "a proof"} val learn = mash_learn_proof ctxt params (Thm.prop_of goal) - val launch = launch_prover_and_preplay params mode writeln_result learn + val launch = launch_prover_and_preplay params mode found_something writeln_result learn val schedule = if mode = Auto_Try then provers @@ -487,7 +545,7 @@ else (learn chained_thms; Par_List.map (fn (prover, slice) => - if Synchronized.value found_proofs < max_proofs then + if Synchronized.value found_proofs_and_inconsistencies < max_proofs then launch problem slice prover else (SH_None, "")) @@ -499,11 +557,19 @@ handle Timeout.TIMEOUT _ => (SH_Timeout, "")) |> `(fn (outcome, message) => (case outcome of - SH_Some _ => (the_default writeln writeln_result "QED"; true) + SH_Some _ => (the_default writeln writeln_result "Done"; true) | SH_Unknown => (the_default writeln writeln_result message; false) - | SH_Timeout => (the_default writeln writeln_result "No proof found"; false) + | SH_Timeout => + (the_default writeln writeln_result + ("No " ^ (if check_consistent = SOME true then "inconsistency" else "proof") ^ + " found"); + false) | SH_None => (the_default writeln writeln_result - (if message = "" then "No proof found" else "Warning: " ^ message); + (if message = "" then + "No " ^ (if check_consistent = SOME true then "inconsistency" else "proof") ^ + " found" + else + "Warning: " ^ message); false))) end)
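In other words, the consistency-checking mode implemented by flip_problem above works roughly as follows (a paraphrase of the code, not additional functionality):

    (* The original subgoal, together with its assumptions, is turned into a fact
       named sledgehammer_goal_as_fact and appended to every fact set; the goal
       handed to the provers becomes False.  A successful "proof" therefore
       witnesses an inconsistency, which analyze_prover_result_for_consistency
       reports together with the background facts involved. *)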
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_atp_systems.ML Wed Feb 15 12:46:12 2023 +0000 +++ b/src/HOL/Tools/Sledgehammer/sledgehammer_atp_systems.ML Wed Feb 15 12:48:53 2023 +0000 @@ -11,7 +11,7 @@ type atp_formula_role = ATP_Problem.atp_formula_role type atp_failure = ATP_Proof.atp_failure - type base_slice = int * int * string + type base_slice = int * bool * bool * int * string type atp_slice = atp_format * string * string * bool * string type atp_config = {exec : string list * string list, @@ -67,8 +67,8 @@ val default_max_mono_iters = 3 (* FUDGE *) val default_max_new_mono_instances = 100 (* FUDGE *) -(* desired slice size, desired number of facts, fact filter *) -type base_slice = int * int * string +(* desired slice size, abduce, check_consistent, desired number of facts, fact filter *) +type base_slice = int * bool * bool * int * string (* problem file format, type encoding, lambda translation scheme, uncurried aliases?, prover-specific extra information *) @@ -142,7 +142,7 @@ generate_isabelle_info = false, good_slices = (* FUDGE *) - K [((2, 60, meshN), (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false, ""))], + K [((2, false, false, 60, meshN), (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false, ""))], good_max_mono_iters = default_max_mono_iters - 1 (* FUDGE *), good_max_new_mono_instances = default_max_new_mono_instances} @@ -165,7 +165,7 @@ generate_isabelle_info = false, good_slices = (* FUDGE *) - K [((1000 (* infinity *), 100, meshN), (TF1, "poly_native", liftingN, false, ""))], + K [((1000 (* infinity *), false, false, 100, meshN), (TF1, "poly_native", liftingN, false, ""))], good_max_mono_iters = default_max_mono_iters, good_max_new_mono_instances = default_max_new_mono_instances} @@ -200,12 +200,12 @@ (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Lambda_Free), "mono_native_higher", combsN, "--auto-schedule") in (* FUDGE *) - K [((1000 (* infinity *), 512, meshN), (format, type_enc, lam_trans, false, extra_options)), - ((1000 (* infinity *), 1024, meshN), (format, type_enc, lam_trans, false, extra_options)), - ((1000 (* infinity *), 128, mepoN), (format, type_enc, lam_trans, false, extra_options)), - ((1000 (* infinity *), 724, meshN), (TF0, "poly_guards??", lam_trans, false, extra_options)), - ((1000 (* infinity *), 256, mepoN), (format, type_enc, liftingN, false, extra_options)), - ((1000 (* infinity *), 64, mashN), (format, type_enc, combsN, false, extra_options))] + K [((1000 (* infinity *), false, false, 512, meshN), (format, type_enc, lam_trans, false, extra_options)), + ((1000 (* infinity *), false, false, 1024, meshN), (format, type_enc, lam_trans, false, extra_options)), + ((1000 (* infinity *), false, false, 128, mepoN), (format, type_enc, lam_trans, false, extra_options)), + ((1000 (* infinity *), false, false, 724, meshN), (TF0, "poly_guards??", lam_trans, false, extra_options)), + ((1000 (* infinity *), false, true, 256, mepoN), (format, type_enc, liftingN, false, extra_options)), + ((1000 (* infinity *), false, false, 64, mashN), (format, type_enc, combsN, false, extra_options))] end, good_max_mono_iters = default_max_mono_iters, good_max_new_mono_instances = default_max_new_mono_instances} @@ -229,11 +229,11 @@ generate_isabelle_info = false, good_slices = (* FUDGE *) - K [((2, 32, meshN), (TF0, "mono_native", liftingN, false, "")), - ((2, 512, meshN), (TX0, "mono_native", liftingN, false, "")), - ((2, 
128, mashN), (TF0, "mono_native", combsN, false, "")), - ((2, 1024, meshN), (TF0, "mono_native", liftingN, false, "")), - ((2, 256, mepoN), (TF0, "mono_native", combsN, false, ""))], + K [((2, false, false, 32, meshN), (TF0, "mono_native", liftingN, false, "")), + ((2, false, false, 512, meshN), (TX0, "mono_native", liftingN, false, "")), + ((2, false, false, 128, mashN), (TF0, "mono_native", combsN, false, "")), + ((2, false, false, 1024, meshN), (TF0, "mono_native", liftingN, false, "")), + ((2, false, true, 256, mepoN), (TF0, "mono_native", combsN, false, ""))], good_max_mono_iters = default_max_mono_iters, good_max_new_mono_instances = default_max_new_mono_instances} @@ -259,7 +259,7 @@ generate_isabelle_info = false, good_slices = (* FUDGE *) - K [((2, 40, meshN), (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false, ""))], + K [((2, false, false, 40, meshN), (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false, ""))], good_max_mono_iters = default_max_mono_iters - 1 (* FUDGE *), good_max_new_mono_instances = default_max_new_mono_instances} @@ -281,8 +281,8 @@ generate_isabelle_info = false, good_slices = (* FUDGE *) - K [((6, 512, meshN), (TH0, "mono_native_higher", keep_lamsN, false, "")), - ((6, 512, meshN), (TF0, "mono_native", liftingN, false, ""))], + K [((6, false, false, 512, meshN), (TH0, "mono_native_higher", keep_lamsN, false, "")), + ((6, false, false, 512, meshN), (TF0, "mono_native", liftingN, false, ""))], good_max_mono_iters = default_max_mono_iters - 1 (* FUDGE *), good_max_new_mono_instances = default_max_new_mono_instances} @@ -306,7 +306,7 @@ generate_isabelle_info = false, good_slices = (* FUDGE *) - K [((12, 256, meshN), (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false, ""))], + K [((12, false, false, 256, meshN), (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false, ""))], good_max_mono_iters = default_max_mono_iters - 1 (* FUDGE *), good_max_new_mono_instances = default_max_new_mono_instances} @@ -341,14 +341,14 @@ generate_isabelle_info = true, good_slices = (* FUDGE *) - K [((2, 150, meshN), (DFG Monomorphic, "mono_native", combsN, true, "")), - ((2, 500, meshN), (DFG Monomorphic, "mono_native", liftingN, true, spass_H2SOS)), - ((2, 50, meshN), (DFG Monomorphic, "mono_native", liftingN, true, spass_H2LR0LT0)), - ((2, 250, meshN), (DFG Monomorphic, "mono_native", combsN, true, spass_H2NuVS0)), - ((2, 1000, mepoN), (DFG Monomorphic, "mono_native", liftingN, true, spass_H1SOS)), - ((2, 150, meshN), (DFG Monomorphic, "poly_guards??", liftingN, false, spass_H2NuVS0Red2)), - ((2, 300, meshN), (DFG Monomorphic, "mono_native", combsN, true, spass_H2SOS)), - ((2, 100, meshN), (DFG Monomorphic, "mono_native", combs_and_liftingN, true, spass_H2))], + K [((2, false, false, 150, meshN), (DFG Monomorphic, "mono_native", combsN, true, "")), + ((2, false, false, 500, meshN), (DFG Monomorphic, "mono_native", liftingN, true, spass_H2SOS)), + ((2, false, false, 50, meshN), (DFG Monomorphic, "mono_native", liftingN, true, spass_H2LR0LT0)), + ((2, false, false, 250, meshN), (DFG Monomorphic, "mono_native", combsN, true, spass_H2NuVS0)), + ((2, false, false, 1000, mepoN), (DFG Monomorphic, "mono_native", liftingN, true, spass_H1SOS)), + ((2, false, true, 150, meshN), (DFG Monomorphic, "poly_guards??", liftingN, false, 
spass_H2NuVS0Red2)), + ((2, false, false, 300, meshN), (DFG Monomorphic, "mono_native", combsN, true, spass_H2SOS)), + ((2, false, false, 100, meshN), (DFG Monomorphic, "mono_native", combs_and_liftingN, true, spass_H2))], good_max_mono_iters = default_max_mono_iters, good_max_new_mono_instances = default_max_new_mono_instances} @@ -387,14 +387,14 @@ generate_isabelle_info = false, good_slices = (* FUDGE *) - K [((2, 512, meshN), (TX1, "mono_native_fool", combsN, false, sosN)), - ((2, 1024, meshN), (TX1, "mono_native_fool", liftingN, false, sosN)), - ((2, 256, mashN), (TX1, "mono_native_fool", liftingN, false, no_sosN)), - ((2, 512, mepoN), (TF1, "poly_native", liftingN, false, no_sosN)), - ((2, 16, meshN), (TX1, "mono_native_fool", liftingN, false, no_sosN)), - ((2, 32, meshN), (TX1, "mono_native_fool", combsN, false, no_sosN)), - ((2, 64, meshN), (TX1, "mono_native_fool", combs_or_liftingN, false, no_sosN)), - ((2, 128, meshN), (TX1, "mono_native_fool", liftingN, false, no_sosN))], + K [((2, false, false, 512, meshN), (TX1, "mono_native_fool", combsN, false, sosN)), + ((2, false, false, 1024, meshN), (TX1, "mono_native_fool", liftingN, false, sosN)), + ((2, false, false, 256, mashN), (TX1, "mono_native_fool", liftingN, false, no_sosN)), + ((2, false, true, 512, mepoN), (TF1, "poly_native", liftingN, false, no_sosN)), + ((2, false, false, 16, meshN), (TX1, "mono_native_fool", liftingN, false, no_sosN)), + ((2, false, false, 32, meshN), (TX1, "mono_native_fool", combsN, false, no_sosN)), + ((2, false, false, 64, meshN), (TX1, "mono_native_fool", combs_or_liftingN, false, no_sosN)), + ((2, false, false, 128, meshN), (TX1, "mono_native_fool", liftingN, false, no_sosN))], good_max_mono_iters = default_max_mono_iters, good_max_new_mono_instances = 2 * default_max_new_mono_instances (* FUDGE *)} @@ -419,21 +419,21 @@ prem_role = Hypothesis, generate_isabelle_info = true, good_slices = - K [((1, 512, meshN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic --tptp-def-as-rewrite --rewrite-before-cnf=true --max-inferences=1 --ho-unif-max-depth=1 --ho-max-elims=0 --ho-max-app-projections=0 --ho-max-rigid-imitations=1 --ho-max-identifications=0 --boolean-reasoning=bool-hoist --bool-hoist-simpl=true --bool-select=LI --recognize-injectivity=true --ext-rules=ext-family --ext-rules-max-depth=1 --ho-choice-inst=true --ho-prim-enum=none --ho-elim-leibniz=0 --interpret-bool-funs=true --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --ho-unif-level=pragmatic-framework --select=bb+e-selection2 --post-cnf-lambda-lifting=true -q \"4|prefer-sos|pnrefined(2,1,1,1,2,2,2)\" -q \"6|prefer-processed|conjecture-relative-struct(1.5,3.5,2,3)\" -q \"1|const|fifo\" -q \"4|prefer-ground|orient-lmax(2,1,2,1,1)\" -q \"4|defer-sos|conjecture-relative-struct(1,5,2,3)\" --avatar=off --recognize-injectivity=true --ho-neg-ext=true --e-timeout=2 --ho-pattern-decider=true --ho-fixpoint-decider=true --e-max-derived=50 --ignore-orphans=true --e-auto=true --presaturate=true --e-call-point=0.1")), (* sh5_sh1.sh *) - ((1, 1024, meshN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic --tptp-def-as-rewrite --rewrite-before-cnf=true --mode=ho-competitive --boolean-reasoning=simpl-only --ext-rules=ext-family --ext-rules-max-depth=1 --ho-prim-enum=none --avatar=off --recognize-injectivity=true --ho-elim-leibniz=1 --ho-unif-level=pragmatic-framework --no-max-vars --max-inferences=2 --ho-unif-max-depth=1 -q \"6|prefer-sos|pnrefined(1,1,1,2,2,2,0.5)\" -q 
\"6|const|conjecture-relative-var(1.02,l,f)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-non-goals|conjecture-relative-var(1,l,f)\" -q \"4|prefer-easy-ho|conjecture-relative-var(1.01,s,f)\" --select=e-selection16 --ho-choice-inst=true --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=3 --e-auto=true --sine=50 --sine-tolerance=1.0 --sine-depth-max=3 --sine-depth-min=1 --sine-trim-implications=true --ho-unif-level=pragmatic-framework --e-encode-lambdas=lift --scan-clause-ac=false --kbo-weight-fun=lambda-def-invfreqrank --e-call-point=0.1")), (* sh8_shallow_sine.sh *) - ((1, 256, mepoN), (format, "mono_native_higher_fool", keep_lamsN, false, "-nc --tptp-def-as-rewrite --rewrite-before-cnf=true --mode=ho-competitive --boolean-reasoning=simpl-only --ext-rules=off --ho-prim-enum=full --ho-prim-max=1 --avatar=off --recognize-injectivity=true --ho-elim-leibniz=4 --ho-unif-level=full-framework --no-max-vars -q \"2|prefer-goals|conjecture-relative-e(0.5,1,100,100,100,100,1.5,1.5,1)\" -q \"4|const|conjecture-relative-e(0.1,1,100,100,100,100,1.5,1.5,1.5)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-non-goals|conjecture-relative-e(0.5,1,100,100,100,100,1.5,1.5,1.5)\" -q \"4|prefer-sos|pnrefined(1,1,1,1,2,1.5,2)\" --select=ho-selection5 --ho-choice-inst=true --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=5 --e-call-point=0.25 --e-auto=true --sine=50 --sine-tolerance=2 --sine-depth-max=4 --sine-depth-min=1 --e-max-derived=96 --e-encode-lambdas=lift --scan-clause-ac=false --kbo-weight-fun=arity0 --prec-gen-fun=invfreq_conj")), (* sh10_new_c.s3.sh *) - ((1, 512, mepoN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic -nc --tptp-def-as-rewrite --rewrite-before-cnf=true --mode=ho-competitive --boolean-reasoning=simpl-only --ext-rules=ext-family --ext-rules-max-depth=1 --ho-prim-enum=none --avatar=off --recognize-injectivity=true --ho-elim-leibniz=1 --ho-unif-level=pragmatic-framework --no-max-vars --max-inferences=4 --ho-max-app-projections=1 --ho-max-elims=0 --ho-max-rigid-imitations=2 --ho-max-identifications=0 --ho-unif-max-depth=3 -q \"6|prefer-sos|pnrefined(1,1,1,2,2,2,0.5)\" -q \"6|const|conjecture-relative-var(1.02,l,f)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-non-goals|conjecture-relative-var(1,l,f)\" -q \"4|prefer-easy-ho|conjecture-relative-var(1.01,s,f)\" --select=e-selection7 --ho-choice-inst=true --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=7 --sine=50 --sine-tolerance=1 --sine-depth-max=2 --sine-depth-min=1 --e-max-derived=64 --sine-ignore-k-most-common-syms=2 --sine-trim-implications=true --e-encode-lambdas=lift --scan-clause-ac=false --lambdasup=0 --kbo-weight-fun=lambda-def-invfreqrank --demod-in-var-args=true --bool-demod=true --lambda-demod=true --e-call-point=0.1")), (* sh10_c_ic.sh *) - ((1, 512, meshN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-comb-complete --boolean-reasoning=simpl-only --ext-rules=off --kbo-weight-fun=lambda-def-sqarity --ho-prim-enum=none --tptp-def-as-rewrite -q \"4|prefer-sos|orient-lmax(2,1,2,1,1)\" -q \"4|defer-sos|conjecture-relative-var(1,s,f)\" -q \"3|const|default\" -q \"1|prefer-processed|fifo\" --ho-elim-leibniz=1 --select=NoSelection --solve-formulas=true --lazy-cnf=true --lazy-cnf-kind=simp --lazy-cnf-renaming-threshold=8 --sine=60 --sine-tolerance=2 --sine-depth-max=5 --sine-depth-min=1 --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=3 --e-auto=true --e-max-derived=50 
--e-encode-lambdas=lift --scan-clause-ac=false --presaturate=true --comb-b-penalty=3 --comb-c-penalty=3 --comb-k-penalty=1 --comb-s-penalty=5 --subvarsup=false --e-call-point=0.15")), (* sh8_b.comb.sh (modified) *) - ((1, 1024, mashN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic --boolean-reasoning=simpl-only --ho-unif-max-depth=0 --ho-prim-enum=none -q \"2|prefer-ho-steps|conjecture-relative-e(0.1,0.5,100,100,100,100,1.5,1.5,1.5)\" -q \"1|prefer-sos|pnrefined(1,1,1,2,2,2,0.5)\" -q \"2|prefer-ground|default\" -q \"2|prefer-empty-trail|conjecture-relative-e(0.1,0.5,100,100,100,100,1.5,1.5,1.5)\" -q \"1|prefer-processed|fifo\" --select=bb+e-selection7 --ho-pattern-decider=false --ho-fixpoint-decider=true --ho-solid-decider=false --sine=150 --sine-tolerance=2 --sine-depth-max=3 --sine-depth-min=1 --prec-gen-fun=invfreqhack --lazy-cnf=true --lazy-cnf-kind=simp --lazy-cnf-renaming-threshold=2 --fluid-log-hoist=false --tptp-def-as-rewrite --rewrite-before-cnf=true --ho-prim-enum=eq --ho-prim-enum-add-var=true --ho-prim-max=1 --ho-prim-enum-early-bird=true --avatar=eager --split-only-ground=true")), (* sh5_add_var_l_av.sh *) - ((1, 256, mashN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic --max-inferences=4 --ho-max-app-projections=1 --ho-max-elims=0 --ho-max-rigid-imitations=2 --ho-max-identifications=0 --ho-unif-max-depth=2 --max-inferences=3 --boolean-reasoning=bool-hoist --bool-select=LO --ext-rules=off --kbo-weight-fun=lambda-def-invfreqrank --ho-prim-enum=none --ho-unif-level=pragmatic-framework -q \"1|prefer-sos|conjecture-relative-var(1.01,s,f)\" -q \"4|const|conjecture-relative-var(1.05,l,f)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-non-goals|conjecture-relative-var(1.02,l,f)\" -q \"4|prefer-sos|pnrefined(3,2,3,2,2,1.5,2)\" --ho-elim-leibniz=1 --ho-fixpoint-decider=true --ho-pattern-decider=true --ho-solid-decider=true --select=e-selection2 --solve-formulas=true --lambdasup=0 --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=3 --e-max-derived=48 --e-encode-lambdas=lift --presaturate=true --prec-gen-fun=invfreq --e-call-point=0.2 --e-auto=true --sine-trim-implications=true")), (* sh10_e_lift.sh *) - ((1, 512, mepoN), (format, "mono_native_higher_fool", keep_lamsN, false, "-nc --tptp-def-as-rewrite --mode=ho-pragmatic --max-inferences=4 --ho-max-app-projections=1 --ho-max-elims=0 --ho-max-rigid-imitations=1 --ho-max-identifications=0 --ho-unif-max-depth=2 --boolean-reasoning=simpl-only --ext-rules=off --kbo-weight-fun=lambda-def-const --ho-prim-enum=neg --ho-prim-enum-early-bird=true --tptp-def-as-rewrite --rewrite-before-cnf=true --ho-unif-level=pragmatic-framework --ho-unif-max-depth=1 --sine=50 --sine-tolerance=1.0 --sine-depth-max=3 --sine-depth-min=1 --sine-trim-implications=true -q \"4|prefer-sos|orient-lmax(2,1,2,1,1)\" -q \"4|defer-sos|conjecture-relative-var(1,s,f)\" -q \"3|const|default\" -q \"1|prefer-processed|fifo\" --ho-elim-leibniz=1 --ho-fixpoint-decider=true --ho-pattern-decider=true --ho-solid-decider=false --select=e-selection10 --solve-formulas=true --sup-at-vars=false --sup-at-var-headed=false --sup-from-var-headed=true --lazy-cnf=true --lazy-cnf-kind=simp --lazy-cnf-renaming-threshold=4 --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=2 --e-auto=true --e-max-derived=32 --e-encode-lambdas=lift --scan-clause-ac=false --presaturate=true --e-call-point=0.16")), (* sh5_shallow_sine.sh *) - ((1, 512, mashN), (format, "mono_native_higher_fool", keep_lamsN, 
false, "-nc --tptp-def-as-rewrite --rewrite-before-cnf=true --mode=ho-competitive --boolean-reasoning=bool-hoist --ext-rules=off --recognize-injectivity=true --ho-unif-level=full-framework -q \"4|prefer-goals|pnrefined(1,1,1,2,2,2,0.5)\" -q \"1|prefer-fo|conjecture-relative-var(1.02,l,f)\" -q \"1|prefer-non-goals|conjecture-relative-var(1,l,f)\" -q \"4|prefer-ho-steps|conjecture-relative-var(1.01,s,f)\" -q \"1|prefer-processed|fifo\" --select=bb+ho-selection --scan-clause-ac=false --kbo-weight-fun=invfreqrank --fluidsup=true --boolean-reasoning=bool-hoist --fluid-log-hoist=true --fluid-hoist=true --ite-axioms=true --lazy-cnf=true --ho-solid-decider=true --ho-fixpoint-decider=true --bool-select=\"sel1(pos_ctx)\" --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=2 --e-auto=true --e-call-point=0.35 --avatar=off --e-max-derived=50")), (* sh5_e_short1.sh *) - ((1, 1024, meshN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic --tptp-def-as-rewrite --rewrite-before-cnf=true --boolean-reasoning=simpl-only --select=e-selection12 --prec-gen-fun=invfreq_conj --ord=lambda_kbo --ho-unif-level=full-framework --ho-pattern-decider=true --ho-solid-decider=false --ho-fixpoint-decider=true --lazy-cnf=true --lazy-cnf-kind=simp --lazy-cnf-renaming-threshold=1 --sine=100 --sine-depth-min=1 --sine-depth-max=5 --sine-tolerance=1.5 -q \"1|prefer-sos|default\" -q \"8|prefer-processed|conjecture-relative-var(1,l,f)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-lambdas|explore\" -q \"1|prefer-non-goals|explore\" -q \"1|prefer-processed|conjecture-relative-var(1,s,f)\" --kbo-weight-fun=arity0")), (* sh5_32.sh *) - ((1, 256, meshN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic --boolean-reasoning=simpl-only --ho-unif-max-depth=2 --ho-max-app-projections=0 --ho-max-rigid-imitations=1 --ho-max-elims=0 --ho-max-identifications=1 --max-inferences=3 --ext-rules=off --recognize-injectivity=true --ho-prim-enum=none --ho-choice-inst=true -q \"3|prefer-fo|conjecture-relative-var(1.02,l,f)\" -q \"3|prefer-sos|pnrefined(1,1,1,2,2,2,0.5)\" -q \"2|prefer-ground|orient-lmax(2,1,2,1,1)\" -q \"1|prefer-processed|fifo\" --select=MaxGoalNS --sine=60 --sine-tolerance=1.5 --sine-depth-max=3 --sine-depth-min=1 --prec-gen-fun=invfreqhack --lazy-cnf=true --lazy-cnf-kind=simp --lazy-cnf-renaming-threshold=3 --kbo-weight-fun-from-precedence=true --kbo-weight-fun-from-precedence-rank=5 --trigger-bool-inst=1 --avatar=lazy --tptp-def-as-rewrite --rewrite-before-cnf=true --sup-from-var-headed=false --sup-at-vars=false")), (* sh5_sh4.sh *) - ((1, 512, mashN), (format, "mono_native_higher_fool", keep_lamsN, false, "--tptp-def-as-rewrite --rewrite-before-cnf=true --mode=lambda-free-intensional --check-lambda-free=false --boolean-reasoning=simpl-only --post-cnf-lambda-lifting=true --ext-rules=off --ho-prim-enum=none --recognize-injectivity=true --no-max-vars --select=e-selection8 --prec-gen-fun=invfreq --kbo-weight-fun=invfreqrank --kbo-const-weight=2 --ord=lambdafree_kbo --ignore-orphans=true -q \"1|prefer-sos|conjecture-relative-struct(1.5,3.5,2,3)\" -q \"1|const|conj_pref_weight(0.5,100,0.2,0.2,4)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-non-goals|conjecture-relative-e(0.3,0.25,100,100,100,100,1.5,1.5,1)\" -q \"1|prefer-sos|pnrefined(3,2,3,2,2,1.5,2)\" --lazy-cnf=true --lazy-cnf-renaming-threshold=2")), (* sh5_lifting2.sh *) - ((1, 512, meshN), (format, "mono_native_higher_fool", keep_lamsN, false, "-nc --tptp-def-as-rewrite --rewrite-before-cnf=true 
--tptp-rewrite-formulas-only=true --mode=ho-pragmatic --boolean-reasoning=simpl-only --ext-rules=ext-family --ext-rules-max-depth=1 --ho-prim-enum=neg --ho-prim-max=1 --recognize-injectivity=true --ho-elim-leibniz=1 --ho-unif-level=pragmatic-framework --no-max-vars -q \"1|prefer-sos|conjecture-relative-var(1.02,l,f)\" -q \"4|const|conjecture-relative-var(1,s,f)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-non-goals|conjecture-relative-var(1,l,f)\" -q \"4|prefer-sos|pnrefined(2,1,1,1,2,2,2)\" --select=e-selection7 --ho-choice-inst=true --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=2 --e-auto=true --sine=50 --sine-tolerance=10 --sine-depth-max=5 --sine-depth-min=1 --e-max-derived=64 --e-encode-lambdas=lift --scan-clause-ac=false --prec-gen-fun=invfreq_conj --ord=lambda_kbo --solid-subsumption=true --ignore-orphans=true --e-call-point=0.2")), (* sh5_noforms.sh *) - ((1, 1024, meshN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic --tptp-def-as-rewrite --rewrite-before-cnf=true --max-inferences=4 --ho-unif-max-depth=3 --ho-max-elims=0 --ho-max-app-projections=1 --ho-max-identifications=0 --ho-max-rigid-imitations=2 --ho-unif-level=pragmatic-framework --boolean-reasoning=simpl-only --kbo-weight-fun=freqrank --ext-rules=ext-family --ext-rules-max-depth=2 --ho-prim-enum=eq --ho-prim-max=2 --interpret-bool-funs=false -q \"2|prefer-goals|default\" -q \"8|prefer-sos|conjecture-relative-var(1,l,f)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-lambdas|explore\" -q \"1|prefer-non-goals|explore\" -q \"1|prefer-processed|conjecture-relative-var(1,s,f)\" --recognize-injectivity=true --ho-selection-restriction=none --select=ho-selection2 --solve-formulas=true")), (* sh8_old_zip1.sh *) - ((1, 256, mashN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic --boolean-reasoning=simpl-only --tptp-def-as-rewrite --rewrite-before-cnf=true --kbo-weight-fun=freqrank -q \"1|prefer-sos|default\" -q \"1|prefer-goals|conjecture-relative-var(1.03,s,f)\" -q \"1|prefer-non-goals|default\" -q \"5|const|conjecture-relative-var(1.01,l,f)\" -q \"1|prefer-processed|fifo\" -q \"1|const|conjecture-relative-var(1.05,l,f)\" -q \"1|defer-sos|conjecture-relative-var(1.1,s,f)\" --select=e-selection9 --recognize-injectivity=true --ho-choice-inst=false --ho-selection-restriction=none --sine=50 --sine-tolerance=3 --sine-depth-max=3 --sine-depth-min=1 --eq-encode=true --avatar=eager --sine-trim-implications=true"))], (* sh5_sh.eqenc.sh *) + K [((1, false, false, 512, meshN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic --tptp-def-as-rewrite --rewrite-before-cnf=true --max-inferences=1 --ho-unif-max-depth=1 --ho-max-elims=0 --ho-max-app-projections=0 --ho-max-rigid-imitations=1 --ho-max-identifications=0 --boolean-reasoning=bool-hoist --bool-hoist-simpl=true --bool-select=LI --recognize-injectivity=true --ext-rules=ext-family --ext-rules-max-depth=1 --ho-choice-inst=true --ho-prim-enum=none --ho-elim-leibniz=0 --interpret-bool-funs=true --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --ho-unif-level=pragmatic-framework --select=bb+e-selection2 --post-cnf-lambda-lifting=true -q \"4|prefer-sos|pnrefined(2,1,1,1,2,2,2)\" -q \"6|prefer-processed|conjecture-relative-struct(1.5,3.5,2,3)\" -q \"1|const|fifo\" -q \"4|prefer-ground|orient-lmax(2,1,2,1,1)\" -q \"4|defer-sos|conjecture-relative-struct(1,5,2,3)\" --avatar=off --recognize-injectivity=true --ho-neg-ext=true --e-timeout=2 --ho-pattern-decider=true 
--ho-fixpoint-decider=true --e-max-derived=50 --ignore-orphans=true --e-auto=true --presaturate=true --e-call-point=0.1")), (* sh5_sh1.sh *) + ((1, false, false, 512, mepoN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic -nc --tptp-def-as-rewrite --rewrite-before-cnf=true --mode=ho-competitive --boolean-reasoning=simpl-only --ext-rules=ext-family --ext-rules-max-depth=1 --ho-prim-enum=none --avatar=off --recognize-injectivity=true --ho-elim-leibniz=1 --ho-unif-level=pragmatic-framework --no-max-vars --max-inferences=4 --ho-max-app-projections=1 --ho-max-elims=0 --ho-max-rigid-imitations=2 --ho-max-identifications=0 --ho-unif-max-depth=3 -q \"6|prefer-sos|pnrefined(1,1,1,2,2,2,0.5)\" -q \"6|const|conjecture-relative-var(1.02,l,f)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-non-goals|conjecture-relative-var(1,l,f)\" -q \"4|prefer-easy-ho|conjecture-relative-var(1.01,s,f)\" --select=e-selection7 --ho-choice-inst=true --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=7 --sine=50 --sine-tolerance=1 --sine-depth-max=2 --sine-depth-min=1 --e-max-derived=64 --sine-ignore-k-most-common-syms=2 --sine-trim-implications=true --e-encode-lambdas=lift --scan-clause-ac=false --lambdasup=0 --kbo-weight-fun=lambda-def-invfreqrank --demod-in-var-args=true --bool-demod=true --lambda-demod=true --e-call-point=0.1")), (* sh10_c_ic.sh *) + ((1, false, false, 1024, meshN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic --tptp-def-as-rewrite --rewrite-before-cnf=true --mode=ho-competitive --boolean-reasoning=simpl-only --ext-rules=ext-family --ext-rules-max-depth=1 --ho-prim-enum=none --avatar=off --recognize-injectivity=true --ho-elim-leibniz=1 --ho-unif-level=pragmatic-framework --no-max-vars --max-inferences=2 --ho-unif-max-depth=1 -q \"6|prefer-sos|pnrefined(1,1,1,2,2,2,0.5)\" -q \"6|const|conjecture-relative-var(1.02,l,f)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-non-goals|conjecture-relative-var(1,l,f)\" -q \"4|prefer-easy-ho|conjecture-relative-var(1.01,s,f)\" --select=e-selection16 --ho-choice-inst=true --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=3 --e-auto=true --sine=50 --sine-tolerance=1.0 --sine-depth-max=3 --sine-depth-min=1 --sine-trim-implications=true --ho-unif-level=pragmatic-framework --e-encode-lambdas=lift --scan-clause-ac=false --kbo-weight-fun=lambda-def-invfreqrank --e-call-point=0.1")), (* sh8_shallow_sine.sh *) + ((1, false, false, 256, mepoN), (format, "mono_native_higher_fool", keep_lamsN, false, "-nc --tptp-def-as-rewrite --rewrite-before-cnf=true --mode=ho-competitive --boolean-reasoning=simpl-only --ext-rules=off --ho-prim-enum=full --ho-prim-max=1 --avatar=off --recognize-injectivity=true --ho-elim-leibniz=4 --ho-unif-level=full-framework --no-max-vars -q \"2|prefer-goals|conjecture-relative-e(0.5,1,100,100,100,100,1.5,1.5,1)\" -q \"4|const|conjecture-relative-e(0.1,1,100,100,100,100,1.5,1.5,1.5)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-non-goals|conjecture-relative-e(0.5,1,100,100,100,100,1.5,1.5,1.5)\" -q \"4|prefer-sos|pnrefined(1,1,1,1,2,1.5,2)\" --select=ho-selection5 --ho-choice-inst=true --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=5 --e-call-point=0.25 --e-auto=true --sine=50 --sine-tolerance=2 --sine-depth-max=4 --sine-depth-min=1 --e-max-derived=96 --e-encode-lambdas=lift --scan-clause-ac=false --kbo-weight-fun=arity0 --prec-gen-fun=invfreq_conj")), (* sh10_new_c.s3.sh *) + ((1, false, true, 512, meshN), (format, 
"mono_native_higher_fool", keep_lamsN, false, "--mode=ho-comb-complete --boolean-reasoning=simpl-only --ext-rules=off --kbo-weight-fun=lambda-def-sqarity --ho-prim-enum=none --tptp-def-as-rewrite -q \"4|prefer-sos|orient-lmax(2,1,2,1,1)\" -q \"4|defer-sos|conjecture-relative-var(1,s,f)\" -q \"3|const|default\" -q \"1|prefer-processed|fifo\" --ho-elim-leibniz=1 --select=NoSelection --solve-formulas=true --lazy-cnf=true --lazy-cnf-kind=simp --lazy-cnf-renaming-threshold=8 --sine=60 --sine-tolerance=2 --sine-depth-max=5 --sine-depth-min=1 --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=3 --e-auto=true --e-max-derived=50 --e-encode-lambdas=lift --scan-clause-ac=false --presaturate=true --comb-b-penalty=3 --comb-c-penalty=3 --comb-k-penalty=1 --comb-s-penalty=5 --subvarsup=false --e-call-point=0.15")), (* sh8_b.comb.sh (modified) *) + ((1, false, false, 1024, mashN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic --boolean-reasoning=simpl-only --ho-unif-max-depth=0 --ho-prim-enum=none -q \"2|prefer-ho-steps|conjecture-relative-e(0.1,0.5,100,100,100,100,1.5,1.5,1.5)\" -q \"1|prefer-sos|pnrefined(1,1,1,2,2,2,0.5)\" -q \"2|prefer-ground|default\" -q \"2|prefer-empty-trail|conjecture-relative-e(0.1,0.5,100,100,100,100,1.5,1.5,1.5)\" -q \"1|prefer-processed|fifo\" --select=bb+e-selection7 --ho-pattern-decider=false --ho-fixpoint-decider=true --ho-solid-decider=false --sine=150 --sine-tolerance=2 --sine-depth-max=3 --sine-depth-min=1 --prec-gen-fun=invfreqhack --lazy-cnf=true --lazy-cnf-kind=simp --lazy-cnf-renaming-threshold=2 --fluid-log-hoist=false --tptp-def-as-rewrite --rewrite-before-cnf=true --ho-prim-enum=eq --ho-prim-enum-add-var=true --ho-prim-max=1 --ho-prim-enum-early-bird=true --avatar=eager --split-only-ground=true")), (* sh5_add_var_l_av.sh *) + ((1, false, false, 256, mashN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic --max-inferences=4 --ho-max-app-projections=1 --ho-max-elims=0 --ho-max-rigid-imitations=2 --ho-max-identifications=0 --ho-unif-max-depth=2 --max-inferences=3 --boolean-reasoning=bool-hoist --bool-select=LO --ext-rules=off --kbo-weight-fun=lambda-def-invfreqrank --ho-prim-enum=none --ho-unif-level=pragmatic-framework -q \"1|prefer-sos|conjecture-relative-var(1.01,s,f)\" -q \"4|const|conjecture-relative-var(1.05,l,f)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-non-goals|conjecture-relative-var(1.02,l,f)\" -q \"4|prefer-sos|pnrefined(3,2,3,2,2,1.5,2)\" --ho-elim-leibniz=1 --ho-fixpoint-decider=true --ho-pattern-decider=true --ho-solid-decider=true --select=e-selection2 --solve-formulas=true --lambdasup=0 --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=3 --e-max-derived=48 --e-encode-lambdas=lift --presaturate=true --prec-gen-fun=invfreq --e-call-point=0.2 --e-auto=true --sine-trim-implications=true")), (* sh10_e_lift.sh *) + ((1, false, false, 512, mepoN), (format, "mono_native_higher_fool", keep_lamsN, false, "-nc --tptp-def-as-rewrite --mode=ho-pragmatic --max-inferences=4 --ho-max-app-projections=1 --ho-max-elims=0 --ho-max-rigid-imitations=1 --ho-max-identifications=0 --ho-unif-max-depth=2 --boolean-reasoning=simpl-only --ext-rules=off --kbo-weight-fun=lambda-def-const --ho-prim-enum=neg --ho-prim-enum-early-bird=true --tptp-def-as-rewrite --rewrite-before-cnf=true --ho-unif-level=pragmatic-framework --ho-unif-max-depth=1 --sine=50 --sine-tolerance=1.0 --sine-depth-max=3 --sine-depth-min=1 --sine-trim-implications=true -q 
\"4|prefer-sos|orient-lmax(2,1,2,1,1)\" -q \"4|defer-sos|conjecture-relative-var(1,s,f)\" -q \"3|const|default\" -q \"1|prefer-processed|fifo\" --ho-elim-leibniz=1 --ho-fixpoint-decider=true --ho-pattern-decider=true --ho-solid-decider=false --select=e-selection10 --solve-formulas=true --sup-at-vars=false --sup-at-var-headed=false --sup-from-var-headed=true --lazy-cnf=true --lazy-cnf-kind=simp --lazy-cnf-renaming-threshold=4 --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=2 --e-auto=true --e-max-derived=32 --e-encode-lambdas=lift --scan-clause-ac=false --presaturate=true --e-call-point=0.16")), (* sh5_shallow_sine.sh *) + ((1, false, false, 512, mashN), (format, "mono_native_higher_fool", keep_lamsN, false, "-nc --tptp-def-as-rewrite --rewrite-before-cnf=true --mode=ho-competitive --boolean-reasoning=bool-hoist --ext-rules=off --recognize-injectivity=true --ho-unif-level=full-framework -q \"4|prefer-goals|pnrefined(1,1,1,2,2,2,0.5)\" -q \"1|prefer-fo|conjecture-relative-var(1.02,l,f)\" -q \"1|prefer-non-goals|conjecture-relative-var(1,l,f)\" -q \"4|prefer-ho-steps|conjecture-relative-var(1.01,s,f)\" -q \"1|prefer-processed|fifo\" --select=bb+ho-selection --scan-clause-ac=false --kbo-weight-fun=invfreqrank --fluidsup=true --boolean-reasoning=bool-hoist --fluid-log-hoist=true --fluid-hoist=true --ite-axioms=true --lazy-cnf=true --ho-solid-decider=true --ho-fixpoint-decider=true --bool-select=\"sel1(pos_ctx)\" --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=2 --e-auto=true --e-call-point=0.35 --avatar=off --e-max-derived=50")), (* sh5_e_short1.sh *) + ((1, false, false, 1024, meshN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic --tptp-def-as-rewrite --rewrite-before-cnf=true --boolean-reasoning=simpl-only --select=e-selection12 --prec-gen-fun=invfreq_conj --ord=lambda_kbo --ho-unif-level=full-framework --ho-pattern-decider=true --ho-solid-decider=false --ho-fixpoint-decider=true --lazy-cnf=true --lazy-cnf-kind=simp --lazy-cnf-renaming-threshold=1 --sine=100 --sine-depth-min=1 --sine-depth-max=5 --sine-tolerance=1.5 -q \"1|prefer-sos|default\" -q \"8|prefer-processed|conjecture-relative-var(1,l,f)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-lambdas|explore\" -q \"1|prefer-non-goals|explore\" -q \"1|prefer-processed|conjecture-relative-var(1,s,f)\" --kbo-weight-fun=arity0")), (* sh5_32.sh *) + ((1, false, false, 256, meshN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic --boolean-reasoning=simpl-only --ho-unif-max-depth=2 --ho-max-app-projections=0 --ho-max-rigid-imitations=1 --ho-max-elims=0 --ho-max-identifications=1 --max-inferences=3 --ext-rules=off --recognize-injectivity=true --ho-prim-enum=none --ho-choice-inst=true -q \"3|prefer-fo|conjecture-relative-var(1.02,l,f)\" -q \"3|prefer-sos|pnrefined(1,1,1,2,2,2,0.5)\" -q \"2|prefer-ground|orient-lmax(2,1,2,1,1)\" -q \"1|prefer-processed|fifo\" --select=MaxGoalNS --sine=60 --sine-tolerance=1.5 --sine-depth-max=3 --sine-depth-min=1 --prec-gen-fun=invfreqhack --lazy-cnf=true --lazy-cnf-kind=simp --lazy-cnf-renaming-threshold=3 --kbo-weight-fun-from-precedence=true --kbo-weight-fun-from-precedence-rank=5 --trigger-bool-inst=1 --avatar=lazy --tptp-def-as-rewrite --rewrite-before-cnf=true --sup-from-var-headed=false --sup-at-vars=false")), (* sh5_sh4.sh *) + ((1, false, false, 512, mashN), (format, "mono_native_higher_fool", keep_lamsN, false, "--tptp-def-as-rewrite --rewrite-before-cnf=true --mode=lambda-free-intensional 
--check-lambda-free=false --boolean-reasoning=simpl-only --post-cnf-lambda-lifting=true --ext-rules=off --ho-prim-enum=none --recognize-injectivity=true --no-max-vars --select=e-selection8 --prec-gen-fun=invfreq --kbo-weight-fun=invfreqrank --kbo-const-weight=2 --ord=lambdafree_kbo --ignore-orphans=true -q \"1|prefer-sos|conjecture-relative-struct(1.5,3.5,2,3)\" -q \"1|const|conj_pref_weight(0.5,100,0.2,0.2,4)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-non-goals|conjecture-relative-e(0.3,0.25,100,100,100,100,1.5,1.5,1)\" -q \"1|prefer-sos|pnrefined(3,2,3,2,2,1.5,2)\" --lazy-cnf=true --lazy-cnf-renaming-threshold=2")), (* sh5_lifting2.sh *) + ((1, false, false, 512, meshN), (format, "mono_native_higher_fool", keep_lamsN, false, "-nc --tptp-def-as-rewrite --rewrite-before-cnf=true --tptp-rewrite-formulas-only=true --mode=ho-pragmatic --boolean-reasoning=simpl-only --ext-rules=ext-family --ext-rules-max-depth=1 --ho-prim-enum=neg --ho-prim-max=1 --recognize-injectivity=true --ho-elim-leibniz=1 --ho-unif-level=pragmatic-framework --no-max-vars -q \"1|prefer-sos|conjecture-relative-var(1.02,l,f)\" -q \"4|const|conjecture-relative-var(1,s,f)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-non-goals|conjecture-relative-var(1,l,f)\" -q \"4|prefer-sos|pnrefined(2,1,1,1,2,2,2)\" --select=e-selection7 --ho-choice-inst=true --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=2 --e-auto=true --sine=50 --sine-tolerance=10 --sine-depth-max=5 --sine-depth-min=1 --e-max-derived=64 --e-encode-lambdas=lift --scan-clause-ac=false --prec-gen-fun=invfreq_conj --ord=lambda_kbo --solid-subsumption=true --ignore-orphans=true --e-call-point=0.2")), (* sh5_noforms.sh *) + ((1, false, false, 1024, meshN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic --tptp-def-as-rewrite --rewrite-before-cnf=true --max-inferences=4 --ho-unif-max-depth=3 --ho-max-elims=0 --ho-max-app-projections=1 --ho-max-identifications=0 --ho-max-rigid-imitations=2 --ho-unif-level=pragmatic-framework --boolean-reasoning=simpl-only --kbo-weight-fun=freqrank --ext-rules=ext-family --ext-rules-max-depth=2 --ho-prim-enum=eq --ho-prim-max=2 --interpret-bool-funs=false -q \"2|prefer-goals|default\" -q \"8|prefer-sos|conjecture-relative-var(1,l,f)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-lambdas|explore\" -q \"1|prefer-non-goals|explore\" -q \"1|prefer-processed|conjecture-relative-var(1,s,f)\" --recognize-injectivity=true --ho-selection-restriction=none --select=ho-selection2 --solve-formulas=true")), (* sh8_old_zip1.sh *) + ((1, false, false, 256, mashN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic --boolean-reasoning=simpl-only --tptp-def-as-rewrite --rewrite-before-cnf=true --kbo-weight-fun=freqrank -q \"1|prefer-sos|default\" -q \"1|prefer-goals|conjecture-relative-var(1.03,s,f)\" -q \"1|prefer-non-goals|default\" -q \"5|const|conjecture-relative-var(1.01,l,f)\" -q \"1|prefer-processed|fifo\" -q \"1|const|conjecture-relative-var(1.05,l,f)\" -q \"1|defer-sos|conjecture-relative-var(1.1,s,f)\" --select=e-selection9 --recognize-injectivity=true --ho-choice-inst=false --ho-selection-restriction=none --sine=50 --sine-tolerance=3 --sine-depth-max=3 --sine-depth-min=1 --eq-encode=true --avatar=eager --sine-trim-implications=true"))], (* sh5_sh.eqenc.sh *) good_max_mono_iters = default_max_mono_iters, good_max_new_mono_instances = default_max_new_mono_instances} end @@ -515,30 +515,30 @@ (Crashed, "Unrecoverable Segmentation Fault")] @ known_szs_status_failures) 
     Hypothesis false
-    (K ((1000 (* infinity *), 50, meshN), (CNF_UEQ, type_enc, combsN, false, "")) (* FUDGE *))
+    (K ((1000 (* infinity *), false, false, 50, meshN), (CNF_UEQ, type_enc, combsN, false, "")) (* FUDGE *))
 
 val remote_agsyhol =
   remotify_atp agsyhol "agsyHOL" ["1.0", "1"]
-    (K ((1000 (* infinity *), 60, meshN), (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false, "")) (* FUDGE *))
+    (K ((1000 (* infinity *), false, false, 60, meshN), (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false, "")) (* FUDGE *))
 
 val remote_alt_ergo =
   remotify_atp alt_ergo "Alt-Ergo" ["0.95.2"]
-    (K ((1000 (* infinity *), 250, meshN), (TF1, "poly_native", keep_lamsN, false, "")) (* FUDGE *))
+    (K ((1000 (* infinity *), false, false, 250, meshN), (TF1, "poly_native", keep_lamsN, false, "")) (* FUDGE *))
 
 val remote_e =
   remotify_atp e "E" ["2.0", "1.9.1", "1.8"]
-    (K ((1000 (* infinity *), 750, meshN), (TF0, "mono_native", combsN, false, "")) (* FUDGE *))
+    (K ((1000 (* infinity *), false, false, 750, meshN), (TF0, "mono_native", combsN, false, "")) (* FUDGE *))
 
 val remote_iprover =
   remotify_atp iprover "iProver" ["0.99"]
-    (K ((1000 (* infinity *), 150, meshN), (FOF, "mono_guards??", liftingN, false, "")) (* FUDGE *))
+    (K ((1000 (* infinity *), false, false, 150, meshN), (FOF, "mono_guards??", liftingN, false, "")) (* FUDGE *))
 
 val remote_leo2 =
   remotify_atp leo2 "LEO-II" ["1.5.0", "1.4", "1.3", "1.2", "1"]
-    (K ((1000 (* infinity *), 40, meshN), (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", liftingN, false, "")) (* FUDGE *))
+    (K ((1000 (* infinity *), false, false, 40, meshN), (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", liftingN, false, "")) (* FUDGE *))
 
 val remote_leo3 =
   remotify_atp leo3 "Leo-III" ["1.1"]
-    (K ((1000 (* infinity *), 150, meshN), (THF (Polymorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "poly_native_higher", keep_lamsN, false, "")) (* FUDGE *))
+    (K ((1000 (* infinity *), false, false, 150, meshN), (THF (Polymorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "poly_native_higher", keep_lamsN, false, "")) (* FUDGE *))
 
 val remote_waldmeister = gen_remote_waldmeister waldmeisterN "raw_mono_tags??"
 
 val remote_zipperposition =
   remotify_atp zipperposition "Zipperpin" ["2.1", "2.0"]
-    (K ((1000 (* infinity *), 512, meshN), (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false, "")) (* FUDGE *))
+    (K ((1000 (* infinity *), false, false, 512, meshN), (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false, "")) (* FUDGE *))
 
 
 (* Dummy prover *)
 
@@ -551,7 +551,7 @@
      prem_role = prem_role,
      generate_isabelle_info = false,
      good_slices =
-       K [((2, 256, "mepo"), (format, type_enc,
+       K [((2, false, false, 256, "mepo"), (format, type_enc,
          if is_format_higher_order format then keep_lamsN else combsN, uncurried_aliases, ""))],
      good_max_mono_iters = default_max_mono_iters,
      good_max_new_mono_instances = default_max_new_mono_instances}
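
Illustration (not part of the changeset): across all prover configurations above, the basic slice changes from a triple to a quintuple, gaining two Boolean fields. The sketch below is plain Standard ML; the positional names follow the pattern match in sledgehammer_prover_minimize.ML further down, and the type alias basic_slice_sketch and the example values are invented for illustration only.

  (* Presumed reading of the extended basic-slice tuple; not an official API. *)
  type basic_slice_sketch =
    int        (* slice size *)
    * bool     (* abduce *)
    * bool     (* check_consistent *)
    * int      (* maximum number of facts *)
    * string   (* fact filter, e.g. "mesh", "mash", "mepo" *)

  val example : basic_slice_sketch = (2, false, false, 512, "mesh")
  val example_max_facts = #4 example   (* = 512; the old triple exposed this as #2 *)

This positional shift is also why sledgehammer_tactics.ML at the end of this changeset switches from #2 to #4 when reading the default maximum number of facts.
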
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_commands.ML Wed Feb 15 12:46:12 2023 +0000
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_commands.ML Wed Feb 15 12:48:53 2023 +0000
@@ -53,6 +53,8 @@
    ("verbose", "false"),
    ("overlord", "false"),
    ("spy", "false"),
+   ("abduce", "smart"),
+   ("check_consistent", "smart"),
    ("type_enc", "smart"),
    ("strict", "false"),
    ("lam_trans", "smart"),
@@ -84,6 +86,8 @@
    ("quiet", "verbose"),
    ("no_overlord", "overlord"),
    ("dont_spy", "spy"),
+   ("dont_abduce", "abduce"),
+   ("dont_check_consistent", "check_consistent"),
    ("non_strict", "strict"),
    ("no_uncurried_aliases", "uncurried_aliases"),
    ("dont_learn", "learn"),
@@ -228,6 +232,16 @@
     val overlord = lookup_bool "overlord"
     val spy = getenv "SLEDGEHAMMER_SPY" = "yes" orelse lookup_bool "spy"
     val provers = lookup_string "provers" |> space_explode " " |> mode = Auto_Try ? single o hd
+    val check_consistent =
+      if mode = Auto_Try then
+        SOME false
+      else
+        lookup_option lookup_bool "check_consistent"
+    val abduce =
+      if mode = Auto_Try then
+        SOME false
+      else
+        lookup_option lookup_bool "abduce"
     val type_enc =
       if mode = Auto_Try then
        NONE
@@ -261,13 +275,14 @@
     val expect = lookup_string "expect"
   in
     {debug = debug, verbose = verbose, overlord = overlord, spy = spy, provers = provers,
-     type_enc = type_enc, strict = strict, lam_trans = lam_trans,
-     uncurried_aliases = uncurried_aliases, learn = learn, fact_filter = fact_filter,
-     induction_rules = induction_rules, max_facts = max_facts, fact_thresholds = fact_thresholds,
-     max_mono_iters = max_mono_iters, max_new_mono_instances = max_new_mono_instances,
-     max_proofs = max_proofs, isar_proofs = isar_proofs, compress = compress, try0 = try0,
-     smt_proofs = smt_proofs, minimize = minimize, slices = slices, timeout = timeout,
-     preplay_timeout = preplay_timeout, expect = expect}
+     abduce = abduce, check_consistent = check_consistent, type_enc = type_enc, strict = strict,
+     lam_trans = lam_trans, uncurried_aliases = uncurried_aliases, learn = learn,
+     fact_filter = fact_filter, induction_rules = induction_rules, max_facts = max_facts,
+     fact_thresholds = fact_thresholds, max_mono_iters = max_mono_iters,
+     max_new_mono_instances = max_new_mono_instances, max_proofs = max_proofs,
+     isar_proofs = isar_proofs, compress = compress, try0 = try0, smt_proofs = smt_proofs,
+     minimize = minimize, slices = slices, timeout = timeout, preplay_timeout = preplay_timeout,
+     expect = expect}
   end
 
 fun get_params mode = extract_params mode o default_raw_params
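
Illustration (not part of the changeset): the two new user options default to "smart", get the usual negative aliases (dont_abduce, dont_check_consistent), and are forced to SOME false by extract_params in Auto Try mode. The Standard ML sketch below only models the presumed tri-state semantics; tri_state and effective_option are invented names, and reading "smart" as NONE is an assumption based on how the other smart-valued options are handled, not a copy of the real lookup_option.

  (* Sketch: "smart" leaves the decision to Sledgehammer (NONE); anything else is a Boolean. *)
  fun tri_state "smart" = NONE
    | tri_state "true" = SOME true
    | tri_state "false" = SOME false
    | tri_state s = raise Fail ("unknown Boolean value: " ^ s)

  (* As in extract_params above, Auto Try forces the option off. *)
  fun effective_option auto_try raw =
    if auto_try then SOME false else tri_state raw

  val _ = effective_option false "smart"   (* NONE *)
  val _ = effective_option true "true"     (* SOME false *)
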
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_fact.ML Wed Feb 15 12:46:12 2023 +0000
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_fact.ML Wed Feb 15 12:48:53 2023 +0000
@@ -18,6 +18,7 @@
      del : (Facts.ref * Token.src list) list,
      only : bool}
 
+  val sledgehammer_goal_as_fact : string
   val no_fact_override : fact_override
 
   val fact_of_ref : Proof.context -> Keyword.keywords -> thm list -> status Termtab.table ->
@@ -64,6 +65,8 @@
 val max_facts_for_complex_check = 25000
 val max_simps_for_clasimpset = 10000
 
+val sledgehammer_goal_as_fact = "Sledgehammer.goal_as_fact"
+
 val no_fact_override = {add = [], del = [], only = false}
 
 fun needs_quoting keywords s =
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_mash.ML Wed Feb 15 12:46:12 2023 +0000
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_mash.ML Wed Feb 15 12:48:53 2023 +0000
@@ -868,7 +868,7 @@
     let
       val problem =
         {comment = "Goal: " ^ goal_name, state = Proof.init ctxt, goal = goal, subgoal = 1,
-         subgoal_count = 1, factss = [("", facts)], found_proof = K ()}
+         subgoal_count = 1, factss = [("", facts)], found_something = K ()}
       val slice = hd (get_slices ctxt prover)
     in
       get_minimizing_prover ctxt MaSh (K ()) prover params problem slice
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_prover.ML Wed Feb 15 12:46:12 2023 +0000
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_prover.ML Wed Feb 15 12:48:53 2023 +0000
@@ -29,6 +29,8 @@
      overlord : bool,
      spy : bool,
      provers : string list,
+     abduce : bool option,
+     check_consistent : bool option,
      type_enc : string option,
      strict : bool,
      lam_trans : string option,
@@ -61,7 +63,7 @@
      subgoal : int,
      subgoal_count : int,
      factss : (string * fact list) list,
-     found_proof : string -> unit}
+     found_something : string -> unit}
 
   datatype prover_slice_extra =
     ATP_Slice of atp_slice
@@ -134,6 +136,8 @@
    overlord : bool,
    spy : bool,
    provers : string list,
+   abduce : bool option,
+   check_consistent : bool option,
    type_enc : string option,
    strict : bool,
    lam_trans : string option,
@@ -160,8 +164,8 @@
     YXML.content_of (ML_Pretty.string_of_polyml (ML_system_pretty (params, 100)))
 
 fun maybe_filter_out_induction_rules induction_rules : fact list -> fact list =
-  induction_rules = Exclude ?
-  filter_out (fn ((_, (_, ATP_Problem_Generate.Induction)), _) => true | _ => false)
+  induction_rules = Exclude ?
+  filter_out (fn ((_, (_, ATP_Problem_Generate.Induction)), _) => true | _ => false)
 
 fun slice_timeout slice_size slices timeout =
   let
@@ -178,7 +182,7 @@
    subgoal : int,
    subgoal_count : int,
    factss : (string * fact list) list,
-   found_proof : string -> unit}
+   found_something : string -> unit}
 
 datatype prover_slice_extra =
   ATP_Slice of atp_slice
@@ -234,9 +238,14 @@
       SOME facts => facts
     | NONE => snd (hd factss))
 
-fun facts_of_basic_slice (_, num_facts, fact_filter) factss =
-  facts_of_filter fact_filter factss
-  |> take num_facts
+fun facts_of_basic_slice (_, _, _, num_facts, fact_filter) factss =
+  let
+    val facts = facts_of_filter fact_filter factss
+    val (goal_facts, nongoal_facts) =
+      List.partition (equal sledgehammer_goal_as_fact o fst o fst) facts
+  in
+    goal_facts @ take num_facts nongoal_facts
+  end
 
 fun is_fact_chained ((_, (sc, _)), _) = sc = Chained
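
Illustration (not part of the changeset): the functional change above is in facts_of_basic_slice, which now always keeps facts named Sledgehammer.goal_as_fact (presumably the goal injected as a fact for the new consistency-checking mode) and only applies the slice's fact limit to the remaining facts. Below is a self-contained Standard ML sketch with facts simplified to (name, content) pairs; facts_of_basic_slice_sketch is an invented name, and List.take with Int.min stands in for Isabelle's more forgiving take.

  val sledgehammer_goal_as_fact = "Sledgehammer.goal_as_fact"

  (* Keep every goal fact, then take at most max_facts of the other facts. *)
  fun facts_of_basic_slice_sketch max_facts (facts : (string * string) list) =
    let
      val (goal_facts, nongoal_facts) =
        List.partition (fn (name, _) => name = sledgehammer_goal_as_fact) facts
    in
      goal_facts @ List.take (nongoal_facts, Int.min (max_facts, length nongoal_facts))
    end

  val _ =
    facts_of_basic_slice_sketch 1
      [(sledgehammer_goal_as_fact, "<goal>"), ("foo", "..."), ("bar", "...")]
  (* = [("Sledgehammer.goal_as_fact", "<goal>"), ("foo", "...")] *)
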
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_prover_atp.ML Wed Feb 15 12:46:12 2023 +0000
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_prover_atp.ML Wed Feb 15 12:48:53 2023 +0000
@@ -104,10 +104,10 @@
 fun run_atp mode name
     ({debug, verbose, overlord, strict, max_mono_iters, max_new_mono_instances, isar_proofs,
       compress, try0, smt_proofs, minimize, slices, timeout, preplay_timeout, spy, ...} : params)
-    ({comment, state, goal, subgoal, subgoal_count, factss, found_proof} : prover_problem)
+    ({comment, state, goal, subgoal, subgoal_count, factss, found_something} : prover_problem)
     slice =
   let
-    val (basic_slice as (slice_size, _, _),
+    val (basic_slice as (slice_size, _, _, _, _),
         ATP_Slice (good_format, good_type_enc, good_lam_trans, good_uncurried_aliases, extra)) =
       slice
     val facts = facts_of_basic_slice basic_slice factss
@@ -247,7 +247,7 @@
           in
             if debug then (warning (string_of_atp_failure failure); NONE) else SOME failure
           end
-        | NONE => (found_proof name; NONE))
+        | NONE => (found_something name; NONE))
       | _ => outcome)
   in
     (atp_problem_data,
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_prover_minimize.ML Wed Feb 15 12:46:12 2023 +0000
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_prover_minimize.ML Wed Feb 15 12:48:53 2023 +0000
@@ -83,7 +83,8 @@
 fun test_facts ({debug, verbose, overlord, spy, provers, max_mono_iters, max_new_mono_instances,
       type_enc, strict, lam_trans, uncurried_aliases, isar_proofs, compress, try0, smt_proofs,
       minimize, preplay_timeout, induction_rules, ...} : params)
-    slice silent (prover : prover) timeout i n state goal facts =
+    (slice as ((_, abduce, check_consistent, _, _), _)) silent (prover : prover) timeout i n state
+    goal facts =
   let
     val _ = print silent (fn () => "Testing " ^ n_facts (map fst facts) ^
       (if verbose then " (timeout: " ^ string_of_time timeout ^ ")" else "") ^ "...")
@@ -91,9 +92,9 @@
     val facts = facts |> maps (fn (n, ths) => map (pair n) ths)
     val params =
       {debug = debug, verbose = verbose, overlord = overlord, spy = spy, provers = provers,
-       type_enc = type_enc, strict = strict, lam_trans = lam_trans,
-       uncurried_aliases = uncurried_aliases, learn = false, fact_filter = NONE,
-       induction_rules = induction_rules, max_facts = SOME (length facts),
+       type_enc = type_enc, abduce = SOME abduce, check_consistent = SOME check_consistent,
+       strict = strict, lam_trans = lam_trans, uncurried_aliases = uncurried_aliases, learn = false,
+       fact_filter = NONE, induction_rules = induction_rules, max_facts = SOME (length facts),
        fact_thresholds = (1.01, 1.01), max_mono_iters = max_mono_iters,
       max_new_mono_instances = max_new_mono_instances, max_proofs = 1,
       isar_proofs = isar_proofs, compress = compress, try0 = try0, smt_proofs = smt_proofs,
@@ -101,7 +102,7 @@
        expect = ""}
     val problem =
       {comment = "", state = state, goal = goal, subgoal = i, subgoal_count = n,
-       factss = [("", facts)], found_proof = K ()}
+       factss = [("", facts)], found_something = K ()}
     val result0 as {outcome = outcome0, used_facts, used_from, preferred_methss, run_time,
       message} = prover params problem slice
@@ -117,7 +118,7 @@
         (case outcome of
           SOME failure => string_of_atp_failure failure
         | NONE =>
-          "Found proof" ^
+          "Found " ^ (if check_consistent then "inconsistency" else "proof") ^
          (if length used_facts = length facts then "" else " with " ^ n_facts used_facts) ^
          " (" ^ string_of_time run_time ^ ")"));
     result
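
Illustration (not part of the changeset): with check_consistent threaded through, the minimizer's report distinguishes the two kinds of findings. A trivial Standard ML sketch of the message prefix, with found_message as an invented name and the fact-count and timing suffixes omitted.

  fun found_message check_consistent =
    "Found " ^ (if check_consistent then "inconsistency" else "proof")

  val _ = found_message false   (* "Found proof" *)
  val _ = found_message true    (* "Found inconsistency" *)
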
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_prover_smt.ML Wed Feb 15 12:46:12 2023 +0000
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_prover_smt.ML Wed Feb 15 12:48:53 2023 +0000
@@ -120,10 +120,10 @@
 fun run_smt_solver mode name
     (params as {debug, verbose, isar_proofs, compress, try0, smt_proofs, minimize,
       preplay_timeout, ...})
-    ({state, goal, subgoal, subgoal_count, factss, found_proof, ...} : prover_problem)
+    ({state, goal, subgoal, subgoal_count, factss, found_something, ...} : prover_problem)
     slice =
   let
-    val (basic_slice as (slice_size, _, _), SMT_Slice options) = slice
+    val (basic_slice as (slice_size, _, _, _, _), SMT_Slice options) = slice
     val facts = facts_of_basic_slice basic_slice factss
 
     val ctxt = Proof.context_of state
@@ -139,7 +139,7 @@
       (case outcome of
         NONE =>
          let
-           val _ = found_proof name;
+           val _ = found_something name;
            val preferred =
              if smt_proofs then
                SMT_Method (if name = "z3" then SMT_Z3 else SMT_Verit "default")
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_tactics.ML Wed Feb 15 12:46:12 2023 +0000
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_tactics.ML Wed Feb 15 12:48:53 2023 +0000
@@ -33,7 +33,7 @@
     val params as {provers, induction_rules, max_facts, ...} = default_params thy override_params
     val name = hd provers
     val prover = get_prover ctxt mode name
-    val default_max_facts = #2 (fst (hd (get_slices ctxt name)))
+    val default_max_facts = #4 (fst (hd (get_slices ctxt name)))
     val (_, hyp_ts, concl_t) = ATP_Util.strip_subgoal goal i ctxt
     val keywords = Thy_Header.get_keywords' ctxt
     val css_table = clasimpset_rule_table_of ctxt
@@ -45,7 +45,7 @@
       |> hd |> snd
     val problem =
       {comment = "", state = Proof.init ctxt, goal = goal, subgoal = i, subgoal_count = n,
-       factss = [("", facts)], found_proof = K ()}
+       factss = [("", facts)], found_something = K ()}
     val slice = hd (get_slices ctxt name)
   in
     (case prover params problem slice of