| author | wenzelm | 
| date | Wed, 12 Jun 2024 21:59:44 +0200 |
| changeset 80362 | d395b7e14102 | 
| parent 74561 | 8e6c973003c8 | 
| child 80636 | 4041e7c8059d | 
| permissions | -rw-r--r-- | 
| 55596 | 1 | (* Title: HOL/TPTP/TPTP_Parser/tptp_reconstruct.ML | 
| 2 | Author: Nik Sultana, Cambridge University Computer Laboratory | |
| 3 | ||
| 4 | Reconstructs TPTP proofs in Isabelle/HOL. | |
| 5 | Specialised to work with proofs produced by LEO-II. | |
| 6 | ||
| 7 | TODO | |
| 8 | - Proof transformation to remove "copy" steps, and perhaps other dud inferences. | |
| 9 | *) | |
| 10 | ||
| 11 | signature TPTP_RECONSTRUCT = | |
| 12 | sig | |
| 13 | (* Interface used by TPTP_Reconstruct.thy, to define LEO-II proof reconstruction. *) | |
| 14 | ||
| 15 | datatype formula_kind = | |
| 16 | Conjunctive of bool option | |
| 17 | | Disjunctive of bool option | |
| 18 | | Biimplicational of bool option | |
| 19 | | Negative of bool option | |
| 20 | | Existential of bool option * typ | |
| 21 | | Universal of bool option * typ | |
| 22 | | Equational of bool option * typ | |
| 23 | | Atomic of bool option | |
| 24 | | Implicational of bool option | |
| 25 | type formula_meaning = | |
| 26 | (string * | |
| 27 |      {role : TPTP_Syntax.role,
 | |
| 28 | fmla : term, | |
| 29 | source_inf_opt : TPTP_Proof.source_info option}) | |
| 30 | type proof_annotation = | |
| 31 |     {problem_name : TPTP_Problem_Name.problem_name,
 | |
| 32 | skolem_defs : ((*skolem const name*)string * Binding.binding) list, | |
| 33 | defs : ((*node name*)string * Binding.binding) list, | |
| 34 | axs : ((*node name*)string * Binding.binding) list, | |
| 35 | (*info for each node (for all lines in the TPTP proof)*) | |
| 36 | meta : formula_meaning list} | |
| 37 | type rule_info = | |
| 38 |     {inference_name : string, (*name of calculus rule*)
 | |
| 39 | inference_fmla : term, (*the inference as a term*) | |
| 40 | parents : string list} | |
| 41 | ||
| 42 | exception UNPOLARISED of term | |
| 43 | ||
| 44 | val remove_polarity : bool -> term -> term * bool | |
| 45 | val interpret_bindings : | |
| 46 | TPTP_Problem_Name.problem_name -> theory -> TPTP_Proof.parent_detail list -> (string * term) list -> (string * term) list | |
| 47 | val diff_and_instantiate : Proof.context -> thm -> term -> term -> thm (*FIXME from library*) | |
| 48 | val strip_top_all_vars : (string * typ) list -> term -> (string * typ) list * term | |
| 49 | val strip_top_All_vars : term -> (string * typ) list * term | |
| 50 | val strip_top_All_var : term -> (string * typ) * term | |
| 51 | val new_consts_between : term -> term -> term list | |
| 52 | val get_pannot_of_prob : theory -> TPTP_Problem_Name.problem_name -> proof_annotation | |
| 53 | val inference_at_node : 'a -> TPTP_Problem_Name.problem_name -> formula_meaning list -> string -> rule_info option | |
| 54 |   val node_info : (string * 'a) list -> ('a -> 'b) -> string -> 'b
 | |
| 55 | ||
| 56 | type step_id = string | |
| 57 | datatype rolling_stock = | |
| 58 | Step of step_id | |
| 59 | | Assumed | |
| 60 | | Unconjoin | |
| 61 | | Split of step_id (*where split occurs*) * | |
| 62 | step_id (*where split ends*) * | |
| 63 | step_id list (*children of the split*) | |
| 64 | | Synth_step of step_id (*A step which doesn't necessarily appear in | |
| 65 | the original proof, or which has been modified slightly for better | |
| 66 | handling by Isabelle*) | |
| 67 | | Annotated_step of step_id * string (*Same interpretation as | |
| 68 | "Step", except that additional information is attached. This is | |
| 69 | currently used for debugging: Steps are mapped to Annotated_steps | |
| 70 | and their rule names are included as strings*) | |
| 71 | | Definition of step_id (*Mirrors TPTP role*) | |
| 72 | | Axiom of step_id (*Mirrors TPTP role*) | |
| 73 | | Caboose | |
| 74 | ||
| 75 | ||
| 76 | (* Interface for using the proof reconstruction. *) | |
| 77 | ||
| 78 | val import_thm : bool -> Path.T list -> Path.T -> (proof_annotation -> theory -> proof_annotation * theory) -> theory -> theory | |
| 79 | val get_fmlas_of_prob : theory -> TPTP_Problem_Name.problem_name -> TPTP_Interpret.tptp_formula_meaning list | |
| 80 |   val structure_fmla_meaning : 'a * 'b * 'c * 'd -> 'a * {fmla: 'c, role: 'b, source_inf_opt: 'd}
 | |
| 81 | val make_skeleton : Proof.context -> proof_annotation -> rolling_stock list | |
| 82 | val naive_reconstruct_tacs : | |
| 83 | (Proof.context -> TPTP_Problem_Name.problem_name -> step_id -> thm) -> | |
| 84 | TPTP_Problem_Name.problem_name -> Proof.context -> (rolling_stock * term option * (thm * tactic) option) list | |
| 85 | val naive_reconstruct_tac : | |
| 86 | Proof.context -> (Proof.context -> TPTP_Problem_Name.problem_name -> step_id -> thm) -> TPTP_Problem_Name.problem_name -> tactic | |
| 87 | val reconstruct : Proof.context -> (TPTP_Problem_Name.problem_name -> tactic) -> TPTP_Problem_Name.problem_name -> thm | |
| 88 | end | |
| 89 | ||
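(*Illustrative usage sketch (not part of the original file): given a
  prover-specific inference interpreter "leo2_style_tac" of type
    Proof.context -> TPTP_Problem_Name.problem_name -> step_id -> thm
  (the name is hypothetical), a whole imported proof could be reconstructed
  roughly as follows, using only functions from the signature above:

    fun reconstruct_prob ctxt prob_name =
      TPTP_Reconstruct.reconstruct ctxt
        (TPTP_Reconstruct.naive_reconstruct_tac ctxt leo2_style_tac)
        prob_name
*)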
| 90 | structure TPTP_Reconstruct : TPTP_RECONSTRUCT = | |
| 91 | struct | |
| 92 | ||
| 93 | open TPTP_Reconstruct_Library | |
| 94 | open TPTP_Syntax | |
| 95 | ||
| 96 | (*FIXME move to more general struct*) | |
| 97 | (*Extract the formulas of an imported TPTP problem -- these formulas | |
| 98 | may make up a proof*) | |
| 99 | fun get_fmlas_of_prob thy prob_name : TPTP_Interpret.tptp_formula_meaning list = | |
| 100 | AList.lookup (op =) (TPTP_Interpret.get_manifests thy) prob_name | |
| 101 | |> the |> #3 (*get formulas*); | |
| 102 | ||
| 103 | ||
| 104 | (** General **) | |
| 105 | ||
| 106 | (* Proof annotations *) | |
| 107 | ||
| 108 | (*FIXME modify TPTP_Interpret.tptp_formula_meaning into this type*) | |
| 109 | type formula_meaning = | |
| 110 | (string * | |
| 111 |    {role : TPTP_Syntax.role,
 | |
| 112 | fmla : term, | |
| 113 | source_inf_opt : TPTP_Proof.source_info option}) | |
| 114 | ||
| 115 | fun apply_to_parent_info f | |
| 116 |    (n, {role, fmla, source_inf_opt}) =
 | |
| 117 | let | |
| 118 | val source_inf_opt' = | |
| 119 | case source_inf_opt of | |
| 120 | NONE => NONE | |
| 121 | | SOME (TPTP_Proof.Inference (inf_name, sinfos, pinfos)) => | |
| 122 | SOME (TPTP_Proof.Inference (inf_name, sinfos, f pinfos)) | |
| 123 | in | |
| 124 |    (n, {role = role, fmla = fmla, source_inf_opt = source_inf_opt'})
 | |
| 125 | end | |
| 126 | ||
| 127 | fun structure_fmla_meaning (s, r, t, info) = | |
| 128 |   (s, {role = r, fmla = t, source_inf_opt = info})
 | |
| 129 | ||
| 130 | type proof_annotation = | |
| 131 |   {problem_name : TPTP_Problem_Name.problem_name,
 | |
| 132 | skolem_defs : ((*skolem const name*)string * Binding.binding) list, | |
| 133 | defs : ((*node name*)string * Binding.binding) list, | |
| 134 | axs : ((*node name*)string * Binding.binding) list, | |
| 135 | (*info for each node (for all lines in the TPTP proof)*) | |
| 136 | meta : formula_meaning list} | |
| 137 | ||
| 138 | fun empty_pannot prob_name = | |
| 139 |   {problem_name = prob_name,
 | |
| 140 | skolem_defs = [], | |
| 141 | defs = [], | |
| 142 | axs = [], | |
| 143 | meta = []} | |
| 144 | ||
| 145 | ||
| 146 | (* Storage of proof data *) | |
| 147 | ||
| 148 | exception MANIFEST of TPTP_Problem_Name.problem_name * string (*FIXME move to TPTP_Interpret?*) | |
| 149 | ||
| 150 | type manifest = TPTP_Problem_Name.problem_name * proof_annotation | |
| 151 | ||
| 152 | (*manifest equality simply depends on problem name*) | |
| 153 | fun manifest_eq ((prob_name1, _), (prob_name2, _)) = prob_name1 = prob_name2 | |
| 154 | ||
| 155 | structure TPTP_Reconstruction_Data = Theory_Data | |
| 156 | ( | |
| 157 | type T = manifest list | |
| 158 | val empty = [] | |
| 159 | fun merge data : T = Library.merge manifest_eq data | |
| 160 | ) | |
| 161 | val get_manifests : theory -> manifest list = TPTP_Reconstruction_Data.get | |
| 162 | ||
| 163 | fun update_manifest prob_name pannot thy = | |
| 164 | let | |
| 165 | val idx = | |
| 166 | find_index | |
| 167 | (fn (n, _) => n = prob_name) | |
| 168 | (get_manifests thy) | |
| 169 | val transf = (fn _ => | |
| 170 | (prob_name, pannot)) | |
| 171 | in | |
| 172 | TPTP_Reconstruction_Data.map | |
| 173 | (nth_map idx transf) | |
| 174 | thy | |
| 175 | end | |
| 176 | ||
| 177 | (*similar to get_fmlas_of_prob but for proofs*) | |
| 178 | fun get_pannot_of_prob thy prob_name : proof_annotation = | |
| 179 | case AList.lookup (op =) (get_manifests thy) prob_name of | |
| 180 | SOME pa => pa | |
| 181 | | NONE => raise (MANIFEST (prob_name, "Could not find proof annotation")) | |
| 182 | ||
| 183 | ||
| 184 | (* Constants *) | |
| 185 | ||
| 186 | (*Prefix used for naming inferences which were added during proof | |
| 187 | transformation. (e.g., this is used to name "bind"-inference nodes | |
| 188 | described below)*) | |
| 189 | val inode_prefixK = "inode" | |
| 190 | ||
| 191 | (*New inference rule name, which is added to indicate that some | |
| 192 | variable has been instantiated. Additional proof metadata will | |
| 193 | indicate which variable, and how it was instantiated*) | |
| 194 | val bindK = "bind" | |
| 195 | ||
| 196 | (*New inference rule name, which is added to indicate that some | |
| 197 | (validity-preserving) preprocessing has been done to a (singleton) | |
| 198 | clause prior to it being split.*) | |
| 199 | val split_preprocessingK = "split_preprocessing" | |
| 200 | ||
| 201 | ||
| 202 | (* Storage of internal values *) | |
| 203 | ||
| 204 | type tptp_reconstruction_state = {next_int : int}
 | |
| 205 | structure TPTP_Reconstruction_Internal_Data = Theory_Data | |
| 206 | ( | |
| 207 | type T = tptp_reconstruction_state | |
| 208 |   val empty = {next_int = 0}
 | |
| 209 | fun merge data : T = snd data | |
| 210 | ) | |
| 211 | ||
| 212 | (*increment internal counter, and obtain the current next value*) | |
| 213 | fun get_next_int thy : int * theory = | |
| 214 | let | |
| 215 | val state = TPTP_Reconstruction_Internal_Data.get thy | |
| 216 |     val state' = {next_int = 1 + #next_int state}
 | |
| 217 | in | |
| 218 | (#next_int state, | |
| 219 | TPTP_Reconstruction_Internal_Data.put state' thy) | |
| 220 | end | |
| 221 | ||
| 222 | (*FIXME in some applications (e.g. where the name is used for an | |
| 223 | inference node) need to check that the name is fresh, to avoid | |
| 224 | collisions with other bits of the proof*) | |
| 225 | val get_next_name = | |
| 226 | get_next_int | |
| 227 | #> apfst (fn i => inode_prefixK ^ Int.toString i) | |
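(*Illustrative sketch (not from the original file): threading the counter
  through a theory yields the names "inode0", "inode1", ..., since each name
  is formed as inode_prefixK ^ Int.toString i, e.g.

    val (n0, thy')  = get_next_name thy   (*n0 = "inode0" on a fresh theory*)
    val (n1, thy'') = get_next_name thy'  (*n1 = "inode1"*)
*)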
| 228 | ||
| 229 | ||
| 230 | (* Building the index *) | |
| 231 | ||
| 232 | (*thrown when we're expecting a TPTP_Proof.Bind annotation but find something else*) | |
| 233 | exception NON_BINDING | |
| 234 | (*given a list of pairs consisting of a variable name and | |
| 235 | TPTP formula, returns the list consisting of the original | |
| 236 | variable name and the interpreted HOL formula. Needs the | |
| 237 | problem name to ensure use of correct interpretations for | |
| 238 | constants and types.*) | |
| 239 | fun interpret_bindings (prob_name : TPTP_Problem_Name.problem_name) thy bindings acc = | |
| 240 | if null bindings then acc | |
| 241 | else | |
| 242 | case hd bindings of | |
| 243 | TPTP_Proof.Bind (v, fmla) => | |
| 244 | let | |
| 245 | val (type_map, const_map) = | |
| 246 | case AList.lookup (op =) (TPTP_Interpret.get_manifests thy) prob_name of | |
| 247 | NONE => raise (MANIFEST (prob_name, "Problem details not found in interpretation manifest")) | |
| 248 | | SOME (type_map, const_map, _) => (type_map, const_map) | |
| 249 | ||
| 250 | (*FIXME get config from the environment or make it a parameter*) | |
| 251 | val config = | |
| 252 |               {cautious = true,
 | |
| 253 | problem_name = SOME prob_name} | |
| 254 | val result = | |
| 255 | (v, | |
| 256 | TPTP_Interpret.interpret_formula | |
| 257 | config TPTP_Syntax.THF | |
| 258 | const_map [] type_map fmla thy | |
| 259 | |> fst) | |
| 260 | in | |
| 261 | interpret_bindings prob_name thy (tl bindings) (result :: acc) | |
| 262 | end | |
| 263 | | _ => raise NON_BINDING | |
| 264 | ||
| 265 | type rule_info = | |
| 266 |   {inference_name : string, (*name of calculus rule*)
 | |
| 267 | inference_fmla : term, (*the inference as a term*) | |
| 268 | parents : string list} | |
| 269 | ||
| 270 | (*Instantiates a binding in orig_parent_fmla. Used in a proof | |
| 271 | transformation to factor out instantiations from inferences.*) | |
| 272 | fun apply_binding thy prob_name orig_parent_fmla target_fmla bindings = | |
| 273 | let | |
| 274 | val bindings' = interpret_bindings prob_name thy bindings [] | |
| 275 | ||
| 276 | (*capture selected free variables. these variables, and their | |
| 277 | intended de Bruijn index, are included in "var_ctxt"*) | |
| 278 | fun bind_free_vars var_ctxt t = | |
| 279 | case t of | |
| 280 | Const _ => t | |
| 281 | | Var _ => t | |
| 282 | | Bound _ => t | |
| 283 | | Abs (x, ty, t') => Abs (x, ty, bind_free_vars (x :: var_ctxt) t') | |
| 284 | | Free (x, ty) => | |
| 285 | let | |
| 286 | val idx = find_index (fn y => y = x) var_ctxt | |
| 287 | in | |
| 288 | if idx > ~1 andalso | |
| 289 | ty = dummyT (*this check not really needed*) then | |
| 290 | Bound idx | |
| 291 | else t | |
| 292 | end | |
| 293 | | t1 $ t2 => bind_free_vars var_ctxt t1 $ bind_free_vars var_ctxt t2 | |
| 294 | ||
| 295 | (*Instantiate specific quantified variables: | |
| 296 | Look for subterms of form (! (% x. M)) where "x" appears as a "bound_var", | |
| 297 | then replace "x" with "body" in "M". | |
| 298 | Should only be applied at formula top level -- i.e., once past the quantifier | |
| 299 | prefix we needn't bother with looking for bound_vars. | |
| 300 | "var"_ctxt is used to keep track of lambda-bindings we encounter, to capture | |
| 301 | free variables in "body" correctly (i.e., replace Free with Bound having the | |
| 302 | right index)*) | |
| 303 | fun instantiate_bound (binding as (bound_var, body)) (initial as (var_ctxt, t)) = | |
| 304 | case t of | |
| 305 | Const _ => initial | |
| 306 | | Free _ => initial | |
| 307 | | Var _ => initial | |
| 308 | | Bound _ => initial | |
| 309 | | Abs _ => initial | |
| 310 | | t1 $ (t2 as Abs (x, ty, t')) => | |
| 311 | if is_Const t1 then | |
| 312 | (*Could be fooled by shadowing, but if order matters | |
| 313 | then should still be able to handle formulas like | |
| 314 | (! X, X. F).*) | |
| 315 | if x = bound_var andalso | |
| 69597 | 316 | fst (dest_Const t1) = \<^const_name>\<open>All\<close> then | 
| 55596 | 317 | (*Body might contain free variables, so bind them using "var_ctxt". | 
| 318 | this involves replacing instances of Free with instances of Bound | |
| 319 | at the right index.*) | |
| 320 | let val body' = bind_free_vars var_ctxt body | |
| 321 | in | |
| 322 | (var_ctxt, | |
| 323 | betapply (t2, body')) | |
| 324 | end | |
| 325 | else | |
| 326 | let | |
| 327 | val (var_ctxt', rest) = instantiate_bound binding (x :: var_ctxt, t') | |
| 328 | in | |
| 329 | (var_ctxt', | |
| 330 | t1 $ Abs (x, ty, rest)) | |
| 331 | end | |
| 332 | else initial | |
| 333 | | t1 $ t2 => | |
| 334 | let | |
| 335 | val (var_ctxt', rest) = instantiate_bound binding (var_ctxt, t2) | |
| 336 | in | |
| 337 | (var_ctxt', t1 $ rest) | |
| 338 | end | |
| 339 | ||
| 340 | (*Here we preempt the following problem: | |
| 341 | if we have (! X1, X2, X3. body), and X1 is instantiated to | |
| 342 | "c X2 X3", then the current code will yield | |
| 343 | (! X2, X3, X2a, X3a. body'). | |
| 344 | To avoid this, we must first push X1 in, before calling | |
| 345 | instantiate_bound, to make sure that bound variables don't | |
| 346 | become free.*) | |
| 347 | fun safe_instantiate_bound (binding as (bound_var, body)) (var_ctxt, t) = | |
| 348 | instantiate_bound binding | |
| 349 | (var_ctxt, push_allvar_in bound_var t) | |
| 350 | ||
| 351 | (*return true if one of the types is polymorphic*) | |
| 352 | fun is_polymorphic tys = | |
| 353 | if null tys then false | |
| 354 | else | |
| 355 | case hd tys of | |
| 356 | Type (_, tys') => is_polymorphic (tl tys @ tys') | |
| 357 | | TFree _ => true | |
| 358 | | TVar _ => true | |
| 359 | ||
| 360 | (*find the type of a quantified variable, at the "topmost" binding | |
| 361 | occurrence*) | |
| 362 | local | |
| 363 | fun type_of_quantified_var' s ts = | |
| 364 | if null ts then NONE | |
| 365 | else | |
| 366 | case hd ts of | |
| 367 | Const _ => type_of_quantified_var' s (tl ts) | |
| 368 | | Free _ => type_of_quantified_var' s (tl ts) | |
| 369 | | Var _ => type_of_quantified_var' s (tl ts) | |
| 370 | | Bound _ => type_of_quantified_var' s (tl ts) | |
| 371 | | Abs (s', ty, t') => | |
| 372 | if s = s' then SOME ty | |
| 373 | else type_of_quantified_var' s (t' :: tl ts) | |
| 374 | | t1 $ t2 => type_of_quantified_var' s (t1 :: t2 :: tl ts) | |
| 375 | in | |
| 376 | fun type_of_quantified_var s = | |
| 377 | single #> type_of_quantified_var' s | |
| 378 | end | |
| 379 | ||
| 380 | (*Form the universal closure of "t". | |
| 381 | NOTE remark above "val frees" about ordering of quantified variables*) | |
| 382 | fun close_formula t = | |
| 383 | let | |
| 384 | (*The ordering of Frees in this list affects the order in which variables appear | |
| 385 | in the quantification prefix. Currently this is assumed not to matter. | |
| 386 | This consists of a list of pairs: the first element consists of the "original" | |
| 387 | free variable, and the latter consists of the monomorphised equivalent. The | |
| 388 | two elements are identical if the original is already monomorphic. | |
| 389 | This monomorphisation is needed since, owing to TPTP's lack of type annotations, | |
| 390 | variables might not be constrained by type info. This results in them being | |
| 391 | interpreted as polymorphic. E.g., this issue comes up in CSR148^1*) | |
| 392 | val frees_monomorphised = | |
| 393 | fold_aterms | |
| 394 | (fn t => fn rest => | |
| 395 | if is_Free t then | |
| 396 | let | |
| 397 | val (s, ty) = dest_Free t | |
| 398 | val ty' = | |
| 399 | if ty = dummyT orelse is_polymorphic [ty] then | |
| 400 | the (type_of_quantified_var s target_fmla) | |
| 401 | else ty | |
| 402 | in insert (op =) (t, Free (s, ty')) rest | |
| 403 | end | |
| 404 | else rest) | |
| 405 | t [] | |
| 406 | in | |
| 407 | Term.subst_free frees_monomorphised t | |
| 408 | |> fold (fn (s, ty) => fn t => | |
| 409 | HOLogic.mk_all (s, ty, t)) | |
| 410 | (map (snd #> dest_Free) frees_monomorphised) | |
| 411 | end | |
| 412 | ||
| 413 | (*FIXME currently assuming that we're only ever given a single binding each time this is called*) | |
| 69597 | 414 | val _ = \<^assert> (length bindings' = 1) | 
| 55596 | 415 | |
| 416 | in | |
| 417 | fold safe_instantiate_bound bindings' ([], HOLogic.dest_Trueprop orig_parent_fmla) | |
| 418 | |> snd (*discard var typing context*) | |
| 419 | |> close_formula | |
| 420 | |> single | |
| 421 | |> Type_Infer_Context.infer_types (Context.proof_of (Context.Theory thy)) | |
| 422 | |> the_single | |
| 423 | |> HOLogic.mk_Trueprop | |
| 424 | |> rpair bindings' | |
| 425 | end | |
| 426 | ||
| 427 | exception RECONSTRUCT of string | |
| 428 | ||
| 429 | (*Some of these may be redundant wrt the original aims of this | |
| 430 | datatype, but it's useful to have a datatype to classify formulas | |
| 431 | for use by other functions as well.*) | |
| 432 | datatype formula_kind = | |
| 433 | Conjunctive of bool option | |
| 434 | | Disjunctive of bool option | |
| 435 | | Biimplicational of bool option | |
| 436 | | Negative of bool option | |
| 437 | | Existential of bool option * typ | |
| 438 | | Universal of bool option * typ | |
| 439 | | Equational of bool option * typ | |
| 440 | | Atomic of bool option | |
| 441 | | Implicational of bool option | |
| 442 | ||
| 443 | exception UNPOLARISED of term | |
| 444 | (*Remove "= $true" or "= $false" from the edge | |
| 445 | of a formula. Use "try" in case formula is not | |
| 446 | polarised.*) | |
| 447 | fun remove_polarity strict formula = | |
| 448 | case try HOLogic.dest_eq formula of | |
| 449 | NONE => if strict then raise (UNPOLARISED formula) | |
| 450 | else (formula, true) | |
| 69597 | 451 | | SOME (x, p as \<^term>\<open>True\<close>) => (x, true) | 
| 452 | | SOME (x, p as \<^term>\<open>False\<close>) => (x, false) | |
| 55596 | 453 | | SOME (x, _) => | 
| 454 | if strict then raise (UNPOLARISED formula) | |
| 455 | else (formula, true) | |
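(*Illustrative examples (not from the original file), writing polarised
  literals in HOL syntax:
    remove_polarity false @{term "P = True"}   gives (P, true)
    remove_polarity false @{term "P = False"}  gives (P, false)
    remove_polarity false @{term "P::bool"}    gives (P, true), i.e. unpolarised terms default to positive
  With strict = true, the last call raises UNPOLARISED instead.*)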
| 456 | ||
| 457 | (*flattens a formula wrt associative operators*) | |
| 458 | fun flatten formula_kind formula = | |
| 459 | let | |
| 69597 | 460 | fun is_conj (Const (\<^const_name>\<open>HOL.conj\<close>, _) $ _ $ _) = true | 
| 55596 | 461 | | is_conj _ = false | 
| 69597 | 462 | fun is_disj (Const (\<^const_name>\<open>HOL.disj\<close>, _) $ _ $ _) = true | 
| 55596 | 463 | | is_disj _ = false | 
| 69597 | 464 | fun is_iff (Const (\<^const_name>\<open>HOL.eq\<close>, ty) $ _ $ _) = | 
| 55596 | 465 | ty = ([HOLogic.boolT, HOLogic.boolT] ---> HOLogic.boolT) | 
| 466 | | is_iff _ = false | |
| 467 | ||
| 468 | fun flatten' formula acc = | |
| 469 | case formula of | |
| 69597 | 470 | Const (\<^const_name>\<open>HOL.conj\<close>, _) $ t1 $ t2 => | 
| 55596 | 471 | (case formula_kind of | 
| 472 | Conjunctive _ => | |
| 473 | let | |
| 474 | val left = | |
| 475 | if is_conj t1 then flatten' t1 acc else (t1 :: acc) | |
| 476 | in | |
| 477 | if is_conj t2 then flatten' t2 left else (t2 :: left) | |
| 478 | end | |
| 479 | | _ => formula :: acc) | |
| 69597 | 480 | | Const (\<^const_name>\<open>HOL.disj\<close>, _) $ t1 $ t2 => | 
| 55596 | 481 | (case formula_kind of | 
| 482 | Disjunctive _ => | |
| 483 | let | |
| 484 | val left = | |
| 485 | if is_disj t1 then flatten' t1 acc else (t1 :: acc) | |
| 486 | in | |
| 487 | if is_disj t2 then flatten' t2 left else (t2 :: left) | |
| 488 | end | |
| 489 | | _ => formula :: acc) | |
| 69597 | 490 | | Const (\<^const_name>\<open>HOL.eq\<close>, ty) $ t1 $ t2 => | 
| 55596 | 491 | if ty = ([HOLogic.boolT, HOLogic.boolT] ---> HOLogic.boolT) then | 
| 492 | case formula_kind of | |
| 493 | Biimplicational _ => | |
| 494 | let | |
| 495 | val left = | |
| 496 | if is_iff t1 then flatten' t1 acc else (t1 :: acc) | |
| 497 | in | |
| 498 | if is_iff t2 then flatten' t2 left else (t2 :: left) | |
| 499 | end | |
| 500 | | _ => formula :: acc | |
| 501 | else formula :: acc | |
| 502 | | _ => [formula] | |
| 503 | ||
| 504 | val formula' = try_dest_Trueprop formula | |
| 505 | in | |
| 506 | case formula_kind of | |
| 507 | Conjunctive (SOME _) => | |
| 508 | remove_polarity false formula' | |
| 509 | |> fst | |
| 510 | |> (fn t => flatten' t []) | |
| 511 | | Disjunctive (SOME _) => | |
| 512 | remove_polarity false formula' | |
| 513 | |> fst | |
| 514 | |> (fn t => flatten' t []) | |
| 515 | | Biimplicational (SOME _) => | |
| 516 | remove_polarity false formula' | |
| 517 | |> fst | |
| 518 | |> (fn t => flatten' t []) | |
| 519 | | _ => flatten' formula' [] | |
| 520 | end | |
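(*Illustrative example (not from the original file): flattening a nested
  conjunction collects its conjuncts, in reverse order of appearance, e.g.
    flatten (Conjunctive NONE) @{term "A & (B & C)"}
  evaluates to [C, B, A].*)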
| 521 | ||
| 522 | fun node_info fms projector node_name = | |
| 523 | case AList.lookup (op =) fms node_name of | |
| 524 | NONE => | |
| 525 |         raise (RECONSTRUCT ("node " ^ node_name ^
 | |
| 526 | " doesn't exist")) | |
| 527 | | SOME info => projector info | |
| 528 | ||
| 529 | (*Given a list of parent infos, extract the parent node names | |
| 530 | and the additional info (e.g., if there was an instantiation | |
| 531 | in addition to the inference). | |
| 532 | if "filtered"=true then exclude axiom and definition parents*) | |
| 533 | fun dest_parent_infos filtered fms parent_infos : {name : string, details : TPTP_Proof.parent_detail list} list =
 | |
| 534 | let | |
| 535 | (*Removes "definition" dependencies since these play no | |
| 536 | logical role -- i.e. they just give the expansions of | |
| 537 | constants. | |
| 538 | Removes "axiom" dependencies since these do not need to | |
| 539 | be derived; the reconstruction handler in "leo2_tac" can | |
| 540 | pick up the relevant axioms (using the info in the proof | |
| 541 | annotation) and use them in its reconstruction. | |
| 542 | *) | |
| 543 | val filter_deps = | |
| 58412 | 544 |       filter (fn {name, ...} =>
 | 
| 55596 | 545 | let | 
| 546 | val role = node_info fms #role name | |
| 547 | in role <> TPTP_Syntax.Role_Definition andalso | |
| 548 | role <> TPTP_Syntax.Role_Axiom | |
| 549 | end) | |
| 550 | val parent_nodelist = | |
| 551 | parent_infos | |
| 552 | |> map (fn n => | |
| 553 | case n of | |
| 554 |                      TPTP_Proof.Parent parent => {name = parent, details = []}
 | |
| 555 | | TPTP_Proof.ParentWithDetails (parent, details) => | |
| 556 |                      {name = parent, details = details})
 | |
| 557 | in | |
| 558 | parent_nodelist | |
| 559 | |> filtered ? filter_deps | |
| 560 | end | |
| 561 | ||
| 562 | fun parents_of_node fms n = | |
| 563 | case node_info fms #source_inf_opt n of | |
| 564 | NONE => [] | |
| 565 | | SOME (TPTP_Proof.File _) => [] | |
| 566 | | SOME (TPTP_Proof.Inference (_, _ : TPTP_Proof.useful_info_as list, parent_infos)) => | |
| 567 | dest_parent_infos false fms parent_infos | |
| 568 | |> map #name | |
| 569 | ||
| 570 | exception FIND_ANCESTOR_USING_RULE of string | |
| 571 | (*BFS for an ancestor inference involving a specific rule*) | |
| 572 | fun find_ancestor_using_rule pannot inference_rule (fringe : string list) : string = | |
| 573 | if null fringe then | |
| 574 | raise (FIND_ANCESTOR_USING_RULE inference_rule) | |
| 575 | else | |
| 576 | case node_info (#meta pannot) #source_inf_opt (hd fringe) of | |
| 577 | NONE => find_ancestor_using_rule pannot inference_rule (tl fringe) | |
| 578 | | SOME (TPTP_Proof.File _) => find_ancestor_using_rule pannot inference_rule (tl fringe) | |
| 579 | | SOME (TPTP_Proof.Inference (rule_name, _ : TPTP_Proof.useful_info_as list, parent_infos)) => | |
| 580 | if rule_name = inference_rule then hd fringe | |
| 581 | else | |
| 582 | find_ancestor_using_rule pannot inference_rule | |
| 583 | (tl fringe @ | |
| 584 | map #name (dest_parent_infos true (#meta pannot) parent_infos)) | |
| 585 | ||
| 586 | (*Given a node in the proof, produce the term representing the inference | |
| 587 | that took place in that step, the inference rule used, and which | |
| 588 | other (non-axiom and non-definition) nodes participated in the | |
| 589 | inference*) | |
| 590 | fun inference_at_node thy (prob_name : TPTP_Problem_Name.problem_name) | |
| 591 | (fms : formula_meaning list) from : rule_info option = | |
| 592 | let | |
| 593 | exception INFERENCE_AT_NODE of string | |
| 594 | ||
| 595 | (*lookup formula associated with a node*) | |
| 596 | val fmla_of_node = | |
| 597 | node_info fms #fmla | |
| 598 | #> try_dest_Trueprop | |
| 599 | ||
| 600 | fun build_inference_info rule_name parent_infos = | |
| 601 | let | |
| 69597 | 602 | val _ = \<^assert> (not (null parent_infos)) | 
| 55596 | 603 | |
| 604 | (*hypothesis formulas (with bindings already | |
| 605 | instantiated during the proof-transformation | |
| 606 | applied when loading the proof), | |
| 607 | including any axioms or definitions*) | |
| 608 | val parent_nodes = | |
| 609 | dest_parent_infos false fms parent_infos | |
| 610 | |> map #name | |
| 611 | ||
| 612 | val parent_fmlas = map fmla_of_node (rev(*FIXME can do away with this? it matters because of order of conjunction. is there a matching rev elsewhere?*) parent_nodes) | |
| 613 | ||
| 614 | val inference_term = | |
| 615 | if null parent_fmlas then | |
| 616 | fmla_of_node from | |
| 617 | |> HOLogic.mk_Trueprop | |
| 618 | else | |
| 619 | Logic.mk_implies | |
| 620 | (fold | |
| 621 | (curry HOLogic.mk_conj) | |
| 622 | (tl parent_fmlas) | |
| 623 | (hd parent_fmlas) | |
| 624 | |> HOLogic.mk_Trueprop, | |
| 625 | fmla_of_node from |> HOLogic.mk_Trueprop) | |
| 626 | in | |
| 627 |           SOME {inference_name = rule_name,
 | |
| 628 | inference_fmla = inference_term, | |
| 629 | parents = parent_nodes} | |
| 630 | end | |
| 631 | in | |
| 632 | (*examine node's "source" annotation: we're only interested | |
| 633 | if it's an inference*) | |
| 634 | case node_info fms #source_inf_opt from of | |
| 635 | NONE => NONE | |
| 636 | | SOME (TPTP_Proof.File _) => NONE | |
| 637 | | SOME (TPTP_Proof.Inference (rule_name, _ : TPTP_Proof.useful_info_as list, parent_infos)) => | |
| 638 | if List.null parent_infos then | |
| 639 | raise (INFERENCE_AT_NODE | |
| 640 |                             ("empty parent list for node " ^
 | |
| 641 | from ^ ": check proof format")) | |
| 642 | else | |
| 643 | build_inference_info rule_name parent_infos | |
| 644 | end | |
| 645 | ||
| 646 | ||
| 647 | (** Proof skeleton **) | |
| 648 | ||
| 649 | (* Emulating skeleton steps *) | |
| 650 | ||
| 651 | (* | |
| 652 | Builds a rule (thm) of the following form: | |
| 653 | ||
| 654 | ||
| 655 | prem1 premn | |
| 656 | ... ... ... | |
| 657 | major_prem conc1 concn | |
| 658 | ----------------------------------------------- | |
| 659 | conclusion | |
| 660 | ||
| 661 | where major_prem is a disjunction of prem1,...,premn. | |
| 662 | *) | |
| 663 | fun make_elimination_rule_t ctxt major_prem prems_and_concs conclusion = | |
| 664 | let | |
| 665 | val thy = Proof_Context.theory_of ctxt | |
| 666 | val minor_prems = | |
| 667 | map (fn (v, conc) => | |
| 668 | Logic.mk_implies (v, HOLogic.mk_Trueprop conc)) | |
| 669 | prems_and_concs | |
| 670 | in | |
| 671 | (Logic.list_implies | |
| 672 | (major_prem :: minor_prems, | |
| 673 | conclusion)) | |
| 674 | end | |
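(*Illustrative sketch (hypothetical formulas): for major_prem "(A | B) = False",
  prems_and_concs [("(A = False)", R1), ("(B = False)", R2)] and conclusion
  "R1 & R2", the term built above is, up to Trueprop coercions, roughly

    [| (A | B) = False; A = False ==> R1; B = False ==> R2 |] ==> R1 & R2

  i.e. exactly the elimination-rule shape pictured in the surrounding comments.*)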
| 675 | ||
| 676 | (*In summary, we emulate an n-way splitting rule via an n-way | |
| 677 | disjunction elimination. | |
| 678 | ||
| 679 | Given a split formula and conclusion, we prove a rule which | |
| 680 | simulates the split. The conclusion is assumed to be a conjunction | |
| 681 | of conclusions for each branch of the split. The | |
| 682 | "minor_prem_assumptions" are the assumptions discharged in each | |
| 683 | branch; they're passed to the function to make sure that the | |
| 684 | generated rule is compatible with the skeleton (since the skeleton | |
| 685 | fixes the "order" of the reconstruction, based on the proof's | |
| 686 | structure). | |
| 687 | ||
| 688 | Concretely, if P is "(_ & _) = $false" or "(_ | _) = $true" then | |
| 689 | splitting behaves as follows: | |
| 690 | ||
| 691 | P | |
| 692 | ------------------------------- | |
| 693 | _ = $false _ = $false | |
| 694 | ... ... ... | |
| 695 | R1 Rn | |
| 696 | ------------------------------- | |
| 697 | R1 & ... & Rn | |
| 698 | ||
| 699 | Splitting (binary) iffs works as follows: | |
| 700 | ||
| 701 | (A <=> B) = $false | |
| 702 | ------------------------------------------ | |
| 703 | (A => B) = $false (B => A) = $false | |
| 704 | ... ... | |
| 705 | R1 R2 | |
| 706 | ------------------------------------------ | |
| 707 | R1 & R2 | |
| 708 | *) | |
| 709 | fun simulate_split ctxt split_fmla minor_prem_assumptions conclusion = | |
| 710 | let | |
| 711 | val prems_and_concs = | |
| 712 | ListPair.zip (minor_prem_assumptions, flatten (Conjunctive NONE) conclusion) | |
| 713 | ||
| 714 | val rule_t = make_elimination_rule_t ctxt split_fmla prems_and_concs conclusion | |
| 715 | ||
| 716 | (*these are replaced by fresh variables in the abstract term*) | |
| 717 | val abstraction_subterms = | |
| 718 | (map (try_dest_Trueprop #> remove_polarity true #> fst) | |
| 719 | minor_prem_assumptions) | |
| 720 | ||
| 721 | (*generate an abstract rule as a term...*) | |
| 722 | val abs_rule_t = | |
| 723 | abstract | |
| 724 | abstraction_subterms | |
| 725 | rule_t | |
| 726 | |> snd (*ignore mapping info. this is a bit wasteful*) | |
| 727 | (*FIXME optimisation: instead on relying on diff | |
| 728 | to regenerate this info, could use it directly*) | |
| 729 | ||
| 730 | (*...and validate the abstract rule*) | |
| 731 | val abs_rule_thm = | |
| 732 | Goal.prove ctxt [] [] abs_rule_t | |
| 733 | (fn pdata => HEADGOAL (blast_tac (#context pdata))) | |
| 734 | |> Drule.export_without_context | |
| 735 | in | |
| 736 | (*Instantiate the abstract rule based on the contents of the | |
| 737 | required instance*) | |
| 59582 | 738 | diff_and_instantiate ctxt abs_rule_thm (Thm.prop_of abs_rule_thm) rule_t | 
| 55596 | 739 | end | 
| 740 | ||
| 741 | ||
| 742 | (* Building the skeleton *) | |
| 743 | ||
| 744 | type step_id = string | |
| 745 | datatype rolling_stock = | |
| 746 | Step of step_id | |
| 747 | | Assumed | |
| 748 | | Unconjoin | |
| 749 | | Split of step_id (*where split occurs*) * | |
| 750 | step_id (*where split ends*) * | |
| 751 | step_id list (*children of the split*) | |
| 752 | | Synth_step of step_id (*A step which doesn't necessarily appear in | |
| 753 | the original proof, or which has been modified slightly for better | |
| 754 | handling by Isabelle*) (*FIXME "inodes" should be made into Synth_steps*) | |
| 755 | | Annotated_step of step_id * string (*Same interpretation as | |
| 756 | "Step", except that additional information is attached. This is | |
| 757 | currently used for debugging: Steps are mapped to Annotated_steps | |
| 758 | and their rule names are included as strings*) | |
| 759 | | Definition of step_id (*Mirrors TPTP role*) | |
| 760 | | Axiom of step_id (*Mirrors TPTP role*) | |
| 761 | (* | Derived of step_id -- to be used by memoization*) | |
| 762 | | Caboose | |
| 763 | ||
| 764 | fun stock_to_string (Step n) = n | |
| 765 |   | stock_to_string (Annotated_step (n, anno)) = n ^ "(" ^ anno ^ ")"
 | |
| 766 | | stock_to_string _ = error "Stock is not a step" (*FIXME more meaningful message*) | |
| 767 | ||
| 768 | fun filter_by_role tptp_role = | |
| 769 | filter | |
| 770 | (fn (_, info) => | |
| 771 | #role info = tptp_role) | |
| 772 | ||
| 773 | fun filter_by_name node_name = | |
| 774 | filter | |
| 775 | (fn (n, _) => | |
| 776 | n = node_name) | |
| 777 | ||
| 778 | exception NO_MARKER_NODE | |
| 779 | (*We fall back on node "1" in case the proof is not that of a theorem*) | |
| 780 | fun proof_beginning_node fms = | |
| 781 | let | |
| 782 | val result = | |
| 783 | cascaded_filter_single true | |
| 784 | [filter_by_role TPTP_Syntax.Role_Conjecture, | |
| 785 | filter_by_name "1"] (*FIXME const*) | |
| 786 | fms | |
| 787 | in | |
| 788 | case result of | |
| 789 | SOME x => fst x (*get the node name*) | |
| 790 | | NONE => raise NO_MARKER_NODE | |
| 791 | end | |
| 792 | ||
| 793 | (*Get the name of the node where the proof ends*) | |
| 794 | fun proof_end_node fms = | |
| 795 | (*FIXME this isn't very nice: we assume that the last line in the | |
| 796 | proof file is the closing line of the proof. It would be nicer if | |
| 797 | such a line is specially marked (with a role), since there is no | |
| 798 | obvious ordering on names, since they can be strings. | |
| 799 | Another way would be to run an analysis on the graph to find | |
| 800 | this node, since it has properties which should make it unique | |
| 801 | in a graph*) | |
| 802 | fms | |
| 803 | |> hd (*since proof has been reversed prior*) | |
| 804 | |> fst (*get node name*) | |
| 805 | ||
| 806 | (*Generate list of (possibly reconstructed) inferences which can be | |
| 807 | composed together to reconstruct the whole proof.*) | |
| 808 | fun make_skeleton ctxt (pannot : proof_annotation) : rolling_stock list = | |
| 809 | let | |
| 810 | val thy = Proof_Context.theory_of ctxt | |
| 811 | ||
| 812 | fun stock_is_ax_or_def (Axiom _) = true | |
| 813 | | stock_is_ax_or_def (Definition _) = true | |
| 814 | | stock_is_ax_or_def _ = false | |
| 815 | ||
| 816 | fun stock_of n = | |
| 817 | case node_info (#meta pannot) #role n of | |
| 818 | TPTP_Syntax.Role_Definition => (true, Definition n) | |
| 819 | | TPTP_Syntax.Role_Axiom => (true, Axiom n) | |
| 820 | | _ => (false, Step n) | |
| 821 | ||
| 822 | fun n_is_split_conjecture (inference_info : rule_info option) = | |
| 823 | case inference_info of | |
| 824 | NONE => false | |
| 825 | | SOME inference_info => #inference_name inference_info = "split_conjecture" | |
| 826 | ||
| 827 | (*Different kinds of inference sequences: | |
| 828 | - Linear: (just add a step to the skeleton) | |
| 829 | ---...--- | |
| 830 | ||
| 831 | - Fan-in: (treat each in-path as conjoined with the others. Linearise all the paths, and concatenate them.) | |
| 832 | /---... | |
| 833 | ------< | |
| 834 | \---... | |
| 835 | ||
| 836 | - Real split: Instead of treating as a conjunction, as in | |
| 837 | normal fan-ins, we need to treat specially by looking | |
| 838 | at the location where the split occurs, and turn the | |
| 839 | split inference into a validity-preserving subproof. | |
| 840 | As with fan-ins, we handle each fan-in path, and | |
| 841 | concatenate. | |
| 842 | /---...---\ | |
| 843 | ------< >------ | |
| 844 | \---...---/ | |
| 845 | ||
| 846 | - Fake split: (treat like linear, since there isn't a split-node) | |
| 847 | ------<---...---------- | |
| 848 | ||
| 849 | Different kinds of sequences endings: | |
| 850 | - "Stop before": Non-decreasing list of nodes where should terminate. | |
| 851 | This starts off with the end node, and the split_nodes | |
| 852 | will be added dynamically as the skeleton is built. | |
| 853 | - Axiom/Definition | |
| 854 | *) | |
| 855 | ||
| 856 | (*The following functions build the skeleton for the reconstruction starting | |
| 857 | from the node labelled "n" and stopping just before an element in stop_just_befores*) | |
| 858 | (*FIXME could throw exception if none of stop_just_befores is ever encountered*) | |
| 859 | ||
| 860 | (*The approach below is naive because it linearises the proof DAG, and this would | |
| 861 | duplicate some effort if the DAG isn't already linear.*) | |
| 862 | exception SKELETON | |
| 863 | ||
| 864 | fun check_parents stop_just_befores n = | |
| 865 | let | |
| 866 | val parents = parents_of_node (#meta pannot) n | |
| 867 | in | |
| 868 | if length parents = 1 then | |
| 869 | AList.lookup (op =) stop_just_befores (the_single parents) | |
| 870 | else | |
| 871 | NONE | |
| 872 | end | |
| 873 | ||
| 874 | fun naive_skeleton' stop_just_befores n = | |
| 875 | case check_parents stop_just_befores n of | |
| 876 | SOME skel => skel | |
| 877 | | NONE => | |
| 878 | let | |
| 879 | val inference_info = inference_at_node thy (#problem_name pannot) (#meta pannot) n | |
| 880 | in | |
| 881 | if is_none inference_info then | |
| 882 | (*this is the case for the conjecture, definitions and axioms*) | |
| 883 | if node_info (#meta pannot) #role n = TPTP_Syntax.Role_Definition then | |
| 884 | [(Definition n), Assumed] | |
| 885 | else if node_info (#meta pannot) #role n = TPTP_Syntax.Role_Axiom then | |
| 886 | [Axiom n] | |
| 887 | else raise SKELETON | |
| 888 | else | |
| 889 | let | |
| 890 | val inference_info = the inference_info | |
| 891 | val parents = #parents inference_info | |
| 892 | in | |
| 893 | (*FIXME memoize antecedent_steps?*) | |
| 894 | if #inference_name inference_info = "solved_all_splits" andalso length parents > 1 then | |
| 895 | (*splitting involves fanning out then in; this is to be | |
| 896 | treated differently from other fan-out-ins.*) | |
| 897 | let | |
| 898 | (*find where the proofs fanned-out: pick some antecedent, | |
| 899 | then find the ancestor that uses a "split_conjecture" inference.*) | |
| 900 | (*NOTE we assume that splits can't be nested*) | |
| 901 | val split_node = | |
| 902 | find_ancestor_using_rule pannot "split_conjecture" [hd parents] | |
| 903 | |> parents_of_node (#meta pannot) | |
| 904 | |> the_single | |
| 905 | ||
| 906 | (*compute the skeletons starting at parents to either the split_node | |
| 907 | if the antecedent is descended from the split_node, or the | |
| 908 | stop_just_before otherwise*) | |
| 909 | val skeletons_up = | |
| 910 | map (naive_skeleton' ((split_node, [Assumed]) :: stop_just_befores)) parents | |
| 911 | in | |
| 912 | (*point to the split node, so that custom rule can be built later on*) | |
| 913 | Step n :: (Split (split_node, n, parents)) :: (*this will create the elimination rule*) | |
| 914 | naive_skeleton' stop_just_befores split_node @ (*this will discharge the major premise*) | |
| 58411 | 915 | flat skeletons_up @ [Assumed] (*this will discharge the minor premises*) | 
| 55596 | 916 | end | 
| 917 | else if length parents > 1 then | |
| 918 | (*Handle fan-in nodes which aren't split-sinks by | |
| 919 | enclosing each branch but one in conjI-assumption invocations*) | |
| 920 | let | |
| 921 | val skeletons_up = | |
| 922 | map (naive_skeleton' stop_just_befores) parents | |
| 923 | in | |
| 924 | Step n :: concat_between skeletons_up (SOME Unconjoin, NONE) @ [Assumed] | |
| 925 | end | |
| 926 | else | |
| 927 | Step n :: naive_skeleton' stop_just_befores (the_single parents) | |
| 928 | end | |
| 929 | end | |
| 930 | in | |
| 931 | if List.null (#meta pannot) then [] (*in case "proof" file is empty*) | |
| 932 | else | |
| 933 | naive_skeleton' | |
| 934 | [(proof_beginning_node (#meta pannot), [Assumed])] | |
| 935 | (proof_end_node (#meta pannot)) | |
| 936 | (*make last step the Caboose*) | |
| 937 | |> rev |> tl |> cons Caboose |> rev (*FIXME hacky*) | |
| 938 | end | |
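(*Illustrative sketch (hypothetical node names): for a purely linear
  refutation whose conjecture is node "1" and whose final node is "4"
  (parent chain 4 <- 3 <- 2 <- 1), the skeleton produced above is roughly
    [Step "4", Step "3", Caboose]
  i.e. one Step per reconstructed inference, with the step adjacent to the
  conjecture absorbed into the closing Caboose, which is discharged by
  assumption.*)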
| 939 | ||
| 940 | ||
| 941 | (* Using the skeleton *) | |
| 942 | ||
| 943 | exception SKELETON | |
| 944 | local | |
| 945 | (*Change the negated assumption (which is output by the contradiction rule) into | |
| 946 | a form familiar to Leo2*) | |
| 947 | val neg_eq_false = | |
| 948 |       @{lemma "!! P. (~ P) ==> (P = False)" by auto}
 | |
| 949 | ||
| 950 | (*FIXME this is just a dummy thm to annotate the assumption tac "atac"*) | |
| 951 | val solved_all_splits = | |
| 952 |       @{lemma "False = True ==> False" by auto}
 | |
| 953 | ||
| 954 | fun skel_to_naive_tactic ctxt prover_tac prob_name skel memo = fn st => | |
| 955 | let | |
| 956 | val thy = Proof_Context.theory_of ctxt | |
| 957 | val pannot = get_pannot_of_prob thy prob_name | |
| 958 | fun tac_and_memo node memo = | |
| 959 | case AList.lookup (op =) memo node of | |
| 960 | NONE => | |
| 961 | let | |
| 962 | val tac = | |
| 963 | (*FIXME formula_sizelimit not being | |
| 964 | checked here*) | |
| 965 | prover_tac ctxt prob_name node | |
| 966 | in (tac, (node, tac) :: memo) end | |
| 967 | | SOME tac => (tac, memo) | |
| 968 | fun rest skel' memo = | |
| 969 | skel_to_naive_tactic ctxt prover_tac prob_name skel' memo | |
| 970 | ||
| 971 | val tactic = | |
| 972 | if null skel then | |
| 973 | raise SKELETON (*FIXME or classify it as a Caboose: TRY (HEADGOAL atac) *) | |
| 974 | else | |
| 975 | case hd skel of | |
| 60754 | 976 | Assumed => TRY (HEADGOAL (assume_tac ctxt)) THEN rest (tl skel) memo | 
| 977 | | Caboose => TRY (HEADGOAL (assume_tac ctxt)) | |
| 978 |               | Unconjoin => resolve_tac ctxt @{thms conjI} 1 THEN rest (tl skel) memo
 | |
| 55596 | 979 | | Split (split_node, solved_node, antes) => | 
| 980 | let | |
| 981 | val split_fmla = node_info (#meta pannot) #fmla split_node | |
| 982 | val conclusion = | |
| 983 | (inference_at_node thy prob_name (#meta pannot) solved_node | |
| 984 | |> the | |
| 985 | |> #inference_fmla) | |
| 986 | |> Logic.dest_implies (*FIXME there might be !!-variables?*) | |
| 987 | |> #1 | |
| 988 | val minor_prems_assumps = | |
| 989 | map (fn ante => find_ancestor_using_rule pannot "split_conjecture" [ante]) antes | |
| 990 | |> map (node_info (#meta pannot) #fmla) | |
| 991 | val split_thm = | |
| 992 | simulate_split ctxt split_fmla minor_prems_assumps conclusion | |
| 993 | in | |
| 60754 | 994 | resolve_tac ctxt [split_thm] 1 THEN rest (tl skel) memo | 
| 55596 | 995 | end | 
| 996 | | Step s => | |
| 997 | let | |
| 60754 | 998 | val (th, memo') = tac_and_memo s memo | 
| 55596 | 999 | in | 
| 60754 | 1000 | resolve_tac ctxt [th] 1 THEN rest (tl skel) memo' | 
| 55596 | 1001 | end | 
| 1002 | | Definition n => | |
| 1003 | let | |
| 1004 | val def_thm = | |
| 1005 | case AList.lookup (op =) (#defs pannot) n of | |
| 1006 |                           NONE => error ("Did not find definition: " ^ n)
 | |
| 59858 | 1007 | | SOME binding => Global_Theory.get_thm thy (Binding.name_of binding) | 
| 55596 | 1008 | in | 
| 60754 | 1009 | resolve_tac ctxt [def_thm] 1 THEN rest (tl skel) memo | 
| 55596 | 1010 | end | 
| 1011 | | Axiom n => | |
| 1012 | let | |
| 1013 | val ax_thm = | |
| 1014 | case AList.lookup (op =) (#axs pannot) n of | |
| 1015 |                           NONE => error ("Did not find axiom: " ^ n)
 | |
| 59858 | 1016 | | SOME binding => Global_Theory.get_thm thy (Binding.name_of binding) | 
| 55596 | 1017 | in | 
| 60754 | 1018 | resolve_tac ctxt [ax_thm] 1 THEN rest (tl skel) memo | 
| 55596 | 1019 | end | 
| 1020 | | _ => raise SKELETON | |
| 1021 | in tactic st end | |
| 1022 | (*FIXME fuse these*) | |
| 1023 | (*As above, but creates debug-friendly tactic. | |
| 1024 | This is also used for "partial proof reconstruction"*) | |
| 1025 | fun skel_to_naive_tactic_dbg prover_tac ctxt prob_name skel (memo : (string * (thm * tactic) option) list) = | |
| 1026 | let | |
| 1027 | val thy = Proof_Context.theory_of ctxt | |
| 1028 | val pannot = get_pannot_of_prob thy prob_name | |
| 1029 | ||
| 60649 | 1030 | (* FIXME !???! | 
| 55596 | 1031 | fun rtac_wrap thm_f i = fn st => | 
| 1032 | let | |
| 1033 | val thy = Thm.theory_of_thm st | |
| 1034 | in | |
| 1035 | rtac (thm_f thy) i st | |
| 1036 | end | |
| 60649 | 1037 | *) | 
| 55596 | 1038 | |
| 1039 | (*Some nodes don't have an inference name, such as the conjecture, | |
| 1040 | definitions and axioms. Such nodes shouldn't appear in the | |
| 1041 | skeleton.*) | |
| 1042 | fun inference_name_of_node node = | |
| 1043 | case AList.lookup (op =) (#meta pannot) node of | |
| 1044 | NONE => (warning "Inference step lacks an inference name"; "(Shouldn't be here)") | |
| 1045 | | SOME info => | |
| 1046 | case #source_inf_opt info of | |
| 1047 | SOME (TPTP_Proof.Inference (infname, _, _)) => | |
| 1048 | infname | |
| 1049 | | _ => (warning "Inference step lacks an inference name"; "(Shouldn't be here)") | |
| 1050 | ||
| 1051 | fun inference_fmla node = | |
| 1052 | case inference_at_node thy prob_name (#meta pannot) node of | |
| 1053 | NONE => NONE | |
| 1054 |             | SOME {inference_fmla, ...} => SOME inference_fmla
 | |
| 1055 | ||
| 1056 | fun rest memo' ctxt' = skel_to_naive_tactic_dbg prover_tac ctxt' prob_name (tl skel) memo' | |
| 1057 | (*reconstruct the inference. also set timeout in case | |
| 1058 | tactic takes too long*) | |
| 1059 | val try_make_step = | |
| 1060 | (*FIXME const timeout*) | |
| 62519 | 1061 | (* Timeout.apply (Time.fromSeconds 5) *) | 
| 55596 | 1062 | (fn ctxt' => | 
| 1063 | let | |
| 1064 | fun thm ctxt'' = prover_tac ctxt'' prob_name (hd skel |> stock_to_string) | |
| 1065 | val reconstructed_inference = thm ctxt' | |
| 60754 | 1066 | fun rec_inf_tac st = HEADGOAL (resolve_tac ctxt' [thm ctxt']) st | 
| 55596 | 1067 | in (reconstructed_inference, | 
| 1068 | rec_inf_tac) | |
| 1069 | end) | |
| 1070 | fun ignore_interpretation_exn f x = SOME (f x) | |
| 62243 | 1071 | handle INTERPRET_INFERENCE => NONE | 
| 55596 | 1072 | in | 
| 1073 | if List.null skel then | |
| 1074 | raise SKELETON | |
| 1075 | (*FIXME or classify it as follows: | |
| 1076 | [(Caboose, | |
| 59582 | 1077 |             Thm.prop_of @{thm asm_rl}
 | 
| 55596 | 1078 | |> SOME, | 
| 1079 |             SOME (@{thm asm_rl}, TRY (HEADGOAL atac)))]
 | |
| 1080 | *) | |
| 1081 | else | |
| 1082 | case hd skel of | |
| 1083 | Assumed => | |
| 1084 | (hd skel, | |
| 59582 | 1085 |                  Thm.prop_of @{thm asm_rl}
 | 
| 55596 | 1086 | |> SOME, | 
| 60754 | 1087 |                  SOME (@{thm asm_rl}, TRY (HEADGOAL (assume_tac ctxt)))) :: rest memo ctxt
 | 
| 55596 | 1088 | | Caboose => | 
| 1089 | [(Caboose, | |
| 59582 | 1090 |                   Thm.prop_of @{thm asm_rl}
 | 
| 55596 | 1091 | |> SOME, | 
| 60754 | 1092 |                   SOME (@{thm asm_rl}, TRY (HEADGOAL (assume_tac ctxt))))]
 | 
| 55596 | 1093 | | Unconjoin => | 
| 1094 | (hd skel, | |
| 59582 | 1095 |                  Thm.prop_of @{thm conjI}
 | 
| 55596 | 1096 | |> SOME, | 
| 60754 | 1097 |                  SOME (@{thm conjI}, resolve_tac ctxt @{thms conjI} 1)) :: rest memo ctxt
 | 
| 55596 | 1098 | | Split (split_node, solved_node, antes) => | 
| 1099 | let | |
| 1100 | val split_fmla = node_info (#meta pannot) #fmla split_node | |
| 1101 | val conclusion = | |
| 1102 | (inference_at_node thy prob_name (#meta pannot) solved_node | |
| 1103 | |> the | |
| 1104 | |> #inference_fmla) | |
| 1105 | |> Logic.dest_implies (*FIXME there might be !!-variables?*) | |
| 1106 | |> #1 | |
| 1107 | val minor_prems_assumps = | |
| 1108 | map (fn ante => find_ancestor_using_rule pannot "split_conjecture" [ante]) antes | |
| 1109 | |> map (node_info (#meta pannot) #fmla) | |
| 1110 | val split_thm = | |
| 1111 | simulate_split ctxt split_fmla minor_prems_assumps conclusion | |
| 1112 | in | |
| 1113 | (hd skel, | |
| 59582 | 1114 | Thm.prop_of split_thm | 
| 55596 | 1115 | |> SOME, | 
| 60754 | 1116 | SOME (split_thm, resolve_tac ctxt [split_thm] 1)) :: rest memo ctxt | 
| 55596 | 1117 | end | 
| 1118 | | Step node => | |
| 1119 | let | |
| 1120 | val inference_name = inference_name_of_node node | |
| 1121 | val inference_fmla = inference_fmla node | |
| 1122 | ||
| 1123 | (*FIXME debugging code | |
| 1124 | val _ = | |
| 1125 | if Config.get ctxt tptp_trace_reconstruction then | |
| 1126 |                        (tracing ("handling node " ^ node);
 | |
| 1127 |                         tracing ("inference " ^ inference_name);
 | |
| 1128 | if is_some inference_fmla then | |
| 1129 |                           tracing ("formula size " ^ Int.toString (Term.size_of_term (the inference_fmla)))
 | |
| 1130 | else ()(*; | |
| 1131 |                         tracing ("formula " ^ @{make_string inference_fmla}) *))
 | |
| 1132 | else ()*) | |
| 1133 | ||
| 1134 | val (inference_instance_thm, memo', ctxt') = | |
| 1135 | case AList.lookup (op =) memo node of | |
| 1136 | NONE => | |
| 1137 | let | |
| 1138 | val (thm, ctxt') = | |
| 1139 | (*Instead of NONE could have another value indicating that the formula was too big*) | |
| 1140 | if is_some inference_fmla andalso | |
| 1141 | (*FIXME could have different inference rules have different sizelimits*) | |
| 1142 | exceeds_tptp_max_term_size ctxt (Term.size_of_term (the inference_fmla)) then | |
| 1143 | ( | |
| 1144 |                                      warning ("Gave up on node " ^ node ^ " because of fmla size " ^
 | |
| 1145 | Int.toString (Term.size_of_term (the inference_fmla))); | |
| 1146 | (NONE, ctxt) | |
| 1147 | ) | |
| 1148 | else | |
| 1149 | let | |
| 1150 | val maybe_thm = ignore_interpretation_exn try_make_step ctxt | |
| 60649 | 1151 | (* FIXME !???! | 
| 55596 | 1152 | val ctxt' = | 
| 1153 | if is_some maybe_thm then | |
| 1154 | the maybe_thm | |
| 1155 | |> #1 | |
| 1156 | |> Thm.theory_of_thm |> Proof_Context.init_global | |
| 1157 | else ctxt | |
| 60649 | 1158 | *) | 
| 55596 | 1159 | in | 
| 60649 | 1160 | (maybe_thm, ctxt) | 
| 55596 | 1161 | end | 
| 1162 | in (thm, (node, thm) :: memo, ctxt') end | |
| 1163 | | SOME maybe_thm => (maybe_thm, memo, ctxt) | |
| 1164 | in | |
| 1165 | (Annotated_step (node, inference_name), | |
| 1166 | inference_fmla, | |
| 1167 | inference_instance_thm) :: rest memo' ctxt' | |
| 1168 | end | |
| 1169 | | Definition n => | |
| 1170 | let | |
| 1171 | fun def_thm thy = | |
| 1172 | case AList.lookup (op =) (#defs pannot) n of | |
| 1173 |                         NONE => error ("Did not find definition: " ^ n)
 | |
| 59858 | 1174 | | SOME binding => Global_Theory.get_thm thy (Binding.name_of binding) | 
| 55596 | 1175 | in | 
| 1176 | (hd skel, | |
| 59582 | 1177 | Thm.prop_of (def_thm thy) | 
| 55596 | 1178 | |> SOME, | 
| 60754 | 1179 | SOME (def_thm thy, HEADGOAL (resolve_tac ctxt [def_thm thy]))) :: rest memo ctxt | 
| 55596 | 1180 | end | 
| 1181 | | Axiom n => | |
| 1182 | let | |
| 1183 | val ax_thm = | |
| 1184 | case AList.lookup (op =) (#axs pannot) n of | |
| 1185 |                         NONE => error ("Did not find axiom: " ^ n)
 | |
| 59858 | 1186 | | SOME binding => Global_Theory.get_thm thy (Binding.name_of binding) | 
| 55596 | 1187 | in | 
| 1188 | (hd skel, | |
| 59582 | 1189 | Thm.prop_of ax_thm | 
| 55596 | 1190 | |> SOME, | 
| 60754 | 1191 | SOME (ax_thm, resolve_tac ctxt [ax_thm] 1)) :: rest memo ctxt | 
| 55596 | 1192 | end | 
| 1193 | end | |
| 1194 | ||
| 1195 | (*The next function handles cases where Leo2 doesn't include the solved_all_splits | |
| 1196 | step at the end (e.g. because there wouldn't be a split -- the proof | |
| 1197 | would be linear)*) | |
| 1198 | fun sas_if_needed_tac ctxt prob_name = | |
| 1199 | let | |
| 1200 | val thy = Proof_Context.theory_of ctxt | |
| 1201 | val pannot = get_pannot_of_prob thy prob_name | |
| 1202 | val last_inference_info_opt = | |
| 1203 | find_first | |
| 1204 | (fn (_, info) => #role info = TPTP_Syntax.Role_Plain) | |
| 1205 | (#meta pannot) | |
| 1206 | val last_inference_info = | |
| 1207 | case last_inference_info_opt of | |
| 1208 | NONE => NONE | |
| 1209 | | SOME (_, info) => #source_inf_opt info | |
| 1210 | in | |
| 1211 | if is_some last_inference_info andalso | |
| 1212 | TPTP_Proof.is_inference_called "solved_all_splits" | |
| 1213 | (the last_inference_info) | |
| 1214 |         then (@{thm asm_rl}, all_tac)
 | |
| 60754 | 1215 | else (solved_all_splits, TRY (resolve_tac ctxt [solved_all_splits] 1)) | 
| 55596 | 1216 | end | 
| 1217 | in | |
| 1218 | (*Build a tactic from a skeleton. This is naive because it uses the naive skeleton. | |
| 1219 |     The inference interpretation ("prover_tac") is a parameter -- it would usually be
 | |
| 1220 | different for different provers.*) | |
| 1221 | fun naive_reconstruct_tac ctxt prover_tac prob_name = | |
| 1222 | let | |
| 1223 | val thy = Proof_Context.theory_of ctxt | |
| 1224 | in | |
| 60754 | 1225 |       resolve_tac ctxt @{thms ccontr} 1
 | 
| 1226 | THEN dresolve_tac ctxt [neg_eq_false] 1 | |
| 55596 | 1227 | THEN (sas_if_needed_tac ctxt prob_name |> #2) | 
| 1228 | THEN skel_to_naive_tactic ctxt prover_tac prob_name | |
| 1229 | (make_skeleton ctxt | |
| 1230 | (get_pannot_of_prob thy prob_name)) [] | |
| 1231 | end | |
| 1232 | ||
| 1233 | (*As above, but generates a list of tactics. This is useful for debugging, to apply | |
| 1234 | the tactics one by one manually.*) | |
| 1235 | fun naive_reconstruct_tacs prover_tac prob_name ctxt = | |
| 1236 | let | |
| 1237 | val thy = Proof_Context.theory_of ctxt | |
| 1238 | in | |
| 59582 | 1239 |       (Synth_step "ccontr", Thm.prop_of @{thm ccontr} |> SOME,
 | 
| 60754 | 1240 |        SOME (@{thm ccontr}, resolve_tac ctxt @{thms ccontr} 1)) ::
 | 
| 59582 | 1241 | (Synth_step "neg_eq_false", Thm.prop_of neg_eq_false |> SOME, | 
| 60754 | 1242 | SOME (neg_eq_false, dresolve_tac ctxt [neg_eq_false] 1)) :: | 
| 59582 | 1243 |       (Synth_step "sas_if_needed_tac", Thm.prop_of @{thm asm_rl} (*FIXME *) |> SOME,
 | 
| 55596 | 1244 | SOME (sas_if_needed_tac ctxt prob_name)) :: | 
| 1245 | skel_to_naive_tactic_dbg prover_tac ctxt prob_name | |
| 1246 | (make_skeleton ctxt | |
| 1247 | (get_pannot_of_prob thy prob_name)) [] | |
| 1248 | end | |
| 1249 | end | |
| 1250 | ||
| 1251 | (*Produces a theorem given a tactic and a parsed proof. This function is handy | |
| 1252 | to test reconstruction, since it automates the interpretation and proving of the | |
| 1253 | parsed proof's goal.*) | |
| 1254 | fun reconstruct ctxt tactic prob_name = | |
| 1255 | let | |
| 1256 | val thy = Proof_Context.theory_of ctxt | |
| 1257 | val pannot = get_pannot_of_prob thy prob_name | |
| 1258 | val goal = | |
| 1259 | #meta pannot | |
| 58412 | 1260 | |> filter (fn (_, info) => | 
| 55596 | 1261 | #role info = TPTP_Syntax.Role_Conjecture) | 
| 1262 | in | |
| 1263 | if null (#meta pannot) then | |
| 1264 | (*since the proof is empty, return a trivial result.*) | |
| 1265 |       @{thm TrueI}
 | |
| 1266 | else if null goal then | |
| 1267 | raise (RECONSTRUCT "Proof lacks conjecture") | |
| 1268 | else | |
| 1269 | the_single goal | |
| 1270 | |> snd |> #fmla | |
| 1271 | |> (fn fmla => Goal.prove ctxt [] [] fmla (fn _ => tactic prob_name)) | |
| 1272 | end | |
| 1273 | ||
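(*Hypothetical usage sketch (not part of the original sources): given some prover-specific
  inference interpretation "prover_tac" of the shape expected by "skel_to_naive_tactic",
  reconstruction of a previously imported problem "prob_name" could be attempted roughly
  as follows:

    val thm =
      reconstruct ctxt
        (naive_reconstruct_tac ctxt prover_tac)
        prob_name

  "prover_tac" and "prob_name" are placeholders here; "reconstruct" merely locates the
  conjecture of the parsed proof and proves it with the supplied tactic.*)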
| 1274 | ||
| 1275 | (** Skolemisation setup **) | |
| 1276 | ||
| 1277 | (*Ignore these constants if they appear in the conclusion but not the hypothesis*) | |
| 1278 | (*FIXME possibly incomplete*) | |
| 1279 | val ignore_consts = | |
| 1280 | [HOLogic.conj, HOLogic.disj, HOLogic.imp, HOLogic.Not] | |
| 1281 | ||
| 1282 | (*Constants appearing in the second term but not the first, minus "ignore_consts"*) | |
| 1283 | fun new_consts_between t1 t2 = | |
| 58412 | 1284 | filter | 
| 1285 | (fn n => not (exists (fn n' => n' = n) ignore_consts)) | |
| 55596 | 1286 | (list_diff (consts_in t2) (consts_in t1)) | 
| 1287 | ||
| 1288 | (*Generate definition binding for an equation*) | |
| 1289 | fun mk_bind_eq prob_name params ((n, ty), t) = | |
| 1290 | let | |
| 1291 | val bnd = | |
| 56220 | 1292 | Binding.name (Long_Name.base_name n ^ "_def") | 
| 55596 | 1293 | |> Binding.qualify false (TPTP_Problem_Name.mangle_problem_name prob_name) | 
| 1294 | val t' = | |
| 1295 | Term.list_comb (Const (n, ty), params) | |
| 1296 | |> rpair t | |
| 1297 | |> HOLogic.mk_eq | |
| 1298 | |> HOLogic.mk_Trueprop | |
| 1299 | |> fold Logic.all params | |
| 1300 | in | |
| 1301 | (bnd, t') | |
| 1302 | end | |
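(*Informal illustration (added): for a defined constant "c" with parameters [x, y] and
  definition body "t", "mk_bind_eq" yields a binding named "c_def" (qualified by the
  mangled problem name) paired with the equation "c x y = t", stated as a Trueprop and
  closed over the parameters with meta-level universal quantifiers.*)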
| 1303 | ||
| 1304 | (*Generate binding for an axiom. Similar to "mk_bind_eq"*) | |
| 1305 | fun mk_bind_ax prob_name node t = | |
| 1306 | let | |
| 1307 | val bnd = | |
| 1308 | Binding.name node | |
| 1309 | (*FIXME add suffix? e.g. ^ "_ax"*) | |
| 1310 | |> Binding.qualify false (TPTP_Problem_Name.mangle_problem_name prob_name) | |
| 1311 | in | |
| 1312 | (bnd, t) | |
| 1313 | end | |
| 1314 | ||
| 1315 | (*Extract the constant name, type, and its definition*) | |
| 1316 | fun get_defn_components | |
| 69597 | 1317 | (Const (\<^const_name>\<open>HOL.Trueprop\<close>, _) $ | 
| 1318 | (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ | |
| 55596 | 1319 | Const (name, ty) $ t)) = ((name, ty), t) | 
| 1320 | ||
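(*Informal example (added): applied to a term of the form "Trueprop (c = rhs)" where "c"
  is a constant, "get_defn_components" returns (("c", type_of_c), rhs); the pattern match
  is partial, so terms of any other shape raise a match failure.*)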
| 1321 | ||
| 1322 | (*** Proof transformations ***) | |
| 1323 | ||
| 1324 | (*Transforms a proof_annotation value. | |
| 1325 | Argument "f" is the proof transformer*) | |
| 1326 | fun transf_pannot f (pannot : proof_annotation) : (theory * proof_annotation) = | |
| 1327 | let | |
| 1328 | val (thy', fms') = f (#meta pannot) | |
| 1329 | in | |
| 1330 | (thy', | |
| 1331 |      {problem_name = #problem_name pannot,
 | |
| 1332 | skolem_defs = #skolem_defs pannot, | |
| 1333 | defs = #defs pannot, | |
| 1334 | axs = #axs pannot, | |
| 1335 | meta = fms'}) | |
| 1336 | end | |
| 1337 | ||
| 1338 | ||
| 1339 | (** Proof transformer to add virtual inference steps | |
| 1340 | encoding "bind" annotations in Leo-II proofs **) | |
| 1341 | ||
| 1342 | (* | |
| 1343 | Involves finding an inference of this form: | |
| 1344 | ||
| 1345 | (!x1 ... xn. F) ... Cn | |
| 1346 | ------------------------------------ (Rule name) | |
| 1347 | G[t1/x1, ..., tn/xn] | |
| 1348 | ||
| 1349 | and turn it into this: | |
| 1350 | ||
| 1351 | ||
| 1352 | (!x1 ... xn. F) | |
| 1353 | ---------------------- bind | |
| 1354 | F[t1/x1, ..., tn/xn] ... Cn | |
| 1355 | -------------------------------------------- (Rule name) | |
| 1356 | G | |
| 1357 | ||
| 1358 | where "bind" is an inference rule (distinct from any rule name used | |
| 1359 | by Leo2) to indicate such inferences. This transformation is used | |
| 1360 | to factor out instantiations, thus allowing the reconstruction to | |
| 1361 | focus on (Rule name) rather than "(Rule name) + instantiations". | |
| 1362 | *) | |
| 1363 | fun interpolate_binds prob_name thy fms : theory * formula_meaning list = | |
| 1364 | let | |
| 1365 | fun factor_out_bind target_node pinfo intermediate_thy = | |
| 1366 | case pinfo of | |
| 1367 | TPTP_Proof.ParentWithDetails (n, pdetails) => | |
| 1368 | (*create a new node containing the "bind" inference, | |
| 1369 | to be added to the graph*) | |
| 1370 | let | |
| 1371 | val (new_node_name, thy') = get_next_name intermediate_thy | |
| 1372 | val orig_fmla = node_info fms #fmla n | |
| 1373 | val target_fmla = node_info fms #fmla target_node | |
| 1374 | val new_node = | |
| 1375 | (new_node_name, | |
| 1376 |                {role = TPTP_Syntax.Role_Plain,
 | |
| 1377 | fmla = apply_binding thy' prob_name orig_fmla target_fmla pdetails |> fst, | |
| 1378 | source_inf_opt = | |
| 1379 | SOME (TPTP_Proof.Inference (bindK, [], [pinfo]))}) | |
| 1380 | in | |
| 1381 | ((TPTP_Proof.Parent new_node_name, SOME new_node), thy') | |
| 1382 | end | |
| 1383 | | _ => ((pinfo, NONE), intermediate_thy) | |
| 1384 | fun process_nodes (step as (n, data)) (intermediate_thy, rest) = | |
| 1385 | case #source_inf_opt data of | |
| 1386 | SOME (TPTP_Proof.Inference (inf_name, sinfos, pinfos)) => | |
| 1387 | let | |
| 1388 | val ((pinfos', parent_nodes), thy') = | |
| 1389 | fold_map (factor_out_bind n) pinfos intermediate_thy | |
| 1390 | |> apfst ListPair.unzip | |
| 1391 | val step' = | |
| 1392 |                 (n, {role = #role data, fmla = #fmla data,
 | |
| 1393 | source_inf_opt = SOME (TPTP_Proof.Inference (inf_name, sinfos, pinfos'))}) | |
| 1394 | in (thy', fold_options parent_nodes @ step' :: rest) end | |
| 1395 | | _ => (intermediate_thy, step :: rest) | |
| 1396 | in | |
| 1397 | fold process_nodes fms (thy, []) | |
| 1398 | (*new_nodes must come at the beginning, since we assume that the last line in a proof is the closing line*) | |
| 1399 | |> apsnd rev | |
| 1400 | end | |
| 1401 | ||
| 1402 | ||
| 1403 | (** Proof transformer to add virtual inference steps | |
| 1404 | encoding any transformation done immediately prior | |
| 1405 | to a splitting step **) | |
| 1406 | ||
| 1407 | (* | |
| 1408 | Involves finding an inference of this form: | |
| 1409 | ||
| 1410 | F = $false | |
| 1411 | ----------------------------------- split_conjecture | |
| 1412 | (F1 = $false) ... (Fn = $false) | |
| 1413 | ||
| 1414 | where F doesn't have an "and" or "iff" at the top level, | |
| 1415 | and turn it into this: | |
| 1416 | ||
| 1417 | F = $false | |
| 1418 | ----------------------------------- split_preprocessing | |
| 1419 | (F1 % ... % Fn) = $false | |
| 1420 | ----------------------------------- split_conjecture | |
| 1421 | (F1 = $false) ... (Fn = $false) | |
| 1422 | ||
| 1423 | where "%" is either an "and" or an "iff" connective. | |
| 1424 | This transformation is used to clarify the clause structure, to | |
| 1425 | make it immediately "obvious" how splitting is taking place | |
| 1426 | (by factoring out the other syntactic transformations -- e.g. | |
| 1427 | related to quantifiers -- performed by Leo2). Having the clause | |
| 1428 | in this "clearer" form makes the inference amenable to handling | |
| 1429 | using the "abstraction" technique, which allows us to validate | |
| 1430 | large inferences. | |
| 1431 | *) | |
| 1432 | exception PREPROCESS_SPLITS | |
| 1433 | fun preprocess_splits prob_name thy fms : theory * formula_meaning list = | |
| 1434 | let | |
| 1435 | (*Simulate the transformation done by Leo2's preprocessing | |
| 1436 | step during splitting. | |
| 1437 | NOTE: we assume that the clause is a singleton | |
| 1438 | ||
| 1439 | This transformation does the following: | |
| 1440 | - miniscopes !-quantifiers (and recurs) | |
| 1441 | - removes redundant ?-quantifiers (and recurs) | |
| 1442 | - eliminates double negation (and recurs) | |
| 1443 | - breaks up conjunction (and recurs) | |
| 1444 | - expands iff (and doesn't recur)*) | |
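(*Informal illustration of the rules above (added): a clause body such as
    ~ ~ (A & (B = C))    (with B, C : bool)
  would be decomposed into the list [A, B --> C, C --> B]; the counter returned alongside
  the list increases only if some transformation applied, which is how "preprocess_split"
  below detects whether a virtual inference is needed.*)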
| 1445 | fun transform_fmla i fmla_t = | |
| 1446 | case fmla_t of | |
| 69597 | 1447 | Const (\<^const_name>\<open>HOL.All\<close>, ty) $ Abs (s, ty', t') => | 
| 55596 | 1448 | let | 
| 1449 | val (i', fmla_ts) = transform_fmla i t' | |
| 1450 | in | |
| 1451 | if i' > i then | |
| 1452 | (i' + 1, | |
| 1453 | map (fn t => | |
| 69597 | 1454 | Const (\<^const_name>\<open>HOL.All\<close>, ty) $ Abs (s, ty', t)) | 
| 55596 | 1455 | fmla_ts) | 
| 1456 | else (i, [fmla_t]) | |
| 1457 | end | |
| 69597 | 1458 | | Const (\<^const_name>\<open>HOL.Ex\<close>, ty) $ Abs (s, ty', t') => | 
| 55596 | 1459 | if loose_bvar (t', 0) then | 
| 1460 | (i, [fmla_t]) | |
| 1461 | else transform_fmla (i + 1) t' | |
| 69597 | 1462 | | \<^term>\<open>HOL.Not\<close> $ (\<^term>\<open>HOL.Not\<close> $ t') => | 
| 55596 | 1463 | transform_fmla (i + 1) t' | 
| 69597 | 1464 | | \<^term>\<open>HOL.conj\<close> $ t1 $ t2 => | 
| 55596 | 1465 | let | 
| 1466 | val (i1, fmla_t1s) = transform_fmla (i + 1) t1 | |
| 1467 | val (i2, fmla_t2s) = transform_fmla (i + 1) t2 | |
| 1468 | in | |
| 1469 | (i1 + i2 - i, fmla_t1s @ fmla_t2s) | |
| 1470 | end | |
| 69597 | 1471 | | Const (\<^const_name>\<open>HOL.eq\<close>, ty) $ t1 $ t2 => | 
| 55596 | 1472 | let | 
| 1473 | val (T1, (T2, res)) = | |
| 1474 | dest_funT ty | |
| 1475 | |> apsnd dest_funT | |
| 1476 | in | |
| 1477 | if T1 = HOLogic.boolT andalso T2 = HOLogic.boolT andalso | |
| 1478 | res = HOLogic.boolT then | |
| 1479 | (i + 1, | |
| 1480 | [HOLogic.mk_imp (t1, t2), | |
| 1481 | HOLogic.mk_imp (t2, t1)]) | |
| 1482 | else (i, [fmla_t]) | |
| 1483 | end | |
| 1484 | | _ => (i, [fmla_t]) | |
| 1485 | ||
| 1486 | fun preprocess_split thy split_node_name fmla_t = | |
| 1487 | (*create a new node containing the new inference, | |
| 1488 | to be added to the graph*) | |
| 1489 | let | |
| 1490 | val (node_name, thy') = get_next_name thy | |
| 1491 | val (changes, fmla_conjs) = | |
| 1492 | transform_fmla 0 fmla_t | |
| 1493 | |> apsnd rev (*otherwise we run into problems because | |
| 1494 | of commutativity of conjunction*) | |
| 1495 | val target_fmla = | |
| 1496 | fold (curry HOLogic.mk_conj) (tl fmla_conjs) (hd fmla_conjs) | |
| 1497 | val new_node = | |
| 1498 | (node_name, | |
| 1499 |           {role = TPTP_Syntax.Role_Plain,
 | |
| 1500 | fmla = | |
| 69597 | 1501 | HOLogic.mk_eq (target_fmla, \<^term>\<open>False\<close>) (*polarise*) | 
| 55596 | 1502 | |> HOLogic.mk_Trueprop, | 
| 1503 | source_inf_opt = | |
| 1504 | SOME (TPTP_Proof.Inference (split_preprocessingK, [], [TPTP_Proof.Parent split_node_name]))}) | |
| 1505 | in | |
| 1506 | if changes = 0 then NONE | |
| 1507 | else SOME (TPTP_Proof.Parent node_name, new_node, thy') | |
| 1508 | end | |
| 1509 | in | |
| 1510 | fold | |
| 1511 | (fn step as (n, data) => fn (intermediate_thy, redirections, rest) => | |
| 1512 | case #source_inf_opt data of | |
| 1513 | SOME (TPTP_Proof.Inference | |
| 1514 | (inf_name, sinfos, pinfos)) => | |
| 1515 | if inf_name <> "split_conjecture" then | |
| 1516 | (intermediate_thy, redirections, step :: rest) | |
| 1517 | else | |
| 1518 | let | |
| 1519 | (* | |
| 1520 | NOTE: here we assume that the node only has one | |
| 1521 | parent, and that there is no additional | |
| 1522 | parent info. | |
| 1523 | *) | |
| 1524 | val split_node_name = | |
| 1525 | case pinfos of | |
| 1526 | [TPTP_Proof.Parent n] => n | |
| 1527 | | _ => raise PREPROCESS_SPLITS | |
| 1528 | (*check whether we've already handled that node*) | |
| 1529 | in | |
| 1530 | case AList.lookup (op =) redirections split_node_name of | |
| 1531 | SOME preprocessed_split_node_name => | |
| 1532 | let | |
| 1533 | val step' = | |
| 1534 | apply_to_parent_info (fn _ => [TPTP_Proof.Parent preprocessed_split_node_name]) step | |
| 1535 | in (intermediate_thy, redirections, step' :: rest) end | |
| 1536 | | NONE => | |
| 1537 | let | |
| 1538 | (*we know the polarity to be $false, from knowing how Leo2 behaves*) | |
| 1539 | val split_fmla = | |
| 1540 | try_dest_Trueprop (node_info fms #fmla split_node_name) | |
| 1541 | |> remove_polarity true | |
| 1542 | |> fst | |
| 1543 | ||
| 1544 | val preprocess_result = | |
| 1545 | preprocess_split intermediate_thy | |
| 1546 | split_node_name | |
| 1547 | split_fmla | |
| 1548 | in | |
| 1549 | if is_none preprocess_result then | |
| 1550 | (*no preprocessing done by Leo2, so no need to introduce | |
| 1551 | a virtual inference. cache this result by | |
| 1552 | redirecting the split_node to itself*) | |
| 1553 | (intermediate_thy, | |
| 1554 | (split_node_name, split_node_name) :: redirections, | |
| 1555 | step :: rest) | |
| 1556 | else | |
| 1557 | let | |
| 1558 | val (new_parent_info, new_parent_node, thy') = the preprocess_result | |
| 1559 | val step' = | |
| 1560 |                                 (n, {role = #role data, fmla = #fmla data,
 | |
| 1561 | source_inf_opt = SOME (TPTP_Proof.Inference (inf_name, sinfos, [new_parent_info]))}) | |
| 1562 | in | |
| 1563 | (thy', | |
| 1564 | (split_node_name, fst new_parent_node) :: redirections, | |
| 1565 | step' :: new_parent_node :: rest) | |
| 1566 | end | |
| 1567 | end | |
| 1568 | end | |
| 1569 | | _ => (intermediate_thy, redirections, step :: rest)) | |
| 1570 | (rev fms) (*this allows us to put new inferences before other inferences which use them*) | |
| 1571 | (thy, [], []) | |
| 1572 | |> (fn (x, _, z) => (x, z)) (*discard redirection info*) | |
| 1573 | end | |
| 1574 | ||
| 1575 | ||
| 1576 | (** Proof transformer to remove repeated quantification **) | |
| 1577 | ||
| 1578 | exception DROP_REPEATED_QUANTIFICATION | |
| 1579 | fun drop_repeated_quantification thy (fms : formula_meaning list) : theory * formula_meaning list = | |
| 1580 | let | |
| 1581 | (*In case of repeated quantification, removes outer quantification. | |
| 1582 | Only need to look at top-level, since the repeated quantification | |
| 1583 | generally occurs at clause level*) | |
| 1584 | fun remove_repeated_quantification seen t = | |
| 1585 | case t of | |
| 1586 | (*NOTE we're assuming that variables having the same name, have the same type throughout*) | |
| 69597 | 1587 | Const (\<^const_name>\<open>HOL.All\<close>, ty) $ Abs (s, ty', t') => | 
| 55596 | 1588 | let | 
| 1589 | val (seen_so_far, seen') = | |
| 1590 | case AList.lookup (op =) seen s of | |
| 1591 | NONE => (0, (s, 0) :: seen) | |
| 1592 | | SOME n => (n + 1, AList.update (op =) (s, n + 1) seen) | |
| 1593 | val (pre_final_t, final_seen) = remove_repeated_quantification seen' t' | |
| 1594 | val final_t = | |
| 1595 | case AList.lookup (op =) final_seen s of | |
| 1596 | NONE => raise DROP_REPEATED_QUANTIFICATION | |
| 1597 | | SOME n => | |
| 1598 | if n > seen_so_far then pre_final_t | |
| 69597 | 1599 | else Const (\<^const_name>\<open>HOL.All\<close>, ty) $ Abs (s, ty', pre_final_t) | 
| 55596 | 1600 | in (final_t, final_seen) end | 
| 1601 | | _ => (t, seen) | |
| 1602 | ||
| 1603 |     fun remove_repeated_quantification' (n, {role, fmla, source_inf_opt}) =
 | |
| 1604 | (n, | |
| 1605 |        {role = role,
 | |
| 1606 | fmla = | |
| 1607 | try_dest_Trueprop fmla | |
| 1608 | |> remove_repeated_quantification [] | |
| 1609 | |> fst | |
| 1610 | |> HOLogic.mk_Trueprop, | |
| 1611 | source_inf_opt = source_inf_opt}) | |
| 1612 | in | |
| 1613 | (thy, map remove_repeated_quantification' fms) | |
| 1614 | end | |
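(*Informal illustration (added): a clause of the shape "! x. ! x. P x", where the same
  variable name is bound twice (as Leo2's clause normalisation can produce), is rewritten
  to "! x. P x"; only the outermost block of universal quantifiers is inspected.*)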
| 1615 | ||
| 1616 | ||
| 1617 | (** Proof transformer to detect a redundant splitting and remove | |
| 1618 | the redundant branch. **) | |
| 1619 | ||
| 1620 | fun node_is_inference fms rule_name node_name = | |
| 1621 | case node_info fms #source_inf_opt node_name of | |
| 1622 | NONE => false | |
| 1623 | | SOME (TPTP_Proof.File _) => false | |
| 1624 | | SOME (TPTP_Proof.Inference (rule_name', _, _)) => rule_name' = rule_name | |
| 1625 | ||
| 1626 | (*In this analysis we're interested in whether there exists a split-free | |
| 1627 | path between the end of the proof and the negated conjecture. | |
| 1628 | If so, then this path (or the shortest such path) could be | |
| 1629 | retained, and the rest of the proof erased.*) | |
| 1630 | datatype branch_info = | |
| 1631 | Split_free (*Path is not part of a split. This is only used when path reaches the negated conjecture.*) | |
| 1632 | | Split_present (*Path is one of a number of splits. Such paths are excluded.*) | |
| 1633 | | Coinconsistent of int (*Path leads to a clause which is inconsistent with nodes concluded by other paths. | |
| 1634 | Therefore this path should be kept if the others are kept | |
| 1635 | (i.e., unless one of them results from a split)*) | |
| 1636 | | No_info (*Analysis hasn't come across anything definite yet, though it still hasn't completed.*) | |
| 1637 | (*A "paths" value consist of every way of reaching the destination, | |
| 1638 | including information come across it so far. Taking the head of | |
| 1639 | each way gives the fringe. All paths should share the same source | |
| 1640 | and sink.*) | |
| 1641 | type path = (branch_info * string list) | |
| 1642 | exception PRUNE_REDUNDANT_SPLITS | |
| 1643 | fun prune_redundant_splits prob_name thy fms : theory * formula_meaning list = | |
| 1644 | let | |
| 1645 | (*All paths start at the contradiction*) | |
| 1646 | val initial_path = (No_info, [proof_end_node fms]) | |
| 1647 | (*All paths should end at the proof's beginning*) | |
| 1648 | val end_node = proof_beginning_node fms | |
| 1649 | ||
| 1650 | fun compute_path (path as ((info, | |
| 1651 | (n :: ns)) : path))(*i.e. node list can't be empty*) | |
| 1652 | intermediate_thy = | |
| 1653 | case info of | |
| 1654 | Split_free => (([path], []), intermediate_thy) | |
| 1655 | | Coinconsistent branch_id => | |
| 1656 | (*If this branch has a split_conjecture parent then all "sibling" branches get erased.*) | |
| 1657 | (*This branch can't lead to yet another coinconsistent branch (in the case of Leo2).*) | |
| 1658 | let | |
| 1659 | val parent_nodes = parents_of_node fms n | |
| 1660 | in | |
| 58412 | 1661 | if exists (node_is_inference fms "split_conjecture") parent_nodes then | 
| 55596 | 1662 | (([], [branch_id]), intermediate_thy) (*all related branches are to be deleted*) | 
| 1663 | else | |
| 1664 | list_prod [] parent_nodes (n :: ns) | |
| 1665 | |> map (fn ns' => (Coinconsistent branch_id, ns')) | |
| 1666 | |> (fn x => ((x, []), intermediate_thy)) | |
| 1667 | end | |
| 1668 | ||
| 1669 | | No_info => | |
| 1670 | let | |
| 1671 | val parent_nodes = parents_of_node fms n | |
| 1672 | ||
| 1673 | (*if this node is a consistency checking node then parent nodes will be marked as coinconsistent*) | |
| 1674 | val (thy', new_branch_info) = | |
| 1675 | if node_is_inference fms "fo_atp_e" n orelse | |
| 1676 | node_is_inference fms "res" n then | |
| 1677 | let | |
| 1678 | val (i', intermediate_thy') = get_next_int intermediate_thy | |
| 1679 | in | |
| 1680 | (intermediate_thy', SOME (Coinconsistent i')) | |
| 1681 | end | |
| 1682 | else (intermediate_thy, NONE) | |
| 1683 | in | |
| 58412 | 1684 | if exists (node_is_inference fms "split_conjecture") parent_nodes then | 
| 55596 | 1685 | (([], []), thy') | 
| 1686 | else | |
| 1687 | list_prod [] parent_nodes (n :: ns) | |
| 1688 | |> map (fn ns' => | |
| 1689 | let | |
| 1690 | val info = | |
| 1691 | if is_some new_branch_info then the new_branch_info | |
| 1692 | else | |
| 1693 | if hd ns' = end_node then Split_free else No_info | |
| 1694 | in (info, ns') end) | |
| 1695 | |> (fn x => ((x, []), thy')) | |
| 1696 | end | |
| 1697 | | _ => raise PRUNE_REDUNDANT_SPLITS | |
| 1698 | ||
| 1699 | fun compute_paths intermediate_thy (paths : path list) = | |
| 1700 | if filter (fn (_, ns) => ns <> [] andalso hd ns = end_node) paths = paths then | |
| 1701 | (*fixpoint reached when all paths are at the head position*) | |
| 1702 | (intermediate_thy, paths) | |
| 1703 | else | |
| 1704 | let | |
| 1705 | val filtered_paths = filter (fn (info, _) : path => info <> Split_present) paths (*not interested in paths containing a split*) | |
| 1706 | val (paths', thy') = | |
| 1707 | fold_map compute_path filtered_paths intermediate_thy | |
| 1708 | in | |
| 1709 | paths' | |
| 1710 | |> ListPair.unzip (*we get a list of pairs of lists. we want a pair of lists*) | |
| 1711 | |> (fn (paths, branch_ids) => | |
| 58411 | 1712 | (flat paths, | 
| 55596 | 1713 | (*remove duplicate branch_ids*) | 
| 58411 | 1714 | fold (Library.insert (op =)) (flat branch_ids) [])) | 
| 55596 | 1715 | (*filter paths having branch_ids appearing in the second list*) | 
| 1716 | |> (fn (paths, branch_ids) => | |
| 1717 | filter (fn (info, _) => | |
| 1718 | case info of | |
| 58412 | 1719 | Coinconsistent branch_id => exists (fn x => x = branch_id) branch_ids | 
| 55596 | 1720 | | _ => true) paths) | 
| 1721 | |> compute_paths thy' | |
| 1722 | end | |
| 1723 | ||
| 1724 | val (thy', paths) = | |
| 1725 | compute_paths thy [initial_path] | |
| 1726 | |> apsnd | |
| 1727 | (filter (fn (branch_info, _) => | |
| 1728 | case branch_info of | |
| 1729 | Split_free => true | |
| 1730 | | Coinconsistent _ => true | |
| 1731 | | _ => false)) | |
| 1732 | (*Extract subset of fms which is used in a path. | |
| 1733 | Also, remove references (in parent info annotations) to erased nodes.*) | |
| 1734 | fun path_to_fms ((_, nodes) : path) = | |
| 1735 | fold | |
| 1736 | (fn n => fn fms' => | |
| 1737 | case AList.lookup (op =) fms' n of | |
| 1738 | SOME _ => fms' | |
| 1739 | | NONE => | |
| 1740 | let | |
| 1741 | val node_info = the (AList.lookup (op =) fms n) | |
| 1742 | ||
| 1743 | val source_info' = | |
| 1744 | case #source_inf_opt node_info of | |
| 1745 | NONE => error "Only the conjecture is an orphan" | |
| 1746 | | SOME (source_info as TPTP_Proof.File _) => source_info | |
| 1747 | | SOME (source_info as | |
| 1748 | TPTP_Proof.Inference (inference_name, | |
| 1749 | useful_infos : TPTP_Proof.useful_info_as list, | |
| 1750 | parent_infos)) => | |
| 1751 | let | |
| 1752 | fun is_node_in_fms' parent_info = | |
| 1753 | let | |
| 1754 | val parent_nodename = | |
| 1755 | case parent_info of | |
| 1756 | TPTP_Proof.Parent n => n | |
| 1757 | | TPTP_Proof.ParentWithDetails (n, _) => n | |
| 1758 | in | |
| 1759 | case AList.lookup (op =) fms' parent_nodename of | |
| 1760 | NONE => false | |
| 1761 | | SOME _ => true | |
| 1762 | end | |
| 1763 | in | |
| 1764 | TPTP_Proof.Inference (inference_name, | |
| 1765 | useful_infos, | |
| 1766 | filter is_node_in_fms' parent_infos) | |
| 1767 | end | |
| 1768 | in | |
| 1769 | (n, | |
| 1770 |                     {role = #role node_info,
 | |
| 1771 | fmla = #fmla node_info, | |
| 1772 | source_inf_opt = SOME source_info'}) :: fms' | |
| 1773 | end) | |
| 1774 | nodes | |
| 1775 | [] | |
| 1776 | in | |
| 1777 | if null paths then (thy', fms) else | |
| 1778 | (thy', | |
| 1779 | hd(*FIXME could pick path based on length, or some notion of "difficulty"*) paths | |
| 1780 | |> path_to_fms) | |
| 1781 | end | |
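(*Added note: if no split-free (or coinconsistent) path from the contradiction back to the
  proof's beginning is found, the proof is returned unchanged; otherwise the first such path
  is kept, all nodes outside it are dropped, and parent references to erased nodes are
  filtered out of the surviving inference annotations.*)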
| 1782 | ||
| 1783 | ||
| 1784 | (*** Main functions ***) | |
| 1785 | ||
| 1786 | (*interpret proof*) | |
| 1787 | fun import_thm cautious path_prefixes file_name | |
| 1788 | (on_load : proof_annotation -> theory -> (proof_annotation * theory)) thy = | |
| 1789 | let | |
| 1790 | val prob_name = | |
| 69366 | 1791 | Path.file_name file_name | 
| 55596 | 1792 | |> TPTP_Problem_Name.parse_problem_name | 
| 1793 | val thy1 = TPTP_Interpret.import_file cautious path_prefixes file_name [] [] thy | |
| 1794 | val fms = get_fmlas_of_prob thy1 prob_name | |
| 1795 | in | |
| 1796 | if List.null fms then | |
| 62552 | 1797 |       (warning ("File " ^ Path.print file_name ^ " appears empty!");
 | 
| 55596 | 1798 | TPTP_Reconstruction_Data.map (cons ((prob_name, empty_pannot prob_name))) thy1) | 
| 1799 | else | |
| 1800 | let | |
| 1801 | val defn_equations = | |
| 58412 | 1802 | filter (fn (_, role, _, _) => role = TPTP_Syntax.Role_Definition) fms | 
| 55596 | 1803 | |> map (fn (node, _, t, _) => | 
| 1804 | (node, | |
| 1805 | get_defn_components t | |
| 1806 | |> mk_bind_eq prob_name [])) | |
| 1807 | val axioms = | |
| 58412 | 1808 | filter (fn (_, role, _, _) => role = TPTP_Syntax.Role_Axiom) fms | 
| 55596 | 1809 | |> map (fn (node, _, t, _) => | 
| 1810 | (node, | |
| 1811 | mk_bind_ax prob_name node t)) | |
| 1812 | ||
| 1813 | (*add definitions and axioms to the theory*) | |
| 1814 | val thy2 = | |
| 1815 | fold | |
| 1816 | (fn bnd => fn thy => | |
| 1817 | let | |
| 1818 | val ((name, thm), thy') = Thm.add_axiom_global bnd thy | |
| 1819 | in Global_Theory.add_thm ((#1 bnd, thm), []) thy' |> #2 end) | |
| 1820 | (map snd defn_equations @ map snd axioms) | |
| 1821 | thy1 | |
| 1822 | ||
| 1823 | (*apply global proof transformations*) | |
| 1824 | val (thy3, pre_pannot) : theory * proof_annotation = | |
| 1825 | transf_pannot | |
| 1826 | (prune_redundant_splits prob_name thy2 | |
| 1827 | #-> interpolate_binds prob_name | |
| 1828 | #-> preprocess_splits prob_name | |
| 1829 | #-> drop_repeated_quantification) | |
| 1830 |            {problem_name = prob_name,
 | |
| 1831 | skolem_defs = [], | |
| 1832 | defs = map (apsnd fst) defn_equations, | |
| 1833 | axs = map (apsnd fst) axioms, | |
| 1834 |             meta = map (fn (n, r, t, info) => (n, {role=r, fmla=t, source_inf_opt=info})) fms}
 | |
| 1835 | ||
| 1836 | (*store pannot*) | |
| 1837 | val thy4 = TPTP_Reconstruction_Data.map (cons ((prob_name, pre_pannot))) thy3 | |
| 1838 | ||
| 1839 | (*run hook, which might result in changed pannot and theory*) | |
| 1840 | val (pannot, thy5) = on_load pre_pannot thy4 | |
| 1841 | ||
| 1842 | (*store the most recent pannot*) | |
| 1843 | in TPTP_Reconstruction_Data.map (cons ((prob_name, pannot))) thy5 end | |
| 1844 | end | |
| 1845 | ||
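(*Hypothetical usage sketch (not part of the original sources): importing a Leo2 proof file
  with a trivial "on_load" hook that leaves the proof annotation unchanged.  The path prefix
  and file name below are illustrative only.

    val thy' =
      import_thm true
        [Path.explode "$TPTP"]
        (Path.explode "example_proof.p")
        (fn pannot => fn thy => (pannot, thy))
        thy
*)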
| 1846 | (*This has been disabled since it requires a hook to be specified to use "import_thm" | |
| 1847 | val _ = | |
| 59936 | 1848 |   Outer_Syntax.command @{command_keyword import_leo2_proof} "import TPTP proof" | 
| 55596 | 1849 | (Parse.path >> (fn name => | 
| 1850 | Toplevel.theory (fn thy => | |
| 1851 | let val path = Path.explode name | |
| 1852 | in import_thm true [Path.dir path, Path.explode "$TPTP"] path (*FIXME hook needs to be given here*) | |
| 1853 | thy end))) | |
| 1854 | *) | |
| 1855 | ||
| 1856 | ||
| 1857 | (** Archive **) | |
| 1858 | (*FIXME move elsewhere*) | |
| 1859 | (*This contains currently unused functions written during experimentation, | |
| 1860 | kept here in case they prove useful later on*) | |
| 1861 | ||
| 1862 | (*given a list of rules and a node, return | |
| 1863 | SOME (rule name) if that node's rule name | |
| 1864 | belongs to the list of rules*) | |
| 1865 | fun match_rules_of_current (pannot : proof_annotation) rules n = | |
| 1866 | case node_info (#meta pannot) #source_inf_opt n of | |
| 1867 | NONE => NONE | |
| 1868 | | SOME (TPTP_Proof.File _) => NONE | |
| 1869 | | SOME (TPTP_Proof.Inference (rule_name, _ : TPTP_Proof.useful_info_as list, _)) => | |
| 1870 | if member (op =) rules rule_name then SOME rule_name else NONE | |
| 1871 | ||
| 1872 | (*given a node and a list of rules, determine | |
| 1873 | whether all the rules can be matched to | |
| 1874 | parent nodes. If nonstrict then there may be | |
| 1875 | more parents than given rules.*) | |
| 1876 | fun match_rules_of_immediate_previous (pannot : proof_annotation) strict rules n = | |
| 1877 | case node_info (#meta pannot) #source_inf_opt n of | |
| 1878 | NONE => null rules | |
| 1879 | | SOME (TPTP_Proof.File _) => null rules | |
| 1880 | | SOME (TPTP_Proof.Inference (rule_name, _ : TPTP_Proof.useful_info_as list, parent_infos)) => | |
| 1881 | let | |
| 1882 | val matched_rules : string option list = | |
| 1883 | map (match_rules_of_current pannot rules) | |
| 1884 | (dest_parent_infos true (#meta pannot) parent_infos |> map #name) | |
| 1885 | in | |
| 1886 | if strict andalso member (op =) matched_rules NONE then false | |
| 1887 | else | |
| 1888 | (*check that all the rules were matched*) | |
| 1889 | fold | |
| 1890 | (fn (rule : string) => fn (st, matches : string option list) => | |
| 1891 | if not st then (st, matches) | |
| 1892 | else | |
| 1893 | let | |
| 1894 | val idx = find_index (fn match => SOME rule = match) matches | |
| 1895 | in | |
| 1896 | if idx < 0 then (false, matches) | |
| 1897 | else | |
| 1898 | (st, nth_drop idx matches) | |
| 1899 | end) | |
| 1900 | rules | |
| 1901 | (true, matched_rules) | |
| 1902 | |> #1 (*discard the other info*) | |
| 1903 | end | |
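(*Added note: for instance, "match_rules_of_immediate_previous pannot true rules n" holds
  roughly when every rule name in "rules" is matched by a distinct parent of "n" and, due
  to the strict flag, every parent of "n" is concluded by one of the listed rules; with
  strict = false, additional parents concluded by other rules are tolerated.*)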
| 1904 | end |