--- a/src/HOL/IMP/Collecting_Examples.thy Fri Jan 11 22:01:49 2013 +0100
+++ b/src/HOL/IMP/Collecting_Examples.thy Fri Jan 11 22:16:48 2013 +0100
@@ -15,25 +15,25 @@
definition "show_acom xs = map_acom (\<lambda>S. show_state xs ` S)"
text{* Collecting semantics: *}
-value "show_acom [''x''] (((step {\<lambda>x. 0}) ^^ 1) C0)"
-value "show_acom [''x''] (((step {\<lambda>x. 0}) ^^ 2) C0)"
-value "show_acom [''x''] (((step {\<lambda>x. 0}) ^^ 3) C0)"
-value "show_acom [''x''] (((step {\<lambda>x. 0}) ^^ 4) C0)"
-value "show_acom [''x''] (((step {\<lambda>x. 0}) ^^ 5) C0)"
-value "show_acom [''x''] (((step {\<lambda>x. 0}) ^^ 6) C0)"
-value "show_acom [''x''] (((step {\<lambda>x. 0}) ^^ 7) C0)"
-value "show_acom [''x''] (((step {\<lambda>x. 0}) ^^ 8) C0)"
+
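+(* <> is the null state, i.e. \<lambda>x. 0, so {<>} replaces the old {\<lambda>x. 0} *)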
+value "show_acom [''x''] (((step {<>}) ^^ 1) C0)"
+value "show_acom [''x''] (((step {<>}) ^^ 2) C0)"
+value "show_acom [''x''] (((step {<>}) ^^ 3) C0)"
+value "show_acom [''x''] (((step {<>}) ^^ 4) C0)"
+value "show_acom [''x''] (((step {<>}) ^^ 5) C0)"
+value "show_acom [''x''] (((step {<>}) ^^ 6) C0)"
+value "show_acom [''x''] (((step {<>}) ^^ 7) C0)"
+value "show_acom [''x''] (((step {<>}) ^^ 8) C0)"
text{* Small-step semantics: *}
-value "show_acom [''x''] (((step {}) ^^ 0) (step {\<lambda>x. 0} C0))"
-value "show_acom [''x''] (((step {}) ^^ 1) (step {\<lambda>x. 0} C0))"
-value "show_acom [''x''] (((step {}) ^^ 2) (step {\<lambda>x. 0} C0))"
-value "show_acom [''x''] (((step {}) ^^ 3) (step {\<lambda>x. 0} C0))"
-value "show_acom [''x''] (((step {}) ^^ 4) (step {\<lambda>x. 0} C0))"
-value "show_acom [''x''] (((step {}) ^^ 5) (step {\<lambda>x. 0} C0))"
-value "show_acom [''x''] (((step {}) ^^ 6) (step {\<lambda>x. 0} C0))"
-value "show_acom [''x''] (((step {}) ^^ 7) (step {\<lambda>x. 0} C0))"
-value "show_acom [''x''] (((step {}) ^^ 8) (step {\<lambda>x. 0} C0))"
-
+value "show_acom [''x''] (((step {}) ^^ 0) (step {<>} C0))"
+value "show_acom [''x''] (((step {}) ^^ 1) (step {<>} C0))"
+value "show_acom [''x''] (((step {}) ^^ 2) (step {<>} C0))"
+value "show_acom [''x''] (((step {}) ^^ 3) (step {<>} C0))"
+value "show_acom [''x''] (((step {}) ^^ 4) (step {<>} C0))"
+value "show_acom [''x''] (((step {}) ^^ 5) (step {<>} C0))"
+value "show_acom [''x''] (((step {}) ^^ 6) (step {<>} C0))"
+value "show_acom [''x''] (((step {}) ^^ 7) (step {<>} C0))"
+value "show_acom [''x''] (((step {}) ^^ 8) (step {<>} C0))"
end
--- a/src/HOL/Library/Extended_Real.thy Fri Jan 11 22:01:49 2013 +0100
+++ b/src/HOL/Library/Extended_Real.thy Fri Jan 11 22:16:48 2013 +0100
@@ -1752,10 +1752,18 @@
"ereal_of_enat m \<le> ereal_of_enat n \<longleftrightarrow> m \<le> n"
by (cases m n rule: enat2_cases) auto
+lemma ereal_of_enat_less_iff[simp]:
+ "ereal_of_enat m < ereal_of_enat n \<longleftrightarrow> m < n"
+by (cases m n rule: enat2_cases) auto
+
lemma numeral_le_ereal_of_enat_iff[simp]:
shows "numeral m \<le> ereal_of_enat n \<longleftrightarrow> numeral m \<le> n"
by (cases n) (auto dest: natceiling_le intro: natceiling_le_eq[THEN iffD1])
+lemma numeral_less_ereal_of_enat_iff[simp]:
+ shows "numeral m < ereal_of_enat n \<longleftrightarrow> numeral m < n"
+by (cases n) (auto simp: real_of_nat_less_iff[symmetric])
+
lemma ereal_of_enat_ge_zero_cancel_iff[simp]:
"0 \<le> ereal_of_enat n \<longleftrightarrow> 0 \<le> n"
by (cases n) (auto simp: enat_0[symmetric])
@@ -1768,6 +1776,11 @@
"ereal_of_enat 0 = 0"
by (auto simp: enat_0[symmetric])
+lemma ereal_of_enat_inf[simp]:
+ "ereal_of_enat n = \<infinity> \<longleftrightarrow> n = \<infinity>"
+ by (cases n) auto
+
+
lemma ereal_of_enat_add:
"ereal_of_enat (m + n) = ereal_of_enat m + ereal_of_enat n"
by (cases m n rule: enat2_cases) auto
--- a/src/HOL/TPTP/mash_eval.ML Fri Jan 11 22:01:49 2013 +0100
+++ b/src/HOL/TPTP/mash_eval.ML Fri Jan 11 22:16:48 2013 +0100
@@ -91,14 +91,14 @@
[(mepo_weight, (mepo_facts, [])),
(mash_weight, (mash_facts, mash_unks))]
val mesh_facts =
- mesh_facts (Thm.eq_thm o pairself snd) slack_max_facts mess
+ mesh_facts (Thm.eq_thm_prop o pairself snd) slack_max_facts mess
val isar_facts =
find_suggested_facts (map (rpair 1.0) isar_deps) facts
(* adapted from "mirabelle_sledgehammer.ML" *)
fun set_file_name heading (SOME dir) =
let
val prob_prefix =
- "goal_" ^ string_of_int j ^ "__" ^ escape_meta name ^ "__" ^
+ "goal_" ^ string_of_int j ^ "__" ^ encode_str name ^ "__" ^
heading
in
Config.put dest_dir dir
@@ -109,7 +109,7 @@
fun prove heading get facts =
let
fun nickify ((_, stature), th) =
- ((K (escape_meta (nickname_of_thm th)), stature), th)
+ ((K (encode_str (nickname_of_thm th)), stature), th)
val facts =
facts
|> map (get #> nickify)
--- a/src/HOL/TPTP/mash_export.ML Fri Jan 11 22:01:49 2013 +0100
+++ b/src/HOL/TPTP/mash_export.ML Fri Jan 11 22:16:48 2013 +0100
@@ -54,7 +54,7 @@
val _ = File.write path ""
fun do_fact fact prevs =
let
- val s = escape_meta fact ^ ": " ^ escape_metas prevs ^ "\n"
+ val s = encode_str fact ^ ": " ^ encode_strs prevs ^ "\n"
val _ = File.append path s
in [fact] end
val facts =
@@ -76,7 +76,7 @@
val feats =
features_of ctxt prover (theory_of_thm th) stature [prop_of th]
val s =
- escape_meta name ^ ": " ^ encode_features (sort_wrt fst feats) ^ "\n"
+ encode_str name ^ ": " ^ encode_features (sort_wrt fst feats) ^ "\n"
in File.append path s end
in List.app do_fact facts end
@@ -105,7 +105,7 @@
val deps =
isar_or_prover_dependencies_of ctxt params_opt facts name_tabs th
NONE
- in escape_meta name ^ ": " ^ escape_metas deps ^ "\n" end
+ in encode_str name ^ ": " ^ encode_strs deps ^ "\n" end
else
""
val lines = Par_List.map do_fact (tag_list 1 facts)
@@ -140,14 +140,14 @@
isar_or_prover_dependencies_of ctxt params_opt facts name_tabs th
(SOME isar_deps)
val core =
- escape_meta name ^ ": " ^ escape_metas prevs ^ "; " ^
+ encode_str name ^ ": " ^ encode_strs prevs ^ "; " ^
encode_features (sort_wrt fst feats)
val query =
if is_bad_query ctxt ho_atp th isar_deps then ""
else "? " ^ core ^ "\n"
val update =
"! " ^ core ^ "; " ^
- escape_metas (these (trim_dependencies th deps)) ^ "\n"
+ encode_strs (these (trim_dependencies th deps)) ^ "\n"
in query ^ update end
else
""
@@ -189,7 +189,7 @@
|> Sledgehammer_MePo.mepo_suggested_facts ctxt params prover
max_suggs NONE hyp_ts concl_t
|> map (nickname_of_thm o snd)
- in escape_meta name ^ ": " ^ escape_metas suggs ^ "\n" end
+ in encode_str name ^ ": " ^ encode_strs suggs ^ "\n" end
end
fun accum x (yss as ys :: _) = (x :: ys) :: yss
val old_factss = tl (fold accum new_facts [old_facts])
@@ -203,19 +203,18 @@
val _ = File.write mesh_path ""
fun do_fact (mash_line, mepo_line) =
let
- val (mash_name, mash_suggs) =
+ val (name, mash_suggs) =
extract_suggestions mash_line
||> (map fst #> weight_mash_facts)
- val (mepo_name, mepo_suggs) =
+ val (name', mepo_suggs) =
extract_suggestions mepo_line
||> (map fst #> weight_mash_facts)
- val _ =
- if mash_name = mepo_name then () else error "Input files out of sync."
+ val _ = if name = name' then () else error "Input files out of sync."
val mess =
[(mepo_weight, (mepo_suggs, [])),
(mash_weight, (mash_suggs, []))]
val mesh_suggs = mesh_facts (op =) max_suggs mess
- val mesh_line = mash_name ^ ": " ^ space_implode " " mesh_suggs ^ "\n"
+ val mesh_line = encode_str name ^ ": " ^ encode_strs mesh_suggs ^ "\n"
in File.append mesh_path mesh_line end
val mash_lines = Path.explode mash_file_name |> File.read_lines
val mepo_lines = Path.explode mepo_file_name |> File.read_lines
--- a/src/HOL/Tools/Nitpick/nitpick.ML Fri Jan 11 22:01:49 2013 +0100
+++ b/src/HOL/Tools/Nitpick/nitpick.ML Fri Jan 11 22:16:48 2013 +0100
@@ -186,17 +186,8 @@
fun java_too_old_message () =
"The Java version is too old. " ^ isabelle_wrong_message
fun kodkodi_not_installed_message () =
- "Nitpick requires the external Java program Kodkodi. To install it, download \
- \the package from \"http://www21.in.tum.de/~blanchet/#software\" and add the \
- \\"kodkodi-x.y.z\" directory's full path to " ^
- Path.print (Path.expand (Path.explode "$ISABELLE_HOME_USER/etc/components")) ^
- " on a line of its own."
-fun kodkodi_too_old_message () =
- "The installed Kodkodi version is too old. To install a newer version, \
- \download the package from \"http://www21.in.tum.de/~blanchet/#software\" \
- \and add the \"kodkodi-x.y.z\" directory's full path to " ^
- Path.print (Path.expand (Path.explode "$ISABELLE_HOME_USER/etc/components")) ^
- " on a line of its own."
+ "Nitpick requires the external Java program Kodkodi."
+fun kodkodi_too_old_message () = "The installed Kodkodi version is too old."
val max_unsound_delay_ms = 200
val max_unsound_delay_percent = 2
--- a/src/HOL/Tools/Sledgehammer/MaSh/src/dictionaries.py Fri Jan 11 22:01:49 2013 +0100
+++ b/src/HOL/Tools/Sledgehammer/MaSh/src/dictionaries.py Fri Jan 11 22:16:48 2013 +0100
@@ -31,6 +31,7 @@
self.featureDict = {}
self.dependenciesDict = {}
self.accessibleDict = {}
+ self.featureCountDict = {}
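+ # featureCountDict maps feature ids to the number of times the feature has occurred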
self.expandedAccessibles = {}
self.changed = True
@@ -38,8 +39,8 @@
Init functions. Side Effect: nameIdDict, idNameDict, featureIdDict, articleDict get filled!
"""
def init_featureDict(self,featureFile):
- self.featureDict,self.maxNameId,self.maxFeatureId = create_feature_dict(self.nameIdDict,self.idNameDict,self.maxNameId,self.featureIdDict,\
- self.maxFeatureId,featureFile)
+ self.featureDict,self.maxNameId,self.maxFeatureId,self.featureCountDict = create_feature_dict(self.nameIdDict,self.idNameDict,self.maxNameId,self.featureIdDict,\
+ self.maxFeatureId,self.featureCountDict,featureFile)
def init_dependenciesDict(self,depFile):
self.dependenciesDict = create_dependencies_dict(self.nameIdDict,depFile)
def init_accessibleDict(self,accFile):
@@ -73,9 +74,12 @@
def add_feature(self,featureName):
if not self.featureIdDict.has_key(featureName):
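+ # unseen feature: assign a fresh id and start its use count at zero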
self.featureIdDict[featureName] = self.maxFeatureId
+ self.featureCountDict[self.maxFeatureId] = 0
self.maxFeatureId += 1
self.changed = True
- return self.featureIdDict[featureName]
+ fId = self.featureIdDict[featureName]
+ self.featureCountDict[fId] += 1
+ return fId
def get_features(self,line):
# Feature Ids
@@ -83,12 +87,12 @@
features = []
for fn in featureNames:
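+ # a feature may come as "name=weight"; bare names get weight 1.0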
tmp = fn.split('=')
+ weight = 1.0
if len(tmp) == 2:
- fId = self.add_feature(tmp[0])
- features.append((fId,float(tmp[1])))
- else:
- fId = self.add_feature(fn)
- features.append((fId,1.0))
+ fn = tmp[0]
+ weight = float(tmp[1])
+ fId = self.add_feature(fn)
+ features.append((fId,weight))
return features
def expand_accessibles(self,acc):
@@ -150,7 +154,7 @@
def parse_problem(self,line):
"""
- Parses a problem and returns the features and the accessibles.
+ Parses a problem and returns the features, the accessibles, and any hints.
"""
assert line.startswith('? ')
line = line[2:]
@@ -176,19 +180,24 @@
self.changed = True
accessibles = self.expand_accessibles(unExpAcc)
features = self.get_features(line)
+ # Get hints:
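+ # (hints, when present, form the third ';'-separated field of a query)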
+ if len(line) == 3:
+ hints = [self.nameIdDict[d.strip()] for d in line[2].split()]
+ else:
+ hints = []
- return name,features,accessibles
+ return name,features,accessibles,hints
def save(self,fileName):
if self.changed:
dictsStream = open(fileName, 'wb')
dump((self.accessibleDict,self.dependenciesDict,self.expandedAccessibles,self.featureDict,\
- self.featureIdDict,self.idNameDict,self.maxFeatureId,self.maxNameId,self.nameIdDict),dictsStream)
+ self.featureIdDict,self.idNameDict,self.maxFeatureId,self.maxNameId,self.nameIdDict,self.featureCountDict),dictsStream)
self.changed = False
dictsStream.close()
def load(self,fileName):
dictsStream = open(fileName, 'rb')
self.accessibleDict,self.dependenciesDict,self.expandedAccessibles,self.featureDict,\
- self.featureIdDict,self.idNameDict,self.maxFeatureId,self.maxNameId,self.nameIdDict = load(dictsStream)
+ self.featureIdDict,self.idNameDict,self.maxFeatureId,self.maxNameId,self.nameIdDict,self.featureCountDict = load(dictsStream)
self.changed = False
dictsStream.close()
--- a/src/HOL/Tools/Sledgehammer/MaSh/src/mash.py Fri Jan 11 22:01:49 2013 +0100
+++ b/src/HOL/Tools/Sledgehammer/MaSh/src/mash.py Fri Jan 11 22:16:48 2013 +0100
@@ -48,6 +48,8 @@
help='Name of the file with the premise dependencies. The file must be in inputDir. Default = mash_dependencies')
parser.add_argument('--saveModel',default=False,action='store_true',help="Stores the learned Model at the end of a prediction run. Default=False.")
parser.add_argument('--learnTheories',default=False,action='store_true',help="Uses a two-lvl prediction mode. First the theories, then the premises. Default=False.")
+# DEBUG: change the sinePrior default to False
+parser.add_argument('--sinePrior',default=True,action='store_true',help="Uses a SInE-like prior for premise-level predictions. Default=False.")
parser.add_argument('--nb',default=False,action='store_true',help="Use Naive Bayes for learning. This is the default learning method.")
@@ -87,7 +89,7 @@
# Pick algorithm
if args.nb:
logger.info('Using sparse Naive Bayes for learning.')
- model = sparseNBClassifier()
+ model = sparseNBClassifier(args.sinePrior)
modelFile = os.path.join(args.outputDir,'NB.pickle')
elif args.snow:
logger.info('Using naive bayes (SNoW) for learning.')
@@ -101,7 +103,7 @@
modelFile = os.path.join(args.outputDir,'mepo.pickle')
else:
logger.info('No algorithm specified. Using sparse Naive Bayes.')
- model = sparseNBClassifier()
+ model = sparseNBClassifier(args.sinePrior)
modelFile = os.path.join(args.outputDir,'NB.pickle')
dictsFile = os.path.join(args.outputDir,'dicts.pickle')
theoryFile = os.path.join(args.outputDir,'theory.pickle')
@@ -182,7 +184,7 @@
# Update Dependencies, p proves p
dicts.dependenciesDict[problemId] = [problemId]+dicts.dependenciesDict[problemId]
if args.learnTheories:
- theoryModels.update(problemId,dicts)
+ theoryModels.update(problemId,dicts.featureDict[problemId],dicts.dependenciesDict[problemId],dicts)
if args.snow:
model.update(problemId,dicts.featureDict[problemId],dicts.dependenciesDict[problemId],dicts)
else:
@@ -200,16 +202,34 @@
computeStats = True
if args.predef:
continue
- name,features,accessibles = dicts.parse_problem(line)
+ name,features,accessibles,hints = dicts.parse_problem(line)
# Create predictions
logger.info('Starting computation for problem on line %s',lineCounter)
+ # Update Models with hints
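+ # (the hints are learned under the pseudo-name 'hints' and removed again after the prediction)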
+ if not hints == []:
+ if args.learnTheories:
+ accessibleTheories = set([(dicts.idNameDict[x]).split('.')[0] for x in accessibles])
+ theoryModels.update_with_acc('hints',features,hints,dicts,accessibleTheories)
+ if args.snow:
+ pass
+ else:
+ model.update('hints',features,hints)
+
+ # Predict premises
if args.learnTheories:
predictedTheories,accessibles = theoryModels.predict(features,accessibles,dicts)
- if args.snow:
- predictions,predictionValues = model.predict(features,accessibles,dicts)
- else:
- predictions,predictionValues = model.predict(features,accessibles)
+ predictions,predictionValues = model.predict(features,accessibles,dicts)
assert len(predictions) == len(predictionValues)
+
+ # Delete hints
+ if not hints == []:
+ if args.learnTheories:
+ theoryModels.delete('hints',features,hints,dicts)
+ if args.snow:
+ pass
+ else:
+ model.delete('hints',features,hints)
+
logger.info('Done. %s seconds needed.',round(time()-startTime,2))
# Output
predictionNames = [str(dicts.idNameDict[p]) for p in predictions[:args.numberOfPredictions]]
@@ -253,10 +273,19 @@
if __name__ == '__main__':
# Example:
+ # Auth
+ # ISAR Theories
+ #args = ['-l','testNB.log','-o','../tmp/','--statistics','--init','--inputDir','../data/20121227b/Auth/','--learnTheories']
+ #args = ['-i', '../data/20121227b/Auth/mash_commands','-p','../tmp/testNB.pred','-l','../tmp/testNB.log','--nb','-o','../tmp/','--statistics','--saveStats','../tmp/JinjaIsarNB.stats','--cutOff','500','--learnTheories']
+ # ISAR MePo
+ #args = ['-l','testIsabelle.log','-o','../tmp/','--statistics','--init','--inputDir','../data/20121227b/Auth/','--predef']
+ #args = ['-i', '../data/20121227b/Auth/mash_commands','-p','../tmp/JinjaMePo.pred','-l','testIsabelle.log','--predef','-o','../tmp/','--statistics','--saveStats','../tmp/JinjaMePo.stats']
+
+
# Jinja
# ISAR Theories
- #args = ['-l','testNB.log','-o','../tmp/','--statistics','--init','--inputDir','../data/20121221/Jinja/','--learnTheories']
- #args = ['-i', '../data/20121221/Jinja/mash_commands','-p','../tmp/testNB.pred','-l','../tmp/testNB.log','--nb','-o','../tmp/','--statistics','--saveStats','../tmp/JinjaIsarNB.stats','--cutOff','500','--learnTheories']
+ #args = ['-l','testNB.log','-o','../tmp/','--statistics','--init','--inputDir','../data/20121227b/Jinja/','--learnTheories']
+ #args = ['-i', '../data/20121227b/Jinja/mash_commands','-p','../tmp/testNB.pred','-l','../tmp/testNB.log','--nb','-o','../tmp/','--statistics','--saveStats','../tmp/JinjaIsarNB.stats','--cutOff','500','--learnTheories']
# ISAR NB
#args = ['-l','testNB.log','-o','../tmp/','--statistics','--init','--inputDir','../data/20121221/Jinja/']
#args = ['-i', '../data/20121221/Jinja/mash_commands','-p','../tmp/testNB.pred','-l','../tmp/testNB.log','--nb','-o','../tmp/','--statistics','--saveStats','../tmp/JinjaIsarNB.stats','--cutOff','500']
@@ -277,6 +306,9 @@
# Probability
+ # ISAR Theories
+ #args = ['-l','testNB.log','-o','../tmp/','--statistics','--init','--inputDir','../data/20121213/Probability/','--learnTheories']
+ #args = ['-i', '../data/20121213/Probability/mash_commands','-p','../tmp/testNB.pred','-l','../tmp/testNB.log','--nb','-o','../tmp/','--statistics','--saveStats','../tmp/JinjaIsarNB.stats','--cutOff','500','--learnTheories']
# ISAR NB
#args = ['-l','testNB.log','-o','../tmp/','--statistics','--init','--inputDir','../data/20121213/Probability/']
#args = ['-i', '../data/20121213/Probability/mash_commands','-p','../tmp/testNB.pred','-l','../tmp/testNB.log','--nb','-o','../tmp/','--statistics','--saveStats','../tmp/ProbIsarNB.stats','--cutOff','500']
--- a/src/HOL/Tools/Sledgehammer/MaSh/src/readData.py Fri Jan 11 22:01:49 2013 +0100
+++ b/src/HOL/Tools/Sledgehammer/MaSh/src/readData.py Fri Jan 11 22:16:48 2013 +0100
@@ -14,7 +14,7 @@
import sys,logging
-def create_feature_dict(nameIdDict,idNameDict,maxNameId,featureIdDict,maxFeatureId,inputFile):
+def create_feature_dict(nameIdDict,idNameDict,maxNameId,featureIdDict,maxFeatureId,featureCountDict,inputFile):
logger = logging.getLogger('create_feature_dict')
featureDict = {}
IS = open(inputFile,'r')
@@ -32,23 +32,24 @@
maxNameId += 1
# Feature Ids
featureNames = [f.strip() for f in line[1].split()]
- features = []
+ features = []
for fn in featureNames:
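+ # a feature may come as "name=weight"; bare names get weight 1.0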
+ weight = 1.0
tmp = fn.split('=')
if len(tmp) == 2:
- if not featureIdDict.has_key(tmp[0]):
- featureIdDict[tmp[0]] = maxFeatureId
- maxFeatureId += 1
- features.append((featureIdDict[tmp[0]],float(tmp[1])))
- else:
- if not featureIdDict.has_key(fn):
- featureIdDict[fn] = maxFeatureId
- maxFeatureId += 1
- features.append((featureIdDict[fn],1.0))
+ fn = tmp[0]
+ weight = float(tmp[1])
+ if not featureIdDict.has_key(fn):
+ featureIdDict[fn] = maxFeatureId
+ featureCountDict[maxFeatureId] = 0
+ maxFeatureId += 1
+ fId = featureIdDict[fn]
+ features.append((fId,weight))
+ featureCountDict[fId] += 1
# Store results
featureDict[nameId] = features
IS.close()
- return featureDict,maxNameId,maxFeatureId
+ return featureDict,maxNameId,maxFeatureId,featureCountDict
def create_dependencies_dict(nameIdDict,inputFile):
logger = logging.getLogger('create_dependencies_dict')
--- a/src/HOL/Tools/Sledgehammer/MaSh/src/singleNaiveBayes.py Fri Jan 11 22:01:49 2013 +0100
+++ b/src/HOL/Tools/Sledgehammer/MaSh/src/singleNaiveBayes.py Fri Jan 11 22:16:48 2013 +0100
@@ -61,7 +61,7 @@
self.pos -= 1
else:
self.neg -= 1
- for f in features:
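+ # features are (id, weight) pairs; only the id matters for the counts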
+ for f,_w in features:
posCount,negCount = self.counts[f]
if label:
posCount -= 1
@@ -79,7 +79,7 @@
def predict_sparse(self,features):
"""
- Returns 1 if the probability is greater than 50%.
+ Returns 1 if the positive label (+) is judged more probable than the negative label (-).
"""
if self.neg == 0:
return 1
--- a/src/HOL/Tools/Sledgehammer/MaSh/src/sparseNaiveBayes.py Fri Jan 11 22:01:49 2013 +0100
+++ b/src/HOL/Tools/Sledgehammer/MaSh/src/sparseNaiveBayes.py Fri Jan 11 22:16:48 2013 +0100
@@ -19,11 +19,15 @@
An updateable naive Bayes classifier.
'''
- def __init__(self):
+ def __init__(self,useSinePrior = False):
'''
Constructor
'''
self.counts = {}
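+ # defaultPriorWeight: initial count for the "p proves p" prior
+ # posWeight and defVal are the smoothing constants used in predict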
+ self.sinePrior = useSinePrior
+ self.defaultPriorWeight = 20
+ self.posWeight = 20
+ self.defVal = 15
def initializeModel(self,trainData,dicts):
"""
@@ -32,6 +36,10 @@
for d in trainData:
dPosCount = 0
dFeatureCounts = {}
+ # DEBUG: give p |- p a higher weight
+ dPosCount = self.defaultPriorWeight
+ for f,_w in dicts.featureDict[d]:
+ dFeatureCounts[f] = self.defaultPriorWeight
self.counts[d] = [dPosCount,dFeatureCounts]
for key in dicts.dependenciesDict.keys():
@@ -53,8 +61,12 @@
"""
if not self.counts.has_key(dataPoint):
dPosCount = 0
- dFeatureCounts = {}
- self.counts[dataPoint] = [dPosCount,dFeatureCounts]
+ dFeatureCounts = {}
+ # DEBUG: give p |- p a higher weight
+ dPosCount = self.defaultPriorWeight
+ for f,_w in features:
+ dFeatureCounts[f] = self.defaultPriorWeight
+ self.counts[dataPoint] = [dPosCount,dFeatureCounts]
for dep in dependencies:
self.counts[dep][0] += 1
for f,_w in features:
@@ -69,7 +81,7 @@
"""
for dep in dependencies:
self.counts[dep][0] -= 1
- for f in features:
+ for f,_w in features:
self.counts[dep][1][f] -= 1
@@ -83,13 +95,11 @@
self.delete(problemId,features,oldDeps)
self.update(problemId,features,newDependencies)
- def predict(self,features,accessibles):
+ def predict(self,features,accessibles,dicts):
"""
For each accessible, predicts the probability of it being useful given the features.
Returns a ranking of the accessibles.
"""
- posWeight = 20.0
- defVal = 15
predictions = []
for a in accessibles:
posA = self.counts[a][0]
@@ -101,12 +111,12 @@
#w = 1
if f in fA:
if fWeightsA[f] == 0:
- resultA -= w*defVal
+ resultA -= w*self.defVal
else:
assert fWeightsA[f] <= posA
- resultA += w*log(float(posWeight*fWeightsA[f])/posA)
+ resultA += w*log(float(self.posWeight*fWeightsA[f])/posA)
else:
- resultA -= w*defVal
+ resultA -= w*self.defVal
predictions.append(resultA)
#expPredictions = array([exp(x) for x in predictions])
predictions = array(predictions)
--- a/src/HOL/Tools/Sledgehammer/MaSh/src/theoryModels.py Fri Jan 11 22:01:49 2013 +0100
+++ b/src/HOL/Tools/Sledgehammer/MaSh/src/theoryModels.py Fri Jan 11 22:16:48 2013 +0100
@@ -87,15 +87,13 @@
for a in self.accTheories:
self.theoryModels[a].overwrite(features,a in oldTheories,a in newTheories)
- def delete(self):
- pass
+ def delete(self,problemId,features,dependencies,dicts):
+ tmp = [dicts.idNameDict[x] for x in dependencies]
+ usedTheories = set([x.split('.')[0] for x in tmp])
+ for a in self.accessibleTheories:
+ self.theoryModels[a].delete(features,a in usedTheories)
- def update(self,problemId,dicts):
- features = dicts.featureDict[problemId]
-
- # Find the actually used theories
- tmp = [dicts.idNameDict[x] for x in dicts.dependenciesDict[problemId]]
- usedTheories = set([x.split('.')[0] for x in tmp])
+ def update(self,problemId,features,dependencies,dicts):
currentTheory = (dicts.idNameDict[problemId]).split('.')[0]
# Create new theory model, if there is a new theory
if not self.theoryDict.has_key(currentTheory):
@@ -105,9 +103,15 @@
self.currentTheory = currentTheory
theoryModel = singleNBClassifier()
self.theoryModels[currentTheory] = theoryModel
- self.accessibleTheories.add(self.currentTheory)
- if not len(usedTheories) == 0:
- for a in self.accessibleTheories:
+ self.accessibleTheories.add(self.currentTheory)
+ self.update_with_acc(problemId,features,dependencies,dicts,self.accessibleTheories)
+
+ def update_with_acc(self,problemId,features,dependencies,dicts,accessibleTheories):
+ # Find the actually used theories
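+ # (a fact's theory is the prefix of its name before the first '.')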
+ tmp = [dicts.idNameDict[x] for x in dependencies]
+ usedTheories = set([x.split('.')[0] for x in tmp])
+ if not len(usedTheories) == 0:
+ for a in accessibleTheories:
self.theoryModels[a].update(features,a in usedTheories)
def predict(self,features,accessibles,dicts):
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_isar.ML Fri Jan 11 22:01:49 2013 +0100
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_isar.ML Fri Jan 11 22:16:48 2013 +0100
@@ -373,6 +373,8 @@
fun hammer_away override_params subcommand opt_i fact_override state =
let
(* necessary to avoid problems in jEdit *)
+ val state = state |> Proof.map_context (Config.put show_markup false)
val ctxt = Proof.context_of state
val override_params = override_params |> map (normalize_raw_param ctxt)
val _ = Isabelle_System.mkdir (Path.explode (getenv "ISABELLE_TMP"))
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_mash.ML Fri Jan 11 22:01:49 2013 +0100
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_mash.ML Fri Jan 11 22:16:48 2013 +0100
@@ -24,10 +24,10 @@
val relearn_isarN : string
val relearn_proverN : string
val fact_filters : string list
- val escape_meta : string -> string
- val escape_metas : string list -> string
- val unescape_meta : string -> string
- val unescape_metas : string -> string list
+ val encode_str : string -> string
+ val encode_strs : string list -> string
+ val unencode_str : string -> string
+ val unencode_strs : string -> string list
val encode_features : (string * real) list -> string
val extract_suggestions : string -> string * (string * real) list
@@ -40,7 +40,8 @@
val relearn :
Proof.context -> bool -> (string * string list) list -> unit
val suggest :
- Proof.context -> bool -> int -> string list * (string * real) list
+ Proof.context -> bool -> int
+ -> string list * (string * real) list * string list
-> (string * real) list
end
@@ -189,40 +190,40 @@
| unmeta_chars _ (#"%" :: _) = "" (* error *)
| unmeta_chars accum (c :: cs) = unmeta_chars (c :: accum) cs
-val escape_meta = String.translate meta_char
-val escape_metas = map escape_meta #> space_implode " "
-val unescape_meta = String.explode #> unmeta_chars []
-val unescape_metas =
- space_explode " " #> filter_out (curry (op =) "") #> map unescape_meta
+val encode_str = String.translate meta_char
+val encode_strs = map encode_str #> space_implode " "
+val unencode_str = String.explode #> unmeta_chars []
+val unencode_strs =
+ space_explode " " #> filter_out (curry (op =) "") #> map unencode_str
fun encode_feature (name, weight) =
- escape_meta name ^
+ encode_str name ^
(if Real.== (weight, 1.0) then "" else "=" ^ Real.toString weight)
val encode_features = map encode_feature #> space_implode " "
fun str_of_learn (name, parents, feats, deps) =
- "! " ^ escape_meta name ^ ": " ^ escape_metas parents ^ "; " ^
- encode_features feats ^ "; " ^ escape_metas deps ^ "\n"
+ "! " ^ encode_str name ^ ": " ^ encode_strs parents ^ "; " ^
+ encode_features feats ^ "; " ^ encode_strs deps ^ "\n"
fun str_of_relearn (name, deps) =
- "p " ^ escape_meta name ^ ": " ^ escape_metas deps ^ "\n"
+ "p " ^ encode_str name ^ ": " ^ encode_strs deps ^ "\n"
-fun str_of_query (parents, feats) =
- "? " ^ escape_metas parents ^ "; " ^ encode_features feats ^ "\n"
+fun str_of_query (parents, feats, hints) =
+ "? " ^ encode_strs parents ^ "; " ^ encode_features feats ^
+ (if null hints then "" else "; " ^ encode_strs hints) ^ "\n"
fun extract_suggestion sugg =
case space_explode "=" sugg of
[name, weight] =>
- SOME (unescape_meta name, Real.fromString weight |> the_default 1.0)
- | [name] => SOME (unescape_meta name, 1.0)
+ SOME (unencode_str name, Real.fromString weight |> the_default 1.0)
+ | [name] => SOME (unencode_str name, 1.0)
| _ => NONE
fun extract_suggestions line =
case space_explode ":" line of
[goal, suggs] =>
- (unescape_meta goal,
- map_filter extract_suggestion (space_explode " " suggs))
+ (unencode_str goal, map_filter extract_suggestion (space_explode " " suggs))
| _ => ("", [])
structure MaSh =
@@ -249,9 +250,9 @@
elide_string 1000 (space_implode " " (map #1 relearns)));
run_mash_tool ctxt overlord true 0 (relearns, str_of_relearn) (K ()))
-fun suggest ctxt overlord max_suggs (query as (_, feats)) =
+fun suggest ctxt overlord max_suggs (query as (_, feats, hints)) =
(trace_msg ctxt (fn () => "MaSh suggest " ^ encode_features feats);
- run_mash_tool ctxt overlord false max_suggs
+ run_mash_tool ctxt overlord (not (null hints)) max_suggs
([query], str_of_query)
(fn suggs =>
case suggs () of
@@ -312,7 +313,7 @@
local
-val version = "*** MaSh version 20130104a ***"
+val version = "*** MaSh version 20130111a ***"
exception Too_New of unit
@@ -321,7 +322,7 @@
[head, parents] =>
(case space_explode " " head of
[kind, name] =>
- SOME (unescape_meta name, unescape_metas parents,
+ SOME (unencode_str name, unencode_strs parents,
try proof_kind_of_str kind |> the_default Isar_Proof)
| _ => NONE)
| _ => NONE
@@ -361,8 +362,8 @@
| save ctxt {access_G, dirty} =
let
fun str_of_entry (name, parents, kind) =
- str_of_proof_kind kind ^ " " ^ escape_meta name ^ ": " ^
- escape_metas parents ^ "\n"
+ str_of_proof_kind kind ^ " " ^ encode_str name ^ ": " ^
+ encode_strs parents ^ "\n"
fun append_entry (name, (kind, (parents, _))) =
cons (name, Graph.Keys.dest parents, kind)
val (banner, entries) =
@@ -621,6 +622,11 @@
exclusively to "someI_e" (and to some internal constructions). *)
val class_some_dep = nickname_of_thm @{thm someI_ex}
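+(* Facts produced by the function package are derived using these (cf. typedef_ths below). *)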
+val fundef_ths =
+ @{thms fundef_ex1_existence fundef_ex1_uniqueness fundef_ex1_iff
+ fundef_default_value}
+ |> map nickname_of_thm
+
(* "Rep_xxx_inject", "Abs_xxx_inverse", etc., are derived using these facts. *)
val typedef_ths =
@{thms type_definition.Abs_inverse type_definition.Rep_inverse
@@ -647,8 +653,11 @@
let
val deps = thms_in_proof (SOME name_tabs) th
in
- if deps = [typedef_dep] orelse deps = [class_some_dep] orelse
- exists (member (op =) typedef_ths) deps orelse is_size_def deps th then
+ if deps = [typedef_dep] orelse
+ deps = [class_some_dep] orelse
+ exists (member (op =) fundef_ths) deps orelse
+ exists (member (op =) typedef_ths) deps orelse
+ is_size_def deps th then
[]
else
deps
@@ -771,12 +780,13 @@
val unknown =
raw_unknown
|> fold (subtract (Thm.eq_thm_prop o pairself snd)) [chained, proximity]
- in (mesh_facts (Thm.eq_thm o pairself snd) max_facts mess, unknown) end
+ in (mesh_facts (Thm.eq_thm_prop o pairself snd) max_facts mess, unknown) end
fun mash_suggested_facts ctxt ({overlord, ...} : params) prover max_facts hyp_ts
concl_t facts =
let
val thy = Proof_Context.theory_of ctxt
+ val chained = facts |> filter (fn ((_, (scope, _)), _) => scope = Chained)
val (access_G, suggs) =
peek_state ctxt (fn {access_G, ...} =>
if Graph.is_empty access_G then
@@ -786,10 +796,11 @@
val parents = maximal_in_graph access_G facts
val feats =
features_of ctxt prover thy (Local, General) (concl_t :: hyp_ts)
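+ (* pass the chained facts to MaSh as hints *)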
+ val hints = map (nickname_of_thm o snd) chained
in
- (access_G, MaSh.suggest ctxt overlord max_facts (parents, feats))
+ (access_G,
+ MaSh.suggest ctxt overlord max_facts (parents, feats, hints))
end)
- val chained = facts |> filter (fn ((_, (scope, _)), _) => scope = Chained)
val unknown = facts |> filter_out (is_fact_in_graph access_G)
in find_mash_suggestions max_facts suggs facts chained unknown end
@@ -1116,7 +1127,7 @@
|> (if fact_filter <> mepoN then cons (mash_weight, (mash ()))
else I)
in
- mesh_facts (Thm.eq_thm o pairself snd) max_facts mess
+ mesh_facts (Thm.eq_thm_prop o pairself snd) max_facts mess
|> not (null add_ths) ? prepend_facts add_ths
end