merged
author wenzelm
Tue, 07 Oct 2014 21:01:31 +0200
changeset 58615 c38d8f139bbc
parent 58608 5b7f0b5da884 (current diff)
parent 58614 7338eb25226c (diff)
child 58616 4257a7f2bf39
--- a/NEWS	Tue Oct 07 14:02:24 2014 +0200
+++ b/NEWS	Tue Oct 07 21:01:31 2014 +0200
@@ -129,6 +129,12 @@
 PARALLEL_GOALS.
 
 
+*** System ***
+
+* The Isabelle tool "update_cartouches" changes theory files to use
+cartouches instead of old-style {* verbatim *} or `alt_string` tokens.
+
+
 
 New in Isabelle2014 (August 2014)
 ---------------------------------
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/Tools/update_cartouches	Tue Oct 07 21:01:31 2014 +0200
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+#
+# Author: Makarius
+#
+# DESCRIPTION: update theory syntax to use cartouches
+
+
+## diagnostics
+
+PRG="$(basename "$0")"
+
+function usage()
+{
+  echo
+  echo "Usage: isabelle $PRG [FILES|DIRS...]"
+  echo
+  echo "  Recursively find .thy files and update theory syntax to use cartouches"
+  echo "  instead of old-style {* verbatim *} or \`alt_string\` tokens."
+  echo
+  echo "  Old versions of files are preserved by appending \"~~\"."
+  echo
+  exit 1
+}
+
+
+## process command line
+
+[ "$#" -eq 0 -o "$1" = "-?" ] && usage
+
+SPECS="$@"; shift "$#"
+
+
+## main
+
+find $SPECS -name \*.thy -print0 | \
+  xargs -0 "$ISABELLE_TOOL" java isabelle.Update_Cartouches
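
For example, the tool can be applied to a directory of theory sources as
follows (a minimal usage sketch; the directory name is illustrative):

  # convert all .thy files found recursively under the given directory
  isabelle update_cartouches src/HOL/Isar_Examples
  # each converted FILE.thy is rewritten in place; the previous version
  # with the old {* ... *} tokens is preserved as FILE.thy~~
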
--- a/src/HOL/Isar_Examples/Basic_Logic.thy	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/HOL/Isar_Examples/Basic_Logic.thy	Tue Oct 07 21:01:31 2014 +0200
@@ -4,19 +4,19 @@
 Basic propositional and quantifier reasoning.
 *)
 
-header {* Basic logical reasoning *}
+header \<open>Basic logical reasoning\<close>
 
 theory Basic_Logic
 imports Main
 begin
 
 
-subsection {* Pure backward reasoning *}
+subsection \<open>Pure backward reasoning\<close>
 
-text {* In order to get a first idea of how Isabelle/Isar proof
+text \<open>In order to get a first idea of what Isabelle/Isar proof
   documents may look like, we consider the propositions @{text I},
   @{text K}, and @{text S}.  The following (rather explicit) proofs
-  should require little extra explanations. *}
+  should require little extra explanation.\<close>
 
 lemma I: "A \<longrightarrow> A"
 proof
@@ -51,14 +51,14 @@
   qed
 qed
 
-text {* Isar provides several ways to fine-tune the reasoning,
+text \<open>Isar provides several ways to fine-tune the reasoning,
   avoiding excessive detail.  Several abbreviated language elements
   are available, enabling the writer to express proofs in a more
   concise way, even without referring to any automated proof tools
   yet.
 
   First of all, proof by assumption may be abbreviated as a single
-  dot. *}
+  dot.\<close>
 
 lemma "A \<longrightarrow> A"
 proof
@@ -66,42 +66,42 @@
   show A by fact+
 qed
 
-text {* In fact, concluding any (sub-)proof already involves solving
+text \<open>In fact, concluding any (sub-)proof already involves solving
   any remaining goals by assumption\footnote{This is not a completely
   trivial operation, as proof by assumption may involve full
   higher-order unification.}.  Thus we may skip the rather vacuous
-  body of the above proof as well. *}
+  body of the above proof as well.\<close>
 
 lemma "A \<longrightarrow> A"
 proof
 qed
 
-text {* Note that the \isacommand{proof} command refers to the @{text
+text \<open>Note that the \isacommand{proof} command refers to the @{text
   rule} method (without arguments) by default.  Thus it implicitly
   applies a single rule, as determined from the syntactic form of the
   statements involved.  The \isacommand{by} command abbreviates any
-  proof with empty body, so the proof may be further pruned. *}
+  proof with empty body, so the proof may be further pruned.\<close>
 
 lemma "A \<longrightarrow> A"
   by rule
 
-text {* Proof by a single rule may be abbreviated as double-dot. *}
+text \<open>Proof by a single rule may be abbreviated as double-dot.\<close>
 
 lemma "A \<longrightarrow> A" ..
 
-text {* Thus we have arrived at an adequate representation of the
+text \<open>Thus we have arrived at an adequate representation of the
   proof of a tautology that holds by a single standard
   rule.\footnote{Apparently, the rule here is implication
-  introduction.} *}
+  introduction.}\<close>
 
-text {* Let us also reconsider @{text K}.  Its statement is composed
+text \<open>Let us also reconsider @{text K}.  Its statement is composed
   of iterated connectives.  Basic decomposition is by a single rule at
   a time, which is why our first version above was by nesting two
   proofs.
 
   The @{text intro} proof method repeatedly decomposes a goal's
   conclusion.\footnote{The dual method is @{text elim}, acting on a
-  goal's premises.} *}
+  goal's premises.}\<close>
 
 lemma "A \<longrightarrow> B \<longrightarrow> A"
 proof (intro impI)
@@ -109,12 +109,12 @@
   show A by fact
 qed
 
-text {* Again, the body may be collapsed. *}
+text \<open>Again, the body may be collapsed.\<close>
 
 lemma "A \<longrightarrow> B \<longrightarrow> A"
   by (intro impI)
 
-text {* Just like @{text rule}, the @{text intro} and @{text elim}
+text \<open>Just like @{text rule}, the @{text intro} and @{text elim}
   proof methods pick standard structural rules, in case no explicit
   arguments are given.  While implicit rules are usually just fine for
   single rule application, this may go too far with iteration.  Thus
@@ -127,18 +127,18 @@
   prime application of @{text intro} and @{text elim}.  In contrast,
   terminal steps that solve a goal completely are usually performed by
   actual automated proof methods (such as \isacommand{by}~@{text
-  blast}. *}
+  blast}).\<close>
 
 
-subsection {* Variations of backward vs.\ forward reasoning *}
+subsection \<open>Variations of backward vs.\ forward reasoning\<close>
 
-text {* Certainly, any proof may be performed in backward-style only.
+text \<open>Certainly, any proof may be performed in backward-style only.
   On the other hand, small steps of reasoning are often more naturally
   expressed in forward-style.  Isar supports both backward and forward
   reasoning as a first-class concept.  In order to demonstrate the
   difference, we consider several proofs of @{text "A \<and> B \<longrightarrow> B \<and> A"}.
 
-  The first version is purely backward. *}
+  The first version is purely backward.\<close>
 
 lemma "A \<and> B \<longrightarrow> B \<and> A"
 proof
@@ -150,70 +150,70 @@
   qed
 qed
 
-text {* Above, the @{text "conjunct_1/2"} projection rules had to be
+text \<open>Above, the @{text "conjunct_1/2"} projection rules had to be
   named explicitly, since the goals @{text B} and @{text A} did not
   provide any structural clue.  This may be avoided using
   \isacommand{from} to focus on the @{text "A \<and> B"} assumption as the
   current facts, enabling the use of double-dot proofs.  Note that
   \isacommand{from} already does forward-chaining, involving the
-  @{text conjE} rule here. *}
+  @{text conjE} rule here.\<close>
 
 lemma "A \<and> B \<longrightarrow> B \<and> A"
 proof
   assume "A \<and> B"
   show "B \<and> A"
   proof
-    from `A \<and> B` show B ..
-    from `A \<and> B` show A ..
+    from \<open>A \<and> B\<close> show B ..
+    from \<open>A \<and> B\<close> show A ..
   qed
 qed
 
-text {* In the next version, we move the forward step one level
+text \<open>In the next version, we move the forward step one level
   upwards.  Forward-chaining from the most recent facts is indicated
   by the \isacommand{then} command.  Thus the proof of @{text "B \<and> A"}
   from @{text "A \<and> B"} actually becomes an elimination, rather than an
   introduction.  The resulting proof structure directly corresponds to
   that of the @{text conjE} rule, including the repeated goal
-  proposition that is abbreviated as @{text ?thesis} below. *}
+  proposition that is abbreviated as @{text ?thesis} below.\<close>
 
 lemma "A \<and> B \<longrightarrow> B \<and> A"
 proof
   assume "A \<and> B"
   then show "B \<and> A"
-  proof                    -- {* rule @{text conjE} of @{text "A \<and> B"} *}
+  proof                    -- \<open>rule @{text conjE} of @{text "A \<and> B"}\<close>
     assume B A
-    then show ?thesis ..   -- {* rule @{text conjI} of @{text "B \<and> A"} *}
+    then show ?thesis ..   -- \<open>rule @{text conjI} of @{text "B \<and> A"}\<close>
   qed
 qed
 
-text {* In the subsequent version we flatten the structure of the main
+text \<open>In the subsequent version we flatten the structure of the main
   body by doing forward reasoning all the time.  Only the outermost
-  decomposition step is left as backward. *}
+  decomposition step is left as backward.\<close>
 
 lemma "A \<and> B \<longrightarrow> B \<and> A"
 proof
   assume "A \<and> B"
-  from `A \<and> B` have A ..
-  from `A \<and> B` have B ..
-  from `B` `A` show "B \<and> A" ..
+  from \<open>A \<and> B\<close> have A ..
+  from \<open>A \<and> B\<close> have B ..
+  from \<open>B\<close> \<open>A\<close> show "B \<and> A" ..
 qed
 
-text {* We can still push forward-reasoning a bit further, even at the
+text \<open>We can still push forward-reasoning a bit further, even at the
   risk of getting ridiculous.  Note that we force the initial proof
-  step to do nothing here, by referring to the ``-'' proof method. *}
+  step to do nothing here, by referring to the ``-'' proof method.\<close>
 
 lemma "A \<and> B \<longrightarrow> B \<and> A"
 proof -
   {
     assume "A \<and> B"
-    from `A \<and> B` have A ..
-    from `A \<and> B` have B ..
-    from `B` `A` have "B \<and> A" ..
+    from \<open>A \<and> B\<close> have A ..
+    from \<open>A \<and> B\<close> have B ..
+    from \<open>B\<close> \<open>A\<close> have "B \<and> A" ..
   }
-  then show ?thesis ..         -- {* rule @{text impI} *}
+  then show ?thesis ..         -- \<open>rule @{text impI}\<close>
 qed
 
-text {* \medskip With these examples we have shifted through a whole
+text \<open>\medskip With these examples we have shifted through a whole
   range from purely backward to purely forward reasoning.  Apparently,
   in the extreme ends we get slightly ill-structured proofs, which
   also require much explicit naming of either rules (backward) or
@@ -226,11 +226,11 @@
   course.  Depending on the actual applications, the intended audience
   etc., rules (and methods) on the one hand vs.\ facts on the other
   hand have to be emphasized in an appropriate way.  This requires the
-  proof writer to develop good taste, and some practice, of course. *}
+  proof writer to develop good taste, and some practice, of course.\<close>
 
-text {* For our example the most appropriate way of reasoning is
+text \<open>For our example the most appropriate way of reasoning is
   probably the middle one, with conjunction introduction done after
-  elimination. *}
+  elimination.\<close>
 
 lemma "A \<and> B \<longrightarrow> B \<and> A"
 proof
@@ -244,32 +244,32 @@
 
 
 
-subsection {* A few examples from ``Introduction to Isabelle'' *}
+subsection \<open>A few examples from ``Introduction to Isabelle''\<close>
 
-text {* We rephrase some of the basic reasoning examples of
-  \cite{isabelle-intro}, using HOL rather than FOL. *}
+text \<open>We rephrase some of the basic reasoning examples of
+  @{cite "isabelle-intro"}, using HOL rather than FOL.\<close>
 
 
-subsubsection {* A propositional proof *}
+subsubsection \<open>A propositional proof\<close>
 
-text {* We consider the proposition @{text "P \<or> P \<longrightarrow> P"}.  The proof
+text \<open>We consider the proposition @{text "P \<or> P \<longrightarrow> P"}.  The proof
   below involves forward-chaining from @{text "P \<or> P"}, followed by an
-  explicit case-analysis on the two \emph{identical} cases. *}
+  explicit case-analysis on the two \emph{identical} cases.\<close>
 
 lemma "P \<or> P \<longrightarrow> P"
 proof
   assume "P \<or> P"
   then show P
-  proof                    -- {*
+  proof                    -- \<open>
     rule @{text disjE}: \smash{$\infer{C}{A \disj B & \infer*{C}{[A]} & \infer*{C}{[B]}}$}
-  *}
+\<close>
     assume P show P by fact
   next
     assume P show P by fact
   qed
 qed
 
-text {* Case splits are \emph{not} hardwired into the Isar language as
+text \<open>Case splits are \emph{not} hardwired into the Isar language as
   a special feature.  The \isacommand{next} command used to separate
   the cases above is just a short form of managing block structure.
 
@@ -289,7 +289,7 @@
 
   \medskip In our example the situation is even simpler, since the two
   cases actually coincide.  Consequently the proof may be rephrased as
-  follows. *}
+  follows.\<close>
 
 lemma "P \<or> P \<longrightarrow> P"
 proof
@@ -302,10 +302,10 @@
   qed
 qed
 
-text {* Again, the rather vacuous body of the proof may be collapsed.
+text \<open>Again, the rather vacuous body of the proof may be collapsed.
   Thus the case analysis degenerates into two assumption steps, which
   are implicitly performed when concluding the single rule step of the
-  double-dot proof as follows. *}
+  double-dot proof as follows.\<close>
 
 lemma "P \<or> P \<longrightarrow> P"
 proof
@@ -314,9 +314,9 @@
 qed
 
 
-subsubsection {* A quantifier proof *}
+subsubsection \<open>A quantifier proof\<close>
 
-text {* To illustrate quantifier reasoning, let us prove @{text
+text \<open>To illustrate quantifier reasoning, let us prove @{text
   "(\<exists>x. P (f x)) \<longrightarrow> (\<exists>y. P y)"}.  Informally, this holds because any
   @{text a} with @{text "P (f a)"} may be taken as a witness for the
   second existential statement.
@@ -326,27 +326,27 @@
   instantiation.  Furthermore, we encounter two new language elements:
   the \isacommand{fix} command augments the context by some new
   ``arbitrary, but fixed'' element; the \isacommand{is} annotation
-  binds term abbreviations by higher-order pattern matching. *}
+  binds term abbreviations by higher-order pattern matching.\<close>
 
 lemma "(\<exists>x. P (f x)) \<longrightarrow> (\<exists>y. P y)"
 proof
   assume "\<exists>x. P (f x)"
   then show "\<exists>y. P y"
-  proof (rule exE)             -- {*
+  proof (rule exE)             -- \<open>
     rule @{text exE}: \smash{$\infer{B}{\ex x A(x) & \infer*{B}{[A(x)]_x}}$}
-  *}
+\<close>
     fix a
     assume "P (f a)" (is "P ?witness")
     then show ?thesis by (rule exI [of P ?witness])
   qed
 qed
 
-text {* While explicit rule instantiation may occasionally improve
+text \<open>While explicit rule instantiation may occasionally improve
   readability of certain aspects of reasoning, it is usually quite
   redundant.  Above, the basic proof outline gives already enough
   structural clues for the system to infer both the rules and their
   instances (by higher-order unification).  Thus we may as well prune
-  the text as follows. *}
+  the text as follows.\<close>
 
 lemma "(\<exists>x. P (f x)) \<longrightarrow> (\<exists>y. P y)"
 proof
@@ -359,10 +359,10 @@
   qed
 qed
 
-text {* Explicit @{text \<exists>}-elimination as seen above can become quite
+text \<open>Explicit @{text \<exists>}-elimination as seen above can become quite
   cumbersome in practice.  The derived Isar language element
   ``\isakeyword{obtain}'' provides a more handsome way to do
-  generalized existence reasoning. *}
+  generalized existence reasoning.\<close>
 
 lemma "(\<exists>x. P (f x)) \<longrightarrow> (\<exists>y. P y)"
 proof
@@ -371,21 +371,21 @@
   then show "\<exists>y. P y" ..
 qed
 
-text {* Technically, \isakeyword{obtain} is similar to
+text \<open>Technically, \isakeyword{obtain} is similar to
   \isakeyword{fix} and \isakeyword{assume} together with a soundness
   proof of the elimination involved.  Thus it behaves similarly to any
   other forward proof element.  Also note that due to the nature of
   general existence reasoning involved here, any result exported from
   the context of an \isakeyword{obtain} statement may \emph{not} refer
-  to the parameters introduced there. *}
+  to the parameters introduced there.\<close>
 
 
-subsubsection {* Deriving rules in Isabelle *}
+subsubsection \<open>Deriving rules in Isabelle\<close>
 
-text {* We derive the conjunction elimination rule from the
+text \<open>We derive the conjunction elimination rule from the
   corresponding projections.  The proof is quite straightforward,
   since Isabelle/Isar supports non-atomic goals and assumptions fully
-  transparently. *}
+  transparently.\<close>
 
 theorem conjE: "A \<and> B \<Longrightarrow> (A \<Longrightarrow> B \<Longrightarrow> C) \<Longrightarrow> C"
 proof -
--- a/src/HOL/Isar_Examples/Cantor.thy	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/HOL/Isar_Examples/Cantor.thy	Tue Oct 07 21:01:31 2014 +0200
@@ -2,16 +2,16 @@
     Author:     Markus Wenzel, TU Muenchen
 *)
 
-header {* Cantor's Theorem *}
+header \<open>Cantor's Theorem\<close>
 
 theory Cantor
 imports Main
 begin
 
-text_raw {* \footnote{This is an Isar version of the final example of
-  the Isabelle/HOL manual \cite{isabelle-HOL}.}  *}
+text_raw \<open>\footnote{This is an Isar version of the final example of
+  the Isabelle/HOL manual @{cite "isabelle-HOL"}.}\<close>
 
-text {* Cantor's Theorem states that every set has more subsets than
+text \<open>Cantor's Theorem states that every set has more subsets than
   it has elements.  It has become a favorite basic example in pure
   higher-order logic since it is so easily expressed: \[\all{f::\alpha
   \To \alpha \To \idt{bool}} \ex{S::\alpha \To \idt{bool}}
@@ -22,7 +22,7 @@
   every function from $\alpha$ to its powerset, some subset is outside
   its range.  The Isabelle/Isar proofs below use HOL's set theory,
   with the type $\alpha \ap \idt{set}$ and the operator
-  $\idt{range}::(\alpha \To \beta) \To \beta \ap \idt{set}$. *}
+  $\idt{range}::(\alpha \To \beta) \To \beta \ap \idt{set}$.\<close>
 
 theorem "\<exists>S. S \<notin> range (f :: 'a \<Rightarrow> 'a set)"
 proof
@@ -36,30 +36,30 @@
       assume "y \<in> f y"
       assume "y \<in> ?S"
       then have "y \<notin> f y" ..
-      with `y : f y` show ?thesis by contradiction
+      with \<open>y : f y\<close> show ?thesis by contradiction
     next
       assume "y \<notin> ?S"
       assume "y \<notin> f y"
       then have "y \<in> ?S" ..
-      with `y \<notin> ?S` show ?thesis by contradiction
+      with \<open>y \<notin> ?S\<close> show ?thesis by contradiction
     qed
   qed
 qed
 
-text {* How much creativity is required?  As it happens, Isabelle can
+text \<open>How much creativity is required?  As it happens, Isabelle can
   prove this theorem automatically using best-first search.
   Depth-first search would diverge, but best-first search successfully
   navigates through the large search space.  The context of Isabelle's
   classical prover contains rules for the relevant constructs of HOL's
-  set theory.  *}
+  set theory.\<close>
 
 theorem "\<exists>S. S \<notin> range (f :: 'a \<Rightarrow> 'a set)"
   by best
 
-text {* While this establishes the same theorem internally, we do not
+text \<open>While this establishes the same theorem internally, we do not
   get any idea of how the proof actually works.  There is currently no
   way to transform internal system-level representations of Isabelle
   proofs back into Isar text.  Writing intelligible proof documents
-  really is a creative process, after all. *}
+  really is a creative process, after all.\<close>
 
 end
--- a/src/HOL/Isar_Examples/Drinker.thy	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/HOL/Isar_Examples/Drinker.thy	Tue Oct 07 21:01:31 2014 +0200
@@ -2,17 +2,17 @@
     Author:     Makarius
 *)
 
-header {* The Drinker's Principle *}
+header \<open>The Drinker's Principle\<close>
 
 theory Drinker
 imports Main
 begin
 
-text {* Here is another example of classical reasoning: the Drinker's
+text \<open>Here is another example of classical reasoning: the Drinker's
   Principle says that for some person, if he is drunk, everybody else
   is drunk!
 
-  We first prove a classical part of de-Morgan's law. *}
+  We first prove a classical part of de Morgan's law.\<close>
 
 lemma de_Morgan:
   assumes "\<not> (\<forall>x. P x)"
@@ -25,10 +25,10 @@
     proof (rule classical)
       assume "\<not> P x"
       then have "\<exists>x. \<not> P x" ..
-      with `\<not> (\<exists>x. \<not> P x)` show ?thesis by contradiction
+      with \<open>\<not> (\<exists>x. \<not> P x)\<close> show ?thesis by contradiction
     qed
   qed
-  with `\<not> (\<forall>x. P x)` show ?thesis by contradiction
+  with \<open>\<not> (\<forall>x. P x)\<close> show ?thesis by contradiction
 qed
 
 theorem Drinker's_Principle: "\<exists>x. drunk x \<longrightarrow> (\<forall>x. drunk x)"
--- a/src/HOL/Isar_Examples/Expr_Compiler.thy	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/HOL/Isar_Examples/Expr_Compiler.thy	Tue Oct 07 21:01:31 2014 +0200
@@ -4,40 +4,40 @@
 Correctness of a simple expression/stack-machine compiler.
 *)
 
-header {* Correctness of a simple expression compiler *}
+header \<open>Correctness of a simple expression compiler\<close>
 
 theory Expr_Compiler
 imports Main
 begin
 
-text {* This is a (rather trivial) example of program verification.
+text \<open>This is a (rather trivial) example of program verification.
   We model a compiler for translating expressions to stack machine
   instructions, and prove its correctness wrt.\ some evaluation
-  semantics. *}
+  semantics.\<close>
 
 
-subsection {* Binary operations *}
+subsection \<open>Binary operations\<close>
 
-text {* Binary operations are just functions over some type of values.
+text \<open>Binary operations are just functions over some type of values.
   This is both for abstract syntax and semantics, i.e.\ we use a
-  ``shallow embedding'' here. *}
+  ``shallow embedding'' here.\<close>
 
 type_synonym 'val binop = "'val \<Rightarrow> 'val \<Rightarrow> 'val"
 
 
-subsection {* Expressions *}
+subsection \<open>Expressions\<close>
 
-text {* The language of expressions is defined as an inductive type,
+text \<open>The language of expressions is defined as an inductive type,
   consisting of variables, constants, and binary operations on
-  expressions. *}
+  expressions.\<close>
 
 datatype (dead 'adr, dead 'val) expr =
     Variable 'adr
   | Constant 'val
   | Binop "'val binop" "('adr, 'val) expr" "('adr, 'val) expr"
 
-text {* Evaluation (wrt.\ some environment of variable assignments) is
-  defined by primitive recursion over the structure of expressions. *}
+text \<open>Evaluation (wrt.\ some environment of variable assignments) is
+  defined by primitive recursion over the structure of expressions.\<close>
 
 primrec eval :: "('adr, 'val) expr \<Rightarrow> ('adr \<Rightarrow> 'val) \<Rightarrow> 'val"
 where
@@ -46,18 +46,18 @@
 | "eval (Binop f e1 e2) env = f (eval e1 env) (eval e2 env)"
 
 
-subsection {* Machine *}
+subsection \<open>Machine\<close>
 
-text {* Next we model a simple stack machine, with three
-  instructions. *}
+text \<open>Next we model a simple stack machine, with three
+  instructions.\<close>
 
 datatype (dead 'adr, dead 'val) instr =
     Const 'val
   | Load 'adr
   | Apply "'val binop"
 
-text {* Execution of a list of stack machine instructions is easily
-  defined as follows. *}
+text \<open>Execution of a list of stack machine instructions is easily
+  defined as follows.\<close>
 
 primrec exec :: "(('adr, 'val) instr) list \<Rightarrow> 'val list \<Rightarrow> ('adr \<Rightarrow> 'val) \<Rightarrow> 'val list"
 where
@@ -73,10 +73,10 @@
   where "execute instrs env = hd (exec instrs [] env)"
 
 
-subsection {* Compiler *}
+subsection \<open>Compiler\<close>
 
-text {* We are ready to define the compilation function of expressions
-  to lists of stack machine instructions. *}
+text \<open>We are ready to define the compilation function of expressions
+  to lists of stack machine instructions.\<close>
 
 primrec compile :: "('adr, 'val) expr \<Rightarrow> (('adr, 'val) instr) list"
 where
@@ -85,9 +85,9 @@
 | "compile (Binop f e1 e2) = compile e2 @ compile e1 @ [Apply f]"
 
 
-text {* The main result of this development is the correctness theorem
+text \<open>The main result of this development is the correctness theorem
   for @{text compile}.  We first establish a lemma about @{text exec}
-  and list append. *}
+  and list append.\<close>
 
 lemma exec_append:
   "exec (xs @ ys) stack env =
@@ -127,11 +127,11 @@
 qed
 
 
-text {* \bigskip In the proofs above, the @{text simp} method does
+text \<open>\bigskip In the proofs above, the @{text simp} method does
   quite a lot of work behind the scenes (mostly ``functional program
   execution'').  Subsequently, the same reasoning is elaborated in
   detail --- at most one recursive function definition is used at a
-  time.  Thus we get a better idea of what is actually going on. *}
+  time.  Thus we get a better idea of what is actually going on.\<close>
 
 lemma exec_append':
   "exec (xs @ ys) stack env = exec ys (exec xs stack env) env"
@@ -158,7 +158,7 @@
   next
     case (Load adr)
     from Cons show ?case
-      by simp -- {* same as above *}
+      by simp -- \<open>same as above\<close>
   next
     case (Apply fn)
     have "exec ((Apply fn # xs) @ ys) s env =
@@ -188,7 +188,7 @@
     finally show ?case .
   next
     case (Constant val s)
-    show ?case by simp -- {* same as above *}
+    show ?case by simp -- \<open>same as above\<close>
   next
     case (Binop fn e1 e2 s)
     have "exec (compile (Binop fn e1 e2)) s env =
--- a/src/HOL/Isar_Examples/Fibonacci.thy	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/HOL/Isar_Examples/Fibonacci.thy	Tue Oct 07 21:01:31 2014 +0200
@@ -12,21 +12,21 @@
   (Addison-Wesley, 1989)
 *)
 
-header {* Fib and Gcd commute *}
+header \<open>Fib and Gcd commute\<close>
 
 theory Fibonacci
 imports "../Number_Theory/Primes"
 begin
 
-text_raw {* \footnote{Isar version by Gertrud Bauer.  Original tactic
+text_raw \<open>\footnote{Isar version by Gertrud Bauer.  Original tactic
   script by Larry Paulson.  A few proofs of laws taken from
-  \cite{Concrete-Math}.} *}
+  @{cite "Concrete-Math"}.}\<close>
 
 
 declare One_nat_def [simp]
 
 
-subsection {* Fibonacci numbers *}
+subsection \<open>Fibonacci numbers\<close>
 
 fun fib :: "nat \<Rightarrow> nat" where
   "fib 0 = 0"
@@ -37,7 +37,7 @@
   by (induct n rule: fib.induct) simp_all
 
 
-text {* Alternative induction rule. *}
+text \<open>Alternative induction rule.\<close>
 
 theorem fib_induct:
   fixes n :: nat
@@ -45,14 +45,14 @@
   by (induct rule: fib.induct) simp_all
 
 
-subsection {* Fib and gcd commute *}
+subsection \<open>Fib and gcd commute\<close>
 
-text {* A few laws taken from \cite{Concrete-Math}. *}
+text \<open>A few laws taken from @{cite "Concrete-Math"}.\<close>
 
 lemma fib_add:
   "fib (n + k + 1) = fib (k + 1) * fib (n + 1) + fib k * fib n"
   (is "?P n")
-  -- {* see \cite[page 280]{Concrete-Math} *}
+  -- \<open>see @{cite \<open>page 280\<close> "Concrete-Math"}\<close>
 proof (induct n rule: fib_induct)
   show "?P 0" by simp
   show "?P 1" by simp
@@ -93,7 +93,7 @@
   assume "0 < n"
   then have "gcd (n * k + m) n = gcd n (m mod n)"
     by (simp add: gcd_non_0_nat add.commute)
-  also from `0 < n` have "\<dots> = gcd m n"
+  also from \<open>0 < n\<close> have "\<dots> = gcd m n"
     by (simp add: gcd_non_0_nat)
   finally show ?thesis .
 qed
@@ -124,7 +124,7 @@
 proof -
   have "gcd (fib m) (fib (n - m)) = gcd (fib m) (fib (n - m + m))"
     by (simp add: gcd_fib_add)
-  also from `m \<le> n` have "n - m + m = n"
+  also from \<open>m \<le> n\<close> have "n - m + m = n"
     by simp
   finally show ?thesis .
 qed
@@ -145,12 +145,12 @@
     next
       case False
       then have "m \<le> n" by simp
-      from `0 < m` and False have "n - m < n"
+      from \<open>0 < m\<close> and False have "n - m < n"
         by simp
       with hyp have "gcd (fib m) (fib ((n - m) mod m))
           = gcd (fib m) (fib (n - m))" by simp
       also have "\<dots> = gcd (fib m) (fib n)"
-        using `m \<le> n` by (rule gcd_fib_diff)
+        using \<open>m \<le> n\<close> by (rule gcd_fib_diff)
       finally have "gcd (fib m) (fib ((n - m) mod m)) =
           gcd (fib m) (fib n)" .
       with False show ?thesis by simp
--- a/src/HOL/Isar_Examples/Group.thy	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/HOL/Isar_Examples/Group.thy	Tue Oct 07 21:01:31 2014 +0200
@@ -2,26 +2,26 @@
     Author:     Markus Wenzel, TU Muenchen
 *)
 
-header {* Basic group theory *}
+header \<open>Basic group theory\<close>
 
 theory Group
 imports Main
 begin
 
-subsection {* Groups and calculational reasoning *} 
+subsection \<open>Groups and calculational reasoning\<close> 
 
-text {* Groups over signature $({\times} :: \alpha \To \alpha \To
+text \<open>Groups over signature $({\times} :: \alpha \To \alpha \To
   \alpha, \idt{one} :: \alpha, \idt{inverse} :: \alpha \To \alpha)$
   are defined as an axiomatic type class as follows.  Note that the
-  parent class $\idt{times}$ is provided by the basic HOL theory. *}
+  parent class $\idt{times}$ is provided by the basic HOL theory.\<close>
 
 class group = times + one + inverse +
   assumes group_assoc: "(x * y) * z = x * (y * z)"
     and group_left_one: "1 * x = x"
     and group_left_inverse: "inverse x * x = 1"
 
-text {* The group axioms only state the properties of left one and
-  inverse, the right versions may be derived as follows. *}
+text \<open>The group axioms only state the properties of left one and
+  inverse, the right versions may be derived as follows.\<close>
 
 theorem (in group) group_right_inverse: "x * inverse x = 1"
 proof -
@@ -44,9 +44,9 @@
   finally show ?thesis .
 qed
 
-text {* With \name{group-right-inverse} already available,
+text \<open>With \name{group-right-inverse} already available,
   \name{group-right-one}\label{thm:group-right-one} is now established
-  much easier. *}
+  much more easily.\<close>
 
 theorem (in group) group_right_one: "x * 1 = x"
 proof -
@@ -61,7 +61,7 @@
   finally show ?thesis .
 qed
 
-text {* \medskip The calculational proof style above follows typical
+text \<open>\medskip The calculational proof style above follows typical
   presentations given in any introductory course on algebra.  The
   basic technique is to form a transitive chain of equations, which in
   turn are established by simplifying with appropriate rules.  The
@@ -81,8 +81,7 @@
   Isabelle/Isar, but defined on top of the basic Isar/VM interpreter.
   Expanding the \isakeyword{also} and \isakeyword{finally} derived
   language elements, calculations may be simulated by hand as
-  demonstrated below.
-*}
+  demonstrated below.\<close>
 
 theorem (in group) "x * 1 = x"
 proof -
@@ -90,58 +89,57 @@
     by (simp only: group_left_inverse)
 
   note calculation = this
-    -- {* first calculational step: init calculation register *}
+    -- \<open>first calculational step: init calculation register\<close>
 
   have "\<dots> = x * inverse x * x"
     by (simp only: group_assoc)
 
   note calculation = trans [OF calculation this]
-    -- {* general calculational step: compose with transitivity rule *}
+    -- \<open>general calculational step: compose with transitivity rule\<close>
 
   have "\<dots> = 1 * x"
     by (simp only: group_right_inverse)
 
   note calculation = trans [OF calculation this]
-    -- {* general calculational step: compose with transitivity rule *}
+    -- \<open>general calculational step: compose with transitivity rule\<close>
 
   have "\<dots> = x"
     by (simp only: group_left_one)
 
   note calculation = trans [OF calculation this]
-    -- {* final calculational step: compose with transitivity rule \dots *}
+    -- \<open>final calculational step: compose with transitivity rule \dots\<close>
   from calculation
-    -- {* \dots\ and pick up the final result *}
+    -- \<open>\dots\ and pick up the final result\<close>
 
   show ?thesis .
 qed
 
-text {* Note that this scheme of calculations is not restricted to
+text \<open>Note that this scheme of calculations is not restricted to
   plain transitivity.  Rules like anti-symmetry, or even forward and
   backward substitution work as well.  For the actual implementation
   of \isacommand{also} and \isacommand{finally}, Isabelle/Isar
   maintains separate context information of ``transitivity'' rules.
   Rule selection takes place automatically by higher-order
-  unification. *}
+  unification.\<close>
 
 
-subsection {* Groups as monoids *}
+subsection \<open>Groups as monoids\<close>
 
-text {* Monoids over signature $({\times} :: \alpha \To \alpha \To
-  \alpha, \idt{one} :: \alpha)$ are defined like this.
-*}
+text \<open>Monoids over signature $({\times} :: \alpha \To \alpha \To
+  \alpha, \idt{one} :: \alpha)$ are defined like this.\<close>
 
 class monoid = times + one +
   assumes monoid_assoc: "(x * y) * z = x * (y * z)"
     and monoid_left_one: "1 * x = x"
     and monoid_right_one: "x * 1 = x"
 
-text {* Groups are \emph{not} yet monoids directly from the
+text \<open>Groups are \emph{not} yet monoids directly from the
   definition.  For monoids, \name{right-one} had to be included as an
   axiom, but for groups both \name{right-one} and \name{right-inverse}
   are derivable from the other axioms.  With \name{group-right-one}
   derived as a theorem of group theory (see
   page~\pageref{thm:group-right-one}), we may still instantiate
-  $\idt{group} \subseteq \idt{monoid}$ properly as follows. *}
+  $\idt{group} \subseteq \idt{monoid}$ properly as follows.\<close>
 
 instance group < monoid
   by intro_classes
@@ -149,18 +147,18 @@
       rule group_left_one,
       rule group_right_one)
 
-text {* The \isacommand{instance} command actually is a version of
+text \<open>The \isacommand{instance} command actually is a version of
   \isacommand{theorem}, setting up a goal that reflects the intended
   class relation (or type constructor arity).  Thus any Isar proof
   language element may be involved to establish this statement.  When
   concluding the proof, the result is transformed into the intended
-  type signature extension behind the scenes. *}
+  type signature extension behind the scenes.\<close>
 
 
-subsection {* More theorems of group theory *}
+subsection \<open>More theorems of group theory\<close>
 
-text {* The one element is already uniquely determined by preserving
-  an \emph{arbitrary} group element. *}
+text \<open>The one element is already uniquely determined by preserving
+  an \emph{arbitrary} group element.\<close>
 
 theorem (in group) group_one_equality:
   assumes eq: "e * x = x"
@@ -179,7 +177,7 @@
   finally show ?thesis .
 qed
 
-text {* Likewise, the inverse is already determined by the cancel property. *}
+text \<open>Likewise, the inverse is already determined by the cancel property.\<close>
 
 theorem (in group) group_inverse_equality:
   assumes eq: "x' * x = 1"
@@ -198,7 +196,7 @@
   finally show ?thesis .
 qed
 
-text {* The inverse operation has some further characteristic properties. *}
+text \<open>The inverse operation has some further characteristic properties.\<close>
 
 theorem (in group) group_inverse_times: "inverse (x * y) = inverse y * inverse x"
 proof (rule group_inverse_equality)
--- a/src/HOL/Isar_Examples/Group_Context.thy	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/HOL/Isar_Examples/Group_Context.thy	Tue Oct 07 21:01:31 2014 +0200
@@ -2,13 +2,13 @@
     Author:     Makarius
 *)
 
-header {* Some algebraic identities derived from group axioms -- theory context version *}
+header \<open>Some algebraic identities derived from group axioms -- theory context version\<close>
 
 theory Group_Context
 imports Main
 begin
 
-text {* hypothetical group axiomatization *}
+text \<open>hypothetical group axiomatization\<close>
 
 context
   fixes prod :: "'a \<Rightarrow> 'a \<Rightarrow> 'a"  (infixl "**" 70)
@@ -19,7 +19,7 @@
     and left_inverse: "inverse x ** x = one"
 begin
 
-text {* some consequences *}
+text \<open>some consequences\<close>
 
 lemma right_inverse: "x ** inverse x = one"
 proof -
--- a/src/HOL/Isar_Examples/Group_Notepad.thy	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/HOL/Isar_Examples/Group_Notepad.thy	Tue Oct 07 21:01:31 2014 +0200
@@ -2,7 +2,7 @@
     Author:     Makarius
 *)
 
-header {* Some algebraic identities derived from group axioms -- proof notepad version *}
+header \<open>Some algebraic identities derived from group axioms -- proof notepad version\<close>
 
 theory Group_Notepad
 imports Main
@@ -10,7 +10,7 @@
 
 notepad
 begin
-  txt {* hypothetical group axiomatization *}
+  txt \<open>hypothetical group axiomatization\<close>
 
   fix prod :: "'a \<Rightarrow> 'a \<Rightarrow> 'a"  (infixl "**" 70)
     and one :: "'a"
@@ -19,7 +19,7 @@
     and left_one: "\<And>x. one ** x = x"
     and left_inverse: "\<And>x. inverse x ** x = one"
 
-  txt {* some consequences *}
+  txt \<open>some consequences\<close>
 
   have right_inverse: "\<And>x. x ** inverse x = one"
   proof -
--- a/src/HOL/Isar_Examples/Hoare.thy	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/HOL/Isar_Examples/Hoare.thy	Tue Oct 07 21:01:31 2014 +0200
@@ -4,19 +4,19 @@
 A formulation of Hoare logic suitable for Isar.
 *)
 
-header {* Hoare Logic *}
+header \<open>Hoare Logic\<close>
 
 theory Hoare
 imports Main
 begin
 
-subsection {* Abstract syntax and semantics *}
+subsection \<open>Abstract syntax and semantics\<close>
 
-text {* The following abstract syntax and semantics of Hoare Logic
+text \<open>The following abstract syntax and semantics of Hoare Logic
   over \texttt{WHILE} programs closely follows the existing tradition
   in Isabelle/HOL of formalizing the presentation given in
-  \cite[\S6]{Winskel:1993}.  See also @{file "~~/src/HOL/Hoare"} and
-  \cite{Nipkow:1998:Winskel}. *}
+  @{cite \<open>\S6\<close> "Winskel:1993"}.  See also @{file "~~/src/HOL/Hoare"} and
+  @{cite "Nipkow:1998:Winskel"}.\<close>
 
 type_synonym 'a bexp = "'a set"
 type_synonym 'a assn = "'a set"
@@ -58,16 +58,16 @@
   by (simp add: Valid_def)
 
 
-subsection {* Primitive Hoare rules *}
+subsection \<open>Primitive Hoare rules\<close>
 
-text {* From the semantics defined above, we derive the standard set
-  of primitive Hoare rules; e.g.\ see \cite[\S6]{Winskel:1993}.
+text \<open>From the semantics defined above, we derive the standard set
+  of primitive Hoare rules; e.g.\ see @{cite \<open>\S6\<close> "Winskel:1993"}.
   Usually, variant forms of these rules are applied in actual proof,
   see also \S\ref{sec:hoare-isar} and \S\ref{sec:hoare-vcg}.
 
   \medskip The \name{basic} rule represents any kind of atomic access
   to the state space.  This subsumes the common rules of \name{skip}
-  and \name{assign}, as formulated in \S\ref{sec:hoare-isar}. *}
+  and \name{assign}, as formulated in \S\ref{sec:hoare-isar}.\<close>
 
 theorem basic: "\<turnstile> {s. f s \<in> P} (Basic f) P"
 proof
@@ -78,10 +78,8 @@
   with s show "s' \<in> P" by simp
 qed
 
-text {*
- The rules for sequential commands and semantic consequences are
- established in a straight forward manner as follows.
-*}
+text \<open>The rules for sequential commands and semantic consequences are
+ established in a straightforward manner as follows.\<close>
 
 theorem seq: "\<turnstile> P c1 Q \<Longrightarrow> \<turnstile> Q c2 R \<Longrightarrow> \<turnstile> P (c1; c2) R"
 proof
@@ -106,9 +104,9 @@
   with QQ' show "s' \<in> Q'" ..
 qed
 
-text {* The rule for conditional commands is directly reflected by the
+text \<open>The rule for conditional commands is directly reflected by the
   corresponding semantics; in the proof we just have to look closely at
-  which cases apply. *}
+  which cases apply.\<close>
 
 theorem cond:
   assumes case_b: "\<turnstile> (P \<inter> b) c1 Q"
@@ -136,12 +134,12 @@
   qed
 qed
 
-text {* The @{text while} rule is slightly less trivial --- it is the
+text \<open>The @{text while} rule is slightly less trivial --- it is the
   only one based on recursion, which is expressed in the semantics by
   a Kleene-style least fixed-point construction.  The auxiliary
   statement below, which is by induction on the number of iterations,
   is the main point to be proven; the rest is by routine application
-  of the semantics of \texttt{WHILE}. *}
+  of the semantics of \texttt{WHILE}.\<close>
 
 theorem while:
   assumes body: "\<turnstile> (P \<inter> b) c P"
@@ -165,9 +163,9 @@
 qed
 
 
-subsection {* Concrete syntax for assertions *}
+subsection \<open>Concrete syntax for assertions\<close>
 
-text {* We now introduce concrete syntax for describing commands (with
+text \<open>We now introduce concrete syntax for describing commands (with
   embedded expressions) and assertions. The basic technique is that of
   semantic ``quote-antiquote''.  A \emph{quotation} is a syntactic
   entity delimited by an implicit abstraction, say over the state
@@ -176,9 +174,9 @@
   would select (or even update) components from the state.
 
   We will see some examples later in the concrete rules and
-  applications. *}
+  applications.\<close>
 
-text {* The following specification of syntax and translations is for
+text \<open>The following specification of syntax and translations is for
   Isabelle experts only; feel free to ignore it.
 
   While the first part is still a somewhat intelligible specification
@@ -186,7 +184,7 @@
   actual ``ML drivers'' is quite involved.  Just note that we
   re-use the basic quote/antiquote translations as already defined in
   Isabelle/Pure (see @{ML Syntax_Trans.quote_tr} and
-  @{ML Syntax_Trans.quote_tr'},). *}
+  @{ML Syntax_Trans.quote_tr'}).\<close>
 
 syntax
   "_quote" :: "'b \<Rightarrow> ('a \<Rightarrow> 'b)"
@@ -208,19 +206,19 @@
   "WHILE b INV i DO c OD" \<rightharpoonup> "CONST While \<lbrace>b\<rbrace> i c"
   "WHILE b DO c OD" \<rightleftharpoons> "WHILE b INV CONST undefined DO c OD"
 
-parse_translation {*
+parse_translation \<open>
   let
     fun quote_tr [t] = Syntax_Trans.quote_tr @{syntax_const "_antiquote"} t
       | quote_tr ts = raise TERM ("quote_tr", ts);
   in [(@{syntax_const "_quote"}, K quote_tr)] end
-*}
+\<close>
 
-text {* As usual in Isabelle syntax translations, the part for
+text \<open>As usual in Isabelle syntax translations, the part for
   printing is more complicated --- we cannot express parts as macro
   rules as above.  Don't look here, unless you have to do similar
-  things for yourself. *}
+  things for yourself.\<close>
 
-print_translation {*
+print_translation \<open>
   let
     fun quote_tr' f (t :: ts) =
           Term.list_comb (f $ Syntax_Trans.quote_tr' @{syntax_const "_antiquote"} t, ts)
@@ -242,18 +240,18 @@
     (@{const_syntax Cond}, K (bexp_tr' @{syntax_const "_Cond"})),
     (@{const_syntax While}, K (bexp_tr' @{syntax_const "_While_inv"}))]
   end
-*}
+\<close>
 
 
-subsection {* Rules for single-step proof \label{sec:hoare-isar} *}
+subsection \<open>Rules for single-step proof \label{sec:hoare-isar}\<close>
 
-text {* We are now ready to introduce a set of Hoare rules to be used
+text \<open>We are now ready to introduce a set of Hoare rules to be used
   in single-step structured proofs in Isabelle/Isar.  We refer to the
   concrete syntax introduce above.
 
   \medskip Assertions of Hoare Logic may be manipulated in
   calculational proofs, with the inclusion expressed in terms of sets
-  or predicates.  Reversed order is supported as well. *}
+  or predicates.  Reversed order is supported as well.\<close>
 
 lemma [trans]: "\<turnstile> P c Q \<Longrightarrow> P' \<subseteq> P \<Longrightarrow> \<turnstile> P' c Q"
   by (unfold Valid_def) blast
@@ -280,10 +278,10 @@
   by (simp add: Valid_def)
 
 
-text {* Identity and basic assignments.\footnote{The $\idt{hoare}$
+text \<open>Identity and basic assignments.\footnote{The $\idt{hoare}$
   method introduced in \S\ref{sec:hoare-vcg} is able to provide proper
   instances for any number of basic assignments, without producing
-  additional verification conditions.} *}
+  additional verification conditions.}\<close>
 
 lemma skip [intro?]: "\<turnstile> P SKIP P"
 proof -
@@ -294,9 +292,9 @@
 lemma assign: "\<turnstile> P [\<acute>a/\<acute>x::'a] \<acute>x := \<acute>a P"
   by (rule basic)
 
-text {* Note that above formulation of assignment corresponds to our
+text \<open>Note that the above formulation of assignment corresponds to our
   preferred way to model state spaces, using (extensible) record types
-  in HOL \cite{Naraschewski-Wenzel:1998:HOOL}.  For any record field
+  in HOL @{cite "Naraschewski-Wenzel:1998:HOOL"}.  For any record field
   $x$, Isabelle/HOL provides functions $x$ (selector) and
   $\idt{x{\dsh}update}$ (update).  Above, there is only a place-holder
   appearing for the latter kind of function: due to concrete syntax
@@ -304,17 +302,17 @@
   due to the external nature of HOL record fields, we could not even
   state a general theorem relating selector and update functions (if
   this were required here); this would only work for any particular
-  instance of record fields introduced so far.} *}
+  instance of record fields introduced so far.}\<close>
 
-text {* Sequential composition --- normalizing with associativity
-  achieves proper of chunks of code verified separately. *}
+text \<open>Sequential composition --- normalizing with associativity
+  achieves proper composition of chunks of code verified separately.\<close>
 
 lemmas [trans, intro?] = seq
 
 lemma seq_assoc [simp]: "\<turnstile> P c1;(c2;c3) Q \<longleftrightarrow> \<turnstile> P (c1;c2);c3 Q"
   by (auto simp add: Valid_def)
 
-text {* Conditional statements. *}
+text \<open>Conditional statements.\<close>
 
 lemmas [trans, intro?] = cond
 
@@ -324,7 +322,7 @@
       \<Longrightarrow> \<turnstile> \<lbrace>\<acute>P\<rbrace> IF \<acute>b THEN c1 ELSE c2 FI Q"
     by (rule cond) (simp_all add: Valid_def)
 
-text {* While statements --- with optional invariant. *}
+text \<open>While statements --- with optional invariant.\<close>
 
 lemma [intro?]: "\<turnstile> (P \<inter> b) c P \<Longrightarrow> \<turnstile> P (While b P c) (P \<inter> -b)"
   by (rule while)
@@ -344,9 +342,9 @@
   by (simp add: while Collect_conj_eq Collect_neg_eq)
 
 
-subsection {* Verification conditions \label{sec:hoare-vcg} *}
+subsection \<open>Verification conditions \label{sec:hoare-vcg}\<close>
 
-text {* We now load the \emph{original} ML file for proof scripts and
+text \<open>We now load the \emph{original} ML file for proof scripts and
   tactic definition for the Hoare Verification Condition Generator
   (see @{file "~~/src/HOL/Hoare/"}).  As far as we
   are concerned here, the result is a proof method \name{hoare}, which
@@ -355,7 +353,7 @@
   requires \texttt{WHILE} loops to be fully annotated with invariants
   beforehand.  Furthermore, only \emph{concrete} pieces of code are
   handled --- the underlying tactic fails ungracefully if supplied
-  with meta-variables or parameters, for example. *}
+  with meta-variables or parameters, for example.\<close>
 
 lemma SkipRule: "p \<subseteq> q \<Longrightarrow> Valid p (Basic id) q"
   by (auto simp add: Valid_def)
@@ -393,11 +391,11 @@
 
 ML_file "~~/src/HOL/Hoare/hoare_tac.ML"
 
-method_setup hoare = {*
-  Scan.succeed (fn ctxt =>
+method_setup hoare =
+  \<open>Scan.succeed (fn ctxt =>
     (SIMPLE_METHOD'
-       (Hoare.hoare_tac ctxt
-        (simp_tac (put_simpset HOL_basic_ss ctxt addsimps [@{thm "Record.K_record_comp"}] ))))) *}
+      (Hoare.hoare_tac ctxt
+        (simp_tac (put_simpset HOL_basic_ss ctxt addsimps [@{thm "Record.K_record_comp"}] )))))\<close>
   "verification condition generator for Hoare logic"
 
 end
--- a/src/HOL/Isar_Examples/Hoare_Ex.thy	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/HOL/Isar_Examples/Hoare_Ex.thy	Tue Oct 07 21:01:31 2014 +0200
@@ -1,14 +1,14 @@
-header {* Using Hoare Logic *}
+header \<open>Using Hoare Logic\<close>
 
 theory Hoare_Ex
 imports Hoare
 begin
 
-subsection {* State spaces *}
+subsection \<open>State spaces\<close>
 
-text {* First of all we provide a store of program variables that
+text \<open>First of all we provide a store of program variables that
   occur in any of the programs considered later.  Slightly unexpected
-  things may happen when attempting to work with undeclared variables. *}
+  things may happen when attempting to work with undeclared variables.\<close>
 
 record vars =
   I :: nat
@@ -16,29 +16,29 @@
   N :: nat
   S :: nat
 
-text {* While all of our variables happen to have the same type,
+text \<open>While all of our variables happen to have the same type,
   nothing would prevent us from working with many-sorted programs as
   well, or even polymorphic ones.  Also note that Isabelle/HOL's
   extensible record types even provide simple means to extend the
-  state space later. *}
+  state space later.\<close>
 
 
-subsection {* Basic examples *}
+subsection \<open>Basic examples\<close>
 
-text {* We look at few trivialities involving assignment and
+text \<open>We look at a few trivialities involving assignment and
   sequential composition, in order to get an idea of how to work with
-  our formulation of Hoare Logic. *}
+  our formulation of Hoare Logic.\<close>
 
-text {* Using the basic @{text assign} rule directly is a bit
-  cumbersome. *}
+text \<open>Using the basic @{text assign} rule directly is a bit
+  cumbersome.\<close>
 
 lemma "\<turnstile> \<lbrace>\<acute>(N_update (\<lambda>_. (2 * \<acute>N))) \<in> \<lbrace>\<acute>N = 10\<rbrace>\<rbrace> \<acute>N := 2 * \<acute>N \<lbrace>\<acute>N = 10\<rbrace>"
   by (rule assign)
 
-text {* Certainly we want the state modification already done, e.g.\
+text \<open>Certainly we want the state modification already done, e.g.\
   by simplification.  The \name{hoare} method performs the basic state
   update for us; we may apply the Simplifier afterwards to achieve
-  ``obvious'' consequences as well. *}
+  ``obvious'' consequences as well.\<close>
 
 lemma "\<turnstile> \<lbrace>True\<rbrace> \<acute>N := 10 \<lbrace>\<acute>N = 10\<rbrace>"
   by hoare
@@ -67,10 +67,10 @@
       \<lbrace>\<acute>M = b \<and> \<acute>N = a\<rbrace>"
   by hoare simp
 
-text {* It is important to note that statements like the following one
+text \<open>It is important to note that statements like the following one
   can only be proven for each individual program variable.  Due to the
   extra-logical nature of record fields, we cannot formulate a theorem
-  relating record selectors and updates schematically. *}
+  relating record selectors and updates schematically.\<close>
 
 lemma "\<turnstile> \<lbrace>\<acute>N = a\<rbrace> \<acute>N := \<acute>N \<lbrace>\<acute>N = a\<rbrace>"
   by hoare
@@ -80,13 +80,13 @@
 
 lemma
   "Valid {s. x s = a} (Basic (\<lambda>s. x_update (x s) s)) {s. x s = n}"
-  -- {* same statement without concrete syntax *}
+  -- \<open>same statement without concrete syntax\<close>
   oops
 
 
-text {* In the following assignments we make use of the consequence
+text \<open>In the following assignments we make use of the consequence
   rule in order to achieve the intended precondition.  Certainly, the
-  \name{hoare} method is able to handle this case, too. *}
+  \name{hoare} method is able to handle this case, too.\<close>
 
 lemma "\<turnstile> \<lbrace>\<acute>M = \<acute>N\<rbrace> \<acute>M := \<acute>M + 1 \<lbrace>\<acute>M \<noteq> \<acute>N\<rbrace>"
 proof -
@@ -100,8 +100,8 @@
 lemma "\<turnstile> \<lbrace>\<acute>M = \<acute>N\<rbrace> \<acute>M := \<acute>M + 1 \<lbrace>\<acute>M \<noteq> \<acute>N\<rbrace>"
 proof -
   have "\<And>m n::nat. m = n \<longrightarrow> m + 1 \<noteq> n"
-      -- {* inclusion of assertions expressed in ``pure'' logic, *}
-      -- {* without mentioning the state space *}
+      -- \<open>inclusion of assertions expressed in ``pure'' logic,\<close>
+      -- \<open>without mentioning the state space\<close>
     by simp
   also have "\<turnstile> \<lbrace>\<acute>M + 1 \<noteq> \<acute>N\<rbrace> \<acute>M := \<acute>M + 1 \<lbrace>\<acute>M \<noteq> \<acute>N\<rbrace>"
     by hoare
@@ -112,12 +112,12 @@
   by hoare simp
 
 
-subsection {* Multiplication by addition *}
+subsection \<open>Multiplication by addition\<close>
 
-text {* We now do some basic examples of actual \texttt{WHILE}
+text \<open>We now do some basic examples of actual \texttt{WHILE}
   programs.  This one is a loop for calculating the product of two
   natural numbers, by iterated addition.  We first give a detailed
-  structured proof based on single-step Hoare rules. *}
+  structured proof based on single-step Hoare rules.\<close>
 
 lemma
   "\<turnstile> \<lbrace>\<acute>M = 0 \<and> \<acute>S = 0\<rbrace>
@@ -141,10 +141,10 @@
   finally show ?thesis .
 qed
 
-text {* The subsequent version of the proof applies the @{text hoare}
+text \<open>The subsequent version of the proof applies the @{text hoare}
   method to reduce the Hoare statement to a purely logical problem
   that can be solved fully automatically.  Note that we have to
-  specify the \texttt{WHILE} loop invariant in the original statement. *}
+  specify the \texttt{WHILE} loop invariant in the original statement.\<close>
 
 lemma
   "\<turnstile> \<lbrace>\<acute>M = 0 \<and> \<acute>S = 0\<rbrace>
@@ -155,17 +155,17 @@
   by hoare auto
 
 
-subsection {* Summing natural numbers *}
+subsection \<open>Summing natural numbers\<close>
 
-text {* We verify an imperative program to sum natural numbers up to a
+text \<open>We verify an imperative program to sum natural numbers up to a
   given limit.  First, some functional definitions for proper
-  specification of the problem. *}
+  specification of the problem.\<close>
 
-text {* The following proof is quite explicit in the individual steps
+text \<open>The following proof is quite explicit in the individual steps
   taken, with the \name{hoare} method only applied locally to take
   care of assignment and sequential composition.  Note that we express
   intermediate proof obligations in pure logic, without referring to
-  the state space. *}
+  the state space.\<close>
 
 theorem
   "\<turnstile> \<lbrace>True\<rbrace>
@@ -203,9 +203,9 @@
   finally show ?thesis .
 qed
 
-text {* The next version uses the @{text hoare} method, while still
+text \<open>The next version uses the @{text hoare} method, while still
   explaining the resulting proof obligations in an abstract,
-  structured manner. *}
+  structured manner.\<close>
 
 theorem
   "\<turnstile> \<lbrace>True\<rbrace>
@@ -235,8 +235,8 @@
   qed
 qed
 
-text {* Certainly, this proof may be done fully automatic as well,
-  provided that the invariant is given beforehand. *}
+text \<open>Certainly, this proof may be done fully automatically as well,
+  provided that the invariant is given beforehand.\<close>
 
 theorem
   "\<turnstile> \<lbrace>True\<rbrace>
@@ -251,10 +251,10 @@
   by hoare auto
 
 
-subsection {* Time *}
+subsection \<open>Time\<close>
 
-text {* A simple embedding of time in Hoare logic: function @{text
-  timeit} inserts an extra variable to keep track of the elapsed time. *}
+text \<open>A simple embedding of time in Hoare logic: function @{text
+  timeit} inserts an extra variable to keep track of the elapsed time.\<close>
 
 record tstate = time :: nat
 
--- a/src/HOL/Isar_Examples/Knaster_Tarski.thy	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/HOL/Isar_Examples/Knaster_Tarski.thy	Tue Oct 07 21:01:31 2014 +0200
@@ -4,17 +4,17 @@
 Typical textbook proof example.
 *)
 
-header {* Textbook-style reasoning: the Knaster-Tarski Theorem *}
+header \<open>Textbook-style reasoning: the Knaster-Tarski Theorem\<close>
 
 theory Knaster_Tarski
 imports Main "~~/src/HOL/Library/Lattice_Syntax"
 begin
 
 
-subsection {* Prose version *}
+subsection \<open>Prose version\<close>
 
-text {* According to the textbook \cite[pages
-  93--94]{davey-priestley}, the Knaster-Tarski fixpoint theorem is as
+text \<open>According to the textbook @{cite \<open>pages 93--94\<close> "davey-priestley"},
+  the Knaster-Tarski fixpoint theorem is as
   follows.\footnote{We have dualized the argument, and tuned the
   notation a little bit.}
 
@@ -28,16 +28,16 @@
   H}, whence @{text "f(a) \<le> a"}.  We now use this inequality to prove
   the reverse one (!) and thereby complete the proof that @{text a} is
   a fixpoint.  Since @{text f} is order-preserving, @{text "f(f(a)) \<le>
-  f(a)"}.  This says @{text "f(a) \<in> H"}, so @{text "a \<le> f(a)"}. *}
+  f(a)"}.  This says @{text "f(a) \<in> H"}, so @{text "a \<le> f(a)"}.\<close>
 
 
-subsection {* Formal versions *}
+subsection \<open>Formal versions\<close>
 
-text {* The Isar proof below closely follows the original
+text \<open>The Isar proof below closely follows the original
   presentation.  Virtually all of the prose narration has been
   rephrased in terms of formal Isar language elements.  Just as in many
   textbook-style proofs, there is a strong bias towards forward proof,
-  and several bends in the course of reasoning. *}
+  and several bends in the course of reasoning.\<close>
 
 theorem Knaster_Tarski:
   fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
@@ -52,8 +52,8 @@
       fix x
       assume "x \<in> ?H"
       then have "?a \<le> x" by (rule Inf_lower)
-      with `mono f` have "f ?a \<le> f x" ..
-      also from `x \<in> ?H` have "\<dots> \<le> x" ..
+      with \<open>mono f\<close> have "f ?a \<le> f x" ..
+      also from \<open>x \<in> ?H\<close> have "\<dots> \<le> x" ..
       finally have "f ?a \<le> x" .
     }
     then have "f ?a \<le> ?a" by (rule Inf_greatest)
@@ -61,13 +61,13 @@
       also presume "\<dots> \<le> f ?a"
       finally (order_antisym) show ?thesis .
     }
-    from `mono f` and `f ?a \<le> ?a` have "f (f ?a) \<le> f ?a" ..
+    from \<open>mono f\<close> and \<open>f ?a \<le> ?a\<close> have "f (f ?a) \<le> f ?a" ..
     then have "f ?a \<in> ?H" ..
     then show "?a \<le> f ?a" by (rule Inf_lower)
   qed
 qed
 
-text {* Above we have used several advanced Isar language elements,
+text \<open>Above we have used several advanced Isar language elements,
   such as explicit block structure and weak assumptions.  Thus we have
   mimicked the particular way of reasoning of the original text.
 
@@ -75,7 +75,7 @@
   achieve structured top-down decomposition of the problem at the
   outer level, while only the inner steps of reasoning are done in a
   forward manner.  We are certainly more at ease here, requiring only
-  the most basic features of the Isar language. *}
+  the most basic features of the Isar language.\<close>
 
 theorem Knaster_Tarski':
   fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
@@ -91,13 +91,13 @@
       fix x
       assume "x \<in> ?H"
       then have "?a \<le> x" by (rule Inf_lower)
-      with `mono f` have "f ?a \<le> f x" ..
-      also from `x \<in> ?H` have "\<dots> \<le> x" ..
+      with \<open>mono f\<close> have "f ?a \<le> f x" ..
+      also from \<open>x \<in> ?H\<close> have "\<dots> \<le> x" ..
       finally show "f ?a \<le> x" .
     qed
     show "?a \<le> f ?a"
     proof (rule Inf_lower)
-      from `mono f` and `f ?a \<le> ?a` have "f (f ?a) \<le> f ?a" ..
+      from \<open>mono f\<close> and \<open>f ?a \<le> ?a\<close> have "f (f ?a) \<le> f ?a" ..
       then show "f ?a \<in> ?H" ..
     qed
   qed
--- a/src/HOL/Isar_Examples/Mutilated_Checkerboard.thy	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/HOL/Isar_Examples/Mutilated_Checkerboard.thy	Tue Oct 07 21:01:31 2014 +0200
@@ -3,16 +3,16 @@
     Author:     Lawrence C Paulson, Cambridge University Computer Laboratory (original scripts)
 *)
 
-header {* The Mutilated Checker Board Problem *}
+header \<open>The Mutilated Checker Board Problem\<close>
 
 theory Mutilated_Checkerboard
 imports Main
 begin
 
-text {* The Mutilated Checker Board Problem, formalized inductively.
-  See \cite{paulson-mutilated-board} for the original tactic script version. *}
+text \<open>The Mutilated Checker Board Problem, formalized inductively.
+  See @{cite "paulson-mutilated-board"} for the original tactic script version.\<close>
 
-subsection {* Tilings *}
+subsection \<open>Tilings\<close>
 
 inductive_set tiling :: "'a set set \<Rightarrow> 'a set set"
   for A :: "'a set set"
@@ -21,7 +21,7 @@
 | Un: "a \<in> A \<Longrightarrow> t \<in> tiling A \<Longrightarrow> a \<subseteq> - t \<Longrightarrow> a \<union> t \<in> tiling A"
 
 
-text "The union of two disjoint tilings is a tiling."
+text \<open>The union of two disjoint tilings is a tiling.\<close>
 
 lemma tiling_Un:
   assumes "t \<in> tiling A"
@@ -30,21 +30,21 @@
   shows "t \<union> u \<in> tiling A"
 proof -
   let ?T = "tiling A"
-  from `t \<in> ?T` and `t \<inter> u = {}`
+  from \<open>t \<in> ?T\<close> and \<open>t \<inter> u = {}\<close>
   show "t \<union> u \<in> ?T"
   proof (induct t)
     case empty
-    with `u \<in> ?T` show "{} \<union> u \<in> ?T" by simp
+    with \<open>u \<in> ?T\<close> show "{} \<union> u \<in> ?T" by simp
   next
     case (Un a t)
     show "(a \<union> t) \<union> u \<in> ?T"
     proof -
       have "a \<union> (t \<union> u) \<in> ?T"
-        using `a \<in> A`
+        using \<open>a \<in> A\<close>
       proof (rule tiling.Un)
-        from `(a \<union> t) \<inter> u = {}` have "t \<inter> u = {}" by blast
+        from \<open>(a \<union> t) \<inter> u = {}\<close> have "t \<inter> u = {}" by blast
         then show "t \<union> u \<in> ?T" by (rule Un)
-        from `a \<subseteq> - t` and `(a \<union> t) \<inter> u = {}`
+        from \<open>a \<subseteq> - t\<close> and \<open>(a \<union> t) \<inter> u = {}\<close>
         show "a \<subseteq> - (t \<union> u)" by blast
       qed
       also have "a \<union> (t \<union> u) = (a \<union> t) \<union> u"
@@ -55,7 +55,7 @@
 qed
 
 
-subsection {* Basic properties of ``below'' *}
+subsection \<open>Basic properties of ``below''\<close>
 
 definition below :: "nat \<Rightarrow> nat set"
   where "below n = {i. i < n}"
@@ -77,7 +77,7 @@
 lemmas Sigma_Suc = Sigma_Suc1 Sigma_Suc2
 
 
-subsection {* Basic properties of ``evnodd'' *}
+subsection \<open>Basic properties of ``evnodd''\<close>
 
 definition evnodd :: "(nat \<times> nat) set \<Rightarrow> nat \<Rightarrow> (nat \<times> nat) set"
   where "evnodd A b = A \<inter> {(i, j). (i + j) mod 2 = b}"
@@ -109,7 +109,7 @@
   by (simp add: evnodd_def)
 
 
-subsection {* Dominoes *}
+subsection \<open>Dominoes\<close>
 
 inductive_set domino :: "(nat \<times> nat) set set"
 where
@@ -164,7 +164,7 @@
   shows "\<exists>i j. evnodd d b = {(i, j)}"  (is "?P d")
   using assms
 proof induct
-  from `b < 2` have b_cases: "b = 0 \<or> b = 1" by arith
+  from \<open>b < 2\<close> have b_cases: "b = 0 \<or> b = 1" by arith
   fix i j
   note [simp] = evnodd_empty evnodd_insert mod_Suc
   from b_cases show "?P {(i, j), (i, j + 1)}" by rule auto
@@ -182,7 +182,7 @@
 qed
 
 
-subsection {* Tilings of dominoes *}
+subsection \<open>Tilings of dominoes\<close>
 
 lemma tiling_domino_finite:
   assumes t: "t \<in> tiling domino"  (is "t \<in> ?T")
@@ -193,7 +193,7 @@
   fix a t assume "?F t"
   assume "a \<in> domino"
   then have "?F a" by (rule domino_finite)
-  from this and `?F t` show "?F (a \<union> t)" by (rule finite_UnI)
+  from this and \<open>?F t\<close> show "?F (a \<union> t)" by (rule finite_UnI)
 qed
 
 lemma tiling_domino_01:
@@ -206,8 +206,8 @@
 next
   case (Un a t)
   let ?e = evnodd
-  note hyp = `card (?e t 0) = card (?e t 1)`
-    and at = `a \<subseteq> - t`
+  note hyp = \<open>card (?e t 0) = card (?e t 1)\<close>
+    and at = \<open>a \<subseteq> - t\<close>
   have card_suc:
     "\<And>b. b < 2 \<Longrightarrow> card (?e (a \<union> t) b) = Suc (card (?e t b))"
   proof -
@@ -216,14 +216,14 @@
     have "?e (a \<union> t) b = ?e a b \<union> ?e t b" by (rule evnodd_Un)
     also obtain i j where e: "?e a b = {(i, j)}"
     proof -
-      from `a \<in> domino` and `b < 2`
+      from \<open>a \<in> domino\<close> and \<open>b < 2\<close>
       have "\<exists>i j. ?e a b = {(i, j)}" by (rule domino_singleton)
       then show ?thesis by (blast intro: that)
     qed
     also have "\<dots> \<union> ?e t b = insert (i, j) (?e t b)" by simp
     also have "card \<dots> = Suc (card (?e t b))"
     proof (rule card_insert_disjoint)
-      from `t \<in> tiling domino` have "finite t"
+      from \<open>t \<in> tiling domino\<close> have "finite t"
         by (rule tiling_domino_finite)
       then show "finite (?e t b)"
         by (rule evnodd_finite)
@@ -240,7 +240,7 @@
 qed
 
 
-subsection {* Main theorem *}
+subsection \<open>Main theorem\<close>
 
 definition mutilated_board :: "nat \<Rightarrow> nat \<Rightarrow> (nat \<times> nat) set"
   where
--- a/src/HOL/Isar_Examples/Nested_Datatype.thy	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/HOL/Isar_Examples/Nested_Datatype.thy	Tue Oct 07 21:01:31 2014 +0200
@@ -1,10 +1,10 @@
-header {* Nested datatypes *}
+header \<open>Nested datatypes\<close>
 
 theory Nested_Datatype
 imports Main
 begin
 
-subsection {* Terms and substitution *}
+subsection \<open>Terms and substitution\<close>
 
 datatype ('a, 'b) "term" =
   Var 'a
@@ -20,7 +20,7 @@
 
 lemmas subst_simps = subst_term.simps subst_term_list.simps
 
-text {* \medskip A simple lemma about composition of substitutions. *}
+text \<open>\medskip A simple lemma about composition of substitutions.\<close>
 
 lemma
   "subst_term (subst_term f1 \<circ> f2) t =
@@ -52,7 +52,7 @@
 qed
 
 
-subsection {* Alternative induction *}
+subsection \<open>Alternative induction\<close>
 
 lemma "subst_term (subst_term f1 \<circ> f2) t = subst_term f1 (subst_term f2 t)"
 proof (induct t rule: term.induct)
--- a/src/HOL/Isar_Examples/Peirce.thy	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/HOL/Isar_Examples/Peirce.thy	Tue Oct 07 21:01:31 2014 +0200
@@ -2,13 +2,13 @@
     Author:     Markus Wenzel, TU Muenchen
 *)
 
-header {* Peirce's Law *}
+header \<open>Peirce's Law\<close>
 
 theory Peirce
 imports Main
 begin
 
-text {* We consider Peirce's Law: $((A \impl B) \impl A) \impl A$.
+text \<open>We consider Peirce's Law: $((A \impl B) \impl A) \impl A$.
   This is an inherently non-intuitionistic statement, so its proof
   will certainly involve some form of classical contradiction.
 
@@ -17,7 +17,7 @@
   the negated goal may be introduced as an additional assumption.  This
   eventually leads to a contradiction.\footnote{The rule involved
   there is negation elimination; it holds in intuitionistic logic as
-  well.} *}
+  well.}\<close>
 
 theorem "((A \<longrightarrow> B) \<longrightarrow> A) \<longrightarrow> A"
 proof
@@ -28,13 +28,13 @@
     have "A \<longrightarrow> B"
     proof
       assume A
-      with `\<not> A` show B by contradiction
+      with \<open>\<not> A\<close> show B by contradiction
     qed
-    with `(A \<longrightarrow> B) \<longrightarrow> A` show A ..
+    with \<open>(A \<longrightarrow> B) \<longrightarrow> A\<close> show A ..
   qed
 qed
 
-text {* In the subsequent version the reasoning is rearranged by means
+text \<open>In the subsequent version the reasoning is rearranged by means
   of ``weak assumptions'' (as introduced by \isacommand{presume}).
   Before assuming the negated goal $\neg A$, its intended consequence
   $A \impl B$ is put into place in order to solve the main problem.
@@ -46,7 +46,7 @@
   \isacommand{show} in the context of weak assumptions then the latter
   give rise to new subgoals, which may be established separately.  In
   contrast, strong assumptions (as introduced by \isacommand{assume})
-  are solved immediately. *}
+  are solved immediately.\<close>
 
 theorem "((A \<longrightarrow> B) \<longrightarrow> A) \<longrightarrow> A"
 proof
@@ -54,18 +54,18 @@
   show A
   proof (rule classical)
     presume "A \<longrightarrow> B"
-    with `(A \<longrightarrow> B) \<longrightarrow> A` show A ..
+    with \<open>(A \<longrightarrow> B) \<longrightarrow> A\<close> show A ..
   next
     assume "\<not> A"
     show "A \<longrightarrow> B"
     proof
       assume A
-      with `\<not> A` show B by contradiction
+      with \<open>\<not> A\<close> show B by contradiction
     qed
   qed
 qed
 
-text {* Note that the goals stemming from weak assumptions may even be
+text \<open>Note that the goals stemming from weak assumptions may even be
   left until qed time, where they are eventually solved ``by
   assumption'' as well.  In that case there is really no fundamental
   difference between the two kinds of assumptions, apart from the
@@ -80,6 +80,6 @@
   several goals that only differ in their local contexts.  With strong
   assumptions these may still be solved in any order in a predictable
   way, while weak ones would quickly lead to great confusion,
-  eventually demanding even some backtracking. *}
+  eventually demanding even some backtracking.\<close>
 
 end
--- a/src/HOL/Isar_Examples/Puzzle.thy	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/HOL/Isar_Examples/Puzzle.thy	Tue Oct 07 21:01:31 2014 +0200
@@ -1,16 +1,16 @@
-header {* An old chestnut *}
+header \<open>An old chestnut\<close>
 
 theory Puzzle
 imports Main
 begin
 
-text_raw {* \footnote{A question from ``Bundeswettbewerb Mathematik''.
+text_raw \<open>\footnote{A question from ``Bundeswettbewerb Mathematik''.
   Original pen-and-paper proof due to Herbert Ehler; Isabelle tactic
-  script by Tobias Nipkow.} *}
+  script by Tobias Nipkow.}\<close>
 
-text {* \textbf{Problem.}  Given some function $f\colon \Nat \to \Nat$
+text \<open>\textbf{Problem.}  Given some function $f\colon \Nat \to \Nat$
   such that $f \ap (f \ap n) < f \ap (\idt{Suc} \ap n)$ for all $n$.
-  Demonstrate that $f$ is the identity. *}
+  Demonstrate that $f$ is the identity.\<close>
 
 theorem
   assumes f_ax: "\<And>n. f (f n) < f (Suc n)"
@@ -28,7 +28,7 @@
         also from f_ax have "\<dots> < f n" by (simp only: Suc)
         finally have "f m < f n" .
         with less have "m \<le> f m" .
-        also note `\<dots> < f n`
+        also note \<open>\<dots> < f n\<close>
         finally have "m < f n" .
         then have "n \<le> f n" by (simp only: Suc)
         then show ?thesis .
--- a/src/HOL/Isar_Examples/Summation.thy	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/HOL/Isar_Examples/Summation.thy	Tue Oct 07 21:01:31 2014 +0200
@@ -2,23 +2,23 @@
     Author:     Markus Wenzel
 *)
 
-header {* Summing natural numbers *}
+header \<open>Summing natural numbers\<close>
 
 theory Summation
 imports Main
 begin
 
-text {* Subsequently, we prove some summation laws of natural numbers
+text \<open>Subsequently, we prove some summation laws of natural numbers
   (including odds, squares, and cubes).  These examples demonstrate
   how plain natural deduction (including induction) may be combined
-  with calculational proof. *}
+  with calculational proof.\<close>
 
 
-subsection {* Summation laws *}
+subsection \<open>Summation laws\<close>
 
-text {* The sum of natural numbers $0 + \cdots + n$ equals $n \times
+text \<open>The sum of natural numbers $0 + \cdots + n$ equals $n \times
   (n + 1)/2$.  Avoiding formal reasoning about division, we prove this
-  equation multiplied by $2$. *}
+  equation multiplied by $2$.\<close>
 
 theorem sum_of_naturals:
   "2 * (\<Sum>i::nat=0..n. i) = n * (n + 1)"
@@ -35,7 +35,7 @@
     by simp
 qed
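
The step case of this induction boils down to a short calculation, sketched here informally (outside the formal proof text):

\[ 2\sum_{i=0}^{n+1} i \;=\; 2\sum_{i=0}^{n} i + 2(n+1) \;=\; n(n+1) + 2(n+1) \;=\; (n+1)(n+2), \]

which is the claimed equation for $\idt{Suc} \ap n$.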
 
-text {* The above proof is a typical instance of mathematical
+text \<open>The above proof is a typical instance of mathematical
   induction.  The main statement is viewed as some $\var{P} \ap n$
   that is split by the induction method into base case $\var{P} \ap
   0$, and step case $\var{P} \ap n \Impl \var{P} \ap (\idt{Suc} \ap
@@ -71,11 +71,11 @@
   $x:A$ instead.
 
   \end{enumerate}
-*}
+\<close>
 
-text {* \medskip We derive further summation laws for odds, squares,
+text \<open>\medskip We derive further summation laws for odds, squares,
   and cubes as follows.  The basic technique of induction plus
-  calculation is the same as before. *}
+  calculation is the same as before.\<close>
 
 theorem sum_of_odds:
   "(\<Sum>i::nat=0..<n. 2 * i + 1) = n^Suc (Suc 0)"
@@ -93,9 +93,9 @@
     by simp
 qed
 
-text {* Subsequently we require some additional tweaking of Isabelle's
+text \<open>Subsequently we require some additional tweaking of Isabelle's
   built-in arithmetic simplifications, such as bringing in
-  distributivity by hand. *}
+  distributivity by hand.\<close>
 
 lemmas distrib = add_mult_distrib add_mult_distrib2
 
@@ -132,7 +132,7 @@
     by simp
 qed
 
-text {* Note that in contrast to older traditions of tactical proof
+text \<open>Note that in contrast to older traditions of tactical proof
   scripts, the structured proof applies induction on the original,
   unsimplified statement.  This allows us to state the induction cases
   robustly and conveniently.  Simplification (or other automated)
@@ -143,6 +143,6 @@
   $\idt{simp}$ or $\idt{auto}$ should normally never be used as
   initial proof methods with a nested sub-proof to address the
   automatically produced situation, but only as terminal ones to solve
-  sub-problems.  *}
+  sub-problems.\<close>
 
 end
--- a/src/HOL/Unix/Nested_Environment.thy	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/HOL/Unix/Nested_Environment.thy	Tue Oct 07 21:01:31 2014 +0200
@@ -2,13 +2,13 @@
     Author:     Markus Wenzel, TU Muenchen
 *)
 
-header {* Nested environments *}
+header \<open>Nested environments\<close>
 
 theory Nested_Environment
 imports Main
 begin
 
-text {*
+text \<open>
   Consider a partial function @{term [source] "e :: 'a \<Rightarrow> 'b option"};
   this may be understood as an \emph{environment} mapping indexes
   @{typ 'a} to optional entry values @{typ 'b} (cf.\ the basic theory
@@ -17,31 +17,31 @@
   basic values or again proper environments.  Then each entry is
   accessed by a \emph{path}, i.e.\ a list of indexes leading to its
   position within the structure.
-*}
+\<close>
 
 datatype (dead 'a, dead 'b, dead 'c) env =
     Val 'a
   | Env 'b  "'c \<Rightarrow> ('a, 'b, 'c) env option"
 
-text {*
+text \<open>
   \medskip In the type @{typ "('a, 'b, 'c) env"} the parameter @{typ
   'a} refers to basic values (occurring in terminal positions), type
   @{typ 'b} to values associated with proper (inner) environments, and
   type @{typ 'c} to the index type for branching.  Note that there
   is no restriction on any of these types.  In particular, arbitrary
   branching may yield rather large (transfinite) tree structures.
-*}
+\<close>
 
 
-subsection {* The lookup operation *}
+subsection \<open>The lookup operation\<close>
 
-text {*
+text \<open>
   Lookup in nested environments works by following a given path of
   index elements, leading to an optional result (a terminal value or
   nested environment).  A \emph{defined position} within a nested
   environment is one where @{term lookup} at its path does not yield
   @{term None}.
-*}
+\<close>
 
 primrec lookup :: "('a, 'b, 'c) env \<Rightarrow> 'c list \<Rightarrow> ('a, 'b, 'c) env option"
   and lookup_option :: "('a, 'b, 'c) env option \<Rightarrow> 'c list \<Rightarrow> ('a, 'b, 'c) env option"
@@ -56,10 +56,10 @@
 
 hide_const lookup_option
 
-text {*
+text \<open>
   \medskip The characteristic cases of @{term lookup} are expressed by
   the following equalities.
-*}
+\<close>
 
 theorem lookup_nil: "lookup e [] = Some e"
   by (cases e) simp_all
@@ -90,12 +90,12 @@
           | Some e \<Rightarrow> lookup e xs)))"
   by (simp split: list.split env.split)
 
-text {*
+text \<open>
   \medskip Displaced @{term lookup} operations, relative to a certain
   base path prefix, may be reduced as follows.  There are two cases,
   depending on whether the environment actually extends far enough to
   follow the base path.
-*}
+\<close>
 
 theorem lookup_append_none:
   assumes "lookup env xs = None"
@@ -119,7 +119,7 @@
       with Env show ?thesis by simp
     next
       case (Some e)
-      note es = `es x = Some e`
+      note es = \<open>es x = Some e\<close>
       show ?thesis
       proof (cases "lookup e xs")
         case None
@@ -144,7 +144,7 @@
   then show "lookup env ([] @ ys) = lookup e ys" by simp
 next
   case (Cons x xs)
-  note asm = `lookup env (x # xs) = Some e`
+  note asm = \<open>lookup env (x # xs) = Some e\<close>
   show "lookup env ((x # xs) @ ys) = lookup e ys"
   proof (cases env)
     case (Val a)
@@ -159,7 +159,7 @@
       then show ?thesis ..
     next
       case (Some e')
-      note es = `es x = Some e'`
+      note es = \<open>es x = Some e'\<close>
       show ?thesis
       proof (cases "lookup e' xs")
         case None
@@ -176,12 +176,12 @@
   qed
 qed
 
-text {*
+text \<open>
   \medskip Successful @{term lookup} deeper down an environment
   structure means we are able to peek further up as well.  Note that
   this is basically just the contrapositive statement of @{thm
   [source] lookup_append_none} above.
-*}
+\<close>
 
 theorem lookup_some_append:
   assumes "lookup env (xs @ ys) = Some e"
@@ -193,11 +193,11 @@
   then show ?thesis by (simp)
 qed
 
-text {*
+text \<open>
   The subsequent statement describes in more detail how a successful
   @{term lookup} with a non-empty path results in a certain situation
   at any upper position.
-*}
+\<close>
 
 theorem lookup_some_upper:
   assumes "lookup env (xs @ y # ys) = Some e"
@@ -236,14 +236,14 @@
 qed
 
 
-subsection {* The update operation *}
+subsection \<open>The update operation\<close>
 
-text {*
+text \<open>
   Update at a certain position in a nested environment may either
   delete an existing entry or overwrite an existing one.  Note that
   update at undefined positions is simply absorbed, i.e.\ the
   environment is left unchanged.
-*}
+\<close>
 
 primrec update :: "'c list \<Rightarrow> ('a, 'b, 'c) env option \<Rightarrow>
     ('a, 'b, 'c) env \<Rightarrow> ('a, 'b, 'c) env"
@@ -264,10 +264,10 @@
 
 hide_const update_option
 
-text {*
+text \<open>
   \medskip The characteristic cases of @{term update} are expressed by
   the following equalities.
-*}
+\<close>
 
 theorem update_nil_none: "update [] None env = env"
   by (cases env) simp_all
@@ -314,11 +314,11 @@
                   | Some e \<Rightarrow> Some (update (y # ys) opt e)))))))"
   by (simp split: list.split env.split option.split)
 
-text {*
+text \<open>
   \medskip The most basic correspondence of @{term lookup} and @{term
   update} states that after @{term update} at a defined position,
   subsequent @{term lookup} operations would yield the new value.
-*}
+\<close>
 
 theorem lookup_update_some:
   assumes "lookup env xs = Some e"
@@ -331,7 +331,7 @@
 next
   case (Cons x xs)
   note hyp = Cons.hyps
-    and asm = `lookup env (x # xs) = Some e`
+    and asm = \<open>lookup env (x # xs) = Some e\<close>
   show ?case
   proof (cases env)
     case (Val a)
@@ -346,7 +346,7 @@
       then show ?thesis ..
     next
       case (Some e')
-      note es = `es x = Some e'`
+      note es = \<open>es x = Some e'\<close>
       show ?thesis
       proof (cases xs)
         case Nil
@@ -361,13 +361,13 @@
   qed
 qed
 
-text {*
+text \<open>
   \medskip The properties of displaced @{term update} operations are
   analogous to those of @{term lookup} above.  There are two cases:
   below an undefined position @{term update} is absorbed altogether,
   and below a defined position @{term update} affects subsequent
   @{term lookup} operations in the obvious way.
-*}
+\<close>
 
 theorem update_append_none:
   assumes "lookup env xs = None"
@@ -380,7 +380,7 @@
 next
   case (Cons x xs)
   note hyp = Cons.hyps
-    and asm = `lookup env (x # xs) = None`
+    and asm = \<open>lookup env (x # xs) = None\<close>
   show "update ((x # xs) @ y # ys) opt env = env"
   proof (cases env)
     case (Val a)
@@ -390,12 +390,12 @@
     show ?thesis
     proof (cases "es x")
       case None
-      note es = `es x = None`
+      note es = \<open>es x = None\<close>
       show ?thesis
         by (cases xs) (simp_all add: es Env fun_upd_idem_iff)
     next
       case (Some e)
-      note es = `es x = Some e`
+      note es = \<open>es x = Some e\<close>
       show ?thesis
       proof (cases xs)
         case Nil
@@ -423,7 +423,7 @@
 next
   case (Cons x xs)
   note hyp = Cons.hyps
-    and asm = `lookup env (x # xs) = Some e`
+    and asm = \<open>lookup env (x # xs) = Some e\<close>
   show "lookup (update ((x # xs) @ y # ys) opt env) (x # xs) =
       Some (update (y # ys) opt e)"
   proof (cases env)
@@ -439,7 +439,7 @@
       then show ?thesis ..
     next
       case (Some e')
-      note es = `es x = Some e'`
+      note es = \<open>es x = Some e'\<close>
       show ?thesis
       proof (cases xs)
         case Nil
@@ -456,12 +456,12 @@
   qed
 qed
 
-text {*
+text \<open>
   \medskip Apparently, @{term update} does not affect the result of
   subsequent @{term lookup} operations at independent positions, i.e.\
   in case that the paths for @{term update} and @{term lookup} fork at
   a certain point.
-*}
+\<close>
 
 theorem lookup_update_other:
   assumes neq: "y \<noteq> (z::'c)"
@@ -519,7 +519,7 @@
 qed
 
 
-subsection {* Code generation *}
+subsection \<open>Code generation\<close>
 
 lemma [code, code del]:
   "(HOL.equal :: (_, _, _) env \<Rightarrow> _) = HOL.equal" ..
@@ -549,7 +549,7 @@
     show ?lhs
     proof
       fix z
-      from `?rhs` have "?prop z" ..
+      from \<open>?rhs\<close> have "?prop z" ..
       then show "f z = g z" by (auto split: option.splits)
     qed
   qed
--- a/src/HOL/Unix/Unix.thy	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/HOL/Unix/Unix.thy	Tue Oct 07 21:01:31 2014 +0200
@@ -2,7 +2,7 @@
     Author:     Markus Wenzel, TU Muenchen
 *)
 
-header {* Unix file-systems \label{sec:unix-file-system} *}
+header \<open>Unix file-systems \label{sec:unix-file-system}\<close>
 
 theory Unix
 imports
@@ -10,7 +10,7 @@
   "~~/src/HOL/Library/Sublist"
 begin
 
-text {*
+text \<open>
   We give a simple mathematical model of the basic structures
   underlying the Unix file-system, together with a few fundamental
   operations that could be imagined to be performed internally by the
@@ -34,26 +34,26 @@
   file-systems to be discussed here.  First of all, we ignore
   character and block special files, pipes, sockets, hard links,
   symbolic links, and mount points.
-*}
+\<close>
 
 
-subsection {* Names *}
+subsection \<open>Names\<close>
 
-text {*
+text \<open>
   User ids and file name components shall be represented by natural
   numbers (without loss of generality).  We do not bother about
   encoding of actual names (e.g.\ strings), nor a mapping between user
   names and user ids as would be present in reality.
-*}
+\<close>
 
 type_synonym uid = nat
 type_synonym name = nat
 type_synonym path = "name list"
 
 
-subsection {* Attributes *}
+subsection \<open>Attributes\<close>
 
-text {*
+text \<open>
   Unix file attributes mainly consist of \emph{owner} information and
   a number of \emph{permission} bits which control access for
   ``user'', ``group'', and ``others'' (see the Unix man pages @{text
@@ -70,8 +70,8 @@
   of multiple groups and group membership, but pretend that everyone
   is member of a single global group.\footnote{A general HOL model of
   user group structures and related issues is given in
-  \cite{Naraschewski:2001}.}
-*}
+  @{cite "Naraschewski:2001"}.}
+\<close>
 
 datatype perm =
     Readable
@@ -84,7 +84,7 @@
   owner :: uid
   others :: perms
 
-text {*
+text \<open>
   For plain files @{term Readable} and @{term Writable} specify read
   and write access to the actual content, i.e.\ the string of text
   stored here.  For directories @{term Readable} determines if the set
@@ -101,16 +101,16 @@
   directories encountered on the path would have to grant @{term
   Executable}.  We ignore this detail and pretend that all directories
   give @{term Executable} permission to anybody.
-*}
+\<close>
 
 
-subsection {* Files *}
+subsection \<open>Files\<close>
 
-text {*
+text \<open>
   In order to model the general tree structure of a Unix file-system
   we use the arbitrarily branching datatype @{typ "('a, 'b, 'c) env"}
   from the standard library of Isabelle/HOL
-  \cite{Bauer-et-al:2002:HOL-Library}.  This type provides
+  @{cite "Bauer-et-al:2002:HOL-Library"}.  This type provides
   constructors @{term Val} and @{term Env} as follows:
 
   \medskip
@@ -128,11 +128,11 @@
   "att \<times> string"} (representing plain files), @{typ att} (for
   attributes of directory nodes), and @{typ name} (for the index type
   of directory nodes).
-*}
+\<close>
 
 type_synonym "file" = "(att \<times> string, att, name) env"
 
-text {*
+text \<open>
   \medskip The HOL library also provides @{term lookup} and @{term
   update} operations for general tree structures with the subsequent
   primitive recursive characterizations.
@@ -149,15 +149,15 @@
   @{thm [display, indent = 2, eta_contract = false] update_eq [no_vars]}
 
   Several further properties of these operations are proven in
-  \cite{Bauer-et-al:2002:HOL-Library}.  These will be routinely used
+  @{cite "Bauer-et-al:2002:HOL-Library"}.  These will be routinely used
   later on without further notice.
 
   \bigskip Apparently, the elements of type @{typ "file"} contain an
   @{typ att} component in either case.  We now define a few auxiliary
   operations to manipulate this field uniformly, following the
   conventions for record types in Isabelle/HOL
-  \cite{Nipkow-et-al:2000:HOL}.
-*}
+  @{cite "Nipkow-et-al:2000:HOL"}.
+\<close>
 
 definition
   "attributes file =
@@ -188,16 +188,16 @@
   by (simp add: map_attributes_def)
 
 
-subsection {* Initial file-systems *}
+subsection \<open>Initial file-systems\<close>
 
-text {*
+text \<open>
   Given a set of \emph{known users} a file-system shall be initialized
   by providing an empty home directory for each user, with read-only
   access for everyone else.  (Note that we may directly use the user
   id as home directory name, since both types have been identified.)
   Certainly, the very root directory is owned by the super user (who
   has user id 0).
-*}
+\<close>
 
 definition
   "init users =
@@ -206,9 +206,9 @@
         else None)"
 
 
-subsection {* Accessing file-systems *}
+subsection \<open>Accessing file-systems\<close>
 
-text {*
+text \<open>
   The main internal file-system operation is access of a file by a
   user, requesting a certain set of permissions.  The resulting @{typ
   "file option"} indicates if a file had been present at the
@@ -216,9 +216,9 @@
   permissions recorded within the file-system.
 
   Note that by the rules of Unix file-system security (e.g.\
-  \cite{Tanenbaum:1992}) both the super-user and owner may always
+  @{cite "Tanenbaum:1992"}) both the super-user and owner may always
   access a file unconditionally (in our simplified model).
-*}
+\<close>
 
 definition
   "access root path uid perms =
@@ -231,7 +231,7 @@
         then Some file
         else None)"
 
-text {*
+text \<open>
   \medskip Successful access to a certain file is the main
   prerequisite for system-calls to be applicable (cf.\
   \secref{sec:unix-trans}).  Any modification of the file-system is
@@ -241,7 +241,7 @@
   @{term lookup} function, with additional checking of
   attributes. Subsequently we establish a few auxiliary facts that
   stem from the primitive @{term lookup} used within @{term access}.
-*}
+\<close>
 
 lemma access_empty_lookup: "access root path uid {} = lookup root path"
   by (simp add: access_def split: option.splits)
@@ -264,13 +264,13 @@
 qed
 
 
-section {* File-system transitions \label{sec:unix-trans} *}
+section \<open>File-system transitions \label{sec:unix-trans}\<close>
 
-subsection {* Unix system calls \label{sec:unix-syscall} *}
+subsection \<open>Unix system calls \label{sec:unix-syscall}\<close>
 
-text {*
+text \<open>
   According to established operating system design (cf.\
-  \cite{Tanenbaum:1992}) user space processes may only initiate system
+  @{cite "Tanenbaum:1992"}) user space processes may only initiate system
   operations by a fixed set of \emph{system-calls}.  This enables the
   kernel to enforce certain security policies in the first
   place.\footnote{Incidentally, this is the very same principle employed
@@ -282,7 +282,7 @@
   operation} for the syntax of system-calls, together with an
   inductive definition of file-system state transitions of the form
   @{text "root \<midarrow>x\<rightarrow> root'"} for the operational semantics.
-*}
+\<close>
 
 datatype operation =
     Read uid string path
@@ -294,13 +294,13 @@
   | Rmdir uid path
   | Readdir uid "name set" path
 
-text {*
+text \<open>
   The @{typ uid} field of an operation corresponds to the
   \emph{effective user id} of the underlying process, although our
   model never mentions processes explicitly.  The other parameters are
   provided as arguments by the caller; the @{term path} one is common
   to all kinds of system-calls.
-*}
+\<close>
 
 primrec uid_of :: "operation \<Rightarrow> uid"
 where
@@ -324,7 +324,7 @@
   | "path_of (Rmdir uid path) = path"
   | "path_of (Readdir uid names path) = path"
 
-text {*
+text \<open>
   \medskip Note that we have omitted explicit @{text Open} and @{text
   Close} operations, pretending that @{term Read} and @{term Write}
   would already take care of this behind the scenes.  Thus we have
@@ -345,7 +345,7 @@
   via transitions of the file-system configuration.  This is expressed
   as an inductive relation (although there is no actual recursion
   involved here).
-*}
+\<close>
 
 inductive transition :: "file \<Rightarrow> operation \<Rightarrow> file \<Rightarrow> bool"
   ("_ \<midarrow>_\<rightarrow> _" [90, 1000, 90] 100)
@@ -392,7 +392,7 @@
       names = dom dir \<Longrightarrow>
       root \<midarrow>(Readdir uid names path)\<rightarrow> root"
 
-text {*
+text \<open>
   \medskip Certainly, the above specification is central to the whole
   formal development.  Any of the results to be established later on
   are only meaningful to the outside world if this transition system
@@ -403,16 +403,16 @@
   If in doubt, one may compare our definition with the
   informal specifications given in the corresponding Unix man pages, or
   even peek at an actual implementation such as
-  \cite{Torvalds-et-al:Linux}.  Another common way to gain confidence
+  @{cite "Torvalds-et-al:Linux"}.  Another common way to gain confidence
   into the formal model is to run simple simulations (see
   \secref{sec:unix-examples}), and check the results with that of
   experiments performed on a real Unix system.
-*}
+\<close>
 
 
-subsection {* Basic properties of single transitions \label{sec:unix-single-trans} *}
+subsection \<open>Basic properties of single transitions \label{sec:unix-single-trans}\<close>
 
-text {*
+text \<open>
   The transition system @{text "root \<midarrow>x\<rightarrow> root'"} defined above
   determines a unique result @{term root'} from given @{term root} and
   @{term x} (this holds rather trivially, since there is even only
@@ -420,7 +420,7 @@
   simplify our subsequent development to some extent, since we only
   have to reason about a partial function rather than a general
   relation.
-*}
+\<close>
 
 theorem transition_uniq:
   assumes root': "root \<midarrow>x\<rightarrow> root'"
@@ -453,11 +453,11 @@
   with root' show ?thesis by cases blast+
 qed
 
-text {*
+text \<open>
   \medskip Apparently, file-system transitions are \emph{type-safe} in
   the sense that the result of transforming an actual directory is
   again a directory.
-*}
+\<close>
 
 theorem transition_type_safe:
   assumes tr: "root \<midarrow>x\<rightarrow> root'"
@@ -476,25 +476,25 @@
     by (auto simp add: update_eq split: list.splits)
 qed
 
-text {*
+text \<open>
   The previous result may be seen as the most basic invariant on the
   file-system state that is enforced by any proper kernel
   implementation.  So user processes --- being bound to the
   system-call interface --- may never mess up a file-system such that
   the root becomes a plain file instead of a directory, which would be
   a strange situation indeed.
-*}
+\<close>
 
 
-subsection {* Iterated transitions *}
+subsection \<open>Iterated transitions\<close>
 
-text {*
+text \<open>
   Iterated system transitions via finite sequences of system
   operations are modeled inductively as follows.  In a sense, this
   relation describes the cumulative effect of the sequence of
   system-calls issued by a number of running processes over a finite
   amount of time.
-*}
+\<close>
 
 inductive transitions :: "file \<Rightarrow> operation list \<Rightarrow> file \<Rightarrow> bool"
   ("_ =_\<Rightarrow> _" [90, 1000, 90] 100)
@@ -502,11 +502,11 @@
     nil: "root =[]\<Rightarrow> root"
   | cons: "root \<midarrow>x\<rightarrow> root' \<Longrightarrow> root' =xs\<Rightarrow> root'' \<Longrightarrow> root =(x # xs)\<Rightarrow> root''"
 
-text {*
+text \<open>
   \medskip We establish a few basic facts relating iterated
   transitions with single ones, according to the recursive structure
   of lists.
-*}
+\<close>
 
 lemma transitions_nil_eq: "root =[]\<Rightarrow> root' = (root = root')"
 proof
@@ -529,12 +529,12 @@
     by (blast intro: transitions.cons)
 qed
 
-text {*
+text \<open>
   The next two rules show how to ``destruct'' known transition
   sequences.  Note that the second one actually relies on the
   uniqueness property of the basic transition system (see
   \secref{sec:unix-single-trans}).
-*}
+\<close>
 
 lemma transitions_nilD: "root =[]\<Rightarrow> root' \<Longrightarrow> root' = root"
   by (simp add: transitions_nil_eq)
@@ -550,12 +550,12 @@
   with root'' show "root' =xs\<Rightarrow> root''" by simp
 qed
 
-text {*
+text \<open>
   \medskip The following fact shows how an invariant @{term Q} of
   single transitions with property @{term P} may be transferred to
   iterated transitions.  The proof is rather obvious by rule induction
   over the definition of @{term "root =xs\<Rightarrow> root'"}.
-*}
+\<close>
 
 lemma transitions_invariant:
   assumes r: "\<And>r x r'. r \<midarrow>x\<rightarrow> r' \<Longrightarrow> Q r \<Longrightarrow> P x \<Longrightarrow> Q r'"
@@ -567,19 +567,19 @@
   show ?case by (rule nil.prems)
 next
   case (cons root x root' xs root'')
-  note P = `\<forall>x \<in> set (x # xs). P x`
+  note P = \<open>\<forall>x \<in> set (x # xs). P x\<close>
   then have "P x" by simp
-  with `root \<midarrow>x\<rightarrow> root'` and `Q root` have Q': "Q root'" by (rule r)
+  with \<open>root \<midarrow>x\<rightarrow> root'\<close> and \<open>Q root\<close> have Q': "Q root'" by (rule r)
   from P have "\<forall>x \<in> set xs. P x" by simp
   with Q' show "Q root''" by (rule cons.hyps)
 qed
 
-text {*
+text \<open>
   As an example of applying the previous result, we transfer the basic
   type-safety property (see \secref{sec:unix-single-trans}) from
   single transitions to iterated ones, which is a rather obvious
   result indeed.
-*}
+\<close>
 
 theorem transitions_type_safe:
   assumes "root =xs\<Rightarrow> root'"
@@ -591,9 +591,9 @@
 qed
 
 
-section {* Executable sequences *}
+section \<open>Executable sequences\<close>
 
-text {*
+text \<open>
   An inductively defined relation such as the one of @{text "root \<midarrow>x\<rightarrow>
   root'"} (see \secref{sec:unix-syscall}) has two main aspects.  First
   of all, the resulting system admits a certain set of transition
@@ -614,16 +614,16 @@
   executions; exhaustive reasoning will be employed only later on (see
   \secref{sec:unix-bogosity}), when we shall demonstrate that certain
   behavior is \emph{not} possible.
-*}
+\<close>
 
 
-subsection {* Possible transitions *}
+subsection \<open>Possible transitions\<close>
 
-text {*
+text \<open>
   Rather obviously, a list of system operations can be executed within
   a certain state if there is a result state reached by an iterated
   transition.
-*}
+\<close>
 
 definition "can_exec root xs = (\<exists>root'. root =xs\<Rightarrow> root')"
 
@@ -634,10 +634,10 @@
     "root \<midarrow>x\<rightarrow> root' \<Longrightarrow> can_exec root' xs \<Longrightarrow> can_exec root (x # xs)"
   unfolding can_exec_def by (blast intro: transitions.intros)
 
-text {*
+text \<open>
   \medskip If we already know that a certain sequence can be
   executed, we may destruct it backwards into individual transitions.
-*}
+\<close>
 
 lemma can_exec_snocD: "can_exec root (xs @ [y])
     \<Longrightarrow> \<exists>root' root''. root =xs\<Rightarrow> root' \<and> root' \<midarrow>y\<rightarrow> root''"
@@ -647,7 +647,7 @@
     by (simp add: can_exec_def transitions_nil_eq transitions_cons_eq)
 next
   case (Cons x xs)
-  from `can_exec root ((x # xs) @ [y])` obtain r root'' where
+  from \<open>can_exec root ((x # xs) @ [y])\<close> obtain r root'' where
       x: "root \<midarrow>x\<rightarrow> r" and
       xs_y: "r =(xs @ [y])\<Rightarrow> root''"
     by (auto simp add: can_exec_def transitions_nil_eq transitions_cons_eq)
@@ -659,35 +659,35 @@
 qed
 
 
-subsection {* Example executions \label{sec:unix-examples} *}
+subsection \<open>Example executions \label{sec:unix-examples}\<close>
 
-text {*
+text \<open>
   We are now ready to perform a few experiments within our formal
   model of Unix system-calls.  The common technique is to alternate
   introduction rules of the transition system (see
   \secref{sec:unix-trans}), and steps to solve any emerging side
   conditions using algebraic properties of the underlying file-system
   structures (see \secref{sec:unix-file-system}).
-*}
+\<close>
 
 lemmas eval = access_def init_def
 
 theorem "u \<in> users \<Longrightarrow> can_exec (init users)
     [Mkdir u perms [u, name]]"
   apply (rule can_exec_cons)
-    -- {* back-chain @{term can_exec} (of @{term [source] Cons}) *}
+    -- \<open>back-chain @{term can_exec} (of @{term [source] Cons})\<close>
   apply (rule mkdir)
-    -- {* back-chain @{term Mkdir} *}
+    -- \<open>back-chain @{term Mkdir}\<close>
   apply (force simp add: eval)+
-    -- {* solve preconditions of @{term Mkdir} *}
+    -- \<open>solve preconditions of @{term Mkdir}\<close>
   apply (simp add: eval)
-    -- {* peek at resulting dir (optional) *}
-  txt {* @{subgoals [display]} *}
+    -- \<open>peek at resulting dir (optional)\<close>
+  txt \<open>@{subgoals [display]}\<close>
   apply (rule can_exec_nil)
-    -- {* back-chain @{term can_exec} (of @{term [source] Nil}) *}
+    -- \<open>back-chain @{term can_exec} (of @{term [source] Nil})\<close>
   done
 
-text {*
+text \<open>
   By inspecting the result shown just before the final step above, we
   may gain confidence that our specification of Unix system-calls
   actually makes sense.  Further common errors are usually exhibited
@@ -695,7 +695,7 @@
 
   \medskip Here are a few further experiments, using the same
   techniques as before.
-*}
+\<close>
 
 theorem "u \<in> users \<Longrightarrow> can_exec (init users)
    [Creat u perms [u, name],
@@ -708,7 +708,7 @@
   apply (rule unlink)
   apply (force simp add: eval)+
   apply (simp add: eval)
-  txt {* peek at result: @{subgoals [display]} *}
+  txt \<open>peek at result: @{subgoals [display]}\<close>
   apply (rule can_exec_nil)
   done
 
@@ -722,7 +722,7 @@
     Readdir u {name\<^sub>3, name\<^sub>4} [u, name\<^sub>1, name\<^sub>2]]"
   apply (rule can_exec_cons, rule transition.intros,
     (force simp add: eval)+, (simp add: eval)?)+
-  txt {* peek at result: @{subgoals [display]} *}
+  txt \<open>peek at result: @{subgoals [display]}\<close>
   apply (rule can_exec_nil)
   done
 
@@ -735,28 +735,28 @@
     Read u ''foo'' [u, name\<^sub>1, name\<^sub>2, name\<^sub>3]]"
   apply (rule can_exec_cons, rule transition.intros,
     (force simp add: eval)+, (simp add: eval)?)+
-  txt {* peek at result: @{subgoals [display]} *}
+  txt \<open>peek at result: @{subgoals [display]}\<close>
   apply (rule can_exec_nil)
   done
 
 
-section {* Odd effects --- treated formally \label{sec:unix-bogosity} *}
+section \<open>Odd effects --- treated formally \label{sec:unix-bogosity}\<close>
 
-text {*
+text \<open>
   We are now ready to give a completely formal treatment of the
   slightly odd situation discussed in the introduction (see
   \secref{sec:unix-intro}): the file-system can easily reach a state
   where a user is unable to remove his very own directory, because it
   is still populated by items placed there by another user in an
   uncouth manner.
-*}
+\<close>
 
-subsection {* The general procedure \label{sec:unix-inv-procedure} *}
+subsection \<open>The general procedure \label{sec:unix-inv-procedure}\<close>
 
-text {*
+text \<open>
   The following theorem expresses the general procedure we are
   following to achieve the main result.
-*}
+\<close>
 
 theorem general_procedure:
   assumes cannot_y: "\<And>r r'. Q r \<Longrightarrow> r \<midarrow>y\<rightarrow> r' \<Longrightarrow> False"
@@ -780,7 +780,7 @@
   then show ?thesis by blast
 qed
 
-text {*
+text \<open>
   Here @{prop "P x"} refers to the restriction on file-system
   operations that are admitted after having reached the critical
   configuration; according to the problem specification this will
@@ -790,16 +790,16 @@
   @{term Q} is a suitable (auxiliary) invariant over the file-system;
   choosing @{term Q} adequately is very important to make the proof
   work (see \secref{sec:unix-inv-lemmas}).
-*}
+\<close>
 
 
-subsection {* The particular situation *}
+subsection \<open>The particular situation\<close>
 
-text {*
+text \<open>
   We introduce a few global declarations and axioms to describe the
   particular situation considered here.  Thus we avoid excessive use
   of local parameters in the subsequent development.
-*}
+\<close>
 
 locale situation =
   fixes users :: "uid set"
@@ -827,21 +827,21 @@
 definition
   "bogus_path = [user\<^sub>1, name\<^sub>1, name\<^sub>2]"
 
-text {*
+text \<open>
   The @{term bogus} operations are the ones that lead into the uncouth
   situation; @{term bogus_path} is the key position within the
   file-system where things go awry.
-*}
+\<close>
 
 
-subsection {* Invariance lemmas \label{sec:unix-inv-lemmas} *}
+subsection \<open>Invariance lemmas \label{sec:unix-inv-lemmas}\<close>
 
-text {*
+text \<open>
   The following invariant over the root file-system describes the
   bogus situation in an abstract manner: located at a certain @{term
   path} within the file-system is a non-empty directory that is
   neither owned nor writable by @{term user\<^sub>1}.
-*}
+\<close>
 
 definition
   "invariant root path =
@@ -850,7 +850,7 @@
       user\<^sub>1 \<noteq> owner att \<and>
       access root path user\<^sub>1 {Writable} = None)"
 
-text {*
+text \<open>
   Following the general procedure (see
   \secref{sec:unix-inv-procedure}) we will now establish the three key
   lemmas required to yield the final result.
@@ -880,7 +880,7 @@
 
   \medskip The first two lemmas are technically straightforward ---
   we just have to inspect rather special cases.
-*}
+\<close>
 
 lemma cannot_rmdir:
   assumes inv: "invariant root bogus_path"
@@ -913,13 +913,13 @@
     -- "check the invariant"
   done
 
-text {*
+text \<open>
   \medskip At last we are left with the main effort to show that the
   ``bogosity'' invariant is preserved by any file-system operation
   performed by @{term user\<^sub>1} alone.  Note that this holds for
   any @{term path} given; the particular @{term bogus_path} is not
   required here.
-*}
+\<close>
 
 lemma preserve_invariant:
   assumes tr: "root \<midarrow>x\<rightarrow> root'"
@@ -1056,13 +1056,13 @@
 qed
 
 
-subsection {* Putting it all together \label{sec:unix-main-result} *}
+subsection \<open>Putting it all together \label{sec:unix-main-result}\<close>
 
-text {*
+text \<open>
   The main result is now imminent, just by composing the three
   invariance lemmas (see \secref{sec:unix-inv-lemmas}) according to the
   overall procedure (see \secref{sec:unix-inv-procedure}).
-*}
+\<close>
 
 corollary
   assumes bogus: "init users =bogus\<Rightarrow> root"
--- a/src/Pure/General/file.scala	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/Pure/General/file.scala	Tue Oct 07 21:01:31 2014 +0200
@@ -116,6 +116,12 @@
     File.write(path, text)
   }
 
+  def write_backup2(path: Path, text: CharSequence)
+  {
+    path.file renameTo path.backup2.file
+    File.write(path, text)
+  }
+
 
   /* copy */
 
--- a/src/Pure/General/path.scala	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/Pure/General/path.scala	Tue Oct 07 21:01:31 2014 +0200
@@ -162,6 +162,12 @@
     prfx + Path.basic(s + "~")
   }
 
+  def backup2: Path =
+  {
+    val (prfx, s) = split_path
+    prfx + Path.basic(s + "~~")
+  }
+
   private val Ext = new Regex("(.*)\\.([^.]*)")
 
   def split_ext: (Path, String) =
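
Taken together, Path.backup2 and File.write_backup2 implement the double-tilde backup convention used by the Update_Cartouches tool further down in this changeset: the previous file is renamed to its "~~" sibling before the new text is written. A minimal sketch of the intended effect (the concrete path is invented for illustration):

    // hypothetical example: Path.explode("A.thy").backup2 is the path "A.thy~~"
    val path = Path.explode("src/A.thy")
    File.write_backup2(path, "updated text")
    // the old content now survives as src/A.thy~~
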
--- a/src/Pure/Pure.thy	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/Pure/Pure.thy	Tue Oct 07 21:01:31 2014 +0200
@@ -122,149 +122,149 @@
 ML_file "Tools/named_theorems.ML"
 
 
-section {* Basic attributes *}
+section \<open>Basic attributes\<close>
 
 attribute_setup tagged =
-  "Scan.lift (Args.name -- Args.name) >> Thm.tag"
+  \<open>Scan.lift (Args.name -- Args.name) >> Thm.tag\<close>
   "tagged theorem"
 
 attribute_setup untagged =
-  "Scan.lift Args.name >> Thm.untag"
+  \<open>Scan.lift Args.name >> Thm.untag\<close>
   "untagged theorem"
 
 attribute_setup kind =
-  "Scan.lift Args.name >> Thm.kind"
+  \<open>Scan.lift Args.name >> Thm.kind\<close>
   "theorem kind"
 
 attribute_setup THEN =
-  "Scan.lift (Scan.optional (Args.bracks Parse.nat) 1) -- Attrib.thm
-    >> (fn (i, B) => Thm.rule_attribute (fn _ => fn A => A RSN (i, B)))"
+  \<open>Scan.lift (Scan.optional (Args.bracks Parse.nat) 1) -- Attrib.thm
+    >> (fn (i, B) => Thm.rule_attribute (fn _ => fn A => A RSN (i, B)))\<close>
   "resolution with rule"
 
 attribute_setup OF =
-  "Attrib.thms >> (fn Bs => Thm.rule_attribute (fn _ => fn A => A OF Bs))"
+  \<open>Attrib.thms >> (fn Bs => Thm.rule_attribute (fn _ => fn A => A OF Bs))\<close>
   "rule resolved with facts"
 
 attribute_setup rename_abs =
-  "Scan.lift (Scan.repeat (Args.maybe Args.name)) >> (fn vs =>
-    Thm.rule_attribute (K (Drule.rename_bvars' vs)))"
+  \<open>Scan.lift (Scan.repeat (Args.maybe Args.name)) >> (fn vs =>
+    Thm.rule_attribute (K (Drule.rename_bvars' vs)))\<close>
   "rename bound variables in abstractions"
 
 attribute_setup unfolded =
-  "Attrib.thms >> (fn ths =>
-    Thm.rule_attribute (fn context => Local_Defs.unfold (Context.proof_of context) ths))"
+  \<open>Attrib.thms >> (fn ths =>
+    Thm.rule_attribute (fn context => Local_Defs.unfold (Context.proof_of context) ths))\<close>
   "unfolded definitions"
 
 attribute_setup folded =
-  "Attrib.thms >> (fn ths =>
-    Thm.rule_attribute (fn context => Local_Defs.fold (Context.proof_of context) ths))"
+  \<open>Attrib.thms >> (fn ths =>
+    Thm.rule_attribute (fn context => Local_Defs.fold (Context.proof_of context) ths))\<close>
   "folded definitions"
 
 attribute_setup consumes =
-  "Scan.lift (Scan.optional Parse.int 1) >> Rule_Cases.consumes"
+  \<open>Scan.lift (Scan.optional Parse.int 1) >> Rule_Cases.consumes\<close>
   "number of consumed facts"
 
 attribute_setup constraints =
-  "Scan.lift Parse.nat >> Rule_Cases.constraints"
+  \<open>Scan.lift Parse.nat >> Rule_Cases.constraints\<close>
   "number of equality constraints"
 
-attribute_setup case_names = {*
-  Scan.lift (Scan.repeat1 (Args.name --
+attribute_setup case_names =
+  \<open>Scan.lift (Scan.repeat1 (Args.name --
     Scan.optional (@{keyword "["} |-- Scan.repeat1 (Args.maybe Args.name) --| @{keyword "]"}) []))
-  >> (fn cs =>
+    >> (fn cs =>
       Rule_Cases.cases_hyp_names
         (map #1 cs)
-        (map (map (the_default Rule_Cases.case_hypsN) o #2) cs))
-*} "named rule cases"
+        (map (map (the_default Rule_Cases.case_hypsN) o #2) cs))\<close>
+  "named rule cases"
 
 attribute_setup case_conclusion =
-  "Scan.lift (Args.name -- Scan.repeat Args.name) >> Rule_Cases.case_conclusion"
+  \<open>Scan.lift (Args.name -- Scan.repeat Args.name) >> Rule_Cases.case_conclusion\<close>
   "named conclusion of rule cases"
 
 attribute_setup params =
-  "Scan.lift (Parse.and_list1 (Scan.repeat Args.name)) >> Rule_Cases.params"
+  \<open>Scan.lift (Parse.and_list1 (Scan.repeat Args.name)) >> Rule_Cases.params\<close>
   "named rule parameters"
 
-attribute_setup rule_format = {*
-  Scan.lift (Args.mode "no_asm")
-    >> (fn true => Object_Logic.rule_format_no_asm | false => Object_Logic.rule_format)
-*} "result put into canonical rule format"
+attribute_setup rule_format =
+  \<open>Scan.lift (Args.mode "no_asm")
+    >> (fn true => Object_Logic.rule_format_no_asm | false => Object_Logic.rule_format)\<close>
+  "result put into canonical rule format"
 
 attribute_setup elim_format =
-  "Scan.succeed (Thm.rule_attribute (K Tactic.make_elim))"
+  \<open>Scan.succeed (Thm.rule_attribute (K Tactic.make_elim))\<close>
   "destruct rule turned into elimination rule format"
 
-attribute_setup no_vars = {*
-  Scan.succeed (Thm.rule_attribute (fn context => fn th =>
+attribute_setup no_vars =
+  \<open>Scan.succeed (Thm.rule_attribute (fn context => fn th =>
     let
       val ctxt = Variable.set_body false (Context.proof_of context);
       val ((_, [th']), _) = Variable.import true [th] ctxt;
-    in th' end))
-*} "imported schematic variables"
+    in th' end))\<close>
+  "imported schematic variables"
 
 attribute_setup eta_long =
-  "Scan.succeed (Thm.rule_attribute (fn _ => Conv.fconv_rule Drule.eta_long_conversion))"
+  \<open>Scan.succeed (Thm.rule_attribute (fn _ => Conv.fconv_rule Drule.eta_long_conversion))\<close>
   "put theorem into eta long beta normal form"
 
 attribute_setup atomize =
-  "Scan.succeed Object_Logic.declare_atomize"
+  \<open>Scan.succeed Object_Logic.declare_atomize\<close>
   "declaration of atomize rule"
 
 attribute_setup rulify =
-  "Scan.succeed Object_Logic.declare_rulify"
+  \<open>Scan.succeed Object_Logic.declare_rulify\<close>
   "declaration of rulify rule"
 
 attribute_setup rotated =
-  "Scan.lift (Scan.optional Parse.int 1
-    >> (fn n => Thm.rule_attribute (fn _ => rotate_prems n)))"
+  \<open>Scan.lift (Scan.optional Parse.int 1
+    >> (fn n => Thm.rule_attribute (fn _ => rotate_prems n)))\<close>
   "rotated theorem premises"
 
 attribute_setup defn =
-  "Attrib.add_del Local_Defs.defn_add Local_Defs.defn_del"
+  \<open>Attrib.add_del Local_Defs.defn_add Local_Defs.defn_del\<close>
   "declaration of definitional transformations"
 
 attribute_setup abs_def =
-  "Scan.succeed (Thm.rule_attribute (fn context =>
-    Local_Defs.meta_rewrite_rule (Context.proof_of context) #> Drule.abs_def))"
+  \<open>Scan.succeed (Thm.rule_attribute (fn context =>
+    Local_Defs.meta_rewrite_rule (Context.proof_of context) #> Drule.abs_def))\<close>
   "abstract over free variables of definitional theorem"
 
 
-section {* Further content for the Pure theory *}
+section \<open>Further content for the Pure theory\<close>
 
-subsection {* Meta-level connectives in assumptions *}
+subsection \<open>Meta-level connectives in assumptions\<close>
 
 lemma meta_mp:
-  assumes "PROP P ==> PROP Q" and "PROP P"
+  assumes "PROP P \<Longrightarrow> PROP Q" and "PROP P"
   shows "PROP Q"
-    by (rule `PROP P ==> PROP Q` [OF `PROP P`])
+    by (rule \<open>PROP P \<Longrightarrow> PROP Q\<close> [OF \<open>PROP P\<close>])
 
 lemmas meta_impE = meta_mp [elim_format]
 
 lemma meta_spec:
-  assumes "!!x. PROP P x"
+  assumes "\<And>x. PROP P x"
   shows "PROP P x"
-    by (rule `!!x. PROP P x`)
+    by (rule \<open>\<And>x. PROP P x\<close>)
 
 lemmas meta_allE = meta_spec [elim_format]
 
 lemma swap_params:
-  "(!!x y. PROP P x y) == (!!y x. PROP P x y)" ..
+  "(\<And>x y. PROP P x y) \<equiv> (\<And>y x. PROP P x y)" ..
 
 
-subsection {* Meta-level conjunction *}
+subsection \<open>Meta-level conjunction\<close>
 
 lemma all_conjunction:
-  "(!!x. PROP A x &&& PROP B x) == ((!!x. PROP A x) &&& (!!x. PROP B x))"
+  "(\<And>x. PROP A x &&& PROP B x) \<equiv> ((\<And>x. PROP A x) &&& (\<And>x. PROP B x))"
 proof
-  assume conj: "!!x. PROP A x &&& PROP B x"
-  show "(!!x. PROP A x) &&& (!!x. PROP B x)"
+  assume conj: "\<And>x. PROP A x &&& PROP B x"
+  show "(\<And>x. PROP A x) &&& (\<And>x. PROP B x)"
   proof -
     fix x
     from conj show "PROP A x" by (rule conjunctionD1)
     from conj show "PROP B x" by (rule conjunctionD2)
   qed
 next
-  assume conj: "(!!x. PROP A x) &&& (!!x. PROP B x)"
+  assume conj: "(\<And>x. PROP A x) &&& (\<And>x. PROP B x)"
   fix x
   show "PROP A x &&& PROP B x"
   proof -
@@ -274,36 +274,36 @@
 qed
 
 lemma imp_conjunction:
-  "(PROP A ==> PROP B &&& PROP C) == ((PROP A ==> PROP B) &&& (PROP A ==> PROP C))"
+  "(PROP A \<Longrightarrow> PROP B &&& PROP C) \<equiv> ((PROP A \<Longrightarrow> PROP B) &&& (PROP A \<Longrightarrow> PROP C))"
 proof
-  assume conj: "PROP A ==> PROP B &&& PROP C"
-  show "(PROP A ==> PROP B) &&& (PROP A ==> PROP C)"
+  assume conj: "PROP A \<Longrightarrow> PROP B &&& PROP C"
+  show "(PROP A \<Longrightarrow> PROP B) &&& (PROP A \<Longrightarrow> PROP C)"
   proof -
     assume "PROP A"
-    from conj [OF `PROP A`] show "PROP B" by (rule conjunctionD1)
-    from conj [OF `PROP A`] show "PROP C" by (rule conjunctionD2)
+    from conj [OF \<open>PROP A\<close>] show "PROP B" by (rule conjunctionD1)
+    from conj [OF \<open>PROP A\<close>] show "PROP C" by (rule conjunctionD2)
   qed
 next
-  assume conj: "(PROP A ==> PROP B) &&& (PROP A ==> PROP C)"
+  assume conj: "(PROP A \<Longrightarrow> PROP B) &&& (PROP A \<Longrightarrow> PROP C)"
   assume "PROP A"
   show "PROP B &&& PROP C"
   proof -
-    from `PROP A` show "PROP B" by (rule conj [THEN conjunctionD1])
-    from `PROP A` show "PROP C" by (rule conj [THEN conjunctionD2])
+    from \<open>PROP A\<close> show "PROP B" by (rule conj [THEN conjunctionD1])
+    from \<open>PROP A\<close> show "PROP C" by (rule conj [THEN conjunctionD2])
   qed
 qed
 
 lemma conjunction_imp:
-  "(PROP A &&& PROP B ==> PROP C) == (PROP A ==> PROP B ==> PROP C)"
+  "(PROP A &&& PROP B \<Longrightarrow> PROP C) \<equiv> (PROP A \<Longrightarrow> PROP B \<Longrightarrow> PROP C)"
 proof
-  assume r: "PROP A &&& PROP B ==> PROP C"
+  assume r: "PROP A &&& PROP B \<Longrightarrow> PROP C"
   assume ab: "PROP A" "PROP B"
   show "PROP C"
   proof (rule r)
     from ab show "PROP A &&& PROP B" .
   qed
 next
-  assume r: "PROP A ==> PROP B ==> PROP C"
+  assume r: "PROP A \<Longrightarrow> PROP B \<Longrightarrow> PROP C"
   assume conj: "PROP A &&& PROP B"
   show "PROP C"
   proof (rule r)
--- a/src/Pure/Tools/bibtex.scala	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/Pure/Tools/bibtex.scala	Tue Oct 07 21:01:31 2014 +0200
@@ -215,7 +215,7 @@
     /* ignored text */
 
     private val ignored: Parser[Chunk] =
-      rep1("""(?mi)([^@]+|@[ \t]*comment)""".r) ^^ {
+      rep1("""(?i)([^@]+|@[ \t]*comment)""".r) ^^ {
         case ss => Chunk("", List(Token(Token.Kind.COMMENT, ss.mkString))) }
 
     private def ignored_line: Parser[(Chunk, Line_Context)] =
@@ -267,7 +267,7 @@
         { case (s, delim) if delim == Closed => Token(Token.Kind.STRING, s) }
 
     private def recover_delimited: Parser[Token] =
-      """(?m)["{][^@]*""".r ^^ token(Token.Kind.ERROR)
+      """["{][^@]*""".r ^^ token(Token.Kind.ERROR)
 
     def delimited_line(ctxt: Item): Parser[(Chunk, Line_Context)] =
       delimited_depth(ctxt.delim) ^^ { case (s, delim1) =>
@@ -338,7 +338,7 @@
               case d ~ e ~ f => Chunk(kind, a ::: List(b) ::: c ::: d.toList ::: e ::: f.toList) } }
 
     private val recover_item: Parser[Chunk] =
-      at ~ "(?m)[^@]*".r ^^ { case a ~ b => Chunk("", List(a, Token(Token.Kind.ERROR, b))) }
+      at ~ "[^@]*".r ^^ { case a ~ b => Chunk("", List(a, Token(Token.Kind.ERROR, b))) }
 
 
     /* chunks */
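
(Editorial note, not part of the changeset.) The three "(?m)" removals in
this file are safe: in Java/Scala regular expressions the MULTILINE flag
only changes the meaning of the "^" and "$" anchors, and none of these
patterns contain anchors, so dropping it cannot change what they match;
"(?i)" is kept where "@comment" must be matched case-insensitively. A
minimal self-check, sketched with a hypothetical object name:

  // [^@] already matches newline characters and no ^/$ anchors occur,
  // so the (?m)-flagged and unflagged patterns accept the same input.
  object Multiline_Flag_Check
  {
    def main(args: Array[String])
    {
      val with_m = """(?mi)([^@]+|@[ \t]*comment)""".r
      val without_m = """(?i)([^@]+|@[ \t]*comment)""".r
      val input = "first line\nsecond line"
      assert(
        with_m.pattern.matcher(input).matches ==
        without_m.pattern.matcher(input).matches)
    }
  }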
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Pure/Tools/update_cartouches.scala	Tue Oct 07 21:01:31 2014 +0200
@@ -0,0 +1,57 @@
+/*  Title:      Pure/Tools/update_cartouches.scala
+    Author:     Makarius
+
+Update theory syntax to use cartouches.
+*/
+
+package isabelle
+
+
+object Update_Cartouches
+{
+  /* update cartouches */
+
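+  // wrap a string in cartouche delimiters: Symbol.open/close, i.e. \<open> and \<close>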
+  private def cartouche(s: String): String =
+    Symbol.open + s + Symbol.close
+
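+  /* body of an old-style {* verbatim *} token, delimiters already stripped:
+     (?s) lets "." span lines, while [ \t]* trims spaces/tabs at both ends */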
+  private val Verbatim_Body = """(?s)[ \t]*(.*?)[ \t]*""".r
+
+  def update_cartouches(path: Path)
+  {
+    val text0 = File.read(path)
+    val text1 =
+      (for (tok <- Outer_Syntax.empty.scan(text0).iterator)
+        yield {
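+          // back-quoted `alt_string` tokens become cartouches verbatim;
+          // {* verbatim *} bodies are trimmed of spaces/tabs first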
+          if (tok.kind == Token.Kind.ALT_STRING) cartouche(tok.content)
+          else if (tok.kind == Token.Kind.VERBATIM) {
+            tok.content match {
+              case Verbatim_Body(s) => cartouche(s)
+              case _ => tok.source
+            }
+          }
+          else tok.source
+        }
+      ).mkString
+
+    if (text0 != text1) {
+      Output.writeln("changing " + path)
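+      // write_backup2 keeps the previous version of the file as path~~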
+      File.write_backup2(path, text1)
+    }
+  }
+
+
+  /* command line entry point */
+
+  def main(args: Array[String])
+  {
+    Command_Line.tool0 {
+      args.foreach(arg => update_cartouches(Path.explode(arg)))
+    }
+  }
+}
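
(Editorial illustration, not part of the changeset.) A before/after sketch
of the rewriting performed above; Symbol.open/close print as \<open> and
\<close> in the encoded theory files:

  text {*  some text  *}    becomes    text \<open>some text\<close>
  ML {* 1 + 1 *}            becomes    ML \<open>1 + 1\<close>
  `x == y`                  becomes    \<open>x == y\<close>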
--- a/src/Pure/build-jars	Tue Oct 07 14:02:24 2014 +0200
+++ b/src/Pure/build-jars	Tue Oct 07 21:01:31 2014 +0200
@@ -97,6 +97,7 @@
   Tools/print_operation.scala
   Tools/simplifier_trace.scala
   Tools/task_statistics.scala
+  Tools/update_cartouches.scala
   library.scala
   term.scala
   term_xml.scala