src/HOL/Probability/Information.thy
author hoelzl
Tue May 04 18:19:24 2010 +0200 (2010-05-04)
changeset 36649 bfd8c550faa6
parent 36624 25153c08655e
child 38656 d5d342611edb
permissions -rw-r--r--
Corrected imports; better approximation of dependencies.
     1 theory Information
     2 imports Probability_Space Product_Measure Convex
     3 begin
     4 
     5 section "Convex theory"
     6 
     7 lemma log_setsum:
     8   assumes "finite s" "s \<noteq> {}"
     9   assumes "b > 1"
    10   assumes "(\<Sum> i \<in> s. a i) = 1"
    11   assumes "\<And> i. i \<in> s \<Longrightarrow> a i \<ge> 0"
    12   assumes "\<And> i. i \<in> s \<Longrightarrow> y i \<in> {0 <..}"
    13   shows "log b (\<Sum> i \<in> s. a i * y i) \<ge> (\<Sum> i \<in> s. a i * log b (y i))"
    14 proof -
    15   have "convex_on {0 <..} (\<lambda> x. - log b x)"
    16     by (rule minus_log_convex[OF `b > 1`])
    17   hence "- log b (\<Sum> i \<in> s. a i * y i) \<le> (\<Sum> i \<in> s. a i * - log b (y i))"
    18     using convex_on_setsum[of _ _ "\<lambda> x. - log b x"] assms pos_is_convex by fastsimp
    19   thus ?thesis by (auto simp add:setsum_negf le_imp_neg_le)
    20 qed
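       (* Informal reading: this is Jensen's inequality for the concave function log b on
          {0<..}.  A two-point instance, assuming 1 < b, 0 \<le> t \<le> 1 and 0 < y1, 0 < y2:
            log b (t * y1 + (1 - t) * y2) \<ge> t * log b y1 + (1 - t) * log b y2 *)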
    21 
    22 lemma log_setsum':
    23   assumes "finite s" "s \<noteq> {}"
    24   assumes "b > 1"
    25   assumes "(\<Sum> i \<in> s. a i) = 1"
    26   assumes pos: "\<And> i. i \<in> s \<Longrightarrow> 0 \<le> a i"
    27           "\<And> i. \<lbrakk> i \<in> s ; 0 < a i \<rbrakk> \<Longrightarrow> 0 < y i"
    28   shows "log b (\<Sum> i \<in> s. a i * y i) \<ge> (\<Sum> i \<in> s. a i * log b (y i))"
    29 proof -
    30   have "\<And>y. (\<Sum> i \<in> s - {i. a i = 0}. a i * y i) = (\<Sum> i \<in> s. a i * y i)"
    31     using assms by (auto intro!: setsum_mono_zero_cong_left)
    32   moreover have "log b (\<Sum> i \<in> s - {i. a i = 0}. a i * y i) \<ge> (\<Sum> i \<in> s - {i. a i = 0}. a i * log b (y i))"
    33   proof (rule log_setsum)
    34     have "setsum a (s - {i. a i = 0}) = setsum a s"
    35       using assms(1) by (rule setsum_mono_zero_cong_left) auto
    36     thus sum_1: "setsum a (s - {i. a i = 0}) = 1"
    37       "finite (s - {i. a i = 0})" using assms by simp_all
    38 
    39     show "s - {i. a i = 0} \<noteq> {}"
    40     proof
    41       assume *: "s - {i. a i = 0} = {}"
    42       hence "setsum a (s - {i. a i = 0}) = 0" by (simp add: * setsum_empty)
    43       with sum_1 show False by simp
    44     qed
    45 
    46     fix i assume "i \<in> s - {i. a i = 0}"
    47     hence "i \<in> s" "a i \<noteq> 0" by simp_all
    48     thus "0 \<le> a i" "y i \<in> {0<..}" using pos[of i] by auto
    49   qed fact+
    50   ultimately show ?thesis by simp
    51 qed
    52 
    53 section "Information theory"
    54 
    55 lemma (in finite_prob_space) sum_over_space_distrib:
    56   "(\<Sum>x\<in>X`space M. distribution X {x}) = 1"
    57   unfolding distribution_def prob_space[symmetric] using finite_space
    58   by (subst measure_finitely_additive'')
    59      (auto simp add: disjoint_family_on_def sets_eq_Pow intro!: arg_cong[where f=prob])
    60 
    61 locale finite_information_space = finite_prob_space +
    62   fixes b :: real assumes b_gt_1: "1 < b"
    63 
    64 definition
    65   "KL_divergence b M X Y =
    66     measure_space.integral (M\<lparr>measure := X\<rparr>)
    67                            (\<lambda>x. log b ((measure_space.RN_deriv (M \<lparr>measure := Y\<rparr> ) X) x))"
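       (* Orientation only: on a finite measure space, writing u and v for the two measures,
          the definition above unfolds to the familiar sum
            KL_divergence b M u v = (\<Sum>x\<in>space M. u {x} * log b (u {x} / v {x}))
          provided v {x} = 0 implies u {x} = 0 on space M; see KL_divergence_eq_finite below. *)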
    68 
    69 lemma (in finite_prob_space) distribution_mono:
    70   assumes "\<And>t. \<lbrakk> t \<in> space M ; X t \<in> x \<rbrakk> \<Longrightarrow> Y t \<in> y"
    71   shows "distribution X x \<le> distribution Y y"
    72   unfolding distribution_def
    73   using assms by (auto simp: sets_eq_Pow intro!: measure_mono)
    74 
    75 lemma (in prob_space) distribution_remove_const:
    76   shows "joint_distribution X (\<lambda>x. ()) {(x, ())} = distribution X {x}"
    77   and "joint_distribution (\<lambda>x. ()) X {((), x)} = distribution X {x}"
    78   and "joint_distribution X (\<lambda>x. (Y x, ())) {(x, y, ())} = joint_distribution X Y {(x, y)}"
    79   and "joint_distribution X (\<lambda>x. ((), Y x)) {(x, (), y)} = joint_distribution X Y {(x, y)}"
    80   and "distribution (\<lambda>x. ()) {()} = 1"
    81   unfolding prob_space[symmetric]
    82   by (auto intro!: arg_cong[where f=prob] simp: distribution_def)
    83 
    84 
    85 context finite_information_space
    86 begin
    87 
    88 lemma distribution_mono_gt_0:
    89   assumes gt_0: "0 < distribution X x"
    90   assumes *: "\<And>t. \<lbrakk> t \<in> space M ; X t \<in> x \<rbrakk> \<Longrightarrow> Y t \<in> y"
    91   shows "0 < distribution Y y"
    92   by (rule less_le_trans[OF gt_0 distribution_mono]) (rule *)
    93 
    94 lemma
    95   assumes "0 \<le> A" and pos: "0 < A \<Longrightarrow> 0 < B" "0 < A \<Longrightarrow> 0 < C"
    96   shows mult_log_mult: "A * log b (B * C) = A * log b B + A * log b C" (is "?mult")
    97   and mult_log_divide: "A * log b (B / C) = A * log b B - A * log b C" (is "?div")
    98 proof -
    99   have "?mult \<and> ?div"
   100   proof (cases "A = 0")
   101     case False
   102     hence "0 < A" using `0 \<le> A` by auto
   103     with pos[OF this] show "?mult \<and> ?div" using b_gt_1
   104       by (auto simp: log_divide log_mult field_simps)
   105   qed simp
   106   thus ?mult and ?div by auto
   107 qed
   108 
   109 lemma split_pairs:
   110   shows
   111     "((A, B) = X) \<longleftrightarrow> (fst X = A \<and> snd X = B)" and
   112     "(X = (A, B)) \<longleftrightarrow> (fst X = A \<and> snd X = B)" by auto
   113 
   114 ML {*
   115 
   116   (* Tactic and simproc to prove equations of the form @{term "W * log b (X / (Y * Z)) = W * log b X - W * log b (Y * Z)"},
   117      where @{term W} is a (joint) distribution whose positivity yields that of @{term X}, @{term Y} and @{term Z}. *)
   118 
   119   val mult_log_intros = [@{thm mult_log_divide}, @{thm mult_log_mult}]
   120   val intros = [@{thm divide_pos_pos}, @{thm mult_pos_pos}, @{thm positive_distribution}]
   121 
   122   val distribution_gt_0_tac = (rtac @{thm distribution_mono_gt_0}
   123     THEN' assume_tac
   124     THEN' clarsimp_tac (clasimpset_of @{context} addsimps2 @{thms split_pairs}))
   125 
   126   val distr_mult_log_eq_tac = REPEAT_ALL_NEW (CHANGED o TRY o
   127     (resolve_tac (mult_log_intros @ intros)
   128       ORELSE' distribution_gt_0_tac
   129       ORELSE' clarsimp_tac (clasimpset_of @{context})))
   130 
   131   fun instantiate_term thy redex intro =
   132     let
   133       val intro_concl = Thm.concl_of intro
   134 
   135       val lhs = intro_concl |> HOLogic.dest_Trueprop |> HOLogic.dest_eq |> fst
   136 
   137       val m = SOME (Pattern.match thy (lhs, redex) (Vartab.empty, Vartab.empty))
   138         handle Pattern.MATCH => NONE
   139 
   140     in
   141       Option.map (fn m => Envir.subst_term m intro_concl) m
   142     end
   143 
   144   fun mult_log_simproc simpset redex =
   145   let
   146     val ctxt = Simplifier.the_context simpset
   147     val thy = ProofContext.theory_of ctxt
   148     fun prove (SOME thm) = (SOME
   149           (Goal.prove ctxt [] [] thm (K (distr_mult_log_eq_tac 1))
   150            |> mk_meta_eq)
   151             handle THM _ => NONE)
   152       | prove NONE = NONE
   153   in
   154     get_first (instantiate_term thy (term_of redex) #> prove) mult_log_intros
   155   end
   156 *}
   157 
   158 simproc_setup mult_log ("distribution X x * log b (A * B)" |
   159                         "distribution X x * log b (A / B)") = {* K mult_log_simproc *}
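       (* A sketch of the intended effect of the simproc (informal): a redex such as
            distribution X x * log b (A * B)
          is rewritten to
            distribution X x * log b A + distribution X x * log b B
          via mult_log_mult (and analogously with mult_log_divide for division), where the
          positivity of A and B under 0 < distribution X x is discharged automatically,
          e.g. through distribution_mono_gt_0 and positive_distribution. *)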
   160 
   161 end
   162 
   163 lemma KL_divergence_eq_finite:
   164   assumes u: "finite_measure_space (M\<lparr>measure := u\<rparr>)"
   165   assumes v: "finite_measure_space (M\<lparr>measure := v\<rparr>)"
   166   assumes u_0: "\<And>x. \<lbrakk> x \<in> space M ; v {x} = 0 \<rbrakk> \<Longrightarrow> u {x} = 0"
   167   shows "KL_divergence b M u v = (\<Sum>x\<in>space M. u {x} * log b (u {x} / v {x}))" (is "_ = ?sum")
   168 proof (simp add: KL_divergence_def, subst finite_measure_space.integral_finite_singleton, simp_all add: u)
   169   have ms_u: "measure_space (M\<lparr>measure := u\<rparr>)"
   170     using u unfolding finite_measure_space_def by simp
   171 
   172   show "(\<Sum>x \<in> space M. log b (measure_space.RN_deriv (M\<lparr>measure := v\<rparr>) u x) * u {x}) = ?sum"
   173     apply (rule setsum_cong[OF refl])
   174     apply simp
   175     apply (safe intro!: arg_cong[where f="log b"] )
   176     apply (subst finite_measure_space.RN_deriv_finite_singleton)
   177     using assms ms_u by auto
   178 qed
   179 
   180 lemma log_setsum_divide:
   181   assumes "finite S" and "S \<noteq> {}" and "1 < b"
   182   assumes "(\<Sum>x\<in>S. g x) = 1"
   183   assumes pos: "\<And>x. x \<in> S \<Longrightarrow> g x \<ge> 0" "\<And>x. x \<in> S \<Longrightarrow> f x \<ge> 0"
   184   assumes g_pos: "\<And>x. \<lbrakk> x \<in> S ; 0 < g x \<rbrakk> \<Longrightarrow> 0 < f x"
   185   shows "- (\<Sum>x\<in>S. g x * log b (g x / f x)) \<le> log b (\<Sum>x\<in>S. f x)"
   186 proof -
   187   have log_mono: "\<And>x y. 0 < x \<Longrightarrow> x \<le> y \<Longrightarrow> log b x \<le> log b y"
   188     using `1 < b` by (subst log_le_cancel_iff) auto
   189 
   190   have "- (\<Sum>x\<in>S. g x * log b (g x / f x)) = (\<Sum>x\<in>S. g x * log b (f x / g x))"
   191   proof (unfold setsum_negf[symmetric], rule setsum_cong)
   192     fix x assume x: "x \<in> S"
   193     show "- (g x * log b (g x / f x)) = g x * log b (f x / g x)"
   194     proof (cases "g x = 0")
   195       case False
   196       with pos[OF x] g_pos[OF x] have "0 < f x" "0 < g x" by simp_all
   197       thus ?thesis using `1 < b` by (simp add: log_divide field_simps)
   198     qed simp
   199   qed rule
   200   also have "... \<le> log b (\<Sum>x\<in>S. g x * (f x / g x))"
   201   proof (rule log_setsum')
   202     fix x assume x: "x \<in> S" "0 < g x"
   203     with g_pos[OF x] show "0 < f x / g x" by (safe intro!: divide_pos_pos)
   204   qed fact+
   205   also have "... = log b (\<Sum>x\<in>S - {x. g x = 0}. f x)" using `finite S`
   206     by (auto intro!: setsum_mono_zero_cong_right arg_cong[where f="log b"]
   207         split: split_if_asm)
   208   also have "... \<le> log b (\<Sum>x\<in>S. f x)"
   209   proof (rule log_mono)
   210     have "0 = (\<Sum>x\<in>S - {x. g x = 0}. 0)" by simp
   211     also have "... < (\<Sum>x\<in>S - {x. g x = 0}. f x)" (is "_ < ?sum")
   212     proof (rule setsum_strict_mono)
   213       show "finite (S - {x. g x = 0})" using `finite S` by simp
   214       show "S - {x. g x = 0} \<noteq> {}"
   215       proof
   216         assume "S - {x. g x = 0} = {}"
   217         hence "(\<Sum>x\<in>S. g x) = 0" by (subst setsum_0') auto
   218         with `(\<Sum>x\<in>S. g x) = 1` show False by simp
   219       qed
   220       fix x assume "x \<in> S - {x. g x = 0}"
   221       thus "0 < f x" using g_pos[of x] pos(1)[of x] by auto
   222     qed
   223     finally show "0 < ?sum" .
   224     show "(\<Sum>x\<in>S - {x. g x = 0}. f x) \<le> (\<Sum>x\<in>S. f x)"
   225       using `finite S` pos by (auto intro!: setsum_mono2)
   226   qed
   227   finally show ?thesis .
   228 qed
   229 
   230 lemma KL_divergence_positive_finite:
   231   assumes u: "finite_prob_space (M\<lparr>measure := u\<rparr>)"
   232   assumes v: "finite_prob_space (M\<lparr>measure := v\<rparr>)"
   233   assumes u_0: "\<And>x. \<lbrakk> x \<in> space M ; v {x} = 0 \<rbrakk> \<Longrightarrow> u {x} = 0"
   234   and "1 < b"
   235   shows "0 \<le> KL_divergence b M u v"
   236 proof -
   237   interpret u: finite_prob_space "M\<lparr>measure := u\<rparr>" using u .
   238   interpret v: finite_prob_space "M\<lparr>measure := v\<rparr>" using v .
   239 
   240   have *: "space M \<noteq> {}" using u.not_empty by simp
   241 
   242   have "- (KL_divergence b M u v) \<le> log b (\<Sum>x\<in>space M. v {x})"
   243   proof (subst KL_divergence_eq_finite, safe intro!: log_setsum_divide *)
   244     show "finite_measure_space (M\<lparr>measure := u\<rparr>)"
   245       "finite_measure_space (M\<lparr>measure := v\<rparr>)"
   246        using u v unfolding finite_prob_space_eq by simp_all
   247 
   248      show "finite (space M)" using u.finite_space by simp
   249      show "1 < b" by fact
   250      show "(\<Sum>x\<in>space M. u {x}) = 1" using u.sum_over_space_eq_1 by simp
   251 
   252      fix x assume x: "x \<in> space M"
   253      thus pos: "0 \<le> u {x}" "0 \<le> v {x}"
   254        using u.positive u.sets_eq_Pow v.positive v.sets_eq_Pow by simp_all
   255 
   256      { assume "v {x} = 0" from u_0[OF x this] show "u {x} = 0" . }
   257      { assume "0 < u {x}"
   258        hence "v {x} \<noteq> 0" using u_0[OF x] by auto
   259        with pos show "0 < v {x}" by simp }
   260   qed
   261   thus "0 \<le> KL_divergence b M u v" using v.sum_over_space_eq_1 by simp
   262 qed
   263 
   264 definition (in prob_space)
   265   "mutual_information b s1 s2 X Y \<equiv>
   266     let prod_space =
   267       prod_measure_space (\<lparr>space = space s1, sets = sets s1, measure = distribution X\<rparr>)
   268                          (\<lparr>space = space s2, sets = sets s2, measure = distribution Y\<rparr>)
   269     in
   270       KL_divergence b prod_space (joint_distribution X Y) (measure prod_space)"
   271 
   272 abbreviation (in finite_information_space)
   273   finite_mutual_information ("\<I>'(_ ; _')") where
   274   "\<I>(X ; Y) \<equiv> mutual_information b
   275     \<lparr> space = X`space M, sets = Pow (X`space M) \<rparr>
   276     \<lparr> space = Y`space M, sets = Pow (Y`space M) \<rparr> X Y"
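       (* Orientation only: \<I>(X ; Y) is the KL divergence between the joint distribution of
          X and Y and the product measure of their marginal distributions.  On the finite
          spaces used here it unfolds to
            \<I>(X ; Y) = (\<Sum>(x,y) \<in> X`space M \<times> Y`space M. joint_distribution X Y {(x,y)} *
              log b (joint_distribution X Y {(x,y)} / (distribution X {x} * distribution Y {y})))
          see mutual_information_eq below. *)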
   277 
   278 lemma (in finite_measure_space) measure_spaceI: "measure_space M"
   279   by unfold_locales
   280 
   281 lemma prod_measure_times_finite:
   282   assumes fms: "finite_measure_space M" "finite_measure_space M'" and a: "a \<in> space M \<times> space M'"
   283   shows "prod_measure M M' {a} = measure M {fst a} * measure M' {snd a}"
   284 proof (cases a)
   285   case (Pair b c)
   286   hence a_eq: "{a} = {b} \<times> {c}" by simp
   287 
   288   with fms[THEN finite_measure_space.measure_spaceI]
   289     fms[THEN finite_measure_space.sets_eq_Pow] a Pair
   290   show ?thesis unfolding a_eq
   291     by (subst prod_measure_times) simp_all
   292 qed
   293 
   294 lemma setsum_cartesian_product':
   295   "(\<Sum>x\<in>A \<times> B. f x) = (\<Sum>x\<in>A. setsum (\<lambda>y. f (x, y)) B)"
   296   unfolding setsum_cartesian_product by simp
   297 
   298 lemma (in finite_information_space)
   299   assumes MX: "finite_prob_space \<lparr> space = space MX, sets = sets MX, measure = distribution X\<rparr>"
   300     (is "finite_prob_space ?MX")
   301   assumes MY: "finite_prob_space \<lparr> space = space MY, sets = sets MY, measure = distribution Y\<rparr>"
   302     (is "finite_prob_space ?MY")
   303   and X_space: "X ` space M \<subseteq> space MX" and Y_space: "Y ` space M \<subseteq> space MY"
   304   shows mutual_information_eq_generic:
   305     "mutual_information b MX MY X Y = (\<Sum> (x,y) \<in> space MX \<times> space MY.
   306       joint_distribution X Y {(x,y)} *
   307       log b (joint_distribution X Y {(x,y)} /
   308       (distribution X {x} * distribution Y {y})))"
   309     (is "?equality")
   310   and mutual_information_positive_generic:
   311     "0 \<le> mutual_information b MX MY X Y" (is "?positive")
   312 proof -
   313   let ?P = "prod_measure_space ?MX ?MY"
   314   let ?measure = "joint_distribution X Y"
   315   let ?P' = "measure_update (\<lambda>_. ?measure) ?P"
   316 
   317   interpret X: finite_prob_space "?MX" using MX .
   318   moreover interpret Y: finite_prob_space "?MY" using MY .
   319   ultimately have ms_X: "measure_space ?MX"
   320     and ms_Y: "measure_space ?MY" by unfold_locales
   321 
   322   have fms_P: "finite_measure_space ?P"
   323       by (rule finite_measure_space_finite_prod_measure) fact+
   324 
   325   have fms_P': "finite_measure_space ?P'"
   326       using finite_product_measure_space[of "space MX" "space MY"]
   327         X.finite_space Y.finite_space sigma_prod_sets_finite[OF X.finite_space Y.finite_space]
   328         X.sets_eq_Pow Y.sets_eq_Pow
   329       by (simp add: prod_measure_space_def)
   330 
   331   { fix x assume "x \<in> space ?P"
   332     hence x_in_MX: "{fst x} \<in> sets MX" using X.sets_eq_Pow
   333       by (auto simp: prod_measure_space_def)
   334 
   335     assume "measure ?P {x} = 0"
   336     with prod_measure_times[OF ms_X ms_Y, of "{fst x}" "{snd x}"] x_in_MX
   337     have "distribution X {fst x} = 0 \<or> distribution Y {snd x} = 0"
   338       by (simp add: prod_measure_space_def)
   339 
   340     hence "joint_distribution X Y {x} = 0"
   341       by (cases x) (auto simp: distribution_order) }
   342   note measure_0 = this
   343 
   344   show ?equality
   345     unfolding Let_def mutual_information_def using fms_P fms_P' measure_0 MX MY
   346     by (subst KL_divergence_eq_finite)
   347        (simp_all add: prod_measure_space_def prod_measure_times_finite
   348          finite_prob_space_eq setsum_cartesian_product')
   349 
   350   show ?positive
   351     unfolding Let_def mutual_information_def using measure_0 b_gt_1
   352   proof (safe intro!: KL_divergence_positive_finite, simp_all)
   353     from ms_X ms_Y X.top Y.top X.prob_space Y.prob_space
   354     have "measure ?P (space ?P) = 1"
   355       by (simp add: prod_measure_space_def, subst prod_measure_times, simp_all)
   356     with fms_P show "finite_prob_space ?P"
   357       by (simp add: finite_prob_space_eq)
   358 
   359     from ms_X ms_Y X.top Y.top X.prob_space Y.prob_space Y.not_empty X_space Y_space
   360     have "measure ?P' (space ?P') = 1" unfolding prob_space[symmetric]
   361       by (auto simp add: prod_measure_space_def distribution_def vimage_Times comp_def
   362         intro!: arg_cong[where f=prob])
   363     with fms_P' show "finite_prob_space ?P'"
   364       by (simp add: finite_prob_space_eq)
   365   qed
   366 qed
   367 
   368 lemma (in finite_information_space) mutual_information_eq:
   369   "\<I>(X;Y) = (\<Sum> (x,y) \<in> X ` space M \<times> Y ` space M.
   370     distribution (\<lambda>x. (X x, Y x)) {(x,y)} * log b (distribution (\<lambda>x. (X x, Y x)) {(x,y)} /
   371                                                    (distribution X {x} * distribution Y {y})))"
   372   by (subst mutual_information_eq_generic) (simp_all add: finite_prob_space_of_images)
   373 
   374 lemma (in finite_information_space) mutual_information_positive: "0 \<le> \<I>(X;Y)"
   375   by (subst mutual_information_positive_generic) (simp_all add: finite_prob_space_of_images)
   376 
   377 definition (in prob_space)
   378   "entropy b s X = mutual_information b s s X X"
   379 
   380 abbreviation (in finite_information_space)
   381   finite_entropy ("\<H>'(_')") where
   382   "\<H>(X) \<equiv> entropy b \<lparr> space = X`space M, sets = Pow (X`space M) \<rparr> X"
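       (* Orientation only: \<H>(X) = \<I>(X ; X); since the joint distribution of (X, X) is
          concentrated on the diagonal (joint_distribution_remove below), this collapses to
            \<H>(X) = - (\<Sum>x\<in>X`space M. distribution X {x} * log b (distribution X {x}))
          which is lemma entropy_eq. *)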
   383 
   384 lemma (in finite_information_space) joint_distribution_remove[simp]:
   385     "joint_distribution X X {(x, x)} = distribution X {x}"
   386   unfolding distribution_def by (auto intro!: arg_cong[where f=prob])
   387 
   388 lemma (in finite_information_space) entropy_eq:
   389   "\<H>(X) = -(\<Sum> x \<in> X ` space M. distribution X {x} * log b (distribution X {x}))"
   390 proof -
   391   { fix f
   392   { fix x y
   393     have "(\<lambda>x. (X x, X x)) -` {(x, y)} = (if x = y then X -` {x} else {})" by auto
   394       hence "distribution (\<lambda>x. (X x, X x))  {(x,y)} * f x y = (if x = y then distribution X {x} * f x y else 0)"
   395       unfolding distribution_def by auto }
   396     hence "(\<Sum>(x, y) \<in> X ` space M \<times> X ` space M. joint_distribution X X {(x, y)} * f x y) =
   397       (\<Sum>x \<in> X ` space M. distribution X {x} * f x x)"
   398       unfolding setsum_cartesian_product' by (simp add: setsum_cases finite_space) }
   399   note remove_cartesian_product = this
   400 
   401   show ?thesis
   402     unfolding entropy_def mutual_information_eq setsum_negf[symmetric] remove_cartesian_product
   403     by (auto intro!: setsum_cong)
   404 qed
   405 
   406 lemma (in finite_information_space) entropy_positive: "0 \<le> \<H>(X)"
   407   unfolding entropy_def using mutual_information_positive .
   408 
   409 definition (in prob_space)
   410   "conditional_mutual_information b s1 s2 s3 X Y Z \<equiv>
   411     let prod_space =
   412       prod_measure_space \<lparr>space = space s2, sets = sets s2, measure = distribution Y\<rparr>
   413                          \<lparr>space = space s3, sets = sets s3, measure = distribution Z\<rparr>
   414     in
   415       mutual_information b s1 prod_space X (\<lambda>x. (Y x, Z x)) -
   416       mutual_information b s1 s3 X Z"
   417 
   418 abbreviation (in finite_information_space)
   419   finite_conditional_mutual_information ("\<I>'( _ ; _ | _ ')") where
   420   "\<I>(X ; Y | Z) \<equiv> conditional_mutual_information b
   421     \<lparr> space = X`space M, sets = Pow (X`space M) \<rparr>
   422     \<lparr> space = Y`space M, sets = Pow (Y`space M) \<rparr>
   423     \<lparr> space = Z`space M, sets = Pow (Z`space M) \<rparr>
   424     X Y Z"
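       (* Orientation only: conditional mutual information is defined via the chain rule
          \<I>(X ; Y | Z) = \<I>(X ; (Y, Z)) - \<I>(X ; Z).  On the finite spaces used here it
          unfolds to a single sum over X`space M \<times> Y`space M \<times> Z`space M with summand
            distribution (\<lambda>x. (X x, Y x, Z x)) {(x,y,z)} *
            log b (distribution (\<lambda>x. (X x, Y x, Z x)) {(x,y,z)} /
              (joint_distribution X Z {(x,z)} * joint_distribution Y Z {(y,z)} / distribution Z {z}))
          see conditional_mutual_information_eq below. *)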
   425 
   426 lemma (in finite_information_space) setsum_distribution_gen:
   427   assumes "Z -` {c} \<inter> space M = (\<Union>x \<in> X`space M. Y -` {f x}) \<inter> space M"
   428   and "inj_on f (X`space M)"
   429   shows "(\<Sum>x \<in> X`space M. distribution Y {f x}) = distribution Z {c}"
   430   unfolding distribution_def assms
   431   using finite_space assms
   432   by (subst measure_finitely_additive'')
   433      (auto simp add: disjoint_family_on_def sets_eq_Pow inj_on_def
   434       intro!: arg_cong[where f=prob])
   435 
   436 lemma (in finite_information_space) setsum_distribution:
   437   "(\<Sum>x \<in> X`space M. joint_distribution X Y {(x, y)}) = distribution Y {y}"
   438   "(\<Sum>y \<in> Y`space M. joint_distribution X Y {(x, y)}) = distribution X {x}"
   439   "(\<Sum>x \<in> X`space M. joint_distribution X (\<lambda>x. (Y x, Z x)) {(x, y, z)}) = joint_distribution Y Z {(y, z)}"
   440   "(\<Sum>y \<in> Y`space M. joint_distribution X (\<lambda>x. (Y x, Z x)) {(x, y, z)}) = joint_distribution X Z {(x, z)}"
   441   "(\<Sum>z \<in> Z`space M. joint_distribution X (\<lambda>x. (Y x, Z x)) {(x, y, z)}) = joint_distribution X Y {(x, y)}"
   442   by (auto intro!: inj_onI setsum_distribution_gen)
   443 
   444 lemma (in finite_information_space) conditional_mutual_information_eq_sum:
   445    "\<I>(X ; Y | Z) =
   446      (\<Sum>(x, y, z)\<in>X ` space M \<times> (\<lambda>x. (Y x, Z x)) ` space M.
   447              distribution (\<lambda>x. (X x, Y x, Z x)) {(x, y, z)} *
   448              log b (distribution (\<lambda>x. (X x, Y x, Z x)) {(x, y, z)}/
   449         distribution (\<lambda>x. (Y x, Z x)) {(y, z)})) -
   450      (\<Sum>(x, z)\<in>X ` space M \<times> Z ` space M.
   451         distribution (\<lambda>x. (X x, Z x)) {(x,z)} * log b (distribution (\<lambda>x. (X x, Z x)) {(x,z)} / distribution Z {z}))"
   452   (is "_ = ?rhs")
   453 proof -
   454   have setsum_product:
   455     "\<And>f x. (\<Sum>v\<in>(\<lambda>x. (Y x, Z x)) ` space M. joint_distribution X (\<lambda>x. (Y x, Z x)) {(x,v)} * f v)
   456       = (\<Sum>v\<in>Y ` space M \<times> Z ` space M. joint_distribution X (\<lambda>x. (Y x, Z x)) {(x,v)} * f v)"
   457   proof (safe intro!: setsum_mono_zero_cong_left imageI)
   458     fix x y z f
   459     assume *: "(Y y, Z z) \<notin> (\<lambda>x. (Y x, Z x)) ` space M" and "y \<in> space M" "z \<in> space M"
   460     hence "(\<lambda>x. (X x, Y x, Z x)) -` {(x, Y y, Z z)} \<inter> space M = {}"
   461     proof safe
   462       fix x' assume x': "x' \<in> space M" and eq: "Y x' = Y y" "Z x' = Z z"
   463       have "(Y y, Z z) \<in> (\<lambda>x. (Y x, Z x)) ` space M" using eq[symmetric] x' by auto
   464       thus "x' \<in> {}" using * by auto
   465     qed
   466     thus "joint_distribution X (\<lambda>x. (Y x, Z x)) {(x, Y y, Z z)} * f (Y y) (Z z) = 0"
   467       unfolding distribution_def by simp
   468   qed (simp add: finite_space)
   469 
   470   thus ?thesis
   471     unfolding conditional_mutual_information_def Let_def mutual_information_eq
   472     apply (subst mutual_information_eq_generic)
   473     by (auto simp add: prod_measure_space_def sigma_prod_sets_finite finite_space
   474         finite_prob_space_of_images finite_product_prob_space_of_images
   475         setsum_cartesian_product' setsum_product setsum_subtractf setsum_addf
   476         setsum_left_distrib[symmetric] setsum_distribution
   477       cong: setsum_cong)
   478 qed
   479 
   480 lemma (in finite_information_space) conditional_mutual_information_eq:
   481   "\<I>(X ; Y | Z) = (\<Sum>(x, y, z) \<in> X ` space M \<times> Y ` space M \<times> Z ` space M.
   482              distribution (\<lambda>x. (X x, Y x, Z x)) {(x, y, z)} *
   483              log b (distribution (\<lambda>x. (X x, Y x, Z x)) {(x, y, z)}/
   484     (joint_distribution X Z {(x, z)} * joint_distribution Y Z {(y,z)} / distribution Z {z})))"
   485   unfolding conditional_mutual_information_def Let_def mutual_information_eq
   486     apply (subst mutual_information_eq_generic)
   487   by (auto simp add: prod_measure_space_def sigma_prod_sets_finite finite_space
   488       finite_prob_space_of_images finite_product_prob_space_of_images
   489       setsum_cartesian_product' setsum_product setsum_subtractf setsum_addf
   490       setsum_left_distrib[symmetric] setsum_distribution setsum_commute[where A="Y`space M"]
   491     cong: setsum_cong)
   492 
   493 lemma (in finite_information_space) conditional_mutual_information_eq_mutual_information:
   494   "\<I>(X ; Y) = \<I>(X ; Y | (\<lambda>x. ()))"
   495 proof -
   496   have [simp]: "(\<lambda>x. ()) ` space M = {()}" using not_empty by auto
   497 
   498   show ?thesis
   499     unfolding conditional_mutual_information_eq mutual_information_eq
   500     by (simp add: setsum_cartesian_product' distribution_remove_const)
   501 qed
   502 
   503 lemma (in finite_information_space) conditional_mutual_information_positive:
   504   "0 \<le> \<I>(X ; Y | Z)"
   505 proof -
   506   let ?dXYZ = "distribution (\<lambda>x. (X x, Y x, Z x))"
   507   let ?dXZ = "joint_distribution X Z"
   508   let ?dYZ = "joint_distribution Y Z"
   509   let ?dX = "distribution X"
   510   let ?dZ = "distribution Z"
   511   let ?M = "X ` space M \<times> Y ` space M \<times> Z ` space M"
   512 
   513   have split_beta: "\<And>f. split f = (\<lambda>x. f (fst x) (snd x))" by (simp add: expand_fun_eq)
   514 
   515   have "- (\<Sum>(x, y, z) \<in> ?M. ?dXYZ {(x, y, z)} *
   516     log b (?dXYZ {(x, y, z)} / (?dXZ {(x, z)} * ?dYZ {(y,z)} / ?dZ {z})))
   517     \<le> log b (\<Sum>(x, y, z) \<in> ?M. ?dXZ {(x, z)} * ?dYZ {(y,z)} / ?dZ {z})"
   518     unfolding split_beta
   519   proof (rule log_setsum_divide)
   520     show "?M \<noteq> {}" using not_empty by simp
   521     show "1 < b" using b_gt_1 .
   522 
   523     fix x assume "x \<in> ?M"
   524     show "0 \<le> ?dXYZ {(fst x, fst (snd x), snd (snd x))}" using positive_distribution .
   525     show "0 \<le> ?dXZ {(fst x, snd (snd x))} * ?dYZ {(fst (snd x), snd (snd x))} / ?dZ {snd (snd x)}"
   526       by (auto intro!: mult_nonneg_nonneg positive_distribution simp: zero_le_divide_iff)
   527 
   528     assume *: "0 < ?dXYZ {(fst x, fst (snd x), snd (snd x))}"
   529     thus "0 < ?dXZ {(fst x, snd (snd x))} * ?dYZ {(fst (snd x), snd (snd x))} / ?dZ {snd (snd x)}"
   530       by (auto intro!: divide_pos_pos mult_pos_pos
   531            intro: distribution_order(6) distribution_mono_gt_0)
   532   qed (simp_all add: setsum_cartesian_product' sum_over_space_distrib setsum_distribution finite_space)
   533   also have "(\<Sum>(x, y, z) \<in> ?M. ?dXZ {(x, z)} * ?dYZ {(y,z)} / ?dZ {z}) = (\<Sum>z\<in>Z`space M. ?dZ {z})"
   534     apply (simp add: setsum_cartesian_product')
   535     apply (subst setsum_commute)
   536     apply (subst (2) setsum_commute)
   537     by (auto simp: setsum_divide_distrib[symmetric] setsum_product[symmetric] setsum_distribution
   538           intro!: setsum_cong)
   539   finally show ?thesis
   540     unfolding conditional_mutual_information_eq sum_over_space_distrib by simp
   541 qed
   542 
   543 
   544 definition (in prob_space)
   545   "conditional_entropy b S T X Y = conditional_mutual_information b S S T X X Y"
   546 
   547 abbreviation (in finite_information_space)
   548   finite_conditional_entropy ("\<H>'(_ | _')") where
   549   "\<H>(X | Y) \<equiv> conditional_entropy b
   550     \<lparr> space = X`space M, sets = Pow (X`space M) \<rparr>
   551     \<lparr> space = Y`space M, sets = Pow (Y`space M) \<rparr> X Y"
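       (* Orientation only: \<H>(X | Y) = \<I>(X ; X | Y), which on the finite spaces used here
          unfolds to
            \<H>(X | Z) = - (\<Sum>(x,z) \<in> X`space M \<times> Z`space M. joint_distribution X Z {(x,z)} *
                            log b (joint_distribution X Z {(x,z)} / distribution Z {z}))
          which is lemma conditional_entropy_eq below. *)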
   552 
   553 lemma (in finite_information_space) conditional_entropy_positive:
   554   "0 \<le> \<H>(X | Y)" unfolding conditional_entropy_def using conditional_mutual_information_positive .
   555 
   556 lemma (in finite_information_space) conditional_entropy_eq:
   557   "\<H>(X | Z) =
   558      - (\<Sum>(x, z)\<in>X ` space M \<times> Z ` space M.
   559          joint_distribution X Z {(x, z)} *
   560          log b (joint_distribution X Z {(x, z)} / distribution Z {z}))"
   561 proof -
   562   have *: "\<And>x y z. (\<lambda>x. (X x, X x, Z x)) -` {(x, y, z)} = (if x = y then (\<lambda>x. (X x, Z x)) -` {(x, z)} else {})" by auto
   563   show ?thesis
   564     unfolding conditional_mutual_information_eq_sum
   565       conditional_entropy_def distribution_def *
   566     by (auto intro!: setsum_0')
   567 qed
   568 
   569 lemma (in finite_information_space) mutual_information_eq_entropy_conditional_entropy:
   570   "\<I>(X ; Z) = \<H>(X) - \<H>(X | Z)"
   571   unfolding mutual_information_eq entropy_eq conditional_entropy_eq
   572   using finite_space
   573   by (auto simp add: setsum_addf setsum_subtractf setsum_cartesian_product'
   574       setsum_left_distrib[symmetric] setsum_addf setsum_distribution)
   575 
   576 lemma (in finite_information_space) conditional_entropy_less_eq_entropy:
   577   "\<H>(X | Z) \<le> \<H>(X)"
   578 proof -
   579   have "\<I>(X ; Z) = \<H>(X) - \<H>(X | Z)" using mutual_information_eq_entropy_conditional_entropy .
   580   with mutual_information_positive[of X Z] entropy_positive[of X]
   581   show ?thesis by auto
   582 qed
   583 
   584 (* ------------- Entropy of an RV with a certain event is zero ---------------- *)
   585 
   586 lemma (in finite_information_space) finite_entropy_certainty_eq_0:
   587   assumes "x \<in> X ` space M" and "distribution X {x} = 1"
   588   shows "\<H>(X) = 0"
   589 proof -
   590   interpret X: finite_prob_space "\<lparr> space = X ` space M,
   591     sets = Pow (X ` space M),
   592     measure = distribution X\<rparr>" by (rule finite_prob_space_of_images)
   593 
   594   have "distribution X (X ` space M - {x}) = distribution X (X ` space M) - distribution X {x}"
   595     using X.measure_compl[of "{x}"] assms by auto
   596   also have "\<dots> = 0" using X.prob_space assms by auto
   597   finally have X0: "distribution X (X ` space M - {x}) = 0" by auto
   598 
   599   { fix y assume asm: "y \<noteq> x" "y \<in> X ` space M"
   600     hence "{y} \<subseteq> X ` space M - {x}" by auto
   601     from X.measure_mono[OF this] X0 X.positive[of "{y}"] asm
   602     have "distribution X {y} = 0" by auto }
   603 
   604   hence fi: "\<And> y. y \<in> X ` space M \<Longrightarrow> distribution X {y} = (if x = y then 1 else 0)"
   605     using assms by auto
   606 
   607   have y: "\<And>y. (if x = y then 1 else 0) * log b (if x = y then 1 else 0) = 0" by simp
   608 
   609   show ?thesis unfolding entropy_eq by (auto simp: y fi)
   610 qed
   611 (* --------------- upper bound on entropy for a rv ------------------------- *)
   612 
   613 lemma (in finite_information_space) finite_entropy_le_card:
   614   "\<H>(X) \<le> log b (real (card (X ` space M \<inter> {x . distribution X {x} \<noteq> 0})))"
   615 proof -
   616   interpret X: finite_prob_space "\<lparr>space = X ` space M,
   617                                     sets = Pow (X ` space M),
   618                                  measure = distribution X\<rparr>"
   619     using finite_prob_space_of_images by auto
   620 
   621   have triv: "\<And> x. (if distribution X {x} \<noteq> 0 then distribution X {x} else 0) = distribution X {x}"
   622     by auto
   623   hence sum1: "(\<Sum> x \<in> X ` space M \<inter> {y. distribution X {y} \<noteq> 0}. distribution X {x}) = 1"
   624     using X.measure_finitely_additive''[of "X ` space M" "\<lambda> x. {x}", simplified]
   625       sets_eq_Pow inj_singleton[unfolded inj_on_def, rule_format]
   626     unfolding disjoint_family_on_def  X.prob_space[symmetric]
   627     using finite_imageI[OF finite_space, of X] by (auto simp add:triv setsum_restrict_set)
   628   have pos: "\<And> x. x \<in> X ` space M \<inter> {y. distribution X {y} \<noteq> 0} \<Longrightarrow> inverse (distribution X {x}) > 0"
   629     using X.positive sets_eq_Pow unfolding inverse_positive_iff_positive less_le by auto
   630   { assume asm: "X ` space M \<inter> {y. distribution X {y} \<noteq> 0} = {}" 
   631     { fix x assume "x \<in> X ` space M"
   632       hence "distribution X {x} = 0" using asm by blast }
   633     hence A: "(\<Sum> x \<in> X ` space M. distribution X {x}) = 0" by auto
   634     have B: "(\<Sum> x \<in> X ` space M. distribution X {x})
   635       \<ge> (\<Sum> x \<in> X ` space M \<inter> {y. distribution X {y} \<noteq> 0}. distribution X {x})"
   636       using finite_imageI[OF finite_space, of X]
   637       by (subst setsum_mono2) auto
   638     from A B have "False" using sum1 by auto } note not_empty = this
   639   { fix x assume asm: "x \<in> X ` space M"
   640     have "- distribution X {x} * log b (distribution X {x})
   641        = - (if distribution X {x} \<noteq> 0 
   642             then distribution X {x} * log b (distribution X {x})
   643             else 0)"
   644       by auto
   645     also have "\<dots> = (if distribution X {x} \<noteq> 0 
   646           then distribution X {x} * - log b (distribution X {x})
   647           else 0)"
   648       by auto
   649     also have "\<dots> = (if distribution X {x} \<noteq> 0
   650                     then distribution X {x} * log b (inverse (distribution X {x}))
   651                     else 0)"
   652       using log_inverse b_gt_1 X.positive[of "{x}"] asm by auto
   653     finally have "- distribution X {x} * log b (distribution X {x})
   654                  = (if distribution X {x} \<noteq> 0 
   655                     then distribution X {x} * log b (inverse (distribution X {x}))
   656                     else 0)"
   657       by auto } note log_inv = this
   658   have "- (\<Sum> x \<in> X ` space M. distribution X {x} * log b (distribution X {x}))
   659        = (\<Sum> x \<in> X ` space M. (if distribution X {x} \<noteq> 0 
   660           then distribution X {x} * log b (inverse (distribution X {x}))
   661           else 0))"
   662     unfolding setsum_negf[symmetric] using log_inv by auto
   663   also have "\<dots> = (\<Sum> x \<in> X ` space M \<inter> {y. distribution X {y} \<noteq> 0}.
   664                           distribution X {x} * log b (inverse (distribution X {x})))"
   665     unfolding setsum_restrict_set[OF finite_imageI[OF finite_space, of X]] by auto
   666   also have "\<dots> \<le> log b (\<Sum> x \<in> X ` space M \<inter> {y. distribution X {y} \<noteq> 0}.
   667                           distribution X {x} * (inverse (distribution X {x})))"
   668     apply (subst log_setsum[OF _ _ b_gt_1 sum1, 
   669      unfolded greaterThan_iff, OF _ _ _]) using pos sets_eq_Pow
   670       X.finite_space X.positive not_empty by auto
   671   also have "\<dots> = log b (\<Sum> x \<in> X ` space M \<inter> {y. distribution X {y} \<noteq> 0}. 1)"
   672     by auto
   673   also have "\<dots> \<le> log b (real_of_nat (card (X ` space M \<inter> {y. distribution X {y} \<noteq> 0})))"
   674     by auto
   675   finally have "- (\<Sum>x\<in>X ` space M. distribution X {x} * log b (distribution X {x}))
   676                \<le> log b (real_of_nat (card (X ` space M \<inter> {y. distribution X {y} \<noteq> 0})))" by simp
   677   thus ?thesis unfolding entropy_eq real_eq_of_nat by auto
   678 qed
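       (* A worked instance of the bound above (informal): with b = 2 and a random variable
          whose distribution puts non-zero mass on exactly 8 values, \<H>(X) \<le> log 2 8 = 3. *)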
   679 
   680 (* --------------- entropy is maximal for a uniform rv --------------------- *)
   681 
   682 lemma (in finite_prob_space) uniform_prob:
   683   assumes "x \<in> space M"
   684   assumes "\<And> x y. \<lbrakk>x \<in> space M ; y \<in> space M\<rbrakk> \<Longrightarrow> prob {x} = prob {y}"
   685   shows "prob {x} = 1 / real (card (space M))"
   686 proof -
   687   have prob_x: "\<And> y. y \<in> space M \<Longrightarrow> prob {y} = prob {x}"
   688     using assms(2)[OF _ `x \<in> space M`] by blast
   689   have "1 = prob (space M)"
   690     using prob_space by auto
   691   also have "\<dots> = (\<Sum> x \<in> space M. prob {x})"
   692     using measure_finitely_additive''[of "space M" "\<lambda> x. {x}", simplified]
   693       sets_eq_Pow inj_singleton[unfolded inj_on_def, rule_format]
   694       finite_space unfolding disjoint_family_on_def  prob_space[symmetric]
   695     by (auto simp add:setsum_restrict_set)
   696   also have "\<dots> = (\<Sum> y \<in> space M. prob {x})"
   697     using prob_x by auto
   698   also have "\<dots> = real_of_nat (card (space M)) * prob {x}" by simp
   699   finally have one: "1 = real (card (space M)) * prob {x}"
   700     using real_eq_of_nat by auto
   701   hence two: "real (card (space M)) \<noteq> 0" by fastsimp 
   702   from one have three: "prob {x} \<noteq> 0" by fastsimp
   703   thus ?thesis using one two three divide_cancel_right
   704     by (auto simp:field_simps)
   705 qed
   706 
   707 lemma (in finite_information_space) finite_entropy_uniform_max:
   708   assumes "\<And>x y. \<lbrakk> x \<in> X ` space M ; y \<in> X ` space M \<rbrakk> \<Longrightarrow> distribution X {x} = distribution X {y}"
   709   shows "\<H>(X) = log b (real (card (X ` space M)))"
   710 proof -
   711   interpret X: finite_prob_space "\<lparr>space = X ` space M,
   712                                     sets = Pow (X ` space M),
   713                                  measure = distribution X\<rparr>"
   714     using finite_prob_space_of_images by auto
   715 
   716   { fix x assume xasm: "x \<in> X ` space M"
   717     hence card_gt0: "real (card (X ` space M)) > 0"
   718       using card_gt_0_iff X.finite_space by auto
   719     from xasm have "\<And> y. y \<in> X ` space M \<Longrightarrow> distribution X {y} = distribution X {x}"
   720       using assms by blast
   721     hence "- (\<Sum>x\<in>X ` space M. distribution X {x} * log b (distribution X {x}))
   722          = - real (card (X ` space M)) * distribution X {x} * log b (distribution X {x})"
   723       unfolding real_eq_of_nat by auto
   724     also have "\<dots> = - real (card (X ` space M)) * (1 / real (card (X ` space M))) * log b (1 / real (card (X ` space M)))"
   725       by (auto simp: X.uniform_prob[simplified, OF xasm assms])
   726     also have "\<dots> = log b (real (card (X ` space M)))"
   727       unfolding inverse_eq_divide[symmetric]
   728       using card_gt0 log_inverse b_gt_1
   729       by (auto simp add:field_simps card_gt0)
   730     finally have ?thesis
   731       unfolding entropy_eq by auto }
   732   moreover
   733   { assume "X ` space M = {}"
   734     hence "distribution X (X ` space M) = 0"
   735       using X.empty_measure by simp
   736     hence "False" using X.prob_space by auto }
   737   ultimately show ?thesis by auto
   738 qed
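       (* A worked instance (informal): a fair coin, i.e. b = 2, card (X`space M) = 2 and
          distribution X {x} = 1/2 for both values, gives \<H>(X) = log 2 2 = 1 bit, which also
          attains the upper bound of finite_entropy_le_card. *)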
   739 
   740 definition "subvimage A f g \<longleftrightarrow> (\<forall>x \<in> A. f -` {f x} \<inter> A \<subseteq> g -` {g x} \<inter> A)"
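       (* Orientation only: subvimage A f g states that on A the value of g is determined by
          the value of f, i.e. f induces a finer partition of A than g.  For example,
          subvimage UNIV (\<lambda>n::nat. n) (\<lambda>n. n mod 2) holds, whereas swapping the two
          functions does not. *)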
   741 
   742 lemma subvimageI:
   743   assumes "\<And>x y. \<lbrakk> x \<in> A ; y \<in> A ; f x = f y \<rbrakk> \<Longrightarrow> g x = g y"
   744   shows "subvimage A f g"
   745   using assms unfolding subvimage_def by blast
   746 
   747 lemma subvimageE[consumes 1]:
   748   assumes "subvimage A f g"
   749   obtains "\<And>x y. \<lbrakk> x \<in> A ; y \<in> A ; f x = f y \<rbrakk> \<Longrightarrow> g x = g y"
   750   using assms unfolding subvimage_def by blast
   751 
   752 lemma subvimageD:
   753   "\<lbrakk> subvimage A f g ; x \<in> A ; y \<in> A ; f x = f y \<rbrakk> \<Longrightarrow> g x = g y"
   754   unfolding subvimage_def by blast
   755 
   756 lemma subvimage_subset:
   757   "\<lbrakk> subvimage B f g ; A \<subseteq> B \<rbrakk> \<Longrightarrow> subvimage A f g"
   758   unfolding subvimage_def by auto
   759 
   760 lemma subvimage_idem[intro]: "subvimage A g g"
   761   by (safe intro!: subvimageI)
   762 
   763 lemma subvimage_comp_finer[intro]:
   764   assumes svi: "subvimage A g h"
   765   shows "subvimage A g (f \<circ> h)"
   766 proof (rule subvimageI, simp)
   767   fix x y assume "x \<in> A" "y \<in> A" "g x = g y"
   768   from svi[THEN subvimageD, OF this]
   769   show "f (h x) = f (h y)" by simp
   770 qed
   771 
   772 lemma subvimage_comp_gran:
   773   assumes svi: "subvimage A g h"
   774   assumes inj: "inj_on f (g ` A)"
   775   shows "subvimage A (f \<circ> g) h"
   776   by (rule subvimageI) (auto intro!: subvimageD[OF svi] simp: inj_on_iff[OF inj])
   777 
   778 lemma subvimage_comp:
   779   assumes svi: "subvimage (f ` A) g h"
   780   shows "subvimage A (g \<circ> f) (h \<circ> f)"
   781   by (rule subvimageI) (auto intro!: svi[THEN subvimageD])
   782 
   783 lemma subvimage_trans:
   784   assumes fg: "subvimage A f g"
   785   assumes gh: "subvimage A g h"
   786   shows "subvimage A f h"
   787   by (rule subvimageI) (auto intro!: fg[THEN subvimageD] gh[THEN subvimageD])
   788 
   789 lemma subvimage_translator:
   790   assumes svi: "subvimage A f g"
   791   shows "\<exists>h. \<forall>x \<in> A. h (f x)  = g x"
   792 proof (safe intro!: exI[of _ "\<lambda>x. (THE z. z \<in> (g ` (f -` {x} \<inter> A)))"])
   793   fix x assume "x \<in> A"
   794   show "(THE x'. x' \<in> (g ` (f -` {f x} \<inter> A))) = g x"
   795     by (rule theI2[of _ "g x"])
   796       (insert `x \<in> A`, auto intro!: svi[THEN subvimageD])
   797 qed
   798 
   799 lemma subvimage_translator_image:
   800   assumes svi: "subvimage A f g"
   801   shows "\<exists>h. h ` f ` A = g ` A"
   802 proof -
   803   from subvimage_translator[OF svi]
   804   obtain h where "\<And>x. x \<in> A \<Longrightarrow> h (f x) = g x" by auto
   805   thus ?thesis
   806     by (auto intro!: exI[of _ h]
   807       simp: image_compose[symmetric] comp_def cong: image_cong)
   808 qed
   809 
   810 lemma subvimage_finite:
   811   assumes svi: "subvimage A f g" and fin: "finite (f`A)"
   812   shows "finite (g`A)"
   813 proof -
   814   from subvimage_translator_image[OF svi]
   815   obtain h where "g`A = h`f`A" by fastsimp
   816   with fin show "finite (g`A)" by simp
   817 qed
   818 
   819 lemma subvimage_disj:
   820   assumes svi: "subvimage A f g"
   821   shows "f -` {x} \<inter> A \<subseteq> g -` {y} \<inter> A \<or>
   822       f -` {x} \<inter> g -` {y} \<inter> A = {}" (is "?sub \<or> ?dist")
   823 proof (rule disjCI)
   824   assume "\<not> ?dist"
   825   then obtain z where "z \<in> A" and "x = f z" and "y = g z" by auto
   826   thus "?sub" using svi unfolding subvimage_def by auto
   827 qed
   828 
   829 lemma setsum_image_split:
   830   assumes svi: "subvimage A f g" and fin: "finite (f ` A)"
   831   shows "(\<Sum>x\<in>f`A. h x) = (\<Sum>y\<in>g`A. \<Sum>x\<in>f`(g -` {y} \<inter> A). h x)"
   832     (is "?lhs = ?rhs")
   833 proof -
   834   have "f ` A =
   835       snd ` (SIGMA x : g ` A. f ` (g -` {x} \<inter> A))"
   836       (is "_ = snd ` ?SIGMA")
   837     unfolding image_split_eq_Sigma[symmetric]
   838     by (simp add: image_compose[symmetric] comp_def)
   839   moreover
   840   have snd_inj: "inj_on snd ?SIGMA"
   841     unfolding image_split_eq_Sigma[symmetric]
   842     by (auto intro!: inj_onI subvimageD[OF svi])
   843   ultimately
   844   have "(\<Sum>x\<in>f`A. h x) = (\<Sum>(x,y)\<in>?SIGMA. h y)"
   845     by (auto simp: setsum_reindex intro: setsum_cong)
   846   also have "... = ?rhs"
   847     using subvimage_finite[OF svi fin] fin
   848     apply (subst setsum_Sigma[symmetric])
   849     by (auto intro!: finite_subset[of _ "f`A"])
   850   finally show ?thesis .
   851 qed
   852 
   853 lemma (in finite_information_space) entropy_partition:
   854   assumes svi: "subvimage (space M) X P"
   855   shows "\<H>(X) = \<H>(P) + \<H>(X|P)"
   856 proof -
   857   have "(\<Sum>x\<in>X ` space M. distribution X {x} * log b (distribution X {x})) =
   858     (\<Sum>y\<in>P `space M. \<Sum>x\<in>X ` space M.
   859     joint_distribution X P {(x, y)} * log b (joint_distribution X P {(x, y)}))"
   860   proof (subst setsum_image_split[OF svi],
   861       safe intro!: finite_imageI finite_space setsum_mono_zero_cong_left imageI)
   862     fix p x assume in_space: "p \<in> space M" "x \<in> space M"
   863     assume "joint_distribution X P {(X x, P p)} * log b (joint_distribution X P {(X x, P p)}) \<noteq> 0"
   864     hence "(\<lambda>x. (X x, P x)) -` {(X x, P p)} \<inter> space M \<noteq> {}" by (auto simp: distribution_def)
   865     with svi[unfolded subvimage_def, rule_format, OF `x \<in> space M`]
   866     show "x \<in> P -` {P p}" by auto
   867   next
   868     fix p x assume in_space: "p \<in> space M" "x \<in> space M"
   869     assume "P x = P p"
   870     from this[symmetric] svi[unfolded subvimage_def, rule_format, OF `x \<in> space M`]
   871     have "X -` {X x} \<inter> space M \<subseteq> P -` {P p} \<inter> space M"
   872       by auto
   873     hence "(\<lambda>x. (X x, P x)) -` {(X x, P p)} \<inter> space M = X -` {X x} \<inter> space M"
   874       by auto
   875     thus "distribution X {X x} * log b (distribution X {X x}) =
   876           joint_distribution X P {(X x, P p)} *
   877           log b (joint_distribution X P {(X x, P p)})"
   878       by (auto simp: distribution_def)
   879   qed
   880   thus ?thesis
   881   unfolding entropy_eq conditional_entropy_eq
   882     by (simp add: setsum_cartesian_product' setsum_subtractf setsum_distribution
   883       setsum_left_distrib[symmetric] setsum_commute[where B="P`space M"])
   884 qed
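       (* A worked instance of the chain rule above (informal): take X uniform on four values
          {0, 1, 2, 3}, b = 2 and P = (\<lambda>x. X x div 2), so that P is determined by X.  Then
          \<H>(X) = log 2 4 = 2, \<H>(P) = 1 and \<H>(X | P) = 1, and indeed 2 = 1 + 1. *)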
   885 
   886 corollary (in finite_information_space) entropy_data_processing:
   887   "\<H>(f \<circ> X) \<le> \<H>(X)"
   888   by (subst (2) entropy_partition[of _ "f \<circ> X"]) (auto intro: conditional_entropy_positive)
   889 
   890 lemma (in prob_space) distribution_cong:
   891   assumes "\<And>x. x \<in> space M \<Longrightarrow> X x = Y x"
   892   shows "distribution X = distribution Y"
   893   unfolding distribution_def expand_fun_eq
   894   using assms by (auto intro!: arg_cong[where f=prob])
   895 
   896 lemma (in prob_space) joint_distribution_cong:
   897   assumes "\<And>x. x \<in> space M \<Longrightarrow> X x = X' x"
   898   assumes "\<And>x. x \<in> space M \<Longrightarrow> Y x = Y' x"
   899   shows "joint_distribution X Y = joint_distribution X' Y'"
   900   unfolding distribution_def expand_fun_eq
   901   using assms by (auto intro!: arg_cong[where f=prob])
   902 
   903 lemma image_cong:
   904   "\<lbrakk> \<And>x. x \<in> S \<Longrightarrow> X x = X' x \<rbrakk> \<Longrightarrow> X ` S = X' ` S"
   905   by (auto intro!: image_eqI)
   906 
   907 lemma (in finite_information_space) mutual_information_cong:
   908   assumes X: "\<And>x. x \<in> space M \<Longrightarrow> X x = X' x"
   909   assumes Y: "\<And>x. x \<in> space M \<Longrightarrow> Y x = Y' x"
   910   shows "\<I>(X ; Y) = \<I>(X' ; Y')"
   911 proof -
   912   have "X ` space M = X' ` space M" using X by (rule image_cong)
   913   moreover have "Y ` space M = Y' ` space M" using Y by (rule image_cong)
   914   ultimately show ?thesis
   915   unfolding mutual_information_eq
   916     using
   917       assms[THEN distribution_cong]
   918       joint_distribution_cong[OF assms]
   919     by (auto intro!: setsum_cong)
   920 qed
   921 
   922 corollary (in finite_information_space) entropy_of_inj:
   923   assumes "inj_on f (X`space M)"
   924   shows "\<H>(f \<circ> X) = \<H>(X)"
   925 proof (rule antisym)
   926   show "\<H>(f \<circ> X) \<le> \<H>(X)" using entropy_data_processing .
   927 next
   928   have "\<H>(X) = \<H>(the_inv_into (X`space M) f \<circ> (f \<circ> X))"
   929     by (auto intro!: mutual_information_cong simp: entropy_def the_inv_into_f_f[OF assms])
   930   also have "... \<le> \<H>(f \<circ> X)"
   931     using entropy_data_processing .
   932   finally show "\<H>(X) \<le> \<H>(f \<circ> X)" .
   933 qed
   934 
   935 end