src/HOL/Probability/Information.thy
changeset 39097:943c7b348524
parent    39092:98de40859858
child     39198:f967a16dfcdd
comparison of 39096:111756225292 with 39097:943c7b348524
     1 theory Information
     1 theory Information
     2 imports Probability_Space Product_Measure Convex Radon_Nikodym
     2 imports Probability_Space Product_Measure Convex Radon_Nikodym
     3 begin
     3 begin
       
     4 
       
     5 lemma log_le: "1 < a \<Longrightarrow> 0 < x \<Longrightarrow> x \<le> y \<Longrightarrow> log a x \<le> log a y"
       
     6   by (subst log_le_cancel_iff) auto
       
     7 
       
     8 lemma log_less: "1 < a \<Longrightarrow> 0 < x \<Longrightarrow> x < y \<Longrightarrow> log a x < log a y"
       
     9   by (subst log_less_cancel_iff) auto
       
    10 
       
    11 lemma setsum_cartesian_product':
       
    12   "(\<Sum>x\<in>A \<times> B. f x) = (\<Sum>x\<in>A. setsum (\<lambda>y. f (x, y)) B)"
       
    13   unfolding setsum_cartesian_product by simp
     4 
    14 
     5 lemma real_of_pinfreal_inverse[simp]:
    15 lemma real_of_pinfreal_inverse[simp]:
     6   fixes X :: pinfreal
    16   fixes X :: pinfreal
     7   shows "real (inverse X) = 1 / real X"
    17   shows "real (inverse X) = 1 / real X"
     8   by (cases X) (auto simp: inverse_eq_divide)
    18   by (cases X) (auto simp: inverse_eq_divide)
       
    19 
       
    20 lemma (in finite_prob_space) finite_product_prob_space_of_images:
       
    21   "finite_prob_space \<lparr> space = X ` space M \<times> Y ` space M, sets = Pow (X ` space M \<times> Y ` space M)\<rparr>
       
    22                      (joint_distribution X Y)"
       
    23   (is "finite_prob_space ?S _")
       
    24 proof (simp add: finite_prob_space_eq finite_product_measure_space_of_images)
       
    25   have "X -` X ` space M \<inter> Y -` Y ` space M \<inter> space M = space M" by auto
       
    26   thus "joint_distribution X Y (X ` space M \<times> Y ` space M) = 1"
       
    27     by (simp add: distribution_def prob_space vimage_Times comp_def measure_space_1)
       
    28 qed
       
    29 
       
    30 lemma (in finite_prob_space) finite_measure_space_prod:
       
    31   assumes X: "finite_measure_space MX (distribution X)"
       
    32   assumes Y: "finite_measure_space MY (distribution Y)"
       
    33   shows "finite_measure_space (prod_measure_space MX MY) (joint_distribution X Y)"
       
    34     (is "finite_measure_space ?M ?D")
       
    35 proof (intro finite_measure_spaceI)
       
    36   interpret X: finite_measure_space MX "distribution X" by fact
       
    37   interpret Y: finite_measure_space MY "distribution Y" by fact
       
    38   note finite_measure_space.finite_prod_measure_space[OF X Y, simp]
       
    39   show "finite (space ?M)" using X.finite_space Y.finite_space by auto
       
    40   show "joint_distribution X Y {} = 0" by simp
       
    41   show "sets ?M = Pow (space ?M)" by simp
       
    42   { fix x show "?D (space ?M) \<noteq> \<omega>" by (rule distribution_finite) }
       
    43   { fix A B assume "A \<subseteq> space ?M" "B \<subseteq> space ?M" "A \<inter> B = {}"
       
    44     have *: "(\<lambda>t. (X t, Y t)) -` (A \<union> B) \<inter> space M =
       
    45              (\<lambda>t. (X t, Y t)) -` A \<inter> space M \<union> (\<lambda>t. (X t, Y t)) -` B \<inter> space M"
       
    46       by auto
       
    47     show "?D (A \<union> B) = ?D A + ?D B" unfolding distribution_def *
       
    48       apply (rule measure_additive[symmetric])
       
    49       using `A \<inter> B = {}` by (auto simp: sets_eq_Pow) }
       
    50 qed
     9 
    51 
    10 section "Convex theory"
    52 section "Convex theory"
    11 
    53 
    12 lemma log_setsum:
    54 lemma log_setsum:
    13   assumes "finite s" "s \<noteq> {}"
    55   assumes "finite s" "s \<noteq> {}"
   103       using `finite S` pos by (auto intro!: setsum_mono2)
   145       using `finite S` pos by (auto intro!: setsum_mono2)
   104   qed
   146   qed
   105   finally show ?thesis .
   147   finally show ?thesis .
   106 qed
   148 qed
   107 
   149 
    108 lemma (in finite_prob_space) sum_over_space_distrib:
    109   "(\<Sum>x\<in>X`space M. distribution X {x}) = 1"
    110   unfolding distribution_def measure_space_1[symmetric] using finite_space
    111   by (subst measure_finitely_additive'')
    112      (auto simp add: disjoint_family_on_def sets_eq_Pow intro!: arg_cong[where f=\<mu>])
    113 
    114 lemma (in finite_prob_space) sum_over_space_real_distribution:
    115   "(\<Sum>x\<in>X`space M. real (distribution X {x})) = 1"
    116   unfolding distribution_def prob_space[symmetric] using finite_space
    117   by (subst real_finite_measure_finite_Union[symmetric])
    118      (auto simp add: disjoint_family_on_def sets_eq_Pow intro!: arg_cong[where f=prob])
    150 lemma split_pairs:
    151   shows
    152     "((A, B) = X) \<longleftrightarrow> (fst X = A \<and> snd X = B)" and
    153     "(X = (A, B)) \<longleftrightarrow> (fst X = A \<and> snd X = B)" by auto
   119 
   154 
   120 section "Information theory"
   155 section "Information theory"
   121 
       
   122 definition
       
   123   "KL_divergence b M \<mu> \<nu> =
       
   124     measure_space.integral M \<mu> (\<lambda>x. log b (real (sigma_finite_measure.RN_deriv M \<nu> \<mu> x)))"
       
   125 
   156 
   126 locale finite_information_space = finite_prob_space +
   157 locale finite_information_space = finite_prob_space +
   127   fixes b :: real assumes b_gt_1: "1 < b"
   158   fixes b :: real assumes b_gt_1: "1 < b"
   128 
   159 
   129 lemma (in finite_prob_space) distribution_mono:
       
   130   assumes "\<And>t. \<lbrakk> t \<in> space M ; X t \<in> x \<rbrakk> \<Longrightarrow> Y t \<in> y"
       
   131   shows "distribution X x \<le> distribution Y y"
       
   132   unfolding distribution_def
       
   133   using assms by (auto simp: sets_eq_Pow intro!: measure_mono)
       
   134 
       
   135 lemma (in prob_space) distribution_remove_const:
       
   136   shows "joint_distribution X (\<lambda>x. ()) {(x, ())} = distribution X {x}"
       
   137   and "joint_distribution (\<lambda>x. ()) X {((), x)} = distribution X {x}"
       
   138   and "joint_distribution X (\<lambda>x. (Y x, ())) {(x, y, ())} = joint_distribution X Y {(x, y)}"
       
   139   and "joint_distribution X (\<lambda>x. ((), Y x)) {(x, (), y)} = joint_distribution X Y {(x, y)}"
       
   140   and "distribution (\<lambda>x. ()) {()} = 1"
       
   141   unfolding measure_space_1[symmetric]
       
   142   by (auto intro!: arg_cong[where f="\<mu>"] simp: distribution_def)
       
   143 
       
   144 context finite_information_space
   160 context finite_information_space
   145 begin
   161 begin
   146 
       
   147 lemma distribution_mono_gt_0:
       
   148   assumes gt_0: "0 < distribution X x"
       
   149   assumes *: "\<And>t. \<lbrakk> t \<in> space M ; X t \<in> x \<rbrakk> \<Longrightarrow> Y t \<in> y"
       
   150   shows "0 < distribution Y y"
       
   151   by (rule less_le_trans[OF gt_0 distribution_mono]) (rule *)
       
   152 
   162 
   153 lemma
   163 lemma
   154   assumes "0 \<le> A" and pos: "0 < A \<Longrightarrow> 0 < B" "0 < A \<Longrightarrow> 0 < C"
   164   assumes "0 \<le> A" and pos: "0 < A \<Longrightarrow> 0 < B" "0 < A \<Longrightarrow> 0 < C"
   155   shows mult_log_mult: "A * log b (B * C) = A * log b B + A * log b C" (is "?mult")
   165   shows mult_log_mult: "A * log b (B * C) = A * log b B + A * log b C" (is "?mult")
   156   and mult_log_divide: "A * log b (B / C) = A * log b B - A * log b C" (is "?div")
   166   and mult_log_divide: "A * log b (B / C) = A * log b B - A * log b C" (is "?div")
   162       with pos[OF this] show "?mult \<and> ?div" using b_gt_1
   172       with pos[OF this] show "?mult \<and> ?div" using b_gt_1
   163         by (auto simp: log_divide log_mult field_simps)
   173         by (auto simp: log_divide log_mult field_simps)
   164   qed simp
   174   qed simp
   165   thus ?mult and ?div by auto
   175   thus ?mult and ?div by auto
   166 qed
   176 qed
   167 
       
   168 lemma split_pairs:
       
   169   shows
       
   170     "((A, B) = X) \<longleftrightarrow> (fst X = A \<and> snd X = B)" and
       
   171     "(X = (A, B)) \<longleftrightarrow> (fst X = A \<and> snd X = B)" by auto
       
   172 
       
   173 lemma (in finite_information_space) distribution_finite:
       
   174   "distribution X A \<noteq> \<omega>"
       
   175   using measure_finite[of "X -` A \<inter> space M"]
       
   176   unfolding distribution_def sets_eq_Pow by auto
       
   177 
       
   178 lemma (in finite_information_space) real_distribution_gt_0[simp]:
       
   179   "0 < real (distribution Y y) \<longleftrightarrow>  0 < distribution Y y"
       
   180   using assms by (auto intro!: real_pinfreal_pos distribution_finite)
       
   181 
       
   182 lemma real_distribution_mult_pos_pos:
       
   183   assumes "0 < distribution Y y"
       
   184   and "0 < distribution X x"
       
   185   shows "0 < real (distribution Y y * distribution X x)"
       
   186   unfolding real_of_pinfreal_mult[symmetric]
       
   187   using assms by (auto intro!: mult_pos_pos)
       
   188 
       
   189 lemma real_distribution_divide_pos_pos:
       
   190   assumes "0 < distribution Y y"
       
   191   and "0 < distribution X x"
       
   192   shows "0 < real (distribution Y y / distribution X x)"
       
   193   unfolding divide_pinfreal_def real_of_pinfreal_mult[symmetric]
       
   194   using assms distribution_finite[of X x] by (cases "distribution X x") (auto intro!: mult_pos_pos)
       
   195 
       
   196 lemma real_distribution_mult_inverse_pos_pos:
       
   197   assumes "0 < distribution Y y"
       
   198   and "0 < distribution X x"
       
   199   shows "0 < real (distribution Y y * inverse (distribution X x))"
       
   200   unfolding divide_pinfreal_def real_of_pinfreal_mult[symmetric]
       
   201   using assms distribution_finite[of X x] by (cases "distribution X x") (auto intro!: mult_pos_pos)
       
   202 
   177 
   203 ML {*
   178 ML {*
   204 
   179 
   205   (* tactic to solve equations of the form @{term "W * log b (X / (Y * Z)) = W * log b X - W * log b (Y * Z)"}
   180   (* tactic to solve equations of the form @{term "W * log b (X / (Y * Z)) = W * log b X - W * log b (Y * Z)"}
   206      where @{term W} is a joint distribution of @{term X}, @{term Y}, and @{term Z}. *)
   181      where @{term W} is a joint distribution of @{term X}, @{term Y}, and @{term Z}. *)
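For orientation: the rewrite the simproc below automates is mult_log_mult/mult_log_divide above, instantiated so that the common factor is a (nonnegative) real distribution value. Writing that factor as $W$, the shape from the comment is
\[
  W \,\log_b \frac{X}{Y Z} \;=\; W \,\log_b X \;-\; W \,\log_b (Y Z),
\]
valid when $W = 0$ or when $X$ and $Y Z$ are both positive (with $b > 1$, as fixed in the locale).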
   250 simproc_setup mult_log ("real (distribution X x) * log b (A * B)" |
   225 simproc_setup mult_log ("real (distribution X x) * log b (A * B)" |
   251                         "real (distribution X x) * log b (A / B)") = {* K mult_log_simproc *}
   226                         "real (distribution X x) * log b (A / B)") = {* K mult_log_simproc *}
   252 
   227 
   253 end
   228 end
   254 
   229 
    255 lemma (in finite_measure_space) absolutely_continuousI:
    256   assumes "finite_measure_space M \<nu>"
    257   assumes v: "\<And>x. \<lbrakk> x \<in> space M ; \<mu> {x} = 0 \<rbrakk> \<Longrightarrow> \<nu> {x} = 0"
    258   shows "absolutely_continuous \<nu>"
    259 proof (unfold absolutely_continuous_def sets_eq_Pow, safe)
    260   fix N assume "\<mu> N = 0" "N \<subseteq> space M"
    261 
    262   interpret v: finite_measure_space M \<nu> by fact
    263 
    264   have "\<nu> N = \<nu> (\<Union>x\<in>N. {x})" by simp
    265   also have "\<dots> = (\<Sum>x\<in>N. \<nu> {x})"
    266   proof (rule v.measure_finitely_additive''[symmetric])
    267     show "finite N" using `N \<subseteq> space M` finite_space by (auto intro: finite_subset)
    268     show "disjoint_family_on (\<lambda>i. {i}) N" unfolding disjoint_family_on_def by auto
    269     fix x assume "x \<in> N" thus "{x} \<in> sets M" using `N \<subseteq> space M` sets_eq_Pow by auto
    270   qed
    271   also have "\<dots> = 0"
    272   proof (safe intro!: setsum_0')
    273     fix x assume "x \<in> N"
    274     hence "\<mu> {x} \<le> \<mu> N" using sets_eq_Pow `N \<subseteq> space M` by (auto intro!: measure_mono)
    275     hence "\<mu> {x} = 0" using `\<mu> N = 0` by simp
    276     thus "\<nu> {x} = 0" using v[of x] `x \<in> N` `N \<subseteq> space M` by auto
    277   qed
    278   finally show "\<nu> N = 0" .
    279 qed
    230 subsection "Kullback$-$Leibler divergence"
    231 
    232 text {* The Kullback$-$Leibler divergence is also known as relative entropy or
    233 Kullback$-$Leibler distance. *}
    234 
    235 definition
    236   "KL_divergence b M \<mu> \<nu> =
    237     measure_space.integral M \<mu> (\<lambda>x. log b (real (sigma_finite_measure.RN_deriv M \<nu> \<mu> x)))"
   280 
   238 
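In conventional notation, and writing $\Omega$ for space M, the finite-space form of KL_divergence established by KL_divergence_eq_finite below (assuming $\nu$ vanishes wherever $\mu$ does) is
\[
  D_b(\nu \parallel \mu) \;=\; \sum_{x \in \Omega} \nu\{x\}\,\log_b \frac{\nu\{x\}}{\mu\{x\}},
\]
where the pinfreal measure values are coerced to reals by real.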
   281 lemma (in finite_measure_space) KL_divergence_eq_finite:
   239 lemma (in finite_measure_space) KL_divergence_eq_finite:
   282   assumes v: "finite_measure_space M \<nu>"
   240   assumes v: "finite_measure_space M \<nu>"
   283   assumes ac: "\<forall>x\<in>space M. \<mu> {x} = 0 \<longrightarrow> \<nu> {x} = 0"
   241   assumes ac: "\<forall>x\<in>space M. \<mu> {x} = 0 \<longrightarrow> \<nu> {x} = 0"
   284   shows "KL_divergence b M \<nu> \<mu> = (\<Sum>x\<in>space M. real (\<nu> {x}) * log b (real (\<nu> {x}) / real (\<mu> {x})))" (is "_ = ?sum")
   242   shows "KL_divergence b M \<nu> \<mu> = (\<Sum>x\<in>space M. real (\<nu> {x}) * log b (real (\<nu> {x}) / real (\<mu> {x})))" (is "_ = ?sum")
   285 proof (simp add: KL_divergence_def finite_measure_space.integral_finite_singleton[OF v])
   243 proof (simp add: KL_divergence_def finite_measure_space.integral_finite_singleton[OF v])
   286   interpret v: finite_measure_space M \<nu> by fact
   244   interpret v: finite_measure_space M \<nu> by fact
   287   have ms: "measure_space M \<nu>" by fact
   245   have ms: "measure_space M \<nu>" by fact
   288 
       
   289   have ac: "absolutely_continuous \<nu>"
   246   have ac: "absolutely_continuous \<nu>"
   290     using ac by (auto intro!: absolutely_continuousI[OF v])
   247     using ac by (auto intro!: absolutely_continuousI[OF v])
   291 
       
   292   show "(\<Sum>x \<in> space M. log b (real (RN_deriv \<nu> x)) * real (\<nu> {x})) = ?sum"
   248   show "(\<Sum>x \<in> space M. log b (real (RN_deriv \<nu> x)) * real (\<nu> {x})) = ?sum"
   293     using RN_deriv_finite_measure[OF ms ac]
   249     using RN_deriv_finite_measure[OF ms ac]
   294     by (auto intro!: setsum_cong simp: field_simps real_of_pinfreal_mult[symmetric])
   250     by (auto intro!: setsum_cong simp: field_simps real_of_pinfreal_mult[symmetric])
   295 qed
   251 qed
   296 
       
   297 lemma (in finite_prob_space) finite_sum_over_space_eq_1:
       
   298   "(\<Sum>x\<in>space M. real (\<mu> {x})) = 1"
       
   299   using sum_over_space_eq_1 finite_measure by (simp add: real_of_pinfreal_setsum sets_eq_Pow)
       
   300 
   252 
   301 lemma (in finite_prob_space) KL_divergence_positive_finite:
   253 lemma (in finite_prob_space) KL_divergence_positive_finite:
   302   assumes v: "finite_prob_space M \<nu>"
   254   assumes v: "finite_prob_space M \<nu>"
   303   assumes ac: "\<And>x. \<lbrakk> x \<in> space M ; \<mu> {x} = 0 \<rbrakk> \<Longrightarrow> \<nu> {x} = 0"
   255   assumes ac: "\<And>x. \<lbrakk> x \<in> space M ; \<mu> {x} = 0 \<rbrakk> \<Longrightarrow> \<nu> {x} = 0"
   304   and "1 < b"
   256   and "1 < b"
   320       show "(\<Sum>x\<in>space M. real (\<nu> {x})) = 1" using v.finite_sum_over_space_eq_1 by simp
   272       show "(\<Sum>x\<in>space M. real (\<nu> {x})) = 1" using v.finite_sum_over_space_eq_1 by simp
   321 
   273 
   322       fix x assume x: "x \<in> space M"
   274       fix x assume x: "x \<in> space M"
   323       { assume "0 < real (\<nu> {x})"
   275       { assume "0 < real (\<nu> {x})"
   324         hence "\<mu> {x} \<noteq> 0" using ac[OF x] by auto
   276         hence "\<mu> {x} \<noteq> 0" using ac[OF x] by auto
   325         thus "0 < prob {x}" using measure_finite[of "{x}"] sets_eq_Pow x
   277         thus "0 < prob {x}" using finite_measure[of "{x}"] sets_eq_Pow x
   326           by (cases "\<mu> {x}") simp_all }
   278           by (cases "\<mu> {x}") simp_all }
   327     qed auto
   279     qed auto
   328   qed
   280   qed
   329   thus "0 \<le> KL_divergence b M \<nu> \<mu>" using finite_sum_over_space_eq_1 by simp
   281   thus "0 \<le> KL_divergence b M \<nu> \<mu>" using finite_sum_over_space_eq_1 by simp
   330 qed
   282 qed
       
   283 
       
   284 subsection {* Mutual Information *}
   331 
   285 
   332 definition (in prob_space)
   286 definition (in prob_space)
   333   "mutual_information b S T X Y =
   287   "mutual_information b S T X Y =
   334     KL_divergence b (prod_measure_space S T)
   288     KL_divergence b (prod_measure_space S T)
   335       (joint_distribution X Y)
   289       (joint_distribution X Y)
   338 abbreviation (in finite_information_space)
   292 abbreviation (in finite_information_space)
   339   finite_mutual_information ("\<I>'(_ ; _')") where
   293   finite_mutual_information ("\<I>'(_ ; _')") where
   340   "\<I>(X ; Y) \<equiv> mutual_information b
   294   "\<I>(X ; Y) \<equiv> mutual_information b
   341     \<lparr> space = X`space M, sets = Pow (X`space M) \<rparr>
   295     \<lparr> space = X`space M, sets = Pow (X`space M) \<rparr>
   342     \<lparr> space = Y`space M, sets = Pow (Y`space M) \<rparr> X Y"
   296     \<lparr> space = Y`space M, sets = Pow (Y`space M) \<rparr> X Y"
   343 
       
   344 lemma prod_measure_times_finite:
       
   345   assumes fms: "finite_measure_space M \<mu>" "finite_measure_space N \<nu>" and a: "a \<in> space M \<times> space N"
       
   346   shows "prod_measure M \<mu> N \<nu> {a} = \<mu> {fst a} * \<nu> {snd a}"
       
   347 proof (cases a)
       
   348   case (Pair b c)
       
   349   hence a_eq: "{a} = {b} \<times> {c}" by simp
       
   350 
       
   351   interpret M: finite_measure_space M \<mu> by fact
       
   352   interpret N: finite_measure_space N \<nu> by fact
       
   353 
       
   354   from finite_measure_space.finite_prod_measure_times[OF fms, of "{b}" "{c}"] M.sets_eq_Pow N.sets_eq_Pow a Pair
       
   355   show ?thesis unfolding a_eq by simp
       
   356 qed
       
   357 
       
   358 lemma setsum_cartesian_product':
       
   359   "(\<Sum>x\<in>A \<times> B. f x) = (\<Sum>x\<in>A. setsum (\<lambda>y. f (x, y)) B)"
       
   360   unfolding setsum_cartesian_product by simp
       
   361 
   297 
   362 lemma (in finite_information_space) mutual_information_generic_eq:
   298 lemma (in finite_information_space) mutual_information_generic_eq:
   363   assumes MX: "finite_measure_space MX (distribution X)"
   299   assumes MX: "finite_measure_space MX (distribution X)"
   364   assumes MY: "finite_measure_space MY (distribution Y)"
   300   assumes MY: "finite_measure_space MY (distribution Y)"
   365   shows "mutual_information b MX MY X Y = (\<Sum> (x,y) \<in> space MX \<times> space MY.
   301   shows "mutual_information b MX MY X Y = (\<Sum> (x,y) \<in> space MX \<times> space MY.
   476   "\<I>(X;Y) = (\<Sum> (x,y) \<in> X ` space M \<times> Y ` space M.
   412   "\<I>(X;Y) = (\<Sum> (x,y) \<in> X ` space M \<times> Y ` space M.
   477     real (distribution (\<lambda>x. (X x, Y x)) {(x,y)}) * log b (real (distribution (\<lambda>x. (X x, Y x)) {(x,y)}) /
   413     real (distribution (\<lambda>x. (X x, Y x)) {(x,y)}) * log b (real (distribution (\<lambda>x. (X x, Y x)) {(x,y)}) /
   478                                                    (real (distribution X {x}) * real (distribution Y {y}))))"
   414                                                    (real (distribution X {x}) * real (distribution Y {y}))))"
   479   by (subst mutual_information_eq_generic) (simp_all add: finite_prob_space_of_images)
   415   by (subst mutual_information_eq_generic) (simp_all add: finite_prob_space_of_images)
   480 
   416 
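Read in conventional notation, the equation for $\mathcal{I}(X ; Y)$ just above becomes, abbreviating the real-valued joint and marginal distribution values by $p_{XY}$, $p_X$, $p_Y$ (a shorthand not used in the theory itself),
\[
  \mathcal{I}(X ; Y) \;=\; \sum_{x \in X(\Omega)} \sum_{y \in Y(\Omega)} p_{XY}(x,y)\,\log_b \frac{p_{XY}(x,y)}{p_X(x)\,p_Y(y)}.
\]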
       
   417 lemma (in finite_information_space) mutual_information_cong:
       
   418   assumes X: "\<And>x. x \<in> space M \<Longrightarrow> X x = X' x"
       
   419   assumes Y: "\<And>x. x \<in> space M \<Longrightarrow> Y x = Y' x"
       
   420   shows "\<I>(X ; Y) = \<I>(X' ; Y')"
       
   421 proof -
       
   422   have "X ` space M = X' ` space M" using X by (auto intro!: image_eqI)
       
   423   moreover have "Y ` space M = Y' ` space M" using Y by (auto intro!: image_eqI)
       
   424   ultimately show ?thesis
       
   425   unfolding mutual_information_eq
       
   426     using
       
   427       assms[THEN distribution_cong]
       
   428       joint_distribution_cong[OF assms]
       
   429     by (auto intro!: setsum_cong)
       
   430 qed
       
   431 
   481 lemma (in finite_information_space) mutual_information_positive: "0 \<le> \<I>(X;Y)"
   432 lemma (in finite_information_space) mutual_information_positive: "0 \<le> \<I>(X;Y)"
   482   by (subst mutual_information_positive_generic) (simp_all add: finite_prob_space_of_images)
   433   by (subst mutual_information_positive_generic) (simp_all add: finite_prob_space_of_images)
       
   434 
       
   435 subsection {* Entropy *}
   483 
   436 
   484 definition (in prob_space)
   437 definition (in prob_space)
   485   "entropy b s X = mutual_information b s s X X"
   438   "entropy b s X = mutual_information b s s X X"
   486 
   439 
   487 abbreviation (in finite_information_space)
   440 abbreviation (in finite_information_space)
   488   finite_entropy ("\<H>'(_')") where
   441   finite_entropy ("\<H>'(_')") where
   489   "\<H>(X) \<equiv> entropy b \<lparr> space = X`space M, sets = Pow (X`space M) \<rparr> X"
   442   "\<H>(X) \<equiv> entropy b \<lparr> space = X`space M, sets = Pow (X`space M) \<rparr> X"
   490 
   443 
    491 lemma (in finite_information_space) joint_distribution_remove[simp]:
    492     "joint_distribution X X {(x, x)} = distribution X {x}"
    493   unfolding distribution_def by (auto intro!: arg_cong[where f="\<mu>"])
    444 lemma (in finite_information_space) entropy_generic_eq:
    445   assumes MX: "finite_measure_space MX (distribution X)"
    446   shows "entropy b MX X = -(\<Sum> x \<in> space MX. real (distribution X {x}) * log b (real (distribution X {x})))"
       
   447 proof -
       
   448   let "?X x" = "real (distribution X {x})"
       
   449   let "?XX x y" = "real (joint_distribution X X {(x, y)})"
       
   450   interpret MX: finite_measure_space MX "distribution X" by fact
       
   451   { fix x y
       
   452     have "(\<lambda>x. (X x, X x)) -` {(x, y)} = (if x = y then X -` {x} else {})" by auto
       
   453     then have "?XX x y * log b (?XX x y / (?X x * ?X y)) =
       
   454         (if x = y then - ?X y * log b (?X y) else 0)"
       
   455       unfolding distribution_def by (auto simp: mult_log_divide) }
       
   456   note remove_XX = this
       
   457   show ?thesis
       
   458     unfolding entropy_def mutual_information_generic_eq[OF MX MX]
       
   459     unfolding setsum_cartesian_product[symmetric] setsum_negf[symmetric] remove_XX
       
   460     by (auto simp: setsum_cases MX.finite_space)
       
   461 qed
   494 
   462 
   495 lemma (in finite_information_space) entropy_eq:
   463 lemma (in finite_information_space) entropy_eq:
   496   "\<H>(X) = -(\<Sum> x \<in> X ` space M. real (distribution X {x}) * log b (real (distribution X {x})))"
   464   "\<H>(X) = -(\<Sum> x \<in> X ` space M. real (distribution X {x}) * log b (real (distribution X {x})))"
    497 proof -
    498   { fix f
    499     { fix x y
    500       have "(\<lambda>x. (X x, X x)) -` {(x, y)} = (if x = y then X -` {x} else {})" by auto
    501         hence "real (distribution (\<lambda>x. (X x, X x))  {(x,y)}) * f x y =
    502             (if x = y then real (distribution X {x}) * f x y else 0)"
    503         unfolding distribution_def by auto }
    504     hence "(\<Sum>(x, y) \<in> X ` space M \<times> X ` space M. real (joint_distribution X X {(x, y)}) * f x y) =
    505       (\<Sum>x \<in> X ` space M. real (distribution X {x}) * f x x)"
    506       unfolding setsum_cartesian_product' by (simp add: setsum_cases finite_space) }
    507   note remove_cartesian_product = this
    508 
    509   show ?thesis
    510     unfolding entropy_def mutual_information_eq setsum_negf[symmetric] remove_cartesian_product
    511     by (auto intro!: setsum_cong)
    512 qed
    465   by (simp add: finite_measure_space entropy_generic_eq)
   513 
   466 
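By entropy_def, $\mathcal{H}(X)$ is $\mathcal{I}(X ; X)$, and entropy_eq above is the usual closed form
\[
  \mathcal{H}(X) \;=\; -\sum_{x \in X(\Omega)} p_X(x)\,\log_b p_X(x),
\]
again with $p_X$ abbreviating the real-valued distribution.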
   514 lemma (in finite_information_space) entropy_positive: "0 \<le> \<H>(X)"
   467 lemma (in finite_information_space) entropy_positive: "0 \<le> \<H>(X)"
   515   unfolding entropy_def using mutual_information_positive .
   468   unfolding entropy_def using mutual_information_positive .
       
   469 
       
   470 lemma (in finite_information_space) entropy_certainty_eq_0:
       
   471   assumes "x \<in> X ` space M" and "distribution X {x} = 1"
       
   472   shows "\<H>(X) = 0"
       
   473 proof -
       
   474   interpret X: finite_prob_space "\<lparr> space = X ` space M, sets = Pow (X ` space M) \<rparr>" "distribution X"
       
   475     by (rule finite_prob_space_of_images)
       
   476 
       
   477   have "distribution X (X ` space M - {x}) = distribution X (X ` space M) - distribution X {x}"
       
   478     using X.measure_compl[of "{x}"] assms by auto
       
   479   also have "\<dots> = 0" using X.prob_space assms by auto
       
   480   finally have X0: "distribution X (X ` space M - {x}) = 0" by auto
       
   481 
       
   482   { fix y assume asm: "y \<noteq> x" "y \<in> X ` space M"
       
   483     hence "{y} \<subseteq> X ` space M - {x}" by auto
       
   484     from X.measure_mono[OF this] X0 asm
       
   485     have "distribution X {y} = 0" by auto }
       
   486 
       
   487   hence fi: "\<And> y. y \<in> X ` space M \<Longrightarrow> real (distribution X {y}) = (if x = y then 1 else 0)"
       
   488     using assms by auto
       
   489 
       
   490   have y: "\<And>y. (if x = y then 1 else 0) * log b (if x = y then 1 else 0) = 0" by simp
       
   491 
       
   492   show ?thesis unfolding entropy_eq by (auto simp: y fi)
       
   493 qed
       
   494 
       
   495 lemma (in finite_information_space) entropy_le_card_not_0:
       
   496   "\<H>(X) \<le> log b (real (card (X ` space M \<inter> {x . distribution X {x} \<noteq> 0})))"
       
   497 proof -
       
   498   let "?d x" = "distribution X {x}"
       
   499   let "?p x" = "real (?d x)"
       
   500   have "\<H>(X) = (\<Sum>x\<in>X`space M. ?p x * log b (1 / ?p x))"
       
   501     by (auto intro!: setsum_cong simp: entropy_eq setsum_negf[symmetric])
       
   502   also have "\<dots> \<le> log b (\<Sum>x\<in>X`space M. ?p x * (1 / ?p x))"
       
   503     apply (rule log_setsum')
       
   504     using not_empty b_gt_1 finite_space sum_over_space_real_distribution
       
   505     by auto
       
   506   also have "\<dots> = log b (\<Sum>x\<in>X`space M. if ?d x \<noteq> 0 then 1 else 0)"
       
   507     apply (rule arg_cong[where f="\<lambda>f. log b (\<Sum>x\<in>X`space M. f x)"])
       
   508     using distribution_finite[of X] by (auto simp: expand_fun_eq real_of_pinfreal_eq_0)
       
   509   finally show ?thesis
       
   510     using finite_space by (auto simp: setsum_cases real_eq_of_nat)
       
   511 qed
       
   512 
       
   513 lemma (in finite_information_space) entropy_uniform_max:
       
   514   assumes "\<And>x y. \<lbrakk> x \<in> X ` space M ; y \<in> X ` space M \<rbrakk> \<Longrightarrow> distribution X {x} = distribution X {y}"
       
   515   shows "\<H>(X) = log b (real (card (X ` space M)))"
       
   516 proof -
       
   517   note uniform =
       
   518     finite_prob_space_of_images[of X, THEN finite_prob_space.uniform_prob, simplified]
       
   519 
       
   520   have card_gt0: "0 < card (X ` space M)" unfolding card_gt_0_iff
       
   521     using finite_space not_empty by auto
       
   522 
       
   523   { fix x assume "x \<in> X ` space M"
       
   524     hence "real (distribution X {x}) = 1 / real (card (X ` space M))"
       
   525     proof (rule uniform)
       
   526       fix x y assume "x \<in> X`space M" "y \<in> X`space M"
       
   527       from assms[OF this] show "real (distribution X {x}) = real (distribution X {y})" by simp
       
   528     qed }
       
   529   thus ?thesis
       
   530     using not_empty finite_space b_gt_1 card_gt0
       
   531     by (simp add: entropy_eq real_eq_of_nat[symmetric] log_divide)
       
   532 qed
       
   533 
       
   534 lemma (in finite_information_space) entropy_le_card:
       
   535   "\<H>(X) \<le> log b (real (card (X ` space M)))"
       
   536 proof cases
       
   537   assume "X ` space M \<inter> {x. distribution X {x} \<noteq> 0} = {}"
       
   538   then have "\<And>x. x\<in>X`space M \<Longrightarrow> distribution X {x} = 0" by auto
       
   539   moreover
       
   540   have "0 < card (X`space M)"
       
   541     using finite_space not_empty unfolding card_gt_0_iff by auto
       
   542   then have "log b 1 \<le> log b (real (card (X`space M)))"
       
   543     using b_gt_1 by (intro log_le) auto
       
   544   ultimately show ?thesis unfolding entropy_eq by simp
       
   545 next
       
   546   assume False: "X ` space M \<inter> {x. distribution X {x} \<noteq> 0} \<noteq> {}"
       
   547   have "card (X ` space M \<inter> {x. distribution X {x} \<noteq> 0}) \<le> card (X ` space M)"
       
   548     (is "?A \<le> ?B") using finite_space not_empty by (auto intro!: card_mono)
       
   549   note entropy_le_card_not_0
       
   550   also have "log b (real ?A) \<le> log b (real ?B)"
       
   551     using b_gt_1 False finite_space not_empty `?A \<le> ?B`
       
   552     by (auto intro!: log_le simp: card_gt_0_iff)
       
   553   finally show ?thesis .
       
   554 qed
       
   555 
       
   556 lemma (in finite_information_space) entropy_commute:
       
   557   "\<H>(\<lambda>x. (X x, Y x)) = \<H>(\<lambda>x. (Y x, X x))"
       
   558 proof -
       
   559   have *: "(\<lambda>x. (Y x, X x))`space M = (\<lambda>(a,b). (b,a))`(\<lambda>x. (X x, Y x))`space M"
       
   560     by auto
       
   561   have inj: "\<And>X. inj_on (\<lambda>(a,b). (b,a)) X"
       
   562     by (auto intro!: inj_onI)
       
   563   show ?thesis
       
   564     unfolding entropy_eq unfolding * setsum_reindex[OF inj]
       
   565     by (simp add: joint_distribution_commute[of Y X] split_beta)
       
   566 qed
       
   567 
       
   568 lemma (in finite_information_space) entropy_eq_cartesian_sum:
       
   569   "\<H>(\<lambda>x. (X x, Y x)) = -(\<Sum>x\<in>X`space M. \<Sum>y\<in>Y`space M.
       
   570     real (joint_distribution X Y {(x,y)}) *
       
   571     log b (real (joint_distribution X Y {(x,y)})))"
       
   572 proof -
       
   573   { fix x assume "x\<notin>(\<lambda>x. (X x, Y x))`space M"
       
   574     then have "(\<lambda>x. (X x, Y x)) -` {x} \<inter> space M = {}" by auto
       
   575     then have "joint_distribution X Y {x} = 0"
       
   576       unfolding distribution_def by auto }
       
   577   then show ?thesis using finite_space
       
   578     unfolding entropy_eq neg_equal_iff_equal setsum_cartesian_product
       
   579     by (auto intro!: setsum_mono_zero_cong_left)
       
   580 qed
       
   581 
       
   582 subsection {* Conditional Mutual Information *}
   516 
   583 
   517 definition (in prob_space)
   584 definition (in prob_space)
   518   "conditional_mutual_information b M1 M2 M3 X Y Z \<equiv>
   585   "conditional_mutual_information b M1 M2 M3 X Y Z \<equiv>
   519     mutual_information b M1 (prod_measure_space M2 M3) X (\<lambda>x. (Y x, Z x)) -
   586     mutual_information b M1 (prod_measure_space M2 M3) X (\<lambda>x. (Y x, Z x)) -
   520     mutual_information b M1 M3 X Z"
   587     mutual_information b M1 M3 X Z"
   525     \<lparr> space = X`space M, sets = Pow (X`space M) \<rparr>
   592     \<lparr> space = X`space M, sets = Pow (X`space M) \<rparr>
   526     \<lparr> space = Y`space M, sets = Pow (Y`space M) \<rparr>
   593     \<lparr> space = Y`space M, sets = Pow (Y`space M) \<rparr>
   527     \<lparr> space = Z`space M, sets = Pow (Z`space M) \<rparr>
   594     \<lparr> space = Z`space M, sets = Pow (Z`space M) \<rparr>
   528     X Y Z"
   595     X Y Z"
   529 
   596 
   530 lemma (in finite_information_space) setsum_distribution_gen:
   597 lemma (in finite_information_space) conditional_mutual_information_generic_eq:
   531   assumes "Z -` {c} \<inter> space M = (\<Union>x \<in> X`space M. Y -` {f x}) \<inter> space M"
   598   assumes MX: "finite_measure_space MX (distribution X)"
   532   and "inj_on f (X`space M)"
   599   assumes MY: "finite_measure_space MY (distribution Y)"
   533   shows "(\<Sum>x \<in> X`space M. distribution Y {f x}) = distribution Z {c}"
   600   assumes MZ: "finite_measure_space MZ (distribution Z)"
   534   unfolding distribution_def assms
   601   shows "conditional_mutual_information b MX MY MZ X Y Z =
   535   using finite_space assms
   602     (\<Sum>(x, y, z)\<in>space MX \<times> space MY \<times> space MZ.
   536   by (subst measure_finitely_additive'')
   603       real (joint_distribution X (\<lambda>x. (Y x, Z x)) {(x, y, z)}) *
   537      (auto simp add: disjoint_family_on_def sets_eq_Pow inj_on_def
   604       log b (real (joint_distribution X (\<lambda>x. (Y x, Z x)) {(x, y, z)}) /
   538       intro!: arg_cong[where f=prob])
   605                    (real (distribution X {x}) * real (joint_distribution Y Z {(y, z)})))) -
   539 
   606     (\<Sum>(x, y)\<in>space MX \<times> space MZ.
   540 lemma (in finite_information_space) setsum_distribution:
   607       real (joint_distribution X Z {(x, y)}) *
   541   "(\<Sum>x \<in> X`space M. joint_distribution X Y {(x, y)}) = distribution Y {y}"
   608       log b (real (joint_distribution X Z {(x, y)}) / (real (distribution X {x}) * real (distribution Z {y}))))"
   542   "(\<Sum>y \<in> Y`space M. joint_distribution X Y {(x, y)}) = distribution X {x}"
   609   using assms finite_measure_space_prod[OF MY MZ]
   543   "(\<Sum>x \<in> X`space M. joint_distribution X (\<lambda>x. (Y x, Z x)) {(x, y, z)}) = joint_distribution Y Z {(y, z)}"
   610   unfolding conditional_mutual_information_def
   544   "(\<Sum>y \<in> Y`space M. joint_distribution X (\<lambda>x. (Y x, Z x)) {(x, y, z)}) = joint_distribution X Z {(x, z)}"
   611   by (subst (1 2) mutual_information_generic_eq)
   545   "(\<Sum>z \<in> Z`space M. joint_distribution X (\<lambda>x. (Y x, Z x)) {(x, y, z)}) = joint_distribution X Y {(x, y)}"
   612      (simp_all add: setsum_cartesian_product' finite_measure_space.finite_prod_measure_space)
   546   by (auto intro!: inj_onI setsum_distribution_gen)
   613 
   547 
       
   548 lemma (in finite_information_space) setsum_real_distribution_gen:
       
   549   assumes "Z -` {c} \<inter> space M = (\<Union>x \<in> X`space M. Y -` {f x}) \<inter> space M"
       
   550   and "inj_on f (X`space M)"
       
   551   shows "(\<Sum>x \<in> X`space M. real (distribution Y {f x})) = real (distribution Z {c})"
       
   552   unfolding distribution_def assms
       
   553   using finite_space assms
       
   554   by (subst real_finite_measure_finite_Union[symmetric])
       
   555      (auto simp add: disjoint_family_on_def sets_eq_Pow inj_on_def
       
   556         intro!: arg_cong[where f=prob])
       
   557 
       
   558 lemma (in finite_information_space) setsum_real_distribution:
       
   559   "(\<Sum>x \<in> X`space M. real (joint_distribution X Y {(x, y)})) = real (distribution Y {y})"
       
   560   "(\<Sum>y \<in> Y`space M. real (joint_distribution X Y {(x, y)})) = real (distribution X {x})"
       
   561   "(\<Sum>x \<in> X`space M. real (joint_distribution X (\<lambda>x. (Y x, Z x)) {(x, y, z)})) = real (joint_distribution Y Z {(y, z)})"
       
   562   "(\<Sum>y \<in> Y`space M. real (joint_distribution X (\<lambda>x. (Y x, Z x)) {(x, y, z)})) = real (joint_distribution X Z {(x, z)})"
       
   563   "(\<Sum>z \<in> Z`space M. real (joint_distribution X (\<lambda>x. (Y x, Z x)) {(x, y, z)})) = real (joint_distribution X Y {(x, y)})"
       
   564   by (auto intro!: inj_onI setsum_real_distribution_gen)
       
   565 
       
   566 lemma (in finite_information_space) conditional_mutual_information_eq_sum:
       
   567    "\<I>(X ; Y | Z) =
       
   568      (\<Sum>(x, y, z)\<in>X ` space M \<times> (\<lambda>x. (Y x, Z x)) ` space M.
       
   569              real (distribution (\<lambda>x. (X x, Y x, Z x)) {(x, y, z)}) *
       
   570              log b (real (distribution (\<lambda>x. (X x, Y x, Z x)) {(x, y, z)})/
       
   571         real (distribution (\<lambda>x. (Y x, Z x)) {(y, z)}))) -
       
   572      (\<Sum>(x, z)\<in>X ` space M \<times> Z ` space M.
       
   573         real (distribution (\<lambda>x. (X x, Z x)) {(x,z)}) * log b (real (distribution (\<lambda>x. (X x, Z x)) {(x,z)}) / real (distribution Z {z})))"
       
   574   (is "_ = ?rhs")
       
   575 proof -
       
   576   have setsum_product:
       
   577     "\<And>f x. (\<Sum>v\<in>(\<lambda>x. (Y x, Z x)) ` space M. real (joint_distribution X (\<lambda>x. (Y x, Z x)) {(x,v)}) * f v)
       
   578       = (\<Sum>v\<in>Y ` space M \<times> Z ` space M. real (joint_distribution X (\<lambda>x. (Y x, Z x)) {(x,v)}) * f v)"
       
   579   proof (safe intro!: setsum_mono_zero_cong_left imageI)
       
   580     fix x y z f
       
   581     assume *: "(Y y, Z z) \<notin> (\<lambda>x. (Y x, Z x)) ` space M" and "y \<in> space M" "z \<in> space M"
       
   582     hence "(\<lambda>x. (X x, Y x, Z x)) -` {(x, Y y, Z z)} \<inter> space M = {}"
       
   583     proof safe
       
   584       fix x' assume x': "x' \<in> space M" and eq: "Y x' = Y y" "Z x' = Z z"
       
   585       have "(Y y, Z z) \<in> (\<lambda>x. (Y x, Z x)) ` space M" using eq[symmetric] x' by auto
       
   586       thus "x' \<in> {}" using * by auto
       
   587     qed
       
   588     thus "real (joint_distribution X (\<lambda>x. (Y x, Z x)) {(x, Y y, Z z)}) * f (Y y) (Z z) = 0"
       
   589       unfolding distribution_def by simp
       
   590   qed (simp add: finite_space)
       
   591 
       
   592   thus ?thesis
       
   593     unfolding conditional_mutual_information_def Let_def mutual_information_eq
       
   594     by (subst mutual_information_eq_generic)
       
   595        (auto simp: prod_measure_space_def sigma_prod_sets_finite finite_space sigma_def
       
   596         finite_prob_space_of_images finite_product_prob_space_of_images
       
   597         setsum_cartesian_product' setsum_product setsum_subtractf setsum_addf
       
   598         setsum_left_distrib[symmetric] setsum_real_distribution
       
   599       cong: setsum_cong)
       
   600 qed
       
   601 
   614 
   602 lemma (in finite_information_space) conditional_mutual_information_eq:
   615 lemma (in finite_information_space) conditional_mutual_information_eq:
   603   "\<I>(X ; Y | Z) = (\<Sum>(x, y, z) \<in> X ` space M \<times> Y ` space M \<times> Z ` space M.
   616   "\<I>(X ; Y | Z) = (\<Sum>(x, y, z) \<in> X ` space M \<times> Y ` space M \<times> Z ` space M.
   604              real (distribution (\<lambda>x. (X x, Y x, Z x)) {(x, y, z)}) *
   617              real (distribution (\<lambda>x. (X x, Y x, Z x)) {(x, y, z)}) *
   605              log b (real (distribution (\<lambda>x. (X x, Y x, Z x)) {(x, y, z)}) /
   618              log b (real (distribution (\<lambda>x. (X x, Y x, Z x)) {(x, y, z)}) /
   606     (real (joint_distribution X Z {(x, z)}) * real (joint_distribution Y Z {(y,z)} / distribution Z {z}))))"
   619     (real (joint_distribution X Z {(x, z)}) * real (joint_distribution Y Z {(y,z)} / distribution Z {z}))))"
   607   unfolding conditional_mutual_information_def Let_def mutual_information_eq
   620   by (subst conditional_mutual_information_generic_eq)
   608   by (subst mutual_information_eq_generic)
       
   609      (auto simp add: prod_measure_space_def sigma_prod_sets_finite finite_space
   621      (auto simp add: prod_measure_space_def sigma_prod_sets_finite finite_space
   610       finite_prob_space_of_images finite_product_prob_space_of_images sigma_def
   622       finite_measure_space finite_product_prob_space_of_images sigma_def
   611       setsum_cartesian_product' setsum_product setsum_subtractf setsum_addf
   623       setsum_cartesian_product' setsum_product setsum_subtractf setsum_addf
   612       setsum_left_distrib[symmetric] setsum_real_distribution setsum_commute[where A="Y`space M"]
   624       setsum_left_distrib[symmetric] setsum_real_distribution setsum_commute[where A="Y`space M"]
   613       real_of_pinfreal_mult[symmetric]
   625       real_of_pinfreal_mult[symmetric]
   614     cong: setsum_cong)
   626     cong: setsum_cong)
   615 
   627 
   620 
   632 
   621   show ?thesis
   633   show ?thesis
   622     unfolding conditional_mutual_information_eq mutual_information_eq
   634     unfolding conditional_mutual_information_eq mutual_information_eq
   623     by (simp add: setsum_cartesian_product' distribution_remove_const)
   635     by (simp add: setsum_cartesian_product' distribution_remove_const)
   624 qed
   636 qed
   625 
       
   626 lemma (in finite_prob_space) distribution_finite:
       
   627   "distribution X A \<noteq> \<omega>"
       
   628   by (auto simp: sets_eq_Pow distribution_def intro!: measure_finite)
       
   629 
       
   630 lemma (in finite_prob_space) real_distribution_order:
       
   631   shows "r \<le> real (joint_distribution X Y {(x, y)}) \<Longrightarrow> r \<le> real (distribution X {x})"
       
   632   and "r \<le> real (joint_distribution X Y {(x, y)}) \<Longrightarrow> r \<le> real (distribution Y {y})"
       
   633   and "r < real (joint_distribution X Y {(x, y)}) \<Longrightarrow> r < real (distribution X {x})"
       
   634   and "r < real (joint_distribution X Y {(x, y)}) \<Longrightarrow> r < real (distribution Y {y})"
       
   635   and "distribution X {x} = 0 \<Longrightarrow> real (joint_distribution X Y {(x, y)}) = 0"
       
   636   and "distribution Y {y} = 0 \<Longrightarrow> real (joint_distribution X Y {(x, y)}) = 0"
       
   637   using real_of_pinfreal_mono[OF distribution_finite joint_distribution_restriction_fst, of X Y "{(x, y)}"]
       
   638   using real_of_pinfreal_mono[OF distribution_finite joint_distribution_restriction_snd, of X Y "{(x, y)}"]
       
   639   using real_pinfreal_nonneg[of "joint_distribution X Y {(x, y)}"]
       
   640   by auto
       
   641 
   637 
   642 lemma (in finite_information_space) conditional_mutual_information_positive:
   638 lemma (in finite_information_space) conditional_mutual_information_positive:
   643   "0 \<le> \<I>(X ; Y | Z)"
   639   "0 \<le> \<I>(X ; Y | Z)"
   644 proof -
   640 proof -
   645   let "?dXYZ A" = "real (distribution (\<lambda>x. (X x, Y x, Z x)) A)"
   641   let "?dXYZ A" = "real (distribution (\<lambda>x. (X x, Y x, Z x)) A)"
   680   finally show ?thesis
   676   finally show ?thesis
   681     unfolding conditional_mutual_information_eq sum_over_space_real_distribution
   677     unfolding conditional_mutual_information_eq sum_over_space_real_distribution
   682     by (simp add: real_of_pinfreal_mult[symmetric])
   678     by (simp add: real_of_pinfreal_mult[symmetric])
   683 qed
   679 qed
   684 
   680 
       
   681 subsection {* Conditional Entropy *}
       
   682 
   685 definition (in prob_space)
   683 definition (in prob_space)
   686   "conditional_entropy b S T X Y = conditional_mutual_information b S S T X X Y"
   684   "conditional_entropy b S T X Y = conditional_mutual_information b S S T X X Y"
   687 
   685 
   688 abbreviation (in finite_information_space)
   686 abbreviation (in finite_information_space)
   689   finite_conditional_entropy ("\<H>'(_ | _')") where
   687   finite_conditional_entropy ("\<H>'(_ | _')") where
   692     \<lparr> space = Y`space M, sets = Pow (Y`space M) \<rparr> X Y"
   690     \<lparr> space = Y`space M, sets = Pow (Y`space M) \<rparr> X Y"
   693 
   691 
   694 lemma (in finite_information_space) conditional_entropy_positive:
   692 lemma (in finite_information_space) conditional_entropy_positive:
   695   "0 \<le> \<H>(X | Y)" unfolding conditional_entropy_def using conditional_mutual_information_positive .
   693   "0 \<le> \<H>(X | Y)" unfolding conditional_entropy_def using conditional_mutual_information_positive .
   696 
   694 
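By the definition above, $\mathcal{H}(X \mid Z)$ is $\mathcal{I}(X ; X \mid Z)$; conditional_entropy_eq below reduces it to the familiar
\[
  \mathcal{H}(X \mid Z) \;=\; -\sum_{(x,z)} p_{XZ}(x,z)\,\log_b \frac{p_{XZ}(x,z)}{p_Z(z)},
\]
the sum ranging over X ` space M \<times> Z ` space M, with $p$ again abbreviating real-valued distribution values.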
       
   695 lemma (in finite_information_space) conditional_entropy_generic_eq:
       
   696   assumes MX: "finite_measure_space MX (distribution X)"
       
   697   assumes MY: "finite_measure_space MZ (distribution Z)"
       
   698   shows "conditional_entropy b MX MZ X Z =
       
   699      - (\<Sum>(x, z)\<in>space MX \<times> space MZ.
       
   700          real (joint_distribution X Z {(x, z)}) *
       
   701          log b (real (joint_distribution X Z {(x, z)}) / real (distribution Z {z})))"
       
   702   unfolding conditional_entropy_def using assms
       
   703   apply (simp add: conditional_mutual_information_generic_eq
       
   704                    setsum_cartesian_product' setsum_commute[of _ "space MZ"]
       
   705                    setsum_negf[symmetric] setsum_subtractf[symmetric])
       
   706 proof (safe intro!: setsum_cong, simp)
       
   707   fix z x assume "z \<in> space MZ" "x \<in> space MX"
       
   708   let "?XXZ x'" = "real (joint_distribution X (\<lambda>x. (X x, Z x)) {(x, x', z)})"
       
   709   let "?XZ x'" = "real (joint_distribution X Z {(x', z)})"
       
   710   let "?X" = "real (distribution X {x})"
       
   711   interpret MX: finite_measure_space MX "distribution X" by fact
       
   712   have *: "\<And>A. A = {} \<Longrightarrow> prob A = 0" by simp
       
   713   have XXZ: "\<And>x'. ?XXZ x' = (if x' = x then ?XZ x else 0)"
       
   714     by (auto simp: distribution_def intro!: arg_cong[where f=prob] *)
       
   715   have "(\<Sum>x'\<in>space MX. ?XXZ x' * log b (?XXZ x') - (?XXZ x' * log b ?X + ?XXZ x' * log b (?XZ x'))) =
       
   716     (\<Sum>x'\<in>{x}. ?XZ x' * log b (?XZ x') - (?XZ x' * log b ?X + ?XZ x' * log b (?XZ x')))"
       
   717     using `x \<in> space MX` MX.finite_space
       
   718     by (safe intro!: setsum_mono_zero_cong_right)
       
   719        (auto split: split_if_asm simp: XXZ)
       
   720   then show "(\<Sum>x'\<in>space MX. ?XXZ x' * log b (?XXZ x') - (?XXZ x' * log b ?X + ?XXZ x' * log b (?XZ x'))) +
       
   721       ?XZ x * log b ?X = 0" by simp
       
   722 qed
       
   723 
   697 lemma (in finite_information_space) conditional_entropy_eq:
   724 lemma (in finite_information_space) conditional_entropy_eq:
   698   "\<H>(X | Z) =
   725   "\<H>(X | Z) =
   699      - (\<Sum>(x, z)\<in>X ` space M \<times> Z ` space M.
   726      - (\<Sum>(x, z)\<in>X ` space M \<times> Z ` space M.
   700          real (joint_distribution X Z {(x, z)}) *
   727          real (joint_distribution X Z {(x, z)}) *
   701          log b (real (joint_distribution X Z {(x, z)}) / real (distribution Z {z})))"
   728          log b (real (joint_distribution X Z {(x, z)}) / real (distribution Z {z})))"
    702 proof -
    703   have *: "\<And>x y z. (\<lambda>x. (X x, X x, Z x)) -` {(x, y, z)} = (if x = y then (\<lambda>x. (X x, Z x)) -` {(x, z)} else {})" by auto
    704   show ?thesis
    705     unfolding conditional_mutual_information_eq_sum
    706       conditional_entropy_def distribution_def *
    707     by (auto intro!: setsum_0')
    708 qed
    729   by (simp add: finite_measure_space conditional_entropy_generic_eq)
    730 
    731 lemma (in finite_information_space) conditional_entropy_eq_ce_with_hypothesis:
    732   "\<H>(X | Y) =
    733     -(\<Sum>y\<in>Y`space M. real (distribution Y {y}) *
    734       (\<Sum>x\<in>X`space M. real (joint_distribution X Y {(x,y)}) / real (distribution Y {(y)}) *
    735               log b (real (joint_distribution X Y {(x,y)}) / real (distribution Y {(y)}))))"
       
   736   unfolding conditional_entropy_eq neg_equal_iff_equal
       
   737   apply (simp add: setsum_commute[of _ "Y`space M"] setsum_cartesian_product' setsum_divide_distrib[symmetric])
       
   738   apply (safe intro!: setsum_cong)
       
   739   using real_distribution_order'[of Y _ X _]
       
   740   by (auto simp add: setsum_subtractf[of _ _ "X`space M"])
       
   741 
       
   742 lemma (in finite_information_space) conditional_entropy_eq_cartesian_sum:
       
   743   "\<H>(X | Y) = -(\<Sum>x\<in>X`space M. \<Sum>y\<in>Y`space M.
       
   744     real (joint_distribution X Y {(x,y)}) *
       
   745     log b (real (joint_distribution X Y {(x,y)}) / real (distribution Y {y})))"
       
   746 proof -
       
   747   { fix x assume "x\<notin>(\<lambda>x. (X x, Y x))`space M"
       
   748     then have "(\<lambda>x. (X x, Y x)) -` {x} \<inter> space M = {}" by auto
       
   749     then have "joint_distribution X Y {x} = 0"
       
   750       unfolding distribution_def by auto }
       
   751   then show ?thesis using finite_space
       
   752     unfolding conditional_entropy_eq neg_equal_iff_equal setsum_cartesian_product
       
   753     by (auto intro!: setsum_mono_zero_cong_left)
       
   754 qed
       
   755 
       
   756 subsection {* Equalities *}
   709 
   757 
   710 lemma (in finite_information_space) mutual_information_eq_entropy_conditional_entropy:
   758 lemma (in finite_information_space) mutual_information_eq_entropy_conditional_entropy:
   711   "\<I>(X ; Z) = \<H>(X) - \<H>(X | Z)"
   759   "\<I>(X ; Z) = \<H>(X) - \<H>(X | Z)"
   712   unfolding mutual_information_eq entropy_eq conditional_entropy_eq
   760   unfolding mutual_information_eq entropy_eq conditional_entropy_eq
   713   using finite_space
   761   using finite_space
   720   have "\<I>(X ; Z) = \<H>(X) - \<H>(X | Z)" using mutual_information_eq_entropy_conditional_entropy .
   768   have "\<I>(X ; Z) = \<H>(X) - \<H>(X | Z)" using mutual_information_eq_entropy_conditional_entropy .
   721   with mutual_information_positive[of X Z] entropy_positive[of X]
   769   with mutual_information_positive[of X Z] entropy_positive[of X]
   722   show ?thesis by auto
   770   show ?thesis by auto
   723 qed
   771 qed
   724 
   772 
    773 lemma (in finite_information_space) entropy_chain_rule:
    774   "\<H>(\<lambda>x. (X x, Y x)) = \<H>(X) + \<H>(Y|X)"
    775   unfolding entropy_eq[of X] entropy_eq_cartesian_sum conditional_entropy_eq_cartesian_sum
    776   unfolding setsum_commute[of _ "X`space M"] setsum_negf[symmetric] setsum_addf[symmetric]
    777   by (rule setsum_cong)
    778      (simp_all add: setsum_negf setsum_addf setsum_subtractf setsum_real_distribution
    779                     setsum_left_distrib[symmetric] joint_distribution_commute[of X Y])
    780 
    781 section {* Partitioning *}
    725 (* -------------Entropy of a RV with a certain event is zero---------------- *)
    726 
    727 lemma (in finite_information_space) finite_entropy_certainty_eq_0:
    728   assumes "x \<in> X ` space M" and "distribution X {x} = 1"
    729   shows "\<H>(X) = 0"
    730 proof -
    731   interpret X: finite_prob_space "\<lparr> space = X ` space M, sets = Pow (X ` space M) \<rparr>" "distribution X"
    732     by (rule finite_prob_space_of_images)
    733 
   734   have "distribution X (X ` space M - {x}) = distribution X (X ` space M) - distribution X {x}"
       
   735     using X.measure_compl[of "{x}"] assms by auto
       
   736   also have "\<dots> = 0" using X.prob_space assms by auto
       
   737   finally have X0: "distribution X (X ` space M - {x}) = 0" by auto
       
   738 
       
   739   { fix y assume asm: "y \<noteq> x" "y \<in> X ` space M"
       
   740     hence "{y} \<subseteq> X ` space M - {x}" by auto
       
   741     from X.measure_mono[OF this] X0 asm
       
   742     have "distribution X {y} = 0" by auto }
       
   743 
       
   744   hence fi: "\<And> y. y \<in> X ` space M \<Longrightarrow> real (distribution X {y}) = (if x = y then 1 else 0)"
       
   745     using assms by auto
       
   746 
       
   747   have y: "\<And>y. (if x = y then 1 else 0) * log b (if x = y then 1 else 0) = 0" by simp
       
   748 
       
   749   show ?thesis unfolding entropy_eq by (auto simp: y fi)
       
   750 qed
       
   751 (* --------------- upper bound on entropy for a rv ------------------------- *)
       
   752 
       
   753 lemma (in finite_prob_space) distribution_1:
       
   754   "distribution X A \<le> 1"
       
   755   unfolding distribution_def measure_space_1[symmetric]
       
   756   by (auto intro!: measure_mono simp: sets_eq_Pow)
       
   757 
       
   758 lemma (in finite_prob_space) real_distribution_1:
       
   759   "real (distribution X A) \<le> 1"
       
   760   unfolding real_pinfreal_1[symmetric]
       
   761   by (rule real_of_pinfreal_mono[OF _ distribution_1]) simp
       
   762 
       
   763 lemma (in finite_information_space) finite_entropy_le_card:
       
   764   "\<H>(X) \<le> log b (real (card (X ` space M \<inter> {x . distribution X {x} \<noteq> 0})))"
       
   765 proof -
       
   766   let "?d x" = "distribution X {x}"
       
   767   let "?p x" = "real (?d x)"
       
   768   have "\<H>(X) = (\<Sum>x\<in>X`space M. ?p x * log b (1 / ?p x))"
       
   769     by (auto intro!: setsum_cong simp: entropy_eq setsum_negf[symmetric])
       
   770   also have "\<dots> \<le> log b (\<Sum>x\<in>X`space M. ?p x * (1 / ?p x))"
       
   771     apply (rule log_setsum')
       
   772     using not_empty b_gt_1 finite_space sum_over_space_real_distribution
       
   773     by auto
       
   774   also have "\<dots> = log b (\<Sum>x\<in>X`space M. if ?d x \<noteq> 0 then 1 else 0)"
       
   775     apply (rule arg_cong[where f="\<lambda>f. log b (\<Sum>x\<in>X`space M. f x)"])
       
   776     using distribution_finite[of X] by (auto simp: expand_fun_eq real_of_pinfreal_eq_0)
       
   777   finally show ?thesis
       
   778     using finite_space by (auto simp: setsum_cases real_eq_of_nat)
       
   779 qed
       
   780 
       
   781 (* --------------- entropy is maximal for a uniform rv --------------------- *)
       
   782 
       
   783 lemma (in finite_prob_space) uniform_prob:
       
   784   assumes "x \<in> space M"
       
   785   assumes "\<And> x y. \<lbrakk>x \<in> space M ; y \<in> space M\<rbrakk> \<Longrightarrow> prob {x} = prob {y}"
       
   786   shows "prob {x} = 1 / real (card (space M))"
       
   787 proof -
       
   788   have prob_x: "\<And> y. y \<in> space M \<Longrightarrow> prob {y} = prob {x}"
       
   789     using assms(2)[OF _ `x \<in> space M`] by blast
       
   790   have "1 = prob (space M)"
       
   791     using prob_space by auto
       
   792   also have "\<dots> = (\<Sum> x \<in> space M. prob {x})"
       
   793     using real_finite_measure_finite_Union[of "space M" "\<lambda> x. {x}", simplified]
       
   794       sets_eq_Pow inj_singleton[unfolded inj_on_def, rule_format]
       
   795       finite_space unfolding disjoint_family_on_def  prob_space[symmetric]
       
   796     by (auto simp add:setsum_restrict_set)
       
   797   also have "\<dots> = (\<Sum> y \<in> space M. prob {x})"
       
   798     using prob_x by auto
       
   799   also have "\<dots> = real_of_nat (card (space M)) * prob {x}" by simp
       
   800   finally have one: "1 = real (card (space M)) * prob {x}"
       
   801     using real_eq_of_nat by auto
       
   802   hence two: "real (card (space M)) \<noteq> 0" by fastsimp 
       
   803   from one have three: "prob {x} \<noteq> 0" by fastsimp
       
   804   thus ?thesis using one two three divide_cancel_right
       
   805     by (auto simp:field_simps)
       
   806 qed
       
   807 
       
   808 lemma (in finite_information_space) finite_entropy_uniform_max:
       
   809   assumes "\<And>x y. \<lbrakk> x \<in> X ` space M ; y \<in> X ` space M \<rbrakk> \<Longrightarrow> distribution X {x} = distribution X {y}"
       
   810   shows "\<H>(X) = log b (real (card (X ` space M)))"
       
   811 proof -
       
   812   note uniform =
       
   813     finite_prob_space_of_images[of X, THEN finite_prob_space.uniform_prob, simplified]
       
   814 
       
   815   have card_gt0: "0 < card (X ` space M)" unfolding card_gt_0_iff
       
   816     using finite_space not_empty by auto
       
   817 
       
   818   { fix x assume "x \<in> X ` space M"
       
   819     hence "real (distribution X {x}) = 1 / real (card (X ` space M))"
       
   820     proof (rule uniform)
       
   821       fix x y assume "x \<in> X`space M" "y \<in> X`space M"
       
   822       from assms[OF this] show "real (distribution X {x}) = real (distribution X {y})" by simp
       
   823     qed }
       
   824   thus ?thesis
       
   825     using not_empty finite_space b_gt_1 card_gt0
       
   826     by (simp add: entropy_eq real_eq_of_nat[symmetric] log_divide)
       
   827 qed
       
   828 
   782 
   829 definition "subvimage A f g \<longleftrightarrow> (\<forall>x \<in> A. f -` {f x} \<inter> A \<subseteq> g -` {g x} \<inter> A)"
   783 definition "subvimage A f g \<longleftrightarrow> (\<forall>x \<in> A. f -` {f x} \<inter> A \<subseteq> g -` {g x} \<inter> A)"
   830 
   784 
   831 lemma subvimageI:
   785 lemma subvimageI:
   832   assumes "\<And>x y. \<lbrakk> x \<in> A ; y \<in> A ; f x = f y \<rbrakk> \<Longrightarrow> g x = g y"
   786   assumes "\<And>x y. \<lbrakk> x \<in> A ; y \<in> A ; f x = f y \<rbrakk> \<Longrightarrow> g x = g y"
   974 
   928 
   975 corollary (in finite_information_space) entropy_data_processing:
   929 corollary (in finite_information_space) entropy_data_processing:
   976   "\<H>(f \<circ> X) \<le> \<H>(X)"
   930   "\<H>(f \<circ> X) \<le> \<H>(X)"
   977   by (subst (2) entropy_partition[of _ "f \<circ> X"]) (auto intro: conditional_entropy_positive)
   931   by (subst (2) entropy_partition[of _ "f \<circ> X"]) (auto intro: conditional_entropy_positive)
   978 
   932 
   979 lemma (in prob_space) distribution_cong:
       
   980   assumes "\<And>x. x \<in> space M \<Longrightarrow> X x = Y x"
       
   981   shows "distribution X = distribution Y"
       
   982   unfolding distribution_def expand_fun_eq
       
   983   using assms by (auto intro!: arg_cong[where f="\<mu>"])
       
   984 
       
   985 lemma (in prob_space) joint_distribution_cong:
       
   986   assumes "\<And>x. x \<in> space M \<Longrightarrow> X x = X' x"
       
   987   assumes "\<And>x. x \<in> space M \<Longrightarrow> Y x = Y' x"
       
   988   shows "joint_distribution X Y = joint_distribution X' Y'"
       
   989   unfolding distribution_def expand_fun_eq
       
   990   using assms by (auto intro!: arg_cong[where f="\<mu>"])
       
   991 
       
   992 lemma image_cong:
       
   993   "\<lbrakk> \<And>x. x \<in> S \<Longrightarrow> X x = X' x \<rbrakk> \<Longrightarrow> X ` S = X' ` S"
       
   994   by (auto intro!: image_eqI)
       
   995 
       
   996 lemma (in finite_information_space) mutual_information_cong:
       
   997   assumes X: "\<And>x. x \<in> space M \<Longrightarrow> X x = X' x"
       
   998   assumes Y: "\<And>x. x \<in> space M \<Longrightarrow> Y x = Y' x"
       
   999   shows "\<I>(X ; Y) = \<I>(X' ; Y')"
       
  1000 proof -
       
  1001   have "X ` space M = X' ` space M" using X by (rule image_cong)
       
  1002   moreover have "Y ` space M = Y' ` space M" using Y by (rule image_cong)
       
  1003   ultimately show ?thesis
       
  1004   unfolding mutual_information_eq
       
  1005     using
       
  1006       assms[THEN distribution_cong]
       
  1007       joint_distribution_cong[OF assms]
       
  1008     by (auto intro!: setsum_cong)
       
  1009 qed
       
  1010 
       
  1011 corollary (in finite_information_space) entropy_of_inj:
   933 corollary (in finite_information_space) entropy_of_inj:
  1012   assumes "inj_on f (X`space M)"
   934   assumes "inj_on f (X`space M)"
  1013   shows "\<H>(f \<circ> X) = \<H>(X)"
   935   shows "\<H>(f \<circ> X) = \<H>(X)"
  1014 proof (rule antisym)
   936 proof (rule antisym)
  1015   show "\<H>(f \<circ> X) \<le> \<H>(X)" using entropy_data_processing .
   937   show "\<H>(f \<circ> X) \<le> \<H>(X)" using entropy_data_processing .