the Rules chapter and theories
author paulson
Mon, 23 Oct 2000 16:25:04 +0200
changeset 10295 8eb12693cead
parent 10294 2ec9c808a8a7
child 10296 0c5907082459
the Rules chapter and theories
doc-src/TutorialI/Rules/Basic.thy
doc-src/TutorialI/Rules/Blast.thy
doc-src/TutorialI/Rules/Force.thy
doc-src/TutorialI/Rules/Primes.thy
doc-src/TutorialI/Rules/rules.tex
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/doc-src/TutorialI/Rules/Basic.thy	Mon Oct 23 16:25:04 2000 +0200
     1.3 @@ -0,0 +1,290 @@
     1.4 +theory Basic = Main:
     1.5 +
     1.6 +lemma conj_rule: "\<lbrakk> P; Q \<rbrakk> \<Longrightarrow> P \<and> (Q \<and> P)"
     1.7 +apply (rule conjI)
     1.8 + apply assumption
     1.9 +apply (rule conjI)
    1.10 + apply assumption
    1.11 +apply assumption
    1.12 +done
    1.13 +    
    1.14 +
    1.15 +lemma disj_swap: "P | Q \<Longrightarrow> Q | P"
    1.16 +apply (erule disjE)
    1.17 + apply (rule disjI2)
    1.18 + apply assumption
    1.19 +apply (rule disjI1)
    1.20 +apply assumption
    1.21 +done
    1.22 +
    1.23 +lemma conj_swap: "P \<and> Q \<Longrightarrow> Q \<and> P"
    1.24 +apply (rule conjI)
    1.25 + apply (drule conjunct2)
    1.26 + apply assumption
    1.27 +apply (drule conjunct1)
    1.28 +apply assumption
    1.29 +done
    1.30 +
    1.31 +lemma imp_uncurry: "P \<longrightarrow> Q \<longrightarrow> R \<Longrightarrow> P \<and> Q \<longrightarrow> R"
    1.32 +apply (rule impI)
    1.33 +apply (erule conjE)
    1.34 +apply (drule mp)
    1.35 + apply assumption
    1.36 +apply (drule mp)
    1.37 +  apply assumption
    1.38 + apply assumption
    1.39 +done
    1.40 +
    1.41 +text {*
    1.42 +substitution
    1.43 +
    1.44 +@{thm[display] ssubst}
    1.45 +\rulename{ssubst}
    1.46 +*};
    1.47 +
    1.48 +lemma "\<lbrakk> x = f x; P(f x) \<rbrakk> \<Longrightarrow> P x"
    1.49 +apply (erule ssubst)
    1.50 +apply assumption
    1.51 +done
    1.52 +
    1.53 +text {*
    1.54 +also provable by simp (re-orients)
    1.55 +*};
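+
+(*A one-step variant, as noted above (a sketch; simp should re-orient x = f x
+  and then finish by assumption):
+lemma "\<lbrakk> x = f x; P(f x) \<rbrakk> \<Longrightarrow> P x"
+by simp
+*)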
    1.56 +
    1.57 +lemma "\<lbrakk> x = f x; P (f x) (f x) x \<rbrakk> \<Longrightarrow> P x x x"
    1.58 +apply (erule ssubst)
    1.59 +back
    1.60 +back
    1.61 +back
    1.62 +back
    1.63 +apply assumption
    1.64 +done
    1.65 +
    1.66 +text {*
    1.67 +proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ \isadigit{1}\isanewline
    1.68 +\isanewline
    1.69 +goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
    1.70 +{\isasymlbrakk}x\ {\isacharequal}\ f\ x{\isacharsemicolon}\ P\ {\isacharparenleft}f\ x{\isacharparenright}\ {\isacharparenleft}f\ x{\isacharparenright}\ x{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ x\ x\ x\isanewline
    1.71 +\ \isadigit{1}{\isachardot}\ P\ {\isacharparenleft}f\ x{\isacharparenright}\ {\isacharparenleft}f\ x{\isacharparenright}\ x\ {\isasymLongrightarrow}\ P\ {\isacharparenleft}f\ x{\isacharparenright}\ {\isacharparenleft}f\ x{\isacharparenright}\ {\isacharparenleft}f\ x{\isacharparenright}
    1.72 +
    1.73 +proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ \isadigit{1}\isanewline
    1.74 +\isanewline
    1.75 +goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
    1.76 +{\isasymlbrakk}x\ {\isacharequal}\ f\ x{\isacharsemicolon}\ P\ {\isacharparenleft}f\ x{\isacharparenright}\ {\isacharparenleft}f\ x{\isacharparenright}\ x{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ x\ x\ x\isanewline
    1.77 +\ \isadigit{1}{\isachardot}\ P\ {\isacharparenleft}f\ x{\isacharparenright}\ {\isacharparenleft}f\ x{\isacharparenright}\ x\ {\isasymLongrightarrow}\ P\ x\ {\isacharparenleft}f\ x{\isacharparenright}\ {\isacharparenleft}f\ x{\isacharparenright}
    1.78 +
    1.79 +proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ \isadigit{1}\isanewline
    1.80 +\isanewline
    1.81 +goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
    1.82 +{\isasymlbrakk}x\ {\isacharequal}\ f\ x{\isacharsemicolon}\ P\ {\isacharparenleft}f\ x{\isacharparenright}\ {\isacharparenleft}f\ x{\isacharparenright}\ x{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ x\ x\ x\isanewline
    1.83 +\ \isadigit{1}{\isachardot}\ P\ {\isacharparenleft}f\ x{\isacharparenright}\ {\isacharparenleft}f\ x{\isacharparenright}\ x\ {\isasymLongrightarrow}\ P\ {\isacharparenleft}f\ x{\isacharparenright}\ x\ {\isacharparenleft}f\ x{\isacharparenright}
    1.84 +
    1.85 +proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ \isadigit{1}\isanewline
    1.86 +\isanewline
    1.87 +goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
    1.88 +{\isasymlbrakk}x\ {\isacharequal}\ f\ x{\isacharsemicolon}\ P\ {\isacharparenleft}f\ x{\isacharparenright}\ {\isacharparenleft}f\ x{\isacharparenright}\ x{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ x\ x\ x\isanewline
    1.89 +\ \isadigit{1}{\isachardot}\ P\ {\isacharparenleft}f\ x{\isacharparenright}\ {\isacharparenleft}f\ x{\isacharparenright}\ x\ {\isasymLongrightarrow}\ P\ x\ x\ {\isacharparenleft}f\ x{\isacharparenright}
    1.90 +
    1.91 +proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ \isadigit{1}\isanewline
    1.92 +\isanewline
    1.93 +goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
    1.94 +{\isasymlbrakk}x\ {\isacharequal}\ f\ x{\isacharsemicolon}\ P\ {\isacharparenleft}f\ x{\isacharparenright}\ {\isacharparenleft}f\ x{\isacharparenright}\ x{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ x\ x\ x\isanewline
    1.95 +\ \isadigit{1}{\isachardot}\ P\ {\isacharparenleft}f\ x{\isacharparenright}\ {\isacharparenleft}f\ x{\isacharparenright}\ x\ {\isasymLongrightarrow}\ P\ {\isacharparenleft}f\ x{\isacharparenright}\ {\isacharparenleft}f\ x{\isacharparenright}\ x
    1.96 +*};
    1.97 +
    1.98 +lemma "\<lbrakk> x = f x; P (f x) (f x) x \<rbrakk> \<Longrightarrow> P x x x"
    1.99 +apply (erule ssubst, assumption)
   1.100 +done
   1.101 +
   1.102 +lemma "\<lbrakk> x = f x; P (f x) (f x) x \<rbrakk> \<Longrightarrow> P x x x"
   1.103 +apply (erule_tac P="\<lambda>u. P u u x" in ssubst);
   1.104 +apply assumption
   1.105 +done
   1.106 +
   1.107 +
   1.108 +text {*
   1.109 +negation
   1.110 +
   1.111 +@{thm[display] notI}
   1.112 +\rulename{notI}
   1.113 +
   1.114 +@{thm[display] notE}
   1.115 +\rulename{notE}
   1.116 +
   1.117 +@{thm[display] classical}
   1.118 +\rulename{classical}
   1.119 +
   1.120 +@{thm[display] contrapos_pp}
   1.121 +\rulename{contrapos_pp}
   1.122 +
   1.123 +@{thm[display] contrapos_np}
   1.124 +\rulename{contrapos_np}
   1.125 +
   1.126 +@{thm[display] contrapos_nn}
   1.127 +\rulename{contrapos_nn}
   1.128 +*};
   1.129 +
   1.130 +
   1.131 +lemma "\<lbrakk>\<not>(P\<longrightarrow>Q); \<not>(R\<longrightarrow>Q)\<rbrakk> \<Longrightarrow> R"
   1.132 +apply (erule_tac Q="R\<longrightarrow>Q" in contrapos_np)
   1.133 +txt{*
   1.134 +proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ {\isadigit{1}}\isanewline
   1.135 +\isanewline
   1.136 +goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
   1.137 +{\isasymlbrakk}{\isasymnot}\ {\isacharparenleft}P\ {\isasymlongrightarrow}\ Q{\isacharparenright}{\isacharsemicolon}\ {\isasymnot}\ {\isacharparenleft}R\ {\isasymlongrightarrow}\ Q{\isacharparenright}{\isasymrbrakk}\ {\isasymLongrightarrow}\ R\isanewline
   1.138 +\ {\isadigit{1}}{\isachardot}\ {\isasymlbrakk}{\isasymnot}\ {\isacharparenleft}P\ {\isasymlongrightarrow}\ Q{\isacharparenright}{\isacharsemicolon}\ {\isasymnot}\ R{\isasymrbrakk}\ {\isasymLongrightarrow}\ R\ {\isasymlongrightarrow}\ Q
   1.139 +*}
   1.140 +
   1.141 +apply intro
   1.142 +txt{*
   1.143 +proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ {\isadigit{3}}\isanewline
   1.144 +\isanewline
   1.145 +goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
   1.146 +{\isasymlbrakk}{\isasymnot}\ {\isacharparenleft}P\ {\isasymlongrightarrow}\ Q{\isacharparenright}{\isacharsemicolon}\ {\isasymnot}\ {\isacharparenleft}R\ {\isasymlongrightarrow}\ Q{\isacharparenright}{\isasymrbrakk}\ {\isasymLongrightarrow}\ R\isanewline
   1.147 +\ {\isadigit{1}}{\isachardot}\ {\isasymlbrakk}{\isasymnot}\ {\isacharparenleft}P\ {\isasymlongrightarrow}\ Q{\isacharparenright}{\isacharsemicolon}\ {\isasymnot}\ R{\isacharsemicolon}\ R{\isasymrbrakk}\ {\isasymLongrightarrow}\ Q
   1.148 +*}
   1.149 +apply (erule notE, assumption)
   1.150 +done
   1.151 +
   1.152 +
   1.153 +lemma "(P \<or> Q) \<and> R \<Longrightarrow> P \<or> Q \<and> R"
   1.154 +apply intro
   1.155 +txt{*
   1.156 +proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ {\isadigit{1}}\isanewline
   1.157 +\isanewline
   1.158 +goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
   1.159 +{\isacharparenleft}P\ {\isasymor}\ Q{\isacharparenright}\ {\isasymand}\ R\ {\isasymLongrightarrow}\ P\ {\isasymor}\ Q\ {\isasymand}\ R\isanewline
   1.160 +\ {\isadigit{1}}{\isachardot}\ {\isasymlbrakk}{\isacharparenleft}P\ {\isasymor}\ Q{\isacharparenright}\ {\isasymand}\ R{\isacharsemicolon}\ {\isasymnot}\ {\isacharparenleft}Q\ {\isasymand}\ R{\isacharparenright}{\isasymrbrakk}\ {\isasymLongrightarrow}\ P
   1.161 +*}
   1.162 +
   1.163 +apply (elim conjE disjE)
   1.164 + apply assumption
   1.165 +
   1.166 +txt{*
   1.167 +proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ {\isadigit{4}}\isanewline
   1.168 +\isanewline
   1.169 +goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
   1.170 +{\isacharparenleft}P\ {\isasymor}\ Q{\isacharparenright}\ {\isasymand}\ R\ {\isasymLongrightarrow}\ P\ {\isasymor}\ Q\ {\isasymand}\ R\isanewline
   1.171 +\ {\isadigit{1}}{\isachardot}\ {\isasymlbrakk}{\isasymnot}\ {\isacharparenleft}Q\ {\isasymand}\ R{\isacharparenright}{\isacharsemicolon}\ R{\isacharsemicolon}\ Q{\isasymrbrakk}\ {\isasymLongrightarrow}\ P
   1.172 +*}
   1.173 +
   1.174 +apply (erule contrapos_np, rule conjI)
   1.175 +txt{*
   1.176 +proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ {\isadigit{6}}\isanewline
   1.177 +\isanewline
   1.178 +goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
   1.179 +{\isacharparenleft}P\ {\isasymor}\ Q{\isacharparenright}\ {\isasymand}\ R\ {\isasymLongrightarrow}\ P\ {\isasymor}\ Q\ {\isasymand}\ R\isanewline
   1.180 +\ {\isadigit{1}}{\isachardot}\ {\isasymlbrakk}R{\isacharsemicolon}\ Q{\isacharsemicolon}\ {\isasymnot}\ P{\isasymrbrakk}\ {\isasymLongrightarrow}\ Q\isanewline
   1.181 +\ {\isadigit{2}}{\isachardot}\ {\isasymlbrakk}R{\isacharsemicolon}\ Q{\isacharsemicolon}\ {\isasymnot}\ P{\isasymrbrakk}\ {\isasymLongrightarrow}\ R
   1.182 +*}
   1.183 +
   1.184 +  apply assumption
   1.185 + apply assumption
   1.186 +done
   1.187 +
   1.188 +
   1.189 +
   1.190 +text{*Quantifiers*}
   1.191 +
   1.192 +lemma "\<forall>x. P x \<longrightarrow> P x"
   1.193 +apply (rule allI)
   1.194 +apply (rule impI)
   1.195 +apply assumption
   1.196 +done
   1.197 +
   1.198 +lemma "(\<forall>x. P \<longrightarrow> Q x) \<Longrightarrow> P \<longrightarrow> (\<forall>x. Q x)"
   1.199 +apply (rule impI)
   1.200 +apply (rule allI)
   1.201 +apply (drule spec)
   1.202 +apply (drule mp)
   1.203 +  apply assumption
   1.204 + apply assumption
   1.205 +done
   1.206 +
   1.207 +lemma "\<lbrakk>\<forall>x. P x \<longrightarrow> P (f x); P a\<rbrakk> \<Longrightarrow> P(f (f a))"
   1.208 +apply (frule spec)
   1.209 +apply (drule mp, assumption)
   1.210 +apply (drule spec)
   1.211 +apply (drule mp, assumption, assumption)
   1.212 +done
   1.213 +
   1.214 +text
   1.215 +{*
   1.216 +proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ {\isadigit{1}}\isanewline
   1.217 +\isanewline
   1.218 +goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
   1.219 +{\isasymlbrakk}{\isasymforall}x{\isachardot}\ P\ x\ {\isasymlongrightarrow}\ P\ {\isacharparenleft}f\ x{\isacharparenright}{\isacharsemicolon}\ P\ a{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ {\isacharparenleft}f\ {\isacharparenleft}f\ a{\isacharparenright}{\isacharparenright}\isanewline
   1.220 +\ {\isadigit{1}}{\isachardot}\ {\isasymlbrakk}{\isasymforall}x{\isachardot}\ P\ x\ {\isasymlongrightarrow}\ P\ {\isacharparenleft}f\ x{\isacharparenright}{\isacharsemicolon}\ P\ a{\isacharsemicolon}\ P\ {\isacharquery}x\ {\isasymlongrightarrow}\ P\ {\isacharparenleft}f\ {\isacharquery}x{\isacharparenright}{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ {\isacharparenleft}f\ {\isacharparenleft}f\ a{\isacharparenright}{\isacharparenright}
   1.221 +*}
   1.222 +
   1.223 +text{*
   1.224 +proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ {\isadigit{3}}\isanewline
   1.225 +\isanewline
   1.226 +goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
   1.227 +{\isasymlbrakk}{\isasymforall}x{\isachardot}\ P\ x\ {\isasymlongrightarrow}\ P\ {\isacharparenleft}f\ x{\isacharparenright}{\isacharsemicolon}\ P\ a{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ {\isacharparenleft}f\ {\isacharparenleft}f\ a{\isacharparenright}{\isacharparenright}\isanewline
   1.228 +\ {\isadigit{1}}{\isachardot}\ {\isasymlbrakk}{\isasymforall}x{\isachardot}\ P\ x\ {\isasymlongrightarrow}\ P\ {\isacharparenleft}f\ x{\isacharparenright}{\isacharsemicolon}\ P\ a{\isacharsemicolon}\ P\ {\isacharparenleft}f\ a{\isacharparenright}{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ {\isacharparenleft}f\ {\isacharparenleft}f\ a{\isacharparenright}{\isacharparenright}
   1.229 +*}
   1.230 +
   1.231 +lemma "\<lbrakk>\<forall>x. P x \<longrightarrow> P (f x); P a\<rbrakk> \<Longrightarrow> P(f (f a))"
   1.232 +by blast
   1.233 +
   1.234 +lemma "(\<exists>x. P x) \<or> (\<exists>x. Q x) \<Longrightarrow> \<exists>x. P x \<or> Q x"
   1.235 +apply elim
   1.236 + apply intro
   1.237 + apply assumption
   1.238 +apply (intro exI disjI2) (*or else we get disjCI*)
   1.239 +apply assumption
   1.240 +done
   1.241 +
   1.242 +lemma "(P\<longrightarrow>Q) \<or> (Q\<longrightarrow>P)"
   1.243 +apply intro
   1.244 +apply elim
   1.245 +apply assumption
   1.246 +done
   1.247 +
   1.248 +lemma "(P\<or>Q)\<and>(P\<or>R) \<Longrightarrow> P \<or> (Q\<and>R)"
   1.249 +apply intro
   1.250 +apply (elim conjE disjE)
   1.251 +apply blast
   1.252 +apply blast
   1.253 +apply blast
   1.254 +apply blast
   1.255 +(*apply elim*)
   1.256 +done
   1.257 +
   1.258 +lemma "(\<exists>x. P \<and> Q x) \<Longrightarrow> P \<and> (\<exists>x. Q x)"
   1.259 +apply (erule exE)
   1.260 +apply (erule conjE)
   1.261 +apply (rule conjI)
   1.262 + apply assumption
   1.263 +apply (rule exI)
   1.264 + apply assumption
   1.265 +done
   1.266 +
   1.267 +lemma "(\<exists>x. P x) \<and> (\<exists>x. Q x) \<Longrightarrow> \<exists>x. P x \<and> Q x"
   1.268 +apply (erule conjE)
   1.269 +apply (erule exE)
   1.270 +apply (erule exE)
   1.271 +apply (rule exI)
   1.272 +apply (rule conjI)
   1.273 + apply assumption
   1.274 +oops
   1.275 +
   1.276 +lemma "\<forall> z. R z z \<Longrightarrow> \<exists>x. \<forall> y. R x y"
   1.277 +apply (rule exI)
   1.278 +apply (rule allI)
   1.279 +apply (drule spec)
   1.280 +oops
   1.281 +
   1.282 +lemma "\<forall>x. \<exists> y. x=y"
   1.283 +apply (rule allI)
   1.284 +apply (rule exI)
   1.285 +apply (rule refl)
   1.286 +done
   1.287 +
   1.288 +lemma "\<exists>x. \<forall> y. x=y"
   1.289 +apply (rule exI)
   1.290 +apply (rule allI)
   1.291 +oops
   1.292 +
   1.293 +end
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/doc-src/TutorialI/Rules/Blast.thy	Mon Oct 23 16:25:04 2000 +0200
     2.3 @@ -0,0 +1,40 @@
     2.4 +theory Blast = Main:
     2.5 +
     2.6 +lemma "((\<exists>x. \<forall>y. p(x)=p(y)) = ((\<exists>x. q(x))=(\<forall>y. p(y))))   =    
     2.7 +       ((\<exists>x. \<forall>y. q(x)=q(y)) = ((\<exists>x. p(x))=(\<forall>y. q(y))))"
     2.8 +apply blast
     2.9 +done
    2.10 +
    2.11 +text{*\noindent Until now, we have proved everything using only induction and
    2.12 +simplification.  Substantial proofs require more elaborate types of
    2.13 +inference.*}
    2.14 +
    2.15 +lemma "(\<forall>x. honest(x) \<and> industrious(x) \<longrightarrow> healthy(x)) \<and>  
    2.16 +       \<not> (\<exists>x. grocer(x) \<and> healthy(x)) \<and> 
    2.17 +       (\<forall>x. industrious(x) \<and> grocer(x) \<longrightarrow> honest(x)) \<and> 
    2.18 +       (\<forall>x. cyclist(x) \<longrightarrow> industrious(x)) \<and> 
    2.19 +       (\<forall>x. \<not>healthy(x) \<and> cyclist(x) \<longrightarrow> \<not>honest(x))  
    2.20 +       \<longrightarrow> (\<forall>x. grocer(x) \<longrightarrow> \<not>cyclist(x))";
    2.21 +apply blast
    2.22 +done
    2.23 +
    2.24 +lemma "(\<Union>i\<in>I. A(i)) \<inter> (\<Union>j\<in>J. B(j)) =
    2.25 +        (\<Union>i\<in>I. \<Union>j\<in>J. A(i) \<inter> B(j))"
    2.26 +apply blast
    2.27 +done
    2.28 +
    2.29 +text {*
    2.30 +@{thm[display] mult_is_0}
     2.31 + \rulename{mult_is_0}
    2.32 +
    2.33 +@{thm[display] finite_Un}
     2.34 + \rulename{finite_Un}
    2.35 +*};
    2.36 +
    2.37 +
    2.38 +lemma [iff]: "(xs@ys = []) = (xs=[] & ys=[])"
    2.39 +  apply (induct_tac xs)
    2.40 +  by (simp_all);
    2.41 +
    2.42 +(*ideas for uses of intro, etc.: ex/Primes/is_gcd_unique?*)
    2.43 +end
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/doc-src/TutorialI/Rules/Force.thy	Mon Oct 23 16:25:04 2000 +0200
     3.3 @@ -0,0 +1,41 @@
     3.4 +theory Force = Main:
     3.5 +
     3.6 +
     3.7 +
     3.8 +lemma "(\<forall>x. P x) \<and> (\<exists>x. Q x) \<longrightarrow> (\<forall>x. P x \<and> Q x)"
     3.9 +apply clarify
    3.10 +oops
    3.11 +
    3.12 +text {*
    3.13 +proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ {\isadigit{1}}\isanewline
    3.14 +\isanewline
    3.15 +goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
    3.16 +{\isacharparenleft}{\isasymforall}x{\isachardot}\ P\ x{\isacharparenright}\ {\isasymand}\ {\isacharparenleft}{\isasymexists}x{\isachardot}\ Q\ x{\isacharparenright}\ {\isasymlongrightarrow}\ {\isacharparenleft}{\isasymforall}x{\isachardot}\ P\ x\ {\isasymand}\ Q\ x{\isacharparenright}\isanewline
    3.17 +\ {\isadigit{1}}{\isachardot}\ {\isasymAnd}x\ xa{\isachardot}\ {\isasymlbrakk}{\isasymforall}x{\isachardot}\ P\ x{\isacharsemicolon}\ Q\ xa{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ x\ {\isasymand}\ Q\ x
    3.18 +*};
    3.19 +
    3.20 +text {*
    3.21 +couldn't find a good example of clarsimp
    3.22 +
    3.23 +@{thm[display]"someI"}
    3.24 +\rulename{someI}
    3.25 +*};
    3.26 +
    3.27 +lemma "\<lbrakk>Q a; P a\<rbrakk> \<Longrightarrow> P (SOME x. P x \<and> Q x) \<and> Q (SOME x. P x \<and> Q x)"
    3.28 +apply (rule someI)
    3.29 +oops
    3.30 +
    3.31 +lemma "\<lbrakk>Q a; P a\<rbrakk> \<Longrightarrow> P (SOME x. P x \<and> Q x) \<and> Q (SOME x. P x \<and> Q x)"
    3.32 +apply (fast intro!: someI)
    3.33 +done
    3.34 +
    3.35 +text{*
    3.36 +proof\ {\isacharparenleft}prove{\isacharparenright}{\isacharcolon}\ step\ \isadigit{1}\isanewline
    3.37 +\isanewline
    3.38 +goal\ {\isacharparenleft}lemma{\isacharparenright}{\isacharcolon}\isanewline
    3.39 +{\isasymlbrakk}Q\ a{\isacharsemicolon}\ P\ a{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ {\isacharparenleft}SOME\ x{\isachardot}\ P\ x\ {\isasymand}\ Q\ x{\isacharparenright}\ {\isasymand}\ Q\ {\isacharparenleft}SOME\ x{\isachardot}\ P\ x\ {\isasymand}\ Q\ x{\isacharparenright}\isanewline
    3.40 +\ \isadigit{1}{\isachardot}\ {\isasymlbrakk}Q\ a{\isacharsemicolon}\ P\ a{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ {\isacharquery}x\ {\isasymand}\ Q\ {\isacharquery}x
    3.41 +*}
    3.42 +
    3.43 +end
    3.44 +
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/doc-src/TutorialI/Rules/Primes.thy	Mon Oct 23 16:25:04 2000 +0200
     4.3 @@ -0,0 +1,351 @@
     4.4 +(* EXTRACT from HOL/ex/Primes.thy*)
     4.5 +
     4.6 +theory Primes = Main:
     4.7 +consts
     4.8 +  gcd     :: "nat*nat=>nat"               (*Euclid's algorithm *)
     4.9 +
    4.10 +recdef gcd "measure ((\<lambda>(m,n).n) ::nat*nat=>nat)"
    4.11 +    "gcd (m, n) = (if n=0 then m else gcd(n, m mod n))"
    4.12 +
    4.13 +
    4.14 +ML "Pretty.setmargin 64"
    4.15 +ML "IsarOutput.indent := 5"  (*that is, Doc/TutorialI/settings.ML*)
    4.16 +
    4.17 +
    4.18 +text {*
    4.19 +\begin{quote}
    4.20 +@{thm[display]"dvd_def"}
    4.21 +\rulename{dvd_def}
    4.22 +\end{quote}
    4.23 +*};
    4.24 +
    4.25 +
    4.26 +(*** Euclid's Algorithm ***)
    4.27 +
    4.28 +lemma gcd_0 [simp]: "gcd(m,0) = m"
    4.29 +  apply (simp);
    4.30 +  done
    4.31 +
    4.32 +lemma gcd_non_0 [simp]: "0<n \<Longrightarrow> gcd(m,n) = gcd (n, m mod n)"
    4.33 +  apply (simp)
    4.34 +  done;
    4.35 +
    4.36 +declare gcd.simps [simp del];
    4.37 +
    4.38 +(*gcd(m,n) divides m and n.  The conjunctions don't seem provable separately*)
    4.39 +lemma gcd_dvd_both: "(gcd(m,n) dvd m) \<and> (gcd(m,n) dvd n)"
    4.40 +  apply (induct_tac m n rule: gcd.induct)
    4.41 +  apply (case_tac "n=0")
    4.42 +  apply (simp_all)
    4.43 +  apply (blast dest: dvd_mod_imp_dvd)
    4.44 +  done
    4.45 +
    4.46 +
    4.47 +text {*
    4.48 +@{thm[display] dvd_mod_imp_dvd}
    4.49 +\rulename{dvd_mod_imp_dvd}
    4.50 +
    4.51 +@{thm[display] dvd_trans}
    4.52 +\rulename{dvd_trans}
    4.53 +
    4.54 +\begin{isabelle}
    4.55 +proof\ (prove):\ step\ 3\isanewline
    4.56 +\isanewline
    4.57 +goal\ (lemma\ gcd_dvd_both):\isanewline
    4.58 +gcd\ (m,\ n)\ dvd\ m\ \isasymand \ gcd\ (m,\ n)\ dvd\ n\isanewline
    4.59 +\ 1.\ \isasymAnd m\ n.\ \isasymlbrakk 0\ <\ n;\ gcd\ (n,\ m\ mod\ n)\ dvd\ n\ \isasymand \ gcd\ (n,\ m\ mod\ n)\ dvd\ (m\ mod\ n)\isasymrbrakk \isanewline
    4.60 +\ \ \ \ \ \ \ \ \ \ \isasymLongrightarrow \ gcd\ (n,\ m\ mod\ n)\ dvd\ m
    4.61 +\end{isabelle}
    4.62 +*};
    4.63 +
    4.64 +
    4.65 +lemmas gcd_dvd1 [iff] = gcd_dvd_both [THEN conjunct1]
    4.66 +lemmas gcd_dvd2 [iff] = gcd_dvd_both [THEN conjunct2];
    4.67 +
    4.68 +
    4.69 +text {*
    4.70 +\begin{quote}
    4.71 +@{thm[display] gcd_dvd1}
    4.72 +\rulename{gcd_dvd1}
    4.73 +
    4.74 +@{thm[display] gcd_dvd2}
    4.75 +\rulename{gcd_dvd2}
    4.76 +\end{quote}
    4.77 +*};
    4.78 +
    4.79 +(*Maximality: for all m,n,k naturals, 
    4.80 +                if k divides m and k divides n then k divides gcd(m,n)*)
    4.81 +lemma gcd_greatest [rule_format]:
    4.82 +      "(k dvd m) \<longrightarrow> (k dvd n) \<longrightarrow> k dvd gcd(m,n)"
    4.83 +  apply (induct_tac m n rule: gcd.induct)
    4.84 +  apply (case_tac "n=0")
    4.85 +  apply (simp_all add: dvd_mod);
    4.86 +  done;
    4.87 +
    4.88 +theorem gcd_greatest_iff [iff]: 
    4.89 +        "k dvd gcd(m,n) = (k dvd m \<and> k dvd n)"
    4.90 +  apply (blast intro!: gcd_greatest intro: dvd_trans);
    4.91 +  done;
    4.92 +
    4.93 +
    4.94 +constdefs
    4.95 +  is_gcd  :: "[nat,nat,nat]=>bool"        (*gcd as a relation*)
    4.96 +    "is_gcd p m n == p dvd m  \<and>  p dvd n  \<and>
    4.97 +                     (ALL d. d dvd m \<and> d dvd n \<longrightarrow> d dvd p)"
    4.98 +
    4.99 +(*Function gcd yields the Greatest Common Divisor*)
   4.100 +lemma is_gcd: "is_gcd (gcd(m,n)) m n"
   4.101 +  apply (simp add: is_gcd_def gcd_greatest);
   4.102 +  done
   4.103 +
   4.104 +(*uniqueness of GCDs*)
   4.105 +lemma is_gcd_unique: "\<lbrakk> is_gcd m a b; is_gcd n a b \<rbrakk> \<Longrightarrow> m=n"
   4.106 +  apply (simp add: is_gcd_def);
   4.107 +  apply (blast intro: dvd_anti_sym)
   4.108 +  done
   4.109 +
   4.110 +
   4.111 +text {*
   4.112 +@{thm[display] dvd_anti_sym}
   4.113 +\rulename{dvd_anti_sym}
   4.114 +
   4.115 +\begin{isabelle}
   4.116 +proof\ (prove):\ step\ 1\isanewline
   4.117 +\isanewline
   4.118 +goal\ (lemma\ is_gcd_unique):\isanewline
   4.119 +\isasymlbrakk is_gcd\ m\ a\ b;\ is_gcd\ n\ a\ b\isasymrbrakk \ \isasymLongrightarrow \ m\ =\ n\isanewline
   4.120 +\ 1.\ \isasymlbrakk m\ dvd\ a\ \isasymand \ m\ dvd\ b\ \isasymand \ (\isasymforall d.\ d\ dvd\ a\ \isasymand \ d\ dvd\ b\ \isasymlongrightarrow \ d\ dvd\ m);\isanewline
   4.121 +\ \ \ \ \ \ \ n\ dvd\ a\ \isasymand \ n\ dvd\ b\ \isasymand \ (\isasymforall d.\ d\ dvd\ a\ \isasymand \ d\ dvd\ b\ \isasymlongrightarrow \ d\ dvd\ n)\isasymrbrakk \isanewline
   4.122 +\ \ \ \ \isasymLongrightarrow \ m\ =\ n
   4.123 +\end{isabelle}
   4.124 +*};
   4.125 +
   4.126 +lemma gcd_assoc: "gcd(gcd(k,m),n) = gcd(k,gcd(m,n))"
   4.127 +  apply (rule is_gcd_unique)
   4.128 +  apply (rule is_gcd)
   4.129 +  apply (simp add: is_gcd_def);
   4.130 +  apply (blast intro: dvd_trans);
   4.131 +  done 
   4.132 +
   4.133 +text{*
   4.134 +\begin{isabelle}
   4.135 +proof\ (prove):\ step\ 3\isanewline
   4.136 +\isanewline
   4.137 +goal\ (lemma\ gcd_assoc):\isanewline
   4.138 +gcd\ (gcd\ (k,\ m),\ n)\ =\ gcd\ (k,\ gcd\ (m,\ n))\isanewline
   4.139 +\ 1.\ gcd\ (k,\ gcd\ (m,\ n))\ dvd\ k\ \isasymand \isanewline
   4.140 +\ \ \ \ gcd\ (k,\ gcd\ (m,\ n))\ dvd\ m\ \isasymand \ gcd\ (k,\ gcd\ (m,\ n))\ dvd\ n
   4.141 +\end{isabelle}
   4.142 +*}
   4.143 +
   4.144 +
   4.145 +lemma gcd_dvd_gcd_mult: "gcd(m,n) dvd gcd(k*m, n)"
   4.146 +  apply (blast intro: dvd_trans);
   4.147 +  done
   4.148 +
   4.149 +(*This is half of the proof (by dvd_anti_sym) of*)
   4.150 +lemma gcd_mult_cancel: "gcd(k,n) = 1 \<Longrightarrow> gcd(k*m, n) = gcd(m,n)"
   4.151 +  oops
   4.152 +
   4.153 +
   4.154 +
   4.155 +text{*\noindent
   4.156 +Forward proof material: of, OF, THEN, simplify.
   4.157 +*}
   4.158 +
   4.159 +text{*\noindent
   4.160 +SKIP most developments...
   4.161 +*}
   4.162 +
   4.163 +(** Commutativity **)
   4.164 +
   4.165 +lemma is_gcd_commute: "is_gcd k m n = is_gcd k n m"
   4.166 +  apply (auto simp add: is_gcd_def);
   4.167 +  done
   4.168 +
   4.169 +lemma gcd_commute: "gcd(m,n) = gcd(n,m)"
   4.170 +  apply (rule is_gcd_unique)
   4.171 +  apply (rule is_gcd)
   4.172 +  apply (subst is_gcd_commute)
   4.173 +  apply (simp add: is_gcd)
   4.174 +  done
   4.175 +
   4.176 +lemma gcd_1 [simp]: "gcd(m,1) = 1"
   4.177 +  apply (simp)
   4.178 +  done
   4.179 +
   4.180 +lemma gcd_1_left [simp]: "gcd(1,m) = 1"
   4.181 +  apply (simp add: gcd_commute [of 1])
   4.182 +  done
   4.183 +
   4.184 +text{*\noindent
   4.185 +as far as HERE.
   4.186 +*}
   4.187 +
   4.188 +
   4.189 +text {*
   4.190 +@{thm[display] gcd_1}
   4.191 +\rulename{gcd_1}
   4.192 +
   4.193 +@{thm[display] gcd_1_left}
   4.194 +\rulename{gcd_1_left}
   4.195 +*};
   4.196 +
   4.197 +text{*\noindent
   4.198 +SKIP THIS PROOF
   4.199 +*}
   4.200 +
   4.201 +lemma gcd_mult_distrib2: "k * gcd(m,n) = gcd(k*m, k*n)"
   4.202 +  apply (induct_tac m n rule: gcd.induct)
   4.203 +  apply (case_tac "n=0")
   4.204 +  apply (simp)
   4.205 +  apply (case_tac "k=0")
   4.206 +  apply (simp_all add: mod_geq gcd_non_0 mod_mult_distrib2)
   4.207 +  done
   4.208 +
   4.209 +text {*
   4.210 +@{thm[display] gcd_mult_distrib2}
   4.211 +\rulename{gcd_mult_distrib2}
   4.212 +*};
   4.213 +
   4.214 +text{*\noindent
   4.215 +of, simplified
   4.216 +*}
   4.217 +
   4.218 +
   4.219 +lemmas gcd_mult_0 = gcd_mult_distrib2 [of k 1];
   4.220 +lemmas gcd_mult_1 = gcd_mult_0 [simplified];
   4.221 +
   4.222 +text {*
   4.223 +@{thm[display] gcd_mult_distrib2 [of _ 1]}
   4.224 +
   4.225 +@{thm[display] gcd_mult_0}
   4.226 +\rulename{gcd_mult_0}
   4.227 +
   4.228 +@{thm[display] gcd_mult_1}
   4.229 +\rulename{gcd_mult_1}
   4.230 +
   4.231 +@{thm[display] sym}
   4.232 +\rulename{sym}
   4.233 +*};
   4.234 +
   4.235 +lemmas gcd_mult = gcd_mult_1 [THEN sym];
   4.236 +
   4.237 +lemmas gcd_mult = gcd_mult_distrib2 [of k 1, simplified, THEN sym];
   4.238 +      (*better in one step!*)
   4.239 +
   4.240 +text {*
   4.241 +more legible
   4.242 +*};
   4.243 +
   4.244 +lemma gcd_mult [simp]: "gcd(k, k*n) = k"
   4.245 +  apply (rule gcd_mult_distrib2 [of k 1, simplified, THEN sym])
   4.246 +  done
   4.247 +
   4.248 +lemmas gcd_self = gcd_mult [of k 1, simplified];
   4.249 +
   4.250 +
   4.251 +text {*
   4.252 +Rules handy with THEN
   4.253 +
   4.254 +@{thm[display] iffD1}
   4.255 +\rulename{iffD1}
   4.256 +
   4.257 +@{thm[display] iffD2}
   4.258 +\rulename{iffD2}
   4.259 +*};
   4.260 +
   4.261 +
   4.262 +text {*
   4.263 +again: more legible
   4.264 +*};
   4.265 +
   4.266 +lemma gcd_self [simp]: "gcd(k,k) = k"
   4.267 +  apply (rule gcd_mult [of k 1, simplified])
   4.268 +  done
   4.269 +
   4.270 +lemma relprime_dvd_mult: 
   4.271 +      "\<lbrakk> gcd(k,n)=1; k dvd (m*n) \<rbrakk> \<Longrightarrow> k dvd m";
   4.272 +  apply (insert gcd_mult_distrib2 [of m k n])
   4.273 +  apply (simp)
   4.274 +  apply (erule_tac t="m" in ssubst);
   4.275 +  apply (simp)
   4.276 +  done
   4.277 +
   4.278 +
   4.279 +text {*
   4.280 +Another example of "insert"
   4.281 +
   4.282 +@{thm[display] mod_div_equality}
   4.283 +\rulename{mod_div_equality}
   4.284 +*};
   4.285 +
   4.286 +lemma div_mult_self_is_m: 
   4.287 +      "0<n \<Longrightarrow> (m*n) div n = (m::nat)"
   4.288 +  apply (insert mod_div_equality [of "m*n" n])
   4.289 +  apply (simp)
   4.290 +  done
   4.291 +
   4.292 +lemma relprime_dvd_mult_iff: "gcd(k,n)=1 \<Longrightarrow> k dvd (m*n) = k dvd m";
   4.293 +  apply (blast intro: relprime_dvd_mult dvd_trans)
   4.294 +  done
   4.295 +
   4.296 +lemma relprime_20_81: "gcd(#20,#81) = 1";
   4.297 +  apply (simp add: gcd.simps)
   4.298 +  done
   4.299 +
   4.300 +
   4.301 +text {*
   4.302 +Examples of 'OF'
   4.303 +
   4.304 +@{thm[display] relprime_dvd_mult}
   4.305 +\rulename{relprime_dvd_mult}
   4.306 +
   4.307 +@{thm[display] relprime_dvd_mult [OF relprime_20_81]}
   4.308 +
   4.309 +@{thm[display] dvd_refl}
   4.310 +\rulename{dvd_refl}
   4.311 +
   4.312 +@{thm[display] dvd_add}
   4.313 +\rulename{dvd_add}
   4.314 +
   4.315 +@{thm[display] dvd_add [OF dvd_refl dvd_refl]}
   4.316 +
   4.317 +@{thm[display] dvd_add [OF _ dvd_refl]}
   4.318 +*};
   4.319 +
   4.320 +lemma "\<lbrakk>(z::int) < #37; #66 < #2*z; z*z \<noteq> #1225; Q(#34); Q(#36)\<rbrakk> \<Longrightarrow> Q(z)";
   4.321 +apply (subgoal_tac "z = #34 \<or> z = #36")
   4.322 +apply blast
   4.323 +apply (subgoal_tac "z \<noteq> #35")
   4.324 +apply arith
   4.325 +apply force
   4.326 +done
   4.327 +
   4.328 +text
   4.329 +{*
   4.330 +proof\ (prove):\ step\ 1\isanewline
   4.331 +\isanewline
   4.332 +goal\ (lemma):\isanewline
   4.333 +\isasymlbrakk z\ <\ \#37;\ \#66\ <\ \#2\ *\ z;\ z\ *\ z\ \isasymnoteq \ \#1225;\ Q\ \#34;\ Q\ \#36\isasymrbrakk \ \isasymLongrightarrow \ Q\ z\isanewline
   4.334 +\ 1.\ \isasymlbrakk z\ <\ \#37;\ \#66\ <\ \#2\ *\ z;\ z\ *\ z\ \isasymnoteq \ \#1225;\ Q\ \#34;\ Q\ \#36;\isanewline
   4.335 +\ \ \ \ \ \ \ z\ =\ \#34\ \isasymor \ z\ =\ \#36\isasymrbrakk \isanewline
   4.336 +\ \ \ \ \isasymLongrightarrow \ Q\ z\isanewline
   4.337 +\ 2.\ \isasymlbrakk z\ <\ \#37;\ \#66\ <\ \#2\ *\ z;\ z\ *\ z\ \isasymnoteq \ \#1225;\ Q\ \#34;\ Q\ \#36\isasymrbrakk \isanewline
   4.338 +\ \ \ \ \isasymLongrightarrow \ z\ =\ \#34\ \isasymor \ z\ =\ \#36
   4.339 +
   4.340 +
   4.341 +
   4.342 +proof\ (prove):\ step\ 3\isanewline
   4.343 +\isanewline
   4.344 +goal\ (lemma):\isanewline
   4.345 +\isasymlbrakk z\ <\ \#37;\ \#66\ <\ \#2\ *\ z;\ z\ *\ z\ \isasymnoteq \ \#1225;\ Q\ \#34;\ Q\ \#36\isasymrbrakk \ \isasymLongrightarrow \ Q\ z\isanewline
   4.346 +\ 1.\ \isasymlbrakk z\ <\ \#37;\ \#66\ <\ \#2\ *\ z;\ z\ *\ z\ \isasymnoteq \ \#1225;\ Q\ \#34;\ Q\ \#36;\isanewline
   4.347 +\ \ \ \ \ \ \ z\ \isasymnoteq \ \#35\isasymrbrakk \isanewline
   4.348 +\ \ \ \ \isasymLongrightarrow \ z\ =\ \#34\ \isasymor \ z\ =\ \#36\isanewline
   4.349 +\ 2.\ \isasymlbrakk z\ <\ \#37;\ \#66\ <\ \#2\ *\ z;\ z\ *\ z\ \isasymnoteq \ \#1225;\ Q\ \#34;\ Q\ \#36\isasymrbrakk \isanewline
   4.350 +\ \ \ \ \isasymLongrightarrow \ z\ \isasymnoteq \ \#35
   4.351 +*}
   4.352 +
   4.353 +
   4.354 +end
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/doc-src/TutorialI/Rules/rules.tex	Mon Oct 23 16:25:04 2000 +0200
     5.3 @@ -0,0 +1,1871 @@
     5.4 +\chapter{The Rules of the Game}
     5.5 +\label{chap:rules}
     5.6 + 
     5.7 +Until now, we have proved everything using only induction and simplification.
     5.8 +Substantial proofs require more elaborate forms of inference.  This chapter
     5.9 +outlines the concepts and techniques that underlie reasoning in Isabelle. The examples
    5.10 +are mainly drawn from predicate logic.  The first examples in this
    5.11 +chapter will consist of detailed, low-level proof steps.  Later, we shall
    5.12 +see how to automate such reasoning using the methods \isa{blast},
    5.13 +\isa{auto} and others. 
    5.14 +
    5.15 +\section{Natural deduction}
    5.16 +
    5.17 +In Isabelle, proofs are constructed using inference rules. The 
    5.18 +most familiar inference rule is probably \emph{modus ponens}: 
    5.19 +\[ \infer{Q}{P\imp Q & P} \]
    5.20 +This rule says that from $P\imp Q$ and $P$  
    5.21 +we may infer~$Q$.  
    5.22 +
    5.23 +%Early logical formalisms had this  
    5.24 +%rule and at most one or two others, along with many complicated 
    5.25 +%axioms. Any desired theorem could be obtained by applying \emph{modus 
    5.26 +%ponens} or other rules to the axioms, but proofs were 
    5.27 +%hard to find. For example, a standard inference system has 
    5.28 +%these two axioms (amongst others): 
    5.29 +%\begin{gather*}
    5.30 +%  P\imp(Q\imp P) \tag{K}\\
    5.31 +%  (P\imp(Q\imp R))\imp ((P\imp Q)\imp(P\imp R))  \tag{S}
    5.32 +%\end{gather*}
    5.33 +%Try proving the trivial fact $P\imp P$ using these axioms and \emph{modus
    5.34 +%ponens}!
    5.35 +
    5.36 +\textbf{Natural deduction} is an attempt to formalize logic in a way 
    5.37 +that mirrors human reasoning patterns. 
    5.38 +%
    5.39 +%Instead of having a few 
    5.40 +%inference rules and many axioms, it has many inference rules 
    5.41 +%and few axioms. 
    5.42 +%
    5.43 +For each logical symbol (say, $\conj$), there 
    5.44 +are two kinds of rules: \textbf{introduction} and \textbf{elimination} rules. 
    5.45 +The introduction rules allow us to infer this symbol (say, to 
    5.46 +infer conjunctions). The elimination rules allow us to deduce 
    5.47 +consequences from this symbol. Ideally each rule should mention 
    5.48 +one symbol only.  For predicate logic this can be 
    5.49 +done, but when users define their own concepts they typically 
    5.50 +have to refer to other symbols as well.  It is best not be dogmatic.
    5.51 +
    5.52 +Natural deduction generally deserves its name.  It is easy to use.  Each
    5.53 +proof step consists of identifying the outermost symbol of a formula and
    5.54 +applying the corresponding rule.  It creates new subgoals in
    5.55 +an obvious way from parts of the chosen formula.  Expanding the
    5.56 +definitions of constants can blow up the goal enormously.  Deriving natural
    5.57 +deduction rules for such constants lets us reason in terms of their key
     5.58 +properties, which might otherwise be obscured by the technicalities of their
     5.59 +definitions.  Natural deduction rules also lend themselves to automation.
    5.60 +Isabelle's
    5.61 +\textbf{classical  reasoner} accepts any suitable  collection of natural deduction
    5.62 +rules and uses them to search for proofs automatically.  Isabelle is designed around
    5.63 +natural deduction and many of its  tools use the terminology of introduction and
    5.64 +elimination rules.
    5.65 +
    5.66 +
    5.67 +\section{Introduction rules}
    5.68 +
    5.69 +An \textbf{introduction} rule tells us when we can infer a formula 
    5.70 +containing a specific logical symbol. For example, the conjunction 
    5.71 +introduction rule says that if we have $P$ and if we have $Q$ then 
    5.72 +we have $P\conj Q$. In a mathematics text, it is typically shown 
    5.73 +like this:
    5.74 +\[  \infer{P\conj Q}{P & Q} \]
    5.75 +The rule introduces the conjunction
    5.76 +symbol~($\conj$) in its conclusion.  Of course, in Isabelle proofs we
    5.77 +mainly  reason backwards.  When we apply this rule, the subgoal already has
    5.78 +the form of a conjunction; the proof step makes this conjunction symbol
    5.79 +disappear. 
    5.80 +
    5.81 +In Isabelle notation, the rule looks like this:
    5.82 +\begin{isabelle}
    5.83 +\isasymlbrakk?P;\ ?Q\isasymrbrakk\ \isasymLongrightarrow\ ?P\ \isasymand\ ?Q\rulename{conjI}
    5.84 +\end{isabelle}
    5.85 +Carefully examine the syntax.  The premises appear to the
    5.86 +left of the arrow and the conclusion to the right.  The premises (if 
    5.87 +more than one) are grouped using the fat brackets.  The question marks
    5.88 +indicate \textbf{schematic variables} (also called \textbf{unknowns}): they may
    5.89 +be replaced by arbitrary formulas.  If we use the rule backwards, Isabelle
    5.90 +tries to unify the current subgoal with the conclusion of the rule, which
    5.91 +has the form \isa{?P\ \isasymand\ ?Q}.  (Unification is discussed below,
    5.92 +\S\ref{sec:unification}.)  If successful,
    5.93 +it yields new subgoals given by the formulas assigned to 
    5.94 +\isa{?P} and \isa{?Q}.
    5.95 +
    5.96 +The following trivial proof illustrates this point. 
    5.97 +\begin{isabelle}
    5.98 +\isacommand{lemma}\ conj_rule:\ "{\isasymlbrakk}P;\
    5.99 +Q\isasymrbrakk\ \isasymLongrightarrow\ P\ \isasymand\
   5.100 +(Q\ \isasymand\ P){"}\isanewline
   5.101 +\isacommand{apply}\ (rule\ conjI)\isanewline
   5.102 +\ \isacommand{apply}\ assumption\isanewline
   5.103 +\isacommand{apply}\ (rule\ conjI)\isanewline
   5.104 +\ \isacommand{apply}\ assumption\isanewline
   5.105 +\isacommand{apply}\ assumption
   5.106 +\end{isabelle}
   5.107 +At the start, Isabelle presents 
   5.108 +us with the assumptions (\isa{P} and~\isa{Q}) and with the goal to be proved,
   5.109 +\isa{P\ \isasymand\
   5.110 +(Q\ \isasymand\ P)}.  We are working backwards, so when we
   5.111 +apply conjunction introduction, the rule removes the outermost occurrence
   5.112 +of the \isa{\isasymand} symbol.  To apply a  rule to a subgoal, we apply
   5.113 +the proof method {\isa{rule}} --- here with {\isa{conjI}}, the  conjunction
   5.114 +introduction rule. 
   5.115 +\begin{isabelle}
   5.116 +%{\isasymlbrakk}P;\ Q\isasymrbrakk\ \isasymLongrightarrow\ P\ \isasymand\ Q\
   5.117 +%\isasymand\ P\isanewline
   5.118 +\ 1.\ {\isasymlbrakk}P;\ Q\isasymrbrakk\ \isasymLongrightarrow\ P\isanewline
   5.119 +\ 2.\ {\isasymlbrakk}P;\ Q\isasymrbrakk\ \isasymLongrightarrow\ Q\ \isasymand\ P
   5.120 +\end{isabelle}
   5.121 +Isabelle leaves two new subgoals: the two halves of the original conjunction. 
   5.122 +The first is simply \isa{P}, which is trivial, since \isa{P} is among 
   5.123 +the assumptions.  We can apply the {\isa{assumption}} 
   5.124 +method, which proves a subgoal by finding a matching assumption.
   5.125 +\begin{isabelle}
   5.126 +\ 1.\ {\isasymlbrakk}P;\ Q\isasymrbrakk\ \isasymLongrightarrow\ 
   5.127 +Q\ \isasymand\ P
   5.128 +\end{isabelle}
   5.129 +We are left with the subgoal of proving  
   5.130 +\isa{Q\ \isasymand\ P} from the assumptions \isa{P} and~\isa{Q}.  We apply
   5.131 +\isa{rule conjI} again. 
   5.132 +\begin{isabelle}
   5.133 +\ 1.\ {\isasymlbrakk}P;\ Q\isasymrbrakk\ \isasymLongrightarrow\ Q\isanewline
   5.134 +\ 2.\ {\isasymlbrakk}P;\ Q\isasymrbrakk\ \isasymLongrightarrow\ P
   5.135 +\end{isabelle}
   5.136 +We are left with two new subgoals, \isa{Q} and~\isa{P}, each of which can be proved
   5.137 +using the {\isa{assumption}} method. 
   5.138 +
   5.139 +
   5.140 +\section{Elimination rules}
   5.141 +
   5.142 +\textbf{Elimination} rules work in the opposite direction from introduction 
   5.143 +rules. In the case of conjunction, there are two such rules. 
    5.144 +From $P\conj Q$ we infer $P$.  Also, from $P\conj Q$
   5.145 +we infer $Q$:
   5.146 +\[ \infer{P}{P\conj Q} \qquad \infer{Q}{P\conj Q}  \]
   5.147 +
   5.148 +Now consider disjunction. There are two introduction rules, which resemble inverted forms of the
   5.149 +conjunction elimination rules:
   5.150 +\[ \infer{P\disj Q}{P} \qquad \infer{P\disj Q}{Q}  \]
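+For reference, the Isabelle versions of these four rules (all of which appear
+in the proofs below) are:
+\begin{isabelle}
+?P\ \isasymand\ ?Q\ \isasymLongrightarrow\ ?P\rulename{conjunct1}\isanewline
+?P\ \isasymand\ ?Q\ \isasymLongrightarrow\ ?Q\rulename{conjunct2}\isanewline
+?P\ \isasymLongrightarrow\ ?P\ \isasymor\ ?Q\rulename{disjI1}\isanewline
+?Q\ \isasymLongrightarrow\ ?P\ \isasymor\ ?Q\rulename{disjI2}
+\end{isabelle}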
   5.151 +
   5.152 +What is the disjunction elimination rule?  The situation is rather different from 
   5.153 +conjunction.  From $P\disj Q$ we cannot conclude  that $P$ is true and we
   5.154 +cannot conclude that $Q$ is true; there are no direct
   5.155 +elimination rules of the sort that we have seen for conjunction.  Instead,
   5.156 +there is an elimination  rule that works indirectly.  If we are trying  to prove
   5.157 +something else, say $R$, and we know that $P\disj Q$ holds,  then we have to consider
   5.158 +two cases.  We can assume that $P$ is true  and prove $R$ and then assume that $Q$ is
   5.159 +true and prove $R$ a second  time.  Here we see a fundamental concept used in natural
   5.160 +deduction:  that of the \textbf{assumptions}. We have to prove $R$ twice, under
   5.161 +different assumptions.  The assumptions are local to these subproofs and are visible 
   5.162 +nowhere else. 
   5.163 +
   5.164 +In a logic text, the disjunction elimination rule might be shown 
   5.165 +like this:
   5.166 +\[ \infer{R}{P\disj Q & \infer*{R}{[P]} & \infer*{R}{[Q]}} \]
   5.167 +The assumptions $[P]$ and $[Q]$ are bracketed 
   5.168 +to emphasize that they are local to their subproofs.  In Isabelle 
    5.169 +notation, the already-familiar \isa{\isasymLongrightarrow} syntax serves the
   5.170 +same  purpose:
   5.171 +\begin{isabelle}
   5.172 +\isasymlbrakk?P\ \isasymor\ ?Q;\ ?P\ \isasymLongrightarrow\ ?R;\ ?Q\ \isasymLongrightarrow\ ?R\isasymrbrakk\ \isasymLongrightarrow\ ?R\rulename{disjE}
   5.173 +\end{isabelle}
   5.174 +When we use this sort of elimination rule backwards, it produces 
    5.175 +a case split.  (We have seen this before, in proofs by induction.)  The following proof
   5.176 +illustrates the use of disjunction elimination.  
   5.177 +\begin{isabelle}
   5.178 +\isacommand{lemma}\ disj_swap:\ {"}P\ \isasymor\ Q\ 
   5.179 +\isasymLongrightarrow\ Q\ \isasymor\ P"\isanewline
   5.180 +\isacommand{apply}\ (erule\ disjE)\isanewline
   5.181 +\ \isacommand{apply}\ (rule\ disjI2)\isanewline
   5.182 +\ \isacommand{apply}\ assumption\isanewline
   5.183 +\isacommand{apply}\ (rule\ disjI1)\isanewline
   5.184 +\isacommand{apply}\ assumption
   5.185 +\end{isabelle}
   5.186 +We assume \isa{P\ \isasymor\ Q} and
   5.187 +must prove \isa{Q\ \isasymor\ P}\@.  Our first step uses the disjunction
   5.188 +elimination rule, \isa{disjE}.  The method {\isa{erule}}  applies an
   5.189 +elimination rule to the assumptions, searching for one that matches the
   5.190 +rule's first premise.  Deleting that assumption, it
    5.191 +returns the subgoals for the remaining premises.  Most of the
    5.192 +time, this is the best way to use elimination rules; only rarely is there
   5.193 +any  point in keeping the assumption.
   5.194 +
   5.195 +\begin{isabelle}
   5.196 +%P\ \isasymor\ Q\ \isasymLongrightarrow\ Q\ \isasymor\ P\isanewline
   5.197 +\ 1.\ P\ \isasymLongrightarrow\ Q\ \isasymor\ P\isanewline
   5.198 +\ 2.\ Q\ \isasymLongrightarrow\ Q\ \isasymor\ P
   5.199 +\end{isabelle}
   5.200 +Here it leaves us with two subgoals.  The first assumes \isa{P} and the 
   5.201 +second assumes \isa{Q}.  Tackling the first subgoal, we need to 
   5.202 +show \isa{Q\ \isasymor\ P}\@.  The second introduction rule (\isa{disjI2})
   5.203 +can reduce this  to \isa{P}, which matches the assumption. So, we apply the
   5.204 +{\isa{rule}}  method with \isa{disjI2} \ldots
   5.205 +\begin{isabelle}
   5.206 +\ 1.\ P\ \isasymLongrightarrow\ P\isanewline
   5.207 +\ 2.\ Q\ \isasymLongrightarrow\ Q\ \isasymor\ P
   5.208 +\end{isabelle}
   5.209 +\ldots and finish off with the {\isa{assumption}} 
   5.210 +method.  We are left with the other subgoal, which 
   5.211 +assumes \isa{Q}.  
   5.212 +\begin{isabelle}
   5.213 +\ 1.\ Q\ \isasymLongrightarrow\ Q\ \isasymor\ P
   5.214 +\end{isabelle}
   5.215 +Its proof is similar, using the introduction 
   5.216 +rule \isa{disjI1}. 
   5.217 +
   5.218 +The result of this proof is a new inference rule \isa{disj_swap}, which is neither 
   5.219 +an introduction nor an elimination rule, but which might 
   5.220 +be useful.  We can use it to replace any goal of the form $Q\disj P$
    5.221 +by one of the form $P\disj Q$.
   5.222 +
   5.223 +
   5.224 +
   5.225 +\section{Destruction rules: some examples}
   5.226 +
   5.227 +Now let us examine the analogous proof for conjunction. 
   5.228 +\begin{isabelle}
   5.229 +\isacommand{lemma}\ conj_swap:\ {"}P\ \isasymand\ Q\ \isasymLongrightarrow\ Q\ \isasymand\ P"\isanewline
   5.230 +\isacommand{apply}\ (rule\ conjI)\isanewline
   5.231 +\ \isacommand{apply}\ (drule\ conjunct2)\isanewline
   5.232 +\ \isacommand{apply}\ assumption\isanewline
   5.233 +\isacommand{apply}\ (drule\ conjunct1)\isanewline
   5.234 +\isacommand{apply}\ assumption
   5.235 +\end{isabelle}
   5.236 +Recall that the conjunction elimination rules --- whose Isabelle names are 
   5.237 +\isa{conjunct1} and \isa{conjunct2} --- simply return the first or second half
   5.238 +of a conjunction.  Rules of this sort (where the conclusion is a subformula of a
   5.239 +premise) are called \textbf{destruction} rules, by analogy with the destructor
    5.240 +functions of functional programming.%
   5.241 +\footnote{This Isabelle terminology has no counterpart in standard logic texts, 
   5.242 +although the distinction between the two forms of elimination rule is well known. 
   5.243 +Girard \cite[page 74]{girard89}, for example, writes ``The elimination rules are very
   5.244 +bad.  What is catastrophic about them is the parasitic presence of a formula [$R$]
   5.245 +which has no structural link with the formula which is eliminated.''}
   5.246 +
   5.247 +The first proof step applies conjunction introduction, leaving 
   5.248 +two subgoals: 
   5.249 +\begin{isabelle}
   5.250 +%P\ \isasymand\ Q\ \isasymLongrightarrow\ Q\ \isasymand\ P\isanewline
   5.251 +\ 1.\ P\ \isasymand\ Q\ \isasymLongrightarrow\ Q\isanewline
   5.252 +\ 2.\ P\ \isasymand\ Q\ \isasymLongrightarrow\ P
   5.253 +\end{isabelle}
   5.254 +
   5.255 +To invoke the elimination rule, we apply a new method, \isa{drule}. 
   5.256 +Think of the \isa{d} as standing for \textbf{destruction} (or \textbf{direct}, if
   5.257 +you prefer).   Applying the 
   5.258 +second conjunction rule using \isa{drule} replaces the assumption 
   5.259 +\isa{P\ \isasymand\ Q} by \isa{Q}. 
   5.260 +\begin{isabelle}
   5.261 +\ 1.\ Q\ \isasymLongrightarrow\ Q\isanewline
   5.262 +\ 2.\ P\ \isasymand\ Q\ \isasymLongrightarrow\ P
   5.263 +\end{isabelle}
   5.264 +The resulting subgoal can be proved by applying \isa{assumption}.
   5.265 +The other subgoal is similarly proved, using the \isa{conjunct1} rule and the 
   5.266 +\isa{assumption} method.
   5.267 +
   5.268 +Choosing among the methods \isa{rule}, \isa{erule} and \isa{drule} is up to 
   5.269 +you.  Isabelle does not attempt to work out whether a rule 
   5.270 +is an introduction rule or an elimination rule.  The 
   5.271 +method determines how the rule will be interpreted. Many rules 
   5.272 +can be used in more than one way.  For example, \isa{disj_swap} can 
   5.273 +be applied to assumptions as well as to goals; it replaces any
   5.274 +assumption of the form
    5.275 +$P\disj Q$ by one of the form $Q\disj P$.
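+For instance (a small illustration, not part of the proof scripts above),
+\isa{drule} can apply \isa{disj_swap} to such an assumption:
+\begin{isabelle}
+\isacommand{lemma}\ "R\ \isasymor\ S\ \isasymLongrightarrow\ S\ \isasymor\ R"\isanewline
+\isacommand{apply}\ (drule\ disj_swap)\isanewline
+\isacommand{apply}\ assumption\isanewline
+\isacommand{done}
+\end{isabelle}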
   5.276 +
   5.277 +Destruction rules are simpler in form than indirect rules such as \isa{disjE},
   5.278 +but they can be inconvenient.  Each of the conjunction rules discards half 
   5.279 +of the formula, when usually we want to take both parts of the conjunction as new
   5.280 +assumptions.  The easiest way to do so is by using an 
   5.281 +alternative conjunction elimination rule that resembles \isa{disjE}.  It is seldom,
   5.282 +if ever, seen in logic books.  In Isabelle syntax it looks like this: 
   5.283 +\begin{isabelle}
   5.284 +\isasymlbrakk?P\ \isasymand\ ?Q;\ \isasymlbrakk?P;\ ?Q\isasymrbrakk\ \isasymLongrightarrow\ ?R\isasymrbrakk\ \isasymLongrightarrow\ ?R\rulename{conjE}
   5.285 +\end{isabelle}
   5.286 +
   5.287 +\begin{exercise}
   5.288 +Use the rule {\isa{conjE}} to shorten the proof above. 
   5.289 +\end{exercise}
   5.290 +
   5.291 +
   5.292 +\section{Implication}
   5.293 +
   5.294 +At the start of this chapter, we saw the rule \textit{modus ponens}.  It is, in fact,
   5.295 +a destruction rule. The matching introduction rule looks like this 
   5.296 +in Isabelle: 
   5.297 +\begin{isabelle}
   5.298 +(?P\ \isasymLongrightarrow\ ?Q)\ \isasymLongrightarrow\ ?P\
   5.299 +\isasymlongrightarrow\ ?Q\rulename{impI}
   5.300 +\end{isabelle}
   5.301 +And this is \textit{modus ponens}:
   5.302 +\begin{isabelle}
   5.303 +\isasymlbrakk?P\ \isasymlongrightarrow\ ?Q;\ ?P\isasymrbrakk\
   5.304 +\isasymLongrightarrow\ ?Q
   5.305 +\rulename{mp}
   5.306 +\end{isabelle}
   5.307 +
   5.308 +Here is a proof using the rules for implication.  This 
   5.309 +lemma performs a sort of uncurrying, replacing the two antecedents 
   5.310 +of a nested implication by a conjunction. 
   5.311 +\begin{isabelle}
   5.312 +\isacommand{lemma}\ imp_uncurry:\
   5.313 +{"}P\ \isasymlongrightarrow\ (Q\
   5.314 +\isasymlongrightarrow\ R)\ \isasymLongrightarrow\ P\
   5.315 +\isasymand\ Q\ \isasymlongrightarrow\
   5.316 +R"\isanewline
   5.317 +\isacommand{apply}\ (rule\ impI)\isanewline
   5.318 +\isacommand{apply}\ (erule\ conjE)\isanewline
   5.319 +\isacommand{apply}\ (drule\ mp)\isanewline
   5.320 +\ \isacommand{apply}\ assumption\isanewline
   5.321 +\isacommand{apply}\ (drule\ mp)\isanewline
   5.322 +\ \ \isacommand{apply}\ assumption\isanewline
   5.323 +\ \isacommand{apply}\ assumption
   5.324 +\end{isabelle}
   5.325 +First, we state the lemma and apply implication introduction (\isa{rule impI}), 
   5.326 +which moves the conjunction to the assumptions. 
   5.327 +\begin{isabelle}
   5.328 +%P\ \isasymlongrightarrow\ Q\ \isasymlongrightarrow\ R\ \isasymLongrightarrow\ P\
   5.329 +%\isasymand\ Q\ \isasymlongrightarrow\ R\isanewline
   5.330 +\ 1.\ {\isasymlbrakk}P\ \isasymlongrightarrow\ Q\ \isasymlongrightarrow\ R;\ P\ \isasymand\ Q\isasymrbrakk\ \isasymLongrightarrow\ R
   5.331 +\end{isabelle}
   5.332 +Next, we apply conjunction elimination (\isa{erule conjE}), which splits this
   5.333 +conjunction into two  parts. 
   5.334 +\begin{isabelle}
   5.335 +\ 1.\ {\isasymlbrakk}P\ \isasymlongrightarrow\ Q\ \isasymlongrightarrow\ R;\ P;\
   5.336 +Q\isasymrbrakk\ \isasymLongrightarrow\ R
   5.337 +\end{isabelle}
   5.338 +Now, we work on the assumption \isa{P\ \isasymlongrightarrow\ (Q\
   5.339 +\isasymlongrightarrow\ R)}, where the parentheses have been inserted for
   5.340 +clarity.  The nested implication requires two applications of
   5.341 +\textit{modus ponens}: \isa{drule mp}.  The first use  yields the
   5.342 +implication \isa{Q\
   5.343 +\isasymlongrightarrow\ R}, but first we must prove the extra subgoal 
   5.344 +\isa{P}, which we do by assumption. 
   5.345 +\begin{isabelle}
   5.346 +\ 1.\ {\isasymlbrakk}P;\ Q\isasymrbrakk\ \isasymLongrightarrow\ P\isanewline
   5.347 +\ 2.\ {\isasymlbrakk}P;\ Q;\ Q\ \isasymlongrightarrow\ R\isasymrbrakk\ \isasymLongrightarrow\ R
   5.348 +\end{isabelle}
   5.349 +Repeating these steps for \isa{Q\
   5.350 +\isasymlongrightarrow\ R} yields the conclusion we seek, namely~\isa{R}.
   5.351 +\begin{isabelle}
   5.352 +\ 1.\ {\isasymlbrakk}P;\ Q;\ Q\ \isasymlongrightarrow\ R\isasymrbrakk\
   5.353 +\isasymLongrightarrow\ R
   5.354 +\end{isabelle}
   5.355 +
   5.356 +The symbols \isa{\isasymLongrightarrow} and \isa{\isasymlongrightarrow}
   5.357 +both stand for implication, but they differ in many respects.  Isabelle
   5.358 +uses \isa{\isasymLongrightarrow} to express inference rules; the symbol is
   5.359 +built-in and Isabelle's inference mechanisms treat it specially.  On the
   5.360 +other hand, \isa{\isasymlongrightarrow} is just one of the many connectives
   5.361 +available in higher-order logic.  We reason about it using inference rules
   5.362 +such as \isa{impI} and \isa{mp}, just as we reason about the other
   5.363 +connectives.  You will have to use \isa{\isasymlongrightarrow} in any
   5.364 +context that requires a formula of higher-order logic.  Use
   5.365 +\isa{\isasymLongrightarrow} to separate a theorem's preconditions from its
   5.366 +conclusion.  
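+For example (a restatement for illustration, not a lemma from the theory files),
+the fact proved as \isa{conj_rule} above can be phrased with either symbol:
+\begin{isabelle}
+{\isasymlbrakk}P;\ Q\isasymrbrakk\ \isasymLongrightarrow\ P\ \isasymand\ (Q\ \isasymand\ P)\isanewline
+P\ \isasymlongrightarrow\ Q\ \isasymlongrightarrow\ P\ \isasymand\ (Q\ \isasymand\ P)
+\end{isabelle}
+The first form is how the theorem should be stated; the second is an ordinary
+formula of higher-order logic.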
   5.367 +
    5.368 +When using induction, the desired theorem often results in an induction
    5.369 +hypothesis that is too weak.  In such cases you may have to invent a more
    5.370 +complicated induction formula, typically involving
    5.371 +\isa{\isasymlongrightarrow} and \isa{\isasymforall}.  From this lemma you
    5.372 +derive the desired theorem, typically involving
   5.373 +\isa{\isasymLongrightarrow}.  We shall see an example below,
   5.374 +\S\ref{sec:proving-euclid}.
   5.375 +
   5.376 +
   5.377 +
   5.378 +\remark{negation: notI, notE, ccontr, swap, contrapos?}
   5.379 +
   5.380 +
   5.381 +\section{Unification and substitution}\label{sec:unification}
   5.382 +
   5.383 +As we have seen, Isabelle rules involve variables that begin  with a
   5.384 +question mark. These are called \textbf{schematic} variables  and act as
   5.385 +placeholders for terms. \textbf{Unification} refers to  the process of
   5.386 +making two terms identical, possibly by replacing  their variables by
   5.387 +terms. The simplest case is when the two terms  are already the same. Next
    5.388 +simplest is when the variables in only one of the terms
   5.389 + are replaced; this is called \textbf{pattern-matching}.  The
   5.390 +{\isa{rule}} method typically  matches the rule's conclusion
    5.391 +against the current subgoal.  In the most complex case, variables in both
    5.392 +terms are replaced; the {\isa{rule}} method can do this if the goal
   5.393 +itself contains schematic variables.  Other occurrences of the variables in
   5.394 +the rule or proof state are updated at the same time.
   5.395 +
   5.396 +Schematic variables in goals are sometimes called \textbf{unknowns}.  They
   5.397 +are useful because they let us proceed with a proof even  when we do not
   5.398 +know what certain terms should be --- as when the goal is $\exists x.\,P$. 
   5.399 +They can be  filled in later, often automatically. 
   5.400 +
   5.401 + Unification is well known to Prolog programmers. Isabelle uses \textbf{higher-order} 
   5.402 +unification, which is unification in the
   5.403 +typed $\lambda$-calculus.  The general case is
   5.404 +undecidable, but for our purposes, the differences from ordinary
   5.405 +unification are straightforward.  It handles bound  variables
   5.406 +correctly, avoiding capture.  The two terms \isa{{\isasymlambda}x.\ ?P} and
   5.407 +\isa{{\isasymlambda}x.\ t x}  are not unifiable; replacing \isa{?P} by
   5.408 +\isa{t x} is forbidden because the free occurrence of~\isa{x} would become
   5.409 +bound.  The two terms
   5.410 +\isa{{\isasymlambda}x.\ f(x,z)} and \isa{{\isasymlambda}y.\ f(y,z)} are
   5.411 +trivially unifiable because they differ only by a bound variable renaming.
   5.412 +
   5.413 +Higher-order unification sometimes must invent
   5.414 +$\lambda$-terms to replace function  variables,
   5.415 +which can lead to a combinatorial explosion. However,  Isabelle proofs tend
   5.416 +to involve easy cases where there are few possibilities for the
   5.417 +$\lambda$-term being constructed. In the easiest case, the
   5.418 +function variable is applied only to bound variables, 
   5.419 +as when we try to unify \isa{{\isasymlambda}x\ y.\ f(?h x y)} and
   5.420 +\isa{{\isasymlambda}x\ y.\ f(x+y+a)}.  The only solution is to replace
   5.421 +\isa{?h} by \isa{{\isasymlambda}x\ y.\ x+y+a}.  Such cases admit at most
   5.422 +one unifier, like ordinary unification.  A harder case is
   5.423 +unifying \isa{?h a} with~\isa{a+b}; it admits two solutions for \isa{?h},
   5.424 +namely \isa{{\isasymlambda}x.~a+b} and \isa{{\isasymlambda}x.~x+b}. 
   5.425 +Unifying \isa{?h a} with~\isa{a+a+b} admits four solutions; their number is
   5.426 +exponential in the number of occurrences of~\isa{a} in the second term.
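+Concretely (spelling out the last case), the four unifiers replace \isa{?h} by
+\isa{{\isasymlambda}x.~a+a+b}, \isa{{\isasymlambda}x.~x+a+b},
+\isa{{\isasymlambda}x.~a+x+b} and \isa{{\isasymlambda}x.~x+x+b}: each occurrence
+of~\isa{a} may independently be left alone or replaced by the bound variable.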
   5.427 +
   5.428 +Isabelle also uses function variables to express \textbf{substitution}. 
   5.429 +A typical substitution rule allows us to replace one term by 
   5.430 +another if we know that two terms are equal. 
   5.431 +\[ \infer{P[t/x]}{s=t & P[s/x]} \]
   5.432 +The conclusion uses a notation for substitution: $P[t/x]$ is the result of
   5.433 +replacing $x$ by~$t$ in~$P$.  The rule only substitutes in the positions
   5.434 +designated by~$x$, which gives it additional power. For example, it can
   5.435 +derive symmetry of equality from reflexivity.  Using $x=s$ for~$P$
   5.436 +replaces just the first $s$ in $s=s$ by~$t$.
   5.437 +\[ \infer{t=s}{s=t & \infer{s=s}{}} \]
   5.438 +
   5.439 +The Isabelle version of the substitution rule looks like this: 
   5.440 +\begin{isabelle}
   5.441 +\isasymlbrakk?t\ =\ ?s;\ ?P\ ?s\isasymrbrakk\ \isasymLongrightarrow\ ?P\
   5.442 +?t
   5.443 +\rulename{ssubst}
   5.444 +\end{isabelle}
   5.445 +Crucially, \isa{?P} is a function 
   5.446 +variable: it can be replaced by a $\lambda$-expression 
   5.447 +involving one bound variable whose occurrences identify the places 
   5.448 +in which $s$ will be replaced by~$t$.  The proof above requires
   5.449 +\isa{{\isasymlambda}x.~x=s}.
   5.450 +
   5.451 +The \isa{simp} method replaces equals by equals, but using the substitution
   5.452 +rule gives us more control. Consider this proof: 
   5.453 +\begin{isabelle}
   5.454 +\isacommand{lemma}\
   5.455 +"{\isasymlbrakk}\ x\
   5.456 +=\ f\ x;\ odd(f\
   5.457 +x)\ \isasymrbrakk\ \isasymLongrightarrow\ odd\
   5.458 +x"\isanewline
   5.459 +\isacommand{apply}\ (erule\ ssubst)\isanewline
   5.460 +\isacommand{apply}\ assumption\isanewline
   5.461 +\isacommand{done}\end{isabelle}
   5.462 +%
   5.463 +The simplifier might loop, replacing \isa{x} by \isa{f x} and then by
   5.464 +\isa{f(f x)} and so forth. (Actually, \isa{simp} 
   5.465 +sees the danger and re-orients this equality, but in more complicated cases
   5.466 +it can be fooled.) When we apply substitution,  Isabelle replaces every
   5.467 +\isa{x} in the subgoal by \isa{f x} just once: it cannot loop.  The
   5.468 +resulting subgoal is trivial by assumption. 
   5.469 +
    5.470 +We are using the \isa{erule} method in a novel way. Hitherto, 
   5.471 +the conclusion of the rule was just a variable such as~\isa{?R}, but it may
   5.472 +be any term. The conclusion is unified with the subgoal just as 
   5.473 +it would be with the \isa{rule} method. At the same time \isa{erule} looks 
   5.474 +for an assumption that matches the rule's first premise, as usual.  With
   5.475 +\isa{ssubst} the effect is to find, use and delete an equality 
   5.476 +assumption.
   5.477 +
   5.478 +
   5.479 +Higher-order unification can be tricky, as this example indicates: 
   5.480 +\begin{isabelle}
   5.481 +\isacommand{lemma}\ "{\isasymlbrakk}\ x\ =\
   5.482 +f\ x;\ triple\ (f\ x)\
   5.483 +(f\ x)\ x\ \isasymrbrakk\
   5.484 +\isasymLongrightarrow\ triple\ x\ x\ x"\isanewline
   5.485 +\isacommand{apply}\ (erule\ ssubst)\isanewline
   5.486 +\isacommand{back}\isanewline
   5.487 +\isacommand{back}\isanewline
   5.488 +\isacommand{back}\isanewline
   5.489 +\isacommand{back}\isanewline
   5.490 +\isacommand{apply}\ assumption\isanewline
   5.491 +\isacommand{done}
   5.492 +\end{isabelle}
   5.493 +%
   5.494 +By default, Isabelle tries to substitute for all the 
   5.495 +occurrences.  Applying \isa{erule\ ssubst} yields this subgoal:
   5.496 +\begin{isabelle}
   5.497 +\ 1.\ triple\ (f\ x)\ (f\ x)\ x\ \isasymLongrightarrow\ triple\ (f\ x)\ (f\ x)\ (f\ x)
   5.498 +\end{isabelle}
   5.499 +The substitution should have been done in the first two occurrences 
   5.500 +of~\isa{x} only. Isabelle has gone too far. The \isa{back} 
   5.501 +method allows us to reject this possibility and get a new one: 
   5.502 +\begin{isabelle}
   5.503 +\ 1.\ triple\ (f\ x)\ (f\ x)\ x\ \isasymLongrightarrow\ triple\ x\ (f\ x)\ (f\ x)
   5.504 +\end{isabelle}
   5.505 +%
   5.506 +Now Isabelle has left the first occurrence of~\isa{x} alone. That is 
   5.507 +promising but it is not the desired combination. So we use \isa{back} 
   5.508 +again:
   5.509 +\begin{isabelle}
   5.510 +\ 1.\ triple\ (f\ x)\ (f\ x)\ x\ \isasymLongrightarrow\ triple\ (f\ x)\ x\ (f\ x)
   5.511 +\end{isabelle}
   5.512 +%
   5.513 +This also is wrong, so we use \isa{back} again: 
   5.514 +\begin{isabelle}
   5.515 +\ 1.\ triple\ (f\ x)\ (f\ x)\ x\ \isasymLongrightarrow\ triple\ x\ x\ (f\ x)
   5.516 +\end{isabelle}
   5.517 +%
   5.518 +And this one is wrong too. Looking carefully at the series 
   5.519 +of alternatives, we see a binary countdown with reversed bits: 111,
   5.520 +011, 101, 001.  Invoke \isa{back} again: 
   5.521 +\begin{isabelle}
   5.522 +\ 1.\ triple\ (f\ x)\ (f\ x)\ x\ \isasymLongrightarrow\ triple\ (f\ x)\ (f\ x)\ x%
   5.523 +\end{isabelle}
   5.524 +At last, we have the right combination!  This goal follows by assumption.
   5.525 +
   5.526 +Never use {\isa{back}} in the final version of a proof. 
    5.527 +It should only be used for exploration. One way to get rid of {\isa{back}} is
   5.528 +to combine two methods in a single \textbf{apply} command. Isabelle 
   5.529 +applies the first method and then the second. If the second method 
   5.530 +fails then Isabelle automatically backtracks. This process continues until 
   5.531 +the first method produces an output that the second method can 
   5.532 +use. We get a one-line proof of our example: 
   5.533 +\begin{isabelle}
   5.534 +\isacommand{lemma}\
   5.535 +"{\isasymlbrakk}\ x\
   5.536 +=\ f\ x;\ triple\ (f\
   5.537 +x)\ (f\ x)\ x\
   5.538 +\isasymrbrakk\
   5.539 +\isasymLongrightarrow\ triple\ x\ x\ x"\isanewline
   5.540 +\isacommand{apply}\ (erule\ ssubst,\ assumption)\isanewline
   5.541 +\isacommand{done}
   5.542 +\end{isabelle}
   5.543 +
   5.544 +The most general way to get rid of the {\isa{back}} command is 
   5.545 +to instantiate variables in the rule.  The method {\isa{rule\_tac}} is
   5.546 +similar to \isa{rule}, but it
   5.547 +makes some of the rule's variables  denote specified terms.  
   5.548 +Also available are {\isa{drule\_tac}}  and \isa{erule\_tac}.  Here we need
   5.549 +\isa{erule\_tac} since above we used
   5.550 +\isa{erule}.
   5.551 +\begin{isabelle}
   5.552 +\isacommand{lemma}\ "{\isasymlbrakk}\ x\ =\ f\ x;\ triple\ (f\ x)\ (f\ x)\ x\ \isasymrbrakk\ \isasymLongrightarrow\ triple\ x\ x\ x"\isanewline
   5.553 +\isacommand{apply}\ (erule_tac\
   5.554 +P={"}{\isasymlambda}u.\ triple\ u\
   5.555 +u\ x"\ \isakeyword{in}\
   5.556 +ssubst)\isanewline
   5.557 +\isacommand{apply}\ assumption\isanewline
   5.558 +\isacommand{done}
   5.559 +\end{isabelle}
   5.560 +%
   5.561 +To specify a desired substitution 
   5.562 +requires instantiating the variable \isa{?P} with a $\lambda$-expression. 
    5.563 +The bound variable occurrences in \isa{{\isasymlambda}u.\ triple\ u\
   5.564 +u\ x} indicate that the first two arguments have to be substituted, leaving
   5.565 +the third unchanged.
   5.566 +
   5.567 +An alternative to {\isa{rule\_tac}} is to use \isa{rule} with the
   5.568 +{\isa{of}}  directive, described in \S\ref{sec:forward} below.   An
   5.569 +advantage  of {\isa{rule\_tac}} is that the instantiations may refer to 
   5.570 +variables bound in the current subgoal.
   5.571 +
   5.572 +
   5.573 +\section{Negation}
   5.574 + 
   5.575 +Negation causes surprising complexity in proofs.  Its natural 
   5.576 +deduction rules are straightforward, but additional rules seem 
   5.577 +necessary in order to handle negated assumptions gracefully. 
   5.578 +
   5.579 +Negation introduction deduces $\neg P$ if assuming $P$ leads to a 
   5.580 +contradiction. Negation elimination deduces any formula in the 
   5.581 +presence of $\neg P$ together with~$P$: 
   5.582 +\begin{isabelle}
   5.583 +(?P\ \isasymLongrightarrow\ False)\ \isasymLongrightarrow\ \isasymnot\ ?P%
   5.584 +\rulename{notI}\isanewline
   5.585 +\isasymlbrakk{\isasymnot}\ ?P;\ ?P\isasymrbrakk\ \isasymLongrightarrow\ ?R%
   5.586 +\rulename{notE}
   5.587 +\end{isabelle}
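          +For instance, the two rules together give a quick proof of a simple fact
          +about double negation; here is one way of writing it:
          +\begin{isabelle}
          +\isacommand{lemma}\ "P\ \isasymLongrightarrow\ {\isasymnot}{\isasymnot}P"\isanewline
          +\isacommand{apply}\ (rule\ notI)\isanewline
          +\isacommand{apply}\ (erule\ notE)\isanewline
          +\isacommand{apply}\ assumption\isanewline
          +\isacommand{done}
          +\end{isabelle}
          +%
          +The \isa{notI} step reduces the goal to deriving \isa{False} from \isa{P} and
          +\isa{{\isasymnot}P}; then \isa{notE} and \isa{assumption} finish it off.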
   5.588 +%
   5.589 +Classical logic allows us to assume $\neg P$ 
   5.590 +when attempting to prove~$P$: 
   5.591 +\begin{isabelle}
   5.592 +(\isasymnot\ ?P\ \isasymLongrightarrow\ ?P)\ \isasymLongrightarrow\ ?P%
   5.593 +\rulename{classical}
   5.594 +\end{isabelle}
   5.595 +%
   5.596 +Three further rules are variations on the theme of contrapositive. 
   5.597 +They differ in the placement of the negation symbols: 
   5.598 +\begin{isabelle}
   5.599 +\isasymlbrakk?Q;\ \isasymnot\ ?P\ \isasymLongrightarrow\ \isasymnot\ ?Q\isasymrbrakk\ \isasymLongrightarrow\ ?P%
   5.600 +\rulename{contrapos_pp}\isanewline
   5.601 +\isasymlbrakk{\isasymnot}\ ?Q;\ \isasymnot\ ?P\ \isasymLongrightarrow\ ?Q\isasymrbrakk\ \isasymLongrightarrow\ ?P%
   5.602 +\rulename{contrapos_np}\isanewline
   5.603 +\isasymlbrakk{\isasymnot}\ ?Q;\ ?P\ \isasymLongrightarrow\ ?Q\isasymrbrakk\ \isasymLongrightarrow\ \isasymnot\ ?P%
   5.604 +\rulename{contrapos_nn}
   5.605 +\end{isabelle}
   5.606 +%
   5.607 +These rules are typically applied using the {\isa{erule}} method, where 
   5.608 +their effect is to form a contrapositive from an 
   5.609 +assumption and the goal's conclusion.  
   5.610 +
   5.611 +The most important of these is \isa{contrapos_np}.  It is useful
   5.612 +for applying introduction rules to negated assumptions.  For instance, 
   5.613 +the assumption $\neg(P\imp Q)$ is equivalent to the conclusion $P\imp Q$ and we 
    5.614 +might want to use implication introduction on it. 
   5.615 +Before we can do so, we must move that assumption so that it 
   5.616 +becomes the conclusion. The following proof demonstrates this 
   5.617 +technique: 
   5.618 +\begin{isabelle}
   5.619 +\isacommand{lemma}\ "\isasymlbrakk{\isasymnot}(P{\isasymlongrightarrow}Q);\
   5.620 +\isasymnot(R{\isasymlongrightarrow}Q)\isasymrbrakk\ \isasymLongrightarrow\
   5.621 +R"\isanewline
   5.622 +\isacommand{apply}\ (erule_tac\ Q="R{\isasymlongrightarrow}Q"\ \isakeyword{in}\
   5.623 +contrapos_np)\isanewline
   5.624 +\isacommand{apply}\ intro\isanewline
   5.625 +\isacommand{apply}\ (erule\ notE,\ assumption)\isanewline
   5.626 +\isacommand{done}
   5.627 +\end{isabelle}
   5.628 +%
   5.629 +There are two negated assumptions and we need to exchange the conclusion with the
   5.630 +second one.  The method \isa{erule contrapos_np} would select the first assumption,
   5.631 +which we do not want.  So we specify the desired assumption explicitly, using
   5.632 +\isa{erule_tac}.  This is the resulting subgoal: 
   5.633 +\begin{isabelle}
   5.634 +\ 1.\ \isasymlbrakk{\isasymnot}\ (P\ \isasymlongrightarrow\ Q);\ \isasymnot\
   5.635 +R\isasymrbrakk\ \isasymLongrightarrow\ R\ \isasymlongrightarrow\ Q%
   5.636 +\end{isabelle}
   5.637 +The former conclusion, namely \isa{R}, now appears negated among the assumptions,
   5.638 +while the negated formula \isa{R\ \isasymlongrightarrow\ Q} becomes the new
   5.639 +conclusion.
   5.640 +
   5.641 +We can now apply introduction rules.  We use the {\isa{intro}} method, which
   5.642 +repeatedly  applies built-in introduction rules.  Here its effect is equivalent
   5.643 +to \isa{rule impI}.\begin{isabelle}
   5.644 +\ 1.\ \isasymlbrakk{\isasymnot}\ (P\ \isasymlongrightarrow\ Q);\ \isasymnot\ R;\
   5.645 +R\isasymrbrakk\ \isasymLongrightarrow\ Q%
   5.646 +\end{isabelle}
   5.647 +We can see a contradiction in the form of assumptions \isa{\isasymnot\ R}
   5.648 +and~\isa{R}, which suggests using negation elimination.  If applied on its own,
   5.649 +however, it will select the first negated assumption, which is useless.   Instead,
   5.650 +we combine the rule with  the
   5.651 +\isa{assumption} method:
   5.652 +\begin{isabelle}
   5.653 +\ \ \ \ \ (erule\ notE,\ assumption)
   5.654 +\end{isabelle}
   5.655 +Now when Isabelle selects the first assumption, it tries to prove \isa{P\
   5.656 +\isasymlongrightarrow\ Q} and fails; it then backtracks, finds the 
   5.657 +assumption~\isa{\isasymnot\ R} and finally proves \isa{R} by assumption.  That
   5.658 +concludes the proof.
   5.659 +
   5.660 +\medskip
   5.661 +
   5.662 +Here is another example. 
   5.663 +\begin{isabelle}
   5.664 +\isacommand{lemma}\ "(P\ \isasymor\ Q)\ \isasymand\ R\
   5.665 +\isasymLongrightarrow\ P\ \isasymor\ Q\ \isasymand\ R"\isanewline
    5.666 +\isacommand{apply}\ intro\isanewline
   5.669 +\isacommand{apply}\ (elim\ conjE\ disjE)\isanewline
   5.670 +\ \isacommand{apply}\ assumption
   5.671 +\isanewline
   5.672 +\isacommand{apply}\ (erule\ contrapos_np,\ rule\ conjI)\isanewline
   5.673 +\ \ \isacommand{apply}\ assumption\isanewline
   5.674 +\ \isacommand{apply}\ assumption\isanewline
   5.675 +\isacommand{done}
   5.676 +\end{isabelle}
   5.677 +%
   5.678 +The first proof step applies the {\isa{intro}} method, which repeatedly 
   5.679 +uses built-in introduction rules.  Here it creates the negative assumption \isa{\isasymnot\ (Q\ \isasymand\
   5.680 +R)}.
   5.681 +\begin{isabelle}
   5.682 +\ 1.\ \isasymlbrakk(P\ \isasymor\ Q)\ \isasymand\ R;\ \isasymnot\ (Q\ \isasymand\
   5.683 +R)\isasymrbrakk\ \isasymLongrightarrow\ P%
   5.684 +\end{isabelle}
   5.685 +It comes from \isa{disjCI},  a disjunction introduction rule that is more
   5.686 +powerful than the separate rules  \isa{disjI1} and  \isa{disjI2}.
   5.687 +
   5.688 +Next we apply the {\isa{elim}} method, which repeatedly applies 
   5.689 +elimination rules; here, the elimination rules given 
   5.690 +in the command.  One of the subgoals is trivial, leaving us with one other:
   5.691 +\begin{isabelle}
   5.692 +\ 1.\ \isasymlbrakk{\isasymnot}\ (Q\ \isasymand\ R);\ R;\ Q\isasymrbrakk\ \isasymLongrightarrow\ P%
   5.693 +\end{isabelle}
   5.694 +%
   5.695 +Now we must move the formula \isa{Q\ \isasymand\ R} to be the conclusion.  The
   5.696 +combination 
   5.697 +\begin{isabelle}
   5.698 +\ \ \ \ \ (erule\ contrapos_np,\ rule\ conjI)
   5.699 +\end{isabelle}
   5.700 +is robust: the \isa{conjI} forces the \isa{erule} to select a
    5.701 +conjunction.  The two subgoals are the ones we would expect from applying
   5.702 +conjunction introduction to
   5.703 +\isa{Q\
   5.704 +\isasymand\ R}:  
   5.705 +\begin{isabelle}
   5.706 +\ 1.\ {\isasymlbrakk}R;\ Q;\ \isasymnot\ P\isasymrbrakk\ \isasymLongrightarrow\
   5.707 +Q\isanewline
   5.708 +\ 2.\ {\isasymlbrakk}R;\ Q;\ \isasymnot\ P\isasymrbrakk\ \isasymLongrightarrow\ R%
   5.709 +\end{isabelle}
   5.710 +The rest of the proof is trivial.
   5.711 +
   5.712 +
   5.713 +\section{The universal quantifier}
   5.714 +
   5.715 +Quantifiers require formalizing syntactic substitution and the notion of \textbf{arbitrary
   5.716 +value}.  Consider the universal quantifier.  In a logic book, its
   5.717 +introduction  rule looks like this: 
   5.718 +\[ \infer{\forall x.\,P}{P} \]
   5.719 +Typically, a proviso written in English says that $x$ must not
   5.720 +occur in the assumptions.  This proviso guarantees that $x$ can be regarded as
   5.721 +arbitrary, since it has not been assumed to satisfy any special conditions. 
   5.722 +Isabelle's  underlying formalism, called the
   5.723 +\textbf{meta-logic}, eliminates the  need for English.  It provides its own universal
   5.724 +quantifier (\isasymAnd) to express the notion of an arbitrary value.  We have
   5.725 +already seen  another symbol of the meta-logic, namely
   5.726 +\isa\isasymLongrightarrow, which expresses  inference rules and the treatment of
   5.727 +assumptions. The only other  symbol in the meta-logic is \isa\isasymequiv, which
   5.728 +can be used to define constants.
   5.729 +
   5.730 +Returning to the universal quantifier, we find that having a similar quantifier
   5.731 +as part of the meta-logic makes the introduction rule trivial to express:
   5.732 +\begin{isabelle}
   5.733 +({\isasymAnd}x.\ ?P\ x)\ \isasymLongrightarrow\ {\isasymforall}x.\ ?P\ x\rulename{allI}
   5.734 +\end{isabelle}
   5.735 +
   5.736 +
   5.737 +The following trivial proof demonstrates how the universal introduction 
   5.738 +rule works. 
   5.739 +\begin{isabelle}
   5.740 +\isacommand{lemma}\ "{\isasymforall}x.\ P\ x\ \isasymlongrightarrow\ P\ x"\isanewline
   5.741 +\isacommand{apply}\ (rule\ allI)\isanewline
   5.742 +\isacommand{apply}\ (rule\ impI)\isanewline
   5.743 +\isacommand{apply}\ assumption
   5.744 +\end{isabelle}
   5.745 +The first step invokes the rule by applying the method \isa{rule allI}. 
   5.746 +\begin{isabelle}
   5.747 +%{\isasymforall}x.\ P\ x\ \isasymlongrightarrow\ P\ x\isanewline
   5.748 +\ 1.\ {\isasymAnd}x.\ P\ x\ \isasymlongrightarrow\ P\ x
   5.749 +\end{isabelle}
   5.750 +Note  that the resulting proof state has a bound variable,
   5.751 +namely~\bigisa{x}.  The rule has replaced the universal quantifier of
   5.752 +higher-order  logic by Isabelle's meta-level quantifier.  Our goal is to
   5.753 +prove
   5.754 +\isa{P\ x\ \isasymlongrightarrow\ P\ x} for arbitrary~\isa{x}; it is 
   5.755 +an implication, so we apply the corresponding introduction rule (\isa{impI}). 
   5.756 +\begin{isabelle}
   5.757 +\ 1.\ {\isasymAnd}x.\ P\ x\ \isasymLongrightarrow\ P\ x
   5.758 +\end{isabelle}
   5.759 +The {\isa{assumption}} method proves this last subgoal. 
   5.760 +
   5.761 +\medskip
   5.762 +Now consider universal elimination. In a logic text, 
   5.763 +the rule looks like this: 
   5.764 +\[ \infer{P[t/x]}{\forall x.\,P} \]
   5.765 +The conclusion is $P$ with $t$ substituted for the variable~$x$.  
   5.766 +Isabelle expresses substitution using a function variable: 
   5.767 +\begin{isabelle}
   5.768 +{\isasymforall}x.\ ?P\ x\ \isasymLongrightarrow\ ?P\ ?x\rulename{spec}
   5.769 +\end{isabelle}
   5.770 +This destruction rule takes a 
   5.771 +universally quantified formula and removes the quantifier, replacing 
   5.772 +the bound variable \bigisa{x} by the schematic variable \bigisa{?x}.  Recall that a
   5.773 +schematic variable starts with a question mark and acts as a
   5.774 +placeholder: it can be replaced by any term. 
   5.775 +
   5.776 +To see how this works, let us derive a rule about reducing 
   5.777 +the scope of a universal quantifier.  In mathematical notation we write
   5.778 +\[ \infer{P\imp\forall x.\,Q}{\forall x.\,P\imp Q} \]
   5.779 +with the proviso `$x$ not free in~$P$.'  Isabelle's treatment of
   5.780 +substitution makes the proviso unnecessary.  The conclusion is expressed as
   5.781 +\isa{P\
   5.782 +\isasymlongrightarrow\ ({\isasymforall}x.\ Q\ x)}. No substitution for the
   5.783 +variable \isa{P} can introduce a dependence upon~\isa{x}: that would be a
    5.784 +bound variable capture.  Here is the Isabelle proof in full:
   5.785 +\begin{isabelle}
   5.786 +\isacommand{lemma}\ "({\isasymforall}x.\ P\
   5.787 +\isasymlongrightarrow\ Q\ x)\ \isasymLongrightarrow\ P\
   5.788 +\isasymlongrightarrow\ ({\isasymforall}x.\ Q\ x){"}\isanewline
   5.789 +\isacommand{apply}\ (rule\ impI)\isanewline
   5.790 +\isacommand{apply}\ (rule\ allI)\isanewline
   5.791 +\isacommand{apply}\ (drule\ spec)\isanewline
   5.792 +\isacommand{apply}\ (drule\ mp)\isanewline
   5.793 +\ \ \isacommand{apply}\ assumption\isanewline
   5.794 +\ \isacommand{apply}\ assumption
   5.795 +\end{isabelle}
   5.796 +First we apply implies introduction (\isa{rule impI}), 
   5.797 +which moves the \isa{P} from the conclusion to the assumptions. Then 
   5.798 +we apply universal introduction (\isa{rule allI}).  
   5.799 +\begin{isabelle}
   5.800 +%{\isasymforall}x.\ P\ \isasymlongrightarrow\ Q\ x\ \isasymLongrightarrow\ P\
   5.801 +%\isasymlongrightarrow\ ({\isasymforall}x.\ Q\ x)\isanewline
   5.802 +\ 1.\ {\isasymAnd}x.\ \isasymlbrakk{\isasymforall}x.\ P\ \isasymlongrightarrow\ Q\ x;\ P\isasymrbrakk\ \isasymLongrightarrow\ Q\ x
   5.803 +\end{isabelle}
   5.804 +As before, it replaces the HOL 
   5.805 +quantifier by a meta-level quantifier, producing a subgoal that 
   5.806 +binds the variable~\bigisa{x}.  The leading bound variables
   5.807 +(here \isa{x}) and the assumptions (here \isa{{\isasymforall}x.\ P\
   5.808 +\isasymlongrightarrow\ Q\ x} and \isa{P}) form the \textbf{context} for the
   5.809 +conclusion, here \isa{Q\ x}.  At each proof step, the subgoals inherit the
   5.810 +previous context, though some context elements may be added or deleted. 
   5.811 +Applying \isa{erule} deletes an assumption, while many natural deduction
   5.812 +rules add bound variables or assumptions.
   5.813 +
   5.814 +Now, to reason from the universally quantified 
   5.815 +assumption, we apply the elimination rule using the {\isa{drule}} 
   5.816 +method.  This rule is called \isa{spec} because it specializes a universal formula
   5.817 +to a particular term.
   5.818 +\begin{isabelle}
   5.819 +\ 1.\ {\isasymAnd}x.\ {\isasymlbrakk}P;\ P\ \isasymlongrightarrow\ Q\ (?x2\
   5.820 +x){\isasymrbrakk}\ \isasymLongrightarrow\ Q\ x
   5.821 +\end{isabelle}
   5.822 +Observe how the context has changed.  The quantified formula is gone,
   5.823 +replaced by a new assumption derived from its body.  Informally, we have
   5.824 +removed the quantifier.  The quantified variable
   5.825 +has been replaced by the curious term 
   5.826 +\bigisa{?x2~x}; it acts as a placeholder that may be replaced 
   5.827 +by any term that can be built up from~\bigisa{x}.  (Formally, \bigisa{?x2} is an
   5.828 +unknown of function type, applied to the argument~\bigisa{x}.)  This new assumption is
   5.829 +an implication, so we can  use \emph{modus ponens} on it. As before, it requires
   5.830 +proving the  antecedent (in this case \isa{P}) and leaves us with the consequent. 
   5.831 +\begin{isabelle}
   5.832 +\ 1.\ {\isasymAnd}x.\ {\isasymlbrakk}P;\ Q\ (?x2\ x){\isasymrbrakk}\
   5.833 +\isasymLongrightarrow\ Q\ x
   5.834 +\end{isabelle}
   5.835 +The consequent is \isa{Q} applied to that placeholder.  It may be replaced by any
   5.836 +term built from~\bigisa{x}, and here 
   5.837 +it should simply be~\bigisa{x}.  The \isa{assumption} method will do this.
   5.838 +The assumption need not be identical to the conclusion, provided the two formulas are
   5.839 +unifiable.  
   5.840 +
   5.841 +\medskip
   5.842 +Note that \isa{drule spec} removes the universal quantifier and --- as
   5.843 +usual with elimination rules --- discards the original formula.  Sometimes, a
   5.844 +universal formula has to be kept so that it can be used again.  Then we use a new
   5.845 +method: \isa{frule}.  It acts like \isa{drule} but copies rather than replaces
   5.846 +the selected assumption.  The \isa{f} is for `forward.'
   5.847 +
   5.848 +In this example, we intuitively see that to go from \isa{P\ a} to \isa{P(f\ (f\
   5.849 +a))} requires two uses of the quantified assumption, one for each
   5.850 +additional~\isa{f}.
   5.851 +\begin{isabelle}
   5.852 +\isacommand{lemma}\ "\isasymlbrakk{\isasymforall}x.\ P\ x\ \isasymlongrightarrow\ P\ (f\ x);
   5.853 +\ P\ a\isasymrbrakk\ \isasymLongrightarrow\ P(f\ (f\ a))"\isanewline
   5.854 +\isacommand{apply}\ (frule\ spec)\isanewline
   5.855 +\isacommand{apply}\ (drule\ mp,\ assumption)\isanewline
   5.856 +\isacommand{apply}\ (drule\ spec)\isanewline
   5.857 +\isacommand{apply}\ (drule\ mp,\ assumption,\ assumption)\isanewline
   5.858 +\isacommand{done}
   5.859 +\end{isabelle}
   5.860 +%
   5.861 +Applying \isa{frule\ spec} leaves this subgoal:
   5.862 +\begin{isabelle}
   5.863 +\ 1.\ \isasymlbrakk{\isasymforall}x.\ P\ x\ \isasymlongrightarrow\ P\ (f\ x);\ P\ a;\ P\ ?x\ \isasymlongrightarrow\ P\ (f\ ?x)\isasymrbrakk\ \isasymLongrightarrow\ P\ (f\ (f\ a))
   5.864 +\end{isabelle}
   5.865 +It is just what  \isa{drule} would have left except that the quantified
   5.866 +assumption is still present.  The next step is to apply \isa{mp} to the
   5.867 +implication and the assumption \isa{P\ a}, which leaves this subgoal:
   5.868 +\begin{isabelle}
   5.869 +\ 1.\ \isasymlbrakk{\isasymforall}x.\ P\ x\ \isasymlongrightarrow\ P\ (f\ x);\ P\ a;\ P\ (f\ a)\isasymrbrakk\ \isasymLongrightarrow\ P\ (f\ (f\ a))
   5.870 +\end{isabelle}
   5.871 +%
   5.872 +We have created the assumption \isa{P(f\ a)}, which is progress.  To finish the
   5.873 +proof, we apply \isa{spec} one last time, using \isa{drule}.  One final trick: if
   5.874 +we then apply
   5.875 +\begin{isabelle}
   5.876 +\ \ \ \ \ (drule\ mp,\ assumption)
   5.877 +\end{isabelle}
   5.878 +it will add a second copy of \isa{P(f\ a)} instead of the desired \isa{P(f\
   5.879 +(f\ a))}.  Bundling both \isa{assumption} calls with \isa{drule mp} causes
   5.880 +Isabelle to backtrack and find the correct one.
   5.881 +
   5.882 +
   5.883 +\section{The existential quantifier}
   5.884 +
   5.885 +The concepts just presented also apply to the existential quantifier,
   5.886 +whose introduction rule looks like this in Isabelle: 
   5.887 +\begin{isabelle}
   5.888 +?P\ ?x\ \isasymLongrightarrow\ {\isasymexists}x.\ ?P\ x\rulename{exI}
   5.889 +\end{isabelle}
   5.890 +If we can exhibit some $x$ such that $P(x)$ is true, then $\exists x.
   5.891 +P(x)$ is also true. It is essentially a dual of the universal elimination rule, and
   5.892 +logic texts present it using the same notation for substitution.  The existential
   5.893 +elimination rule looks like this
   5.894 +in a logic text: 
   5.895 +\[ \infer{R}{\exists x.\,P & \infer*{R}{[P]}} \]
   5.896 +%
   5.897 +It looks like this in Isabelle: 
   5.898 +\begin{isabelle}
   5.899 +\isasymlbrakk{\isasymexists}x.\ ?P\ x;\ {\isasymAnd}x.\ ?P\ x\ \isasymLongrightarrow\ ?Q\isasymrbrakk\ \isasymLongrightarrow\ ?Q\rulename{exE}
   5.900 +\end{isabelle}
   5.901 +%
   5.902 +Given an existentially quantified theorem and some
   5.903 +formula $Q$ to prove, it creates a new assumption by removing the quantifier.  As with
   5.904 +the universal introduction  rule, the textbook version imposes a proviso on the
   5.905 +quantified variable, which Isabelle expresses using its meta-logic.  Note that it is
   5.906 +enough to have a universal quantifier in the meta-logic; we do not need an existential
   5.907 +quantifier to be built in as well.\remark{EX example needed?}
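          +
          +To see these rules in use, here is one way of proving a small example with
          +\isa{exE} and \isa{exI}:
          +\begin{isabelle}
          +\isacommand{lemma}\ "{\isasymexists}x.\ P\ (f\ x)\ \isasymLongrightarrow\ {\isasymexists}y.\ P\ y"\isanewline
          +\isacommand{apply}\ (erule\ exE)\isanewline
          +\isacommand{apply}\ (rule\ exI)\isanewline
          +\isacommand{apply}\ assumption\isanewline
          +\isacommand{done}
          +\end{isabelle}
          +%
          +Applying \isa{erule exE} replaces the quantified assumption by \isa{P\ (f\ x)}
          +for an arbitrary~\isa{x}.  Then \isa{rule exI} leaves a placeholder for the
          +witness, which the \isa{assumption} method instantiates to~\isa{f\ x}.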
   5.908 + 
   5.909 +Isabelle/HOL also provides Hilbert's
   5.910 +$\epsilon$-operator.  The term $\epsilon x. P(x)$ denotes some $x$ such that $P(x)$ is
   5.911 +true, provided such a value exists.  Using this operator, we can express an
   5.912 +existential destruction rule:
   5.913 +\[ \infer{P[(\epsilon x. P) / \, x]}{\exists x.\,P} \]
   5.914 +This rule is seldom used, for it can cause exponential blow-up.  The
   5.915 +main use of $\epsilon x. P(x)$ is in definitions when $P(x)$ characterizes $x$
   5.916 +uniquely.  For instance, we can define the cardinality of a finite set~$A$ to be that
    5.917 +$n$ such that $A$ is in one-to-one correspondence with $\{1,\ldots,n\}$.  We can then
   5.918 +prove that the cardinality of the empty set is zero (since $n=0$ satisfies the
   5.919 +description) and proceed to prove other facts.\remark{SOME theorems
   5.920 +and example}
   5.921 +
   5.922 +\begin{exercise}
   5.923 +Prove the lemma
   5.924 +\[ \exists x.\, P\conj Q(x)\Imp P\conj(\exists x.\, Q(x)). \]
   5.925 +\emph{Hint}: the proof is similar 
   5.926 +to the one just above for the universal quantifier. 
   5.927 +\end{exercise}
   5.928 +
   5.929 +
   5.930 +\section{Some proofs that fail}
   5.931 +
   5.932 +Most of the examples in this tutorial involve proving theorems.  But not every 
   5.933 +conjecture is true, and it can be instructive to see how  
   5.934 +proofs fail. Here we attempt to prove a distributive law involving 
   5.935 +the existential quantifier and conjunction. 
   5.936 +\begin{isabelle}
   5.937 +\isacommand{lemma}\ "({\isasymexists}x.\ P\ x)\ \isasymand\ ({\isasymexists}x.\ Q\ x)\ \isasymLongrightarrow\ {\isasymexists}x.\ P\ x\ \isasymand\ Q\ x"\isanewline
   5.938 +\isacommand{apply}\ (erule\ conjE)\isanewline
   5.939 +\isacommand{apply}\ (erule\ exE)\isanewline
   5.940 +\isacommand{apply}\ (erule\ exE)\isanewline
   5.941 +\isacommand{apply}\ (rule\ exI)\isanewline
   5.942 +\isacommand{apply}\ (rule\ conjI)\isanewline
   5.943 +\ \isacommand{apply}\ assumption\isanewline
   5.944 +\isacommand{oops}
   5.945 +\end{isabelle}
   5.946 +The first steps are  routine.  We apply conjunction elimination (\isa{erule
   5.947 +conjE}) to split the assumption  in two, leaving two existentially quantified
   5.948 +assumptions.  Applying existential elimination  (\isa{erule exE}) removes one of
   5.949 +the quantifiers. 
   5.950 +\begin{isabelle}
   5.951 +%({\isasymexists}x.\ P\ x)\ \isasymand\ ({\isasymexists}x.\ Q\ x)\
   5.952 +%\isasymLongrightarrow\ {\isasymexists}x.\ P\ x\ \isasymand\ Q\ x\isanewline
   5.953 +\ 1.\ {\isasymAnd}x.\ \isasymlbrakk{\isasymexists}x.\ Q\ x;\ P\ x\isasymrbrakk\ \isasymLongrightarrow\ {\isasymexists}x.\ P\ x\ \isasymand\ Q\ x
   5.954 +\end{isabelle}
   5.955 +%
   5.956 +When we remove the other quantifier, we get a different bound 
   5.957 +variable in the subgoal.  (The name \isa{xa} is generated automatically.)
   5.958 +\begin{isabelle}
   5.959 +\ 1.\ {\isasymAnd}x\ xa.\ {\isasymlbrakk}P\ x;\ Q\ xa\isasymrbrakk\
   5.960 +\isasymLongrightarrow\ {\isasymexists}x.\ P\ x\ \isasymand\ Q\ x
   5.961 +\end{isabelle}
   5.962 +The proviso of the existential elimination rule has forced the variables to
   5.963 +differ: we can hardly expect two arbitrary values to be equal!  There is
   5.964 +no way to prove this subgoal.  Removing the
   5.965 +conclusion's existential quantifier yields two
   5.966 +identical placeholders, which can become  any term involving the variables \bigisa{x}
   5.967 +and~\bigisa{xa}.  We need one to become \bigisa{x}
   5.968 +and the other to become~\bigisa{xa}, but Isabelle requires all instances of a
   5.969 +placeholder to be identical. 
   5.970 +\begin{isabelle}
   5.971 +\ 1.\ {\isasymAnd}x\ xa.\ {\isasymlbrakk}P\ x;\ Q\ xa\isasymrbrakk\
   5.972 +\isasymLongrightarrow\ P\ (?x3\ x\ xa)\isanewline
   5.973 +\ 2.\ {\isasymAnd}x\ xa.\ {\isasymlbrakk}P\ x;\ Q\ xa\isasymrbrakk\ \isasymLongrightarrow\ Q\ (?x3\ x\ xa)
   5.974 +\end{isabelle}
   5.975 +We can prove either subgoal 
   5.976 +using the \isa{assumption} method.  If we prove the first one, the placeholder
   5.977 +changes  into~\bigisa{x}. 
   5.978 +\begin{isabelle}
   5.979 +\ 1.\ {\isasymAnd}x\ xa.\ {\isasymlbrakk}P\ x;\ Q\ xa\isasymrbrakk\
   5.980 +\isasymLongrightarrow\ Q\ x
   5.981 +\end{isabelle}
   5.982 +We are left with a subgoal that cannot be proved, 
   5.983 +because there is no way to prove that \bigisa{x}
   5.984 +equals~\bigisa{xa}.  Applying the \isa{assumption} method results in an
   5.985 +error message:
   5.986 +\begin{isabelle}
   5.987 +*** empty result sequence -- proof command failed
   5.988 +\end{isabelle}
   5.989 +We can tell Isabelle to abandon a failed proof using the \isacommand{oops} command.
   5.990 +
   5.991 +\medskip 
   5.992 +
   5.993 +Here is another abortive proof, illustrating the interaction between 
   5.994 +bound variables and unknowns.  
   5.995 +If $R$ is a reflexive relation, 
   5.996 +is there an $x$ such that $R\,x\,y$ holds for all $y$?  Let us see what happens when
   5.997 +we attempt to prove it. 
   5.998 +\begin{isabelle}
   5.999 +\isacommand{lemma}\ "{\isasymforall}z.\ R\ z\ z\ \isasymLongrightarrow\
  5.1000 +{\isasymexists}x.\ {\isasymforall}y.\ R\ x\ y"\isanewline
  5.1001 +\isacommand{apply}\ (rule\ exI)\isanewline
  5.1002 +\isacommand{apply}\ (rule\ allI)\isanewline
  5.1003 +\isacommand{apply}\ (drule\ spec)\isanewline
  5.1004 +\isacommand{oops}
  5.1005 +\end{isabelle}
  5.1006 +First, 
  5.1007 +we remove the existential quantifier. The new proof state has 
  5.1008 +an unknown, namely~\bigisa{?x}. 
  5.1009 +\begin{isabelle}
  5.1010 +%{\isasymforall}z.\ R\ z\ z\ \isasymLongrightarrow\ {\isasymexists}x.\
  5.1011 +%{\isasymforall}y.\ R\ x\ y\isanewline
  5.1012 +\ 1.\ {\isasymforall}z.\ R\ z\ z\ \isasymLongrightarrow\ {\isasymforall}y.\ R\ ?x\ y
  5.1013 +\end{isabelle}
  5.1014 +Next, we remove the universal quantifier 
  5.1015 +from the conclusion, putting the bound variable~\isa{y} into the subgoal. 
  5.1016 +\begin{isabelle}
  5.1017 +\ 1.\ {\isasymAnd}y.\ {\isasymforall}z.\ R\ z\ z\ \isasymLongrightarrow\ R\ ?x\ y
  5.1018 +\end{isabelle}
  5.1019 +Finally, we try to apply our reflexivity assumption.  We obtain a 
  5.1020 +new assumption whose identical placeholders may be replaced by 
  5.1021 +any term involving~\bigisa{y}. 
  5.1022 +\begin{isabelle}
  5.1023 +\ 1.\ {\isasymAnd}y.\ R\ (?z2\ y)\ (?z2\ y)\ \isasymLongrightarrow\ R\ ?x\ y
  5.1024 +\end{isabelle}
  5.1025 +This subgoal can only be proved by putting \bigisa{y} for all the placeholders,
  5.1026 +making the assumption and conclusion become \isa{R\ y\ y}. 
  5.1027 +But Isabelle refuses to substitute \bigisa{y}, a bound variable, for
  5.1028 +\bigisa{?x}; that would be a bound variable capture.  The proof fails.
  5.1029 +Note that Isabelle can replace \bigisa{?z2~y} by \bigisa{y}; this involves
  5.1030 +instantiating
  5.1031 +\bigisa{?z2} to the identity function.
  5.1032 +
  5.1033 +This example is typical of how Isabelle enforces sound quantifier reasoning. 
  5.1034 +
  5.1035 +
  5.1036 +\section{Proving theorems using the \emph{\texttt{blast}} method}
  5.1037 +
  5.1038 +It is hard to prove substantial theorems using the methods 
  5.1039 +described above. A proof may be dozens or hundreds of steps long.  You 
  5.1040 +may need to search among different ways of proving certain 
  5.1041 +subgoals. Often a choice that proves one subgoal renders another 
  5.1042 +impossible to prove.  There are further complications that we have not
  5.1043 +discussed, concerning negation and disjunction.  Isabelle's
  5.1044 +\textbf{classical reasoner} is a family of tools that perform such
  5.1045 +proofs automatically.  The most important of these is the 
  5.1046 +{\isa{blast}} method. 
  5.1047 +
  5.1048 +In this section, we shall first see how to use the classical 
  5.1049 +reasoner in its default mode and then how to insert additional 
  5.1050 +rules, enabling it to work in new problem domains. 
  5.1051 +
  5.1052 + We begin with examples from pure predicate logic. The following 
   5.1053 +example is known as Andrews's challenge. Peter Andrews designed 
  5.1054 +it to be hard to prove by automatic means.%
  5.1055 +\footnote{Pelletier~\cite{pelletier86} describes it and many other
  5.1056 +problems for automatic theorem provers.}
  5.1057 +The nested biconditionals cause an exponential explosion: the formal
  5.1058 +proof is  enormous.  However, the {\isa{blast}} method proves it in
  5.1059 +a fraction  of a second. 
  5.1060 +\begin{isabelle}
  5.1061 +\isacommand{lemma}\
  5.1062 +"(({\isasymexists}x.\
  5.1063 +{\isasymforall}y.\
  5.1064 +p(x){=}p(y){)}\
  5.1065 +=\
  5.1066 +(({\isasymexists}x.\
  5.1067 +q(x){)}=({\isasymforall}y.\
  5.1068 +p(y){)}){)}\
  5.1069 +\ \ =\ \ \ \ \isanewline
  5.1070 +\ \ \ \ \ \ \ \
  5.1071 +(({\isasymexists}x.\
  5.1072 +{\isasymforall}y.\
  5.1073 +q(x){=}q(y){)}\
  5.1074 +=\
  5.1075 +(({\isasymexists}x.\
  5.1076 +p(x){)}=({\isasymforall}y.\
  5.1077 +q(y){)}){)}"\isanewline
  5.1078 +\isacommand{apply}\ blast\isanewline
  5.1079 +\isacommand{done}
  5.1080 +\end{isabelle}
  5.1081 +The next example is a logic problem composed by Lewis Carroll. 
  5.1082 +The {\isa{blast}} method finds it trivial. Moreover, it turns out 
  5.1083 +that not all of the assumptions are necessary. We can easily 
  5.1084 +experiment with variations of this formula and see which ones 
  5.1085 +can be proved. 
  5.1086 +\begin{isabelle}
  5.1087 +\isacommand{lemma}\
  5.1088 +"({\isasymforall}x.\
  5.1089 +honest(x)\ \isasymand\
  5.1090 +industrious(x)\ \isasymlongrightarrow\
  5.1091 +healthy(x){)}\
  5.1092 +\isasymand\ \ \isanewline
  5.1093 +\ \ \ \ \ \ \ \ \isasymnot\ ({\isasymexists}x.\
  5.1094 +grocer(x)\ \isasymand\
  5.1095 +healthy(x){)}\
  5.1096 +\isasymand\ \isanewline
  5.1097 +\ \ \ \ \ \ \ \ ({\isasymforall}x.\
  5.1098 +industrious(x)\ \isasymand\
  5.1099 +grocer(x)\ \isasymlongrightarrow\
  5.1100 +honest(x){)}\
  5.1101 +\isasymand\ \isanewline
  5.1102 +\ \ \ \ \ \ \ \ ({\isasymforall}x.\
  5.1103 +cyclist(x)\ \isasymlongrightarrow\
  5.1104 +industrious(x){)}\
  5.1105 +\isasymand\ \isanewline
  5.1106 +\ \ \ \ \ \ \ \ ({\isasymforall}x.\
  5.1107 +{\isasymnot}healthy(x)\ \isasymand\
  5.1108 +cyclist(x)\ \isasymlongrightarrow\
  5.1109 +{\isasymnot}honest(x){)}\
  5.1110 +\ \isanewline
  5.1111 +\ \ \ \ \ \ \ \ \isasymlongrightarrow\
  5.1112 +({\isasymforall}x.\
  5.1113 +grocer(x)\ \isasymlongrightarrow\
  5.1114 +{\isasymnot}cyclist(x){)}"\isanewline
  5.1115 +\isacommand{apply}\ blast\isanewline
  5.1116 +\isacommand{done}
  5.1117 +\end{isabelle}
  5.1118 +The {\isa{blast}} method is also effective for set theory, which is
   5.1119 +described in the next chapter.  The formula below may look horrible, but
  5.1120 +the \isa{blast} method proves it easily. 
  5.1121 +\begin{isabelle}
  5.1122 +\isacommand{lemma}\ "({\isasymUnion}i{\isasymin}I.\ A(i){)}\ \isasyminter\ ({\isasymUnion}j{\isasymin}J.\ B(j){)}\ =\isanewline
  5.1123 +\ \ \ \ \ \ \ \ ({\isasymUnion}i{\isasymin}I.\ {\isasymUnion}j{\isasymin}J.\ A(i)\ \isasyminter\ B(j){)}"\isanewline
  5.1124 +\isacommand{apply}\ blast\isanewline
  5.1125 +\isacommand{done}
  5.1126 +\end{isabelle}
  5.1127 +
  5.1128 +Few subgoals are couched purely in predicate logic and set theory.
  5.1129 +We can extend the scope of the classical reasoner by giving it new rules. 
  5.1130 +Extending it effectively requires understanding the notions of
  5.1131 +introduction, elimination and destruction rules.  Moreover, there is a
  5.1132 +distinction between  safe and unsafe rules. A \textbf{safe} rule is one
  5.1133 +that can be applied  backwards without losing information; an
  5.1134 +\textbf{unsafe} rule loses  information, perhaps transforming the subgoal
  5.1135 +into one that cannot be proved.  The safe/unsafe
  5.1136 +distinction affects the proof search: if a proof attempt fails, the
  5.1137 +classical reasoner backtracks to the most recent unsafe rule application
  5.1138 +and makes another choice. 
  5.1139 +
  5.1140 +An important special case avoids all these complications.  A logical 
  5.1141 +equivalence, which in higher-order logic is an equality between 
  5.1142 +formulas, can be given to the classical 
  5.1143 +reasoner and simplifier by using the attribute {\isa{iff}}.  You 
   5.1144 +should do so if the right-hand side of the equivalence is  
  5.1145 +simpler than the left-hand side.  
  5.1146 +
  5.1147 +For example, here is a simple fact about list concatenation. 
  5.1148 +The result of appending two lists is empty if and only if both 
  5.1149 +of the lists are themselves empty. Obviously, applying this equivalence 
  5.1150 +will result in a simpler goal. When stating this lemma, we include 
  5.1151 +the {\isa{iff}} attribute. Once we have proved the lemma, Isabelle 
  5.1152 +will make it known to the classical reasoner (and to the simplifier). 
  5.1153 +\begin{isabelle}
  5.1154 +\isacommand{lemma}\
  5.1155 +[iff]{:}\
  5.1156 +"(xs{\isacharat}ys\ =\
  5.1157 +\isacharbrackleft{]})\ =\
  5.1158 +(xs=[]\
  5.1159 +\isacharampersand\
  5.1160 +ys=[]{)}"\isanewline
  5.1161 +\isacommand{apply}\ (induct_tac\
  5.1162 +xs)\isanewline
  5.1163 +\isacommand{apply}\ (simp_all)
  5.1164 +\isanewline
  5.1165 +\isacommand{done}
  5.1166 +\end{isabelle}
  5.1167 +%
  5.1168 +This fact about multiplication is also appropriate for 
  5.1169 +the {\isa{iff}} attribute:\remark{the ?s are ugly here but we need
  5.1170 +them again when talking about \isa{of}; we need a consistent style}
  5.1171 +\begin{isabelle}
  5.1172 +(\mbox{?m}\ \isacharasterisk\ \mbox{?n}\ =\ 0)\ =\ (\mbox{?m}\ =\ 0\ \isasymor\ \mbox{?n}\ =\ 0)
  5.1173 +\end{isabelle}
  5.1174 +A product is zero if and only if one of the factors is zero.  The
  5.1175 +reasoning  involves a logical \textsc{or}.  Proving new rules for
  5.1176 +disjunctive reasoning  is hard, but translating to an actual disjunction
  5.1177 +works:  the classical reasoner handles disjunction properly.
  5.1178 +
  5.1179 +In more detail, this is how the {\isa{iff}} attribute works.  It converts
  5.1180 +the equivalence $P=Q$ to a pair of rules: the introduction
  5.1181 +rule $Q\Imp P$ and the destruction rule $P\Imp Q$.  It gives both to the
  5.1182 +classical reasoner as safe rules, ensuring that all occurrences of $P$ in
  5.1183 +a subgoal are replaced by~$Q$.  The simplifier performs the same
  5.1184 +replacement, since \isa{iff} gives $P=Q$ to the
  5.1185 +simplifier.  But classical reasoning is different from
  5.1186 +simplification.  Simplification is deterministic: it applies rewrite rules
  5.1187 +repeatedly, as long as possible, in order to \emph{transform} a goal.  Classical
  5.1188 +reasoning uses search and backtracking in order to \emph{prove} a goal. 
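          +
          +To illustrate, for the list lemma above the attribute in effect supplies the
          +classical reasoner with roughly these two safe rules, the first used for
          +introduction and the second for destruction:
          +\begin{isabelle}
          +\ \ \ \ \ ?xs\ =\ []\ \isasymand\ ?ys\ =\ []\ \isasymLongrightarrow\ ?xs{\isacharat}?ys\ =\ []\isanewline
          +\ \ \ \ \ ?xs{\isacharat}?ys\ =\ []\ \isasymLongrightarrow\ ?xs\ =\ []\ \isasymand\ ?ys\ =\ []
          +\end{isabelle}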
  5.1189 +
  5.1190 +
  5.1191 +\section{Proving the correctness of Euclid's algorithm}
  5.1192 +\label{sec:proving-euclid}
  5.1193 +
  5.1194 +A brief development will illustrate advanced use of  
  5.1195 +\isa{blast}.  In \S\ref{sec:recdef-simplification}, we declared the
  5.1196 +recursive function {\isa{gcd}}:
  5.1197 +\begin{isabelle}
  5.1198 +\isacommand{consts}\ gcd\ :{:}\ {"}nat{\isacharasterisk}nat={\isachargreater}nat"\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \isanewline
  5.1199 +\isacommand{recdef}\ gcd\ {"}measure\ ((\isasymlambda(m{,}n){.}n)\ :{:}nat{\isacharasterisk}nat={\isachargreater}nat){"}\isanewline
  5.1200 +\ \ \ \ {"}gcd\ (m,\ n)\ =\ (if\ n=0\ then\ m\ else\ gcd(n,\ m\ mod\ n){)}"%
  5.1201 +\end{isabelle}
  5.1202 +Let us prove that it computes the greatest common
  5.1203 +divisor of its two arguments.  
  5.1204 +%
  5.1205 +%The declaration yields a recursion
  5.1206 +%equation  for {\isa{gcd}}.  Simplifying with this equation can 
  5.1207 +%cause looping, expanding to ever-larger expressions of if-then-else 
  5.1208 +%and {\isa{gcd}} calls.  To prevent this, we prove separate simplification rules
  5.1209 +%for $n=0$\ldots
  5.1210 +%\begin{isabelle}
  5.1211 +%\isacommand{lemma}\ gcd_0\ [simp]{:}\ {"}gcd(m,0)\ =\ m"\isanewline
  5.1212 +%\isacommand{apply}\ (simp)\isanewline
  5.1213 +%\isacommand{done}
  5.1214 +%\end{isabelle}
  5.1215 +%\ldots{} and for $n>0$:
  5.1216 +%\begin{isabelle}
  5.1217 +%\isacommand{lemma}\ gcd_non_0:\ "0{\isacharless}n\ \isasymLongrightarrow\ gcd(m{,}n)\ =\ gcd\ (n,\ m\ mod\ n){"}\isanewline
  5.1218 +%\isacommand{apply}\ (simp)\isanewline
  5.1219 +%\isacommand{done}
  5.1220 +%\end{isabelle}
  5.1221 +%This second rule is similar to the original equation but
  5.1222 +%does not loop because it is conditional.  It can be applied only
  5.1223 +%when the second argument is known to be non-zero.
  5.1224 +%Armed with our two new simplification rules, we now delete the 
  5.1225 +%original {\isa{gcd}} recursion equation. 
  5.1226 +%\begin{isabelle}
  5.1227 +%\isacommand{declare}\ gcd{.}simps\ [simp\ del]
  5.1228 +%\end{isabelle}
  5.1229 +%
  5.1230 +%Now we can prove  some interesting facts about the {\isa{gcd}} function,
  5.1231 +%for exampe, that it computes a common divisor of its arguments.  
  5.1232 +%
  5.1233 +The theorem is expressed in terms of the familiar
  5.1234 +\textbf{divides} relation from number theory: 
  5.1235 +\begin{isabelle}
  5.1236 +?m\ dvd\ ?n\ \isasymequiv\ {\isasymexists}k.\ ?n\ =\ ?m\ \isacharasterisk\ k
  5.1237 +\rulename{dvd_def}
  5.1238 +\end{isabelle}
  5.1239 +%
  5.1240 +A simple induction proves the theorem.  Here \isa{gcd.induct} refers to the
  5.1241 +induction rule returned by \isa{recdef}.  The proof relies on the simplification
  5.1242 +rules proved in \S\ref{sec:recdef-simplification}, since rewriting by the
  5.1243 +definition of \isa{gcd} can cause looping.
  5.1244 +\begin{isabelle}
  5.1245 +\isacommand{lemma}\ gcd_dvd_both:\ "(gcd(m{,}n)\ dvd\ m)\ \isasymand\ (gcd(m{,}n)\ dvd\ n){"}\isanewline
  5.1246 +\isacommand{apply}\ (induct_tac\ m\ n\ rule:\ gcd{.}induct)\isanewline
  5.1247 +\isacommand{apply}\ (case_tac\ "n=0"{)}\isanewline
  5.1248 +\isacommand{apply}\ (simp_all)\isanewline
  5.1249 +\isacommand{apply}\ (blast\ dest:\ dvd_mod_imp_dvd)\isanewline
  5.1250 +\isacommand{done}%
  5.1251 +\end{isabelle}
  5.1252 +Notice that the induction formula 
  5.1253 +is a conjunction.  This is necessary: in the inductive step, each 
  5.1254 +half of the conjunction establishes the other. The first three proof steps 
  5.1255 +are applying induction, performing a case analysis on \isa{n}, 
  5.1256 +and simplifying.  Let us pass over these quickly and consider
  5.1257 +the use of {\isa{blast}}.  We have reached the following 
  5.1258 +subgoal: 
  5.1259 +\begin{isabelle}
  5.1260 +%gcd\ (m,\ n)\ dvd\ m\ \isasymand\ gcd\ (m,\ n)\ dvd\ n\isanewline
  5.1261 +\ 1.\ {\isasymAnd}m\ n.\ \isasymlbrakk0\ \isacharless\ n;\isanewline
  5.1262 + \ \ \ \ \ \ \ \ \ \ \ \ gcd\ (n,\ m\ mod\ n)\ dvd\ n\ \isasymand\ gcd\ (n,\ m\ mod\ n)\ dvd\ (m\ mod\ n){\isasymrbrakk}\isanewline
  5.1263 +\ \ \ \ \ \ \ \ \ \ \isasymLongrightarrow\ gcd\ (n,\ m\ mod\ n)\ dvd\ m
  5.1264 +\end{isabelle}
  5.1265 +%
  5.1266 +One of the assumptions, the induction hypothesis, is a conjunction. 
  5.1267 +The two divides relationships it asserts are enough to prove 
  5.1268 +the conclusion, for we have the following theorem at our disposal: 
  5.1269 +\begin{isabelle}
  5.1270 +\isasymlbrakk?k\ dvd\ (?m\ mod\ ?n){;}\ ?k\ dvd\ ?n\isasymrbrakk\ \isasymLongrightarrow\ ?k\ dvd\ ?m%
  5.1271 +\rulename{dvd_mod_imp_dvd}
  5.1272 +\end{isabelle}
  5.1273 +%
  5.1274 +This theorem can be applied in various ways.  As an introduction rule, it
  5.1275 +would cause backward chaining from  the conclusion (namely
  5.1276 +\isa{?k\ dvd\ ?m}) to the two premises, which 
  5.1277 +also involve the divides relation. This process does not look promising
  5.1278 +and could easily loop.  More sensible is  to apply the rule in the forward
   5.1279 +direction; each step would eliminate the \isa{mod} symbol from an
  5.1280 +assumption, so the process must terminate.  
  5.1281 +
  5.1282 +So the final proof step applies the \isa{blast} method.
  5.1283 +Attaching the {\isa{dest}} attribute to \isa{dvd_mod_imp_dvd} tells \isa{blast}
   5.1284 +to use it as a destruction rule, that is, in the forward direction.
  5.1285 +
  5.1286 +\medskip
  5.1287 +We have proved a conjunction.  Now, let us give names to each of the
  5.1288 +two halves:
  5.1289 +\begin{isabelle}
  5.1290 +\isacommand{lemmas}\ gcd_dvd1\ [iff]\ =\ gcd_dvd_both\ [THEN\ conjunct1]\isanewline
  5.1291 +\isacommand{lemmas}\ gcd_dvd2\ [iff]\ =\ gcd_dvd_both\ [THEN\ conjunct2]%
  5.1292 +\end{isabelle}
  5.1293 +
  5.1294 +Several things are happening here. The keyword \isacommand{lemmas}
  5.1295 +tells Isabelle to transform a theorem in some way and to
  5.1296 +give a name to the resulting theorem.  Attributes can be given,
  5.1297 +here \isa{iff}, which supplies the new theorems to the classical reasoner
  5.1298 +and the simplifier.  The directive {\isa{THEN}}, which will be explained
  5.1299 +below, supplies the lemma 
  5.1300 +\isa{gcd_dvd_both} to the
  5.1301 +destruction rule \isa{conjunct1} in order to extract the first part.
  5.1302 +\begin{isabelle}
  5.1303 +\ \ \ \ \ gcd\
  5.1304 +(?m1,\
  5.1305 +?n1)\ dvd\
  5.1306 +?m1%
  5.1307 +\end{isabelle}
  5.1308 +The variable names \isa{?m1} and \isa{?n1} arise because
  5.1309 +Isabelle renames schematic variables to prevent 
  5.1310 +clashes.  The second \isacommand{lemmas} declaration yields
  5.1311 +\begin{isabelle}
  5.1312 +\ \ \ \ \ gcd\
  5.1313 +(?m1,\
  5.1314 +?n1)\ dvd\
  5.1315 +?n1%
  5.1316 +\end{isabelle}
  5.1317 +Later, we shall explore this type of forward reasoning in detail. 
  5.1318 +
  5.1319 +To complete the verification of the {\isa{gcd}} function, we must 
  5.1320 +prove that it returns the greatest of all the common divisors 
  5.1321 +of its arguments.  The proof is by induction and simplification.
  5.1322 +\begin{isabelle}
  5.1323 +\isacommand{lemma}\ gcd_greatest\
  5.1324 +[rule_format]{:}\isanewline
  5.1325 +\ \ \ \ \ \ \ "(k\ dvd\
  5.1326 +m)\ \isasymlongrightarrow\ (k\ dvd\
  5.1327 +n)\ \isasymlongrightarrow\ k\ dvd\
  5.1328 +gcd(m{,}n)"\isanewline
  5.1329 +\isacommand{apply}\ (induct_tac\ m\ n\
  5.1330 +rule:\ gcd{.}induct)\isanewline
  5.1331 +\isacommand{apply}\ (case_tac\ "n=0"{)}\isanewline
  5.1332 +\isacommand{apply}\ (simp_all\ add:\ gcd_non_0\ dvd_mod)\isanewline
  5.1333 +\isacommand{done}
  5.1334 +\end{isabelle}
  5.1335 +%
  5.1336 +Note that the theorem has been expressed using HOL implication,
  5.1337 +\isa{\isasymlongrightarrow}, because the induction affects the two
  5.1338 +preconditions.  The directive \isa{rule_format} tells Isabelle to replace
  5.1339 +each \isa{\isasymlongrightarrow} by \isa{\isasymLongrightarrow} before
  5.1340 +storing the theorem we have proved.  This directive also removes outer
  5.1341 +universal quantifiers, converting a theorem into the usual format for
  5.1342 +inference rules.
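          +In this instance, the stored theorem takes essentially the form
          +\begin{isabelle}
          +\ \ \ \ \ \isasymlbrakk?k\ dvd\ ?m;\ ?k\ dvd\ ?n\isasymrbrakk\ \isasymLongrightarrow\ ?k\ dvd\ gcd\ (?m,\ ?n)
          +\end{isabelle}
          +which is just what we need in order to use it as an introduction rule below.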
  5.1343 +
  5.1344 +The facts proved above can be summarized as a single logical 
  5.1345 +equivalence.  This step gives us a chance to see another application
  5.1346 +of \isa{blast}, and it is worth doing for sound logical reasons.
  5.1347 +\begin{isabelle}
  5.1348 +\isacommand{theorem}\ gcd_greatest_iff\ [iff]{:}\isanewline
  5.1349 +\ \ \ \ \ \ \ \ \ "k\ dvd\ gcd(m{,}n)\ =\ (k\ dvd\ m\ \isasymand\ k\ dvd\ n)"\isanewline
  5.1350 +\isacommand{apply}\ (blast\ intro!{:}\ gcd_greatest\ intro:\ dvd_trans)\isanewline
  5.1351 +\isacommand{done}
  5.1352 +\end{isabelle}
  5.1353 +This theorem concisely expresses the correctness of the {\isa{gcd}} 
  5.1354 +function. 
  5.1355 +We state it with the {\isa{iff}} attribute so that 
  5.1356 +Isabelle can use it to remove some occurrences of {\isa{gcd}}. 
  5.1357 +The theorem has a one-line 
  5.1358 +proof using {\isa{blast}} supplied with four introduction 
  5.1359 +rules: note the {\isa{intro}} attribute. The exclamation mark 
  5.1360 +({\isa{intro}}{\isa{!}})\ signifies safe rules, which are 
  5.1361 +applied aggressively.  Rules given without the exclamation mark 
  5.1362 +are applied reluctantly and their uses can be undone if 
  5.1363 +the search backtracks.  Here the unsafe rule expresses transitivity  
  5.1364 +of the divides relation:
  5.1365 +\begin{isabelle}
  5.1366 +\isasymlbrakk?m\ dvd\ ?n;\ ?n\ dvd\ ?p\isasymrbrakk\ \isasymLongrightarrow\ ?m\ dvd\ ?p%
  5.1367 +\rulename{dvd_trans}
  5.1368 +\end{isabelle}
  5.1369 +Applying \isa{dvd_trans} as 
  5.1370 +an introduction rule entails a risk of looping, for it multiplies 
  5.1371 +occurrences of the divides symbol. However, this proof relies 
  5.1372 +on transitivity reasoning.  The rule {\isa{gcd\_greatest}} is safe to apply 
  5.1373 +aggressively because it yields simpler subgoals.  The proof implicitly
  5.1374 +uses \isa{gcd_dvd1} and \isa{gcd_dvd2} as safe rules, because they were
  5.1375 +declared using \isa{iff}.
  5.1376 +
  5.1377 +
  5.1378 +\section{Other classical reasoning methods}
  5.1379 + 
  5.1380 +The {\isa{blast}} method is our main workhorse for proving theorems 
  5.1381 +automatically. Other components of the classical reasoner interact 
  5.1382 +with the simplifier. Still others perform classical reasoning 
  5.1383 +to a limited extent, giving the user fine control over the proof. 
  5.1384 +
  5.1385 +Of the latter methods, the most useful is {\isa{clarify}}. It performs 
  5.1386 +all obvious reasoning steps without splitting the goal into multiple 
  5.1387 +parts. It does not apply rules that could render the 
  5.1388 +goal unprovable (so-called unsafe rules). By performing the obvious 
  5.1389 +steps, {\isa{clarify}} lays bare the difficult parts of the problem, 
  5.1390 +where human intervention is necessary. 
  5.1391 +
  5.1392 +For example, the following conjecture is false:
  5.1393 +\begin{isabelle}
  5.1394 +\isacommand{lemma}\ "({\isasymforall}x.\ P\ x)\ \isasymand\
  5.1395 +({\isasymexists}x.\ Q\ x)\ \isasymlongrightarrow\ ({\isasymforall}x.\ P\ x\
  5.1396 +\isasymand\ Q\ x)"\isanewline
  5.1397 +\isacommand{apply}\ clarify
  5.1398 +\end{isabelle}
  5.1399 +The {\isa{blast}} method would simply fail, but {\isa{clarify}} presents 
  5.1400 +a subgoal that helps us see why we cannot continue the proof. 
  5.1401 +\begin{isabelle}
  5.1402 +\ 1.\ {\isasymAnd}x\ xa.\ \isasymlbrakk{\isasymforall}x.\ P\ x;\ Q\
  5.1403 +xa\isasymrbrakk\ \isasymLongrightarrow\ P\ x\ \isasymand\ Q\ x
  5.1404 +\end{isabelle}
  5.1405 +The proof must fail because the assumption \isa{Q\ xa} and conclusion \isa{Q\ x}
  5.1406 +refer to distinct bound variables.  To reach this state, \isa{clarify} applied
  5.1407 +the introduction rules for \isa{\isasymlongrightarrow} and \isa{\isasymforall}
   5.1408 +and the elimination rule for~\isa{\isasymand}.  It did not apply the introduction
  5.1409 +rule for  \isa{\isasymand} because of its policy never to split goals.
  5.1410 +
  5.1411 +Also available is {\isa{clarsimp}}, a method that interleaves {\isa{clarify}}
   5.1412 +and {\isa{simp}}.  There is also \isa{safe}, which performs the same
   5.1413 +obvious steps as \isa{clarify} but will split goals as well.
  5.1414 +
  5.1415 +The {\isa{force}} method applies the classical reasoner and simplifier 
  5.1416 +to one goal. 
  5.1417 +\remark{example needed? most
  5.1418 +things done by blast, simp or auto can also be done by force, so why add a new
  5.1419 +one?}
  5.1420 +Unless it can prove the goal, it fails. Contrast 
  5.1421 +that with the auto method, which also combines classical reasoning 
  5.1422 +with simplification. The latter's purpose is to prove all the 
  5.1423 +easy subgoals and parts of subgoals. Unfortunately, it can produce 
  5.1424 +large numbers of new subgoals; also, since it proves some subgoals 
  5.1425 +and splits others, it obscures the structure of the proof tree. 
  5.1426 +The {\isa{force}} method does not have these drawbacks. Another 
  5.1427 +difference: {\isa{force}} tries harder than {\isa{auto}} to prove 
  5.1428 +its goal, so it can take much longer to terminate.
  5.1429 +
  5.1430 +Older components of the classical reasoner have largely been 
  5.1431 +superseded by {\isa{blast}}, but they still have niche applications. 
  5.1432 +Most important among these are {\isa{fast}} and {\isa{best}}. While {\isa{blast}} 
  5.1433 +searches for proofs using a built-in first-order reasoner, these 
  5.1434 +earlier methods search for proofs using standard Isabelle inference. 
  5.1435 +That makes them slower but enables them to work correctly in the 
  5.1436 +presence of the more unusual features of Isabelle rules, such 
  5.1437 +as type classes and function unknowns. For example, the introduction rule
  5.1438 +for Hilbert's epsilon-operator has the following form: 
  5.1439 +\begin{isabelle}
  5.1440 +?P\ ?x\ \isasymLongrightarrow\ ?P\ (Eps\ ?P)
  5.1441 +\rulename{someI}
  5.1442 +\end{isabelle}
  5.1443 +
  5.1444 +The repeated occurrence of the variable \isa{?P} makes this rule tricky 
  5.1445 +to apply. Consider this contrived example: 
  5.1446 +\begin{isabelle}
  5.1447 +\isacommand{lemma}\ "{\isasymlbrakk}Q\ a;\ P\ a\isasymrbrakk\isanewline
  5.1448 +\ \ \ \ \ \ \ \ \,\isasymLongrightarrow\ P\ (SOME\ x.\ P\ x\ \isasymand\ Q\ x)\
  5.1449 +\isasymand\ Q\ (SOME\ x.\ P\ x\ \isasymand\ Q\ x)"\isanewline
  5.1450 +\isacommand{apply}\ (rule\ someI)
  5.1451 +\end{isabelle}
  5.1452 +%
  5.1453 +We can apply rule \isa{someI} explicitly.  It yields the 
  5.1454 +following subgoal: 
  5.1455 +\begin{isabelle}
  5.1456 +\ 1.\ {\isasymlbrakk}Q\ a;\ P\ a\isasymrbrakk\ \isasymLongrightarrow\ P\ ?x\
  5.1457 +\isasymand\ Q\ ?x%
  5.1458 +\end{isabelle}
  5.1459 +The proof from this point is trivial.  The question now arises, could we have
   5.1460 +proved the theorem with a single command? Not using the {\isa{blast}} method: it
  5.1461 +cannot perform  the higher-order unification that is necessary here.  The
  5.1462 +{\isa{fast}}  method succeeds: 
  5.1463 +\begin{isabelle}
  5.1464 +\isacommand{apply}\ (fast\ intro!:\ someI)
  5.1465 +\end{isabelle}
  5.1466 +
  5.1467 +The {\isa{best}} method is similar to {\isa{fast}} but it uses best-first
  5.1468 +search instead of depth-first search.  Accordingly, it is slower but less
  5.1469 +susceptible to divergence.  Transitivity rules usually cause {\isa{fast}} to
  5.1470 +loop where {\isa{best}} can often find a proof.
  5.1472 +
  5.1473 +Here is a summary of the classical reasoning methods:
  5.1474 +\begin{itemize}
  5.1475 +\item \isa{blast} works automatically and is the fastest of these methods
  5.1476 +\item \isa{clarify} and \isa{clarsimp} perform obvious steps without splitting the
  5.1477 +goal; \isa{safe} splits goals as well
  5.1478 +\item \isa{force} uses classical reasoning and simplification to prove a goal;
  5.1479 +\isa{auto} is similar but leaves unproved whatever it cannot finish
  5.1480 +\item \isa{fast} and \isa{best} are legacy methods that work well with rules
  5.1481 +involving unusual features such as type classes and function unknowns
  5.1482 +\end{itemize}
  5.1483 +The following table summarizes the relationships among four of these methods:
  5.1484 +\begin{center}
  5.1485 +\begin{tabular}{r|l|l|}
  5.1486 +           & no split   & split \\ \hline
  5.1487 +  no simp  & \isa{clarify}    & \isa{safe} \\ \hline
  5.1488 +     simp  & \isa{clarsimp}   & \isa{auto} \\ \hline
  5.1489 +\end{tabular}
  5.1490 +\end{center}
  5.1491 +
  5.1492 +
  5.1493 +
  5.1494 +
  5.1495 +\section{Forward proof}\label{sec:forward}
  5.1496 +
  5.1497 +Forward proof means deriving new facts from old ones.  It is  the
  5.1498 +most fundamental type of proof.  Backward proof, by working  from goals to
  5.1499 +subgoals, can help us find a difficult proof.  But it is
  5.1500 +not always the best way of presenting the proof so found.  Forward
  5.1501 +proof is particularly good for reasoning from the general
  5.1502 +to the specific.  For example, consider the following distributive law for
  5.1503 +the 
  5.1504 +\isa{gcd} function:
  5.1505 +\[ k\times\gcd(m,n) = \gcd(k\times m,k\times n)\]
  5.1506 +
  5.1507 +Putting $m=1$, we get (since $\gcd(1,n)=1$ and $k\times1=k$) 
  5.1508 +\[ k = \gcd(k,k\times n)\]
  5.1509 +We have derived a new fact about \isa{gcd}; if re-oriented, it might be
  5.1510 +useful for simplification.  After re-orienting it and putting $n=1$, we
  5.1511 +derive another useful law: 
  5.1512 +\[ \gcd(k,k)=k \]
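         +Written out, the two derivations are just short chains of equations:
         +\[ k = k\times1 = k\times\gcd(1,n) = \gcd(k\times1,k\times n) = \gcd(k,k\times n) \]
         +and, putting $n=1$ in the re-oriented law,
         +\[ \gcd(k,k) = \gcd(k,k\times1) = k. \]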
  5.1513 +Substituting values for variables --- instantiation --- is a forward step. 
  5.1514 +Re-orientation works by applying the symmetry of equality to 
  5.1515 +an equation, so it too is a forward step.  
  5.1516 +
  5.1517 +Now let us reproduce our examples in Isabelle.  Here is the distributive
  5.1518 +law:
  5.1519 +\begin{isabelle}%
  5.1520 +?k\ \isacharasterisk\ gcd\ (?m,\ ?n)\ =\ gcd\ (?k\ \isacharasterisk\ ?m,\ ?k\ \isacharasterisk\ ?n)
  5.1521 +\rulename{gcd_mult_distrib2}
  5.1522 +\end{isabelle}%
  5.1523 +The first step is to replace \isa{?m} by~1 in this law.  We refer to the
  5.1524 +variables not by name but by their position in the theorem, from left to
  5.1525 +right.  In this case, the variables  are \isa{?k}, \isa{?m} and~\isa{?n}.
  5.1526 +So, the expression
  5.1527 +\hbox{\texttt{[of k 1]}} replaces \isa{?k} by~\isa{k} and \isa{?m}
  5.1528 +by~\isa{1}.
  5.1529 +\begin{isabelle}
  5.1530 +\isacommand{lemmas}\ gcd_mult_0\ =\ gcd_mult_distrib2\ [of\ k\ 1]
  5.1531 +\end{isabelle}
  5.1532 +%
  5.1533 +The command 
  5.1534 +\isa{thm gcd_mult_0}
  5.1535 +displays the resulting theorem:
  5.1536 +\begin{isabelle}
  5.1537 +\ \ \ \ \ k\ \isacharasterisk\ gcd\ (1,\ ?n)\ =\ gcd\ (k\ \isacharasterisk\ 1,\ k\ \isacharasterisk\ ?n)
  5.1538 +\end{isabelle}
  5.1539 +Something is odd: {\isa{k}} is an ordinary variable, while {\isa{?n}} 
  5.1540 +is schematic.  We did not specify an instantiation 
  5.1541 +for {\isa{?n}}.  In its present form, the theorem does not allow 
  5.1542 +substitution for {\isa{k}}.  One solution is to avoid giving an instantiation for
  5.1543 +\isa{?k}: instead of a term we can put an underscore~(\isa{_}).  For example,
  5.1544 +\begin{isabelle}
  5.1545 +\ \ \ \ \ gcd_mult_distrib2\ [of\ _\ 1]
  5.1546 +\end{isabelle}
  5.1547 +replaces \isa{?m} by~\isa{1} but leaves \isa{?k} unchanged.  In any case, let us put
  5.1548 +the theorem \isa{gcd_mult_0} into a simplified form: 
  5.1549 +\begin{isabelle}
  5.1550 +\isacommand{lemmas}\
  5.1551 +gcd_mult_1\ =\ gcd_mult_0\
  5.1552 +[simplified]%
  5.1553 +\end{isabelle}
  5.1554 +%
  5.1555 +Again, we display the resulting theorem:
  5.1556 +\begin{isabelle}
  5.1557 +\ \ \ \ \ k\ =\ gcd\ (k,\ k\ \isacharasterisk\ ?n)
  5.1558 +\end{isabelle}
  5.1559 +%
  5.1560 +Re-orienting the equation requires the symmetry rule:
  5.1561 +\begin{isabelle}
  5.1562 +?s\ =\ ?t\
  5.1563 +\isasymLongrightarrow\ ?t\ =\
  5.1564 +?s%
  5.1565 +\rulename{sym}
  5.1566 +\end{isabelle}
  5.1567 +The following declaration gives our equation to \isa{sym}:
  5.1568 +\begin{isabelle}
  5.1569 +\ \ \ \isacommand{lemmas}\ gcd_mult\ =\ gcd_mult_1\
  5.1570 +[THEN\ sym]
  5.1571 +\end{isabelle}
  5.1572 +%
  5.1573 +Here is the result:
  5.1574 +\begin{isabelle}
  5.1575 +\ \ \ \ \ gcd\ (k,\ k\ \isacharasterisk\
  5.1576 +?n)\ =\ k%
  5.1577 +\end{isabelle}
  5.1578 +\isa{THEN~sym} gives the current theorem to the rule \isa{sym} and returns the
  5.1579 +resulting conclusion.\remark{figure necessary?}  The effect is to exchange the
  5.1580 +two operands of the equality. Typically {\isa{THEN}} is used with destruction
  5.1581 +rules.  Above we have used
  5.1582 +\isa{THEN~conjunct1} to extract the first part of the theorem
  5.1583 +\isa{gcd_dvd_both}.  Also useful is \isa{THEN~spec}, which removes the quantifier
  5.1584 +from a theorem of the form $\forall x.\,P$, and \isa{THEN~mp}, which converts the
  5.1585 +implication $P\imp Q$ into the rule $\vcenter{\infer{Q}{P}}$.
  5.1586 +Similar to \isa{mp} are the following two rules, which extract 
  5.1587 +the two directions of reasoning about a boolean equivalence:
  5.1588 +\begin{isabelle}
  5.1589 +\isasymlbrakk?Q\ =\
  5.1590 +?P;\ ?Q\isasymrbrakk\
  5.1591 +\isasymLongrightarrow\ ?P%
  5.1592 +\rulename{iffD1}%
  5.1593 +\isanewline
  5.1594 +\isasymlbrakk?P\ =\ ?Q;\ ?Q\isasymrbrakk\
  5.1595 +\isasymLongrightarrow\ ?P%
  5.1596 +\rulename{iffD2}
  5.1597 +\end{isabelle}
  5.1598 +%
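         +For instance, if \isa{all_iff} were a theorem of the form
         +$\forall x.\,P\,x = Q\,x$ (the name is ours, chosen purely for illustration),
         +then the expression
         +\begin{isabelle}
         +\ \ \ \ \ all_iff\ [THEN\ spec,\ THEN\ iffD1]
         +\end{isabelle}
         +would denote the rule \isa{P\ ?x\ \isasymLongrightarrow\ Q\ ?x}.
         +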
  5.1599 +Normally we would never name the intermediate theorems
  5.1600 +such as \isa{gcd_mult_0} and
  5.1601 +\isa{gcd_mult_1} but would combine
  5.1602 +the three forward steps: 
  5.1603 +\begin{isabelle}
  5.1604 +\isacommand{lemmas}\ gcd_mult\ =\ gcd_mult_distrib2\ [of\ k\ 1,\ simplified,\ THEN\ sym]%
  5.1605 +\end{isabelle}
  5.1606 +The directives, or attributes, are processed from left to right.  This
  5.1607 +declaration of \isa{gcd_mult} is equivalent to the
  5.1608 +previous one.
  5.1609 +
  5.1610 +Such declarations can make the proof script hard to read: 
  5.1611 +what is being proved?  It is more legible
  5.1612 +to state the new lemma explicitly and to prove it using a single
  5.1613 +\isa{rule} method whose operand is expressed using forward reasoning:
  5.1614 +\begin{isabelle}
  5.1615 +\isacommand{lemma}\ gcd_mult\
  5.1616 +[simp]{:}\
  5.1617 +"gcd(k,\
  5.1618 +k{\isacharasterisk}n)\ =\
  5.1619 +k"\isanewline
  5.1620 +\isacommand{apply}\ (rule\ gcd_mult_distrib2\ [of\ k\ 1,\ simplified,\ THEN\ sym]{)}\isanewline
  5.1621 +\isacommand{done}
  5.1622 +\end{isabelle}
  5.1623 +Compared with the previous proof of \isa{gcd_mult}, this
  5.1624 +version shows the reader what has been proved.  Also, it receives
  5.1625 +the usual Isabelle treatment.  In particular, Isabelle generalizes over all
  5.1626 +variables: the resulting theorem will have {\isa{?k}} instead of {\isa{k}}.
  5.1627 +
  5.1628 +At the start  of this section, we also saw a proof of $\gcd(k,k)=k$.  Here
  5.1629 +is the Isabelle version: 
  5.1630 +\begin{isabelle}
  5.1631 +\isacommand{lemma}\ gcd_self\
  5.1632 +[simp]{:}\
  5.1633 +"gcd(k{,}k)\
  5.1634 +=\ k"\isanewline
  5.1635 +\isacommand{apply}\ (rule\ gcd_mult\ [of\ k\ 1,\ simplified]{)}\isanewline
  5.1636 +\isacommand{done}
  5.1637 +\end{isabelle}
  5.1638 +
  5.1639 +Recall that \isa{of} generates an instance of a rule by specifying
  5.1640 +values for its variables.  Analogous is \isa{OF}, which generates an
  5.1641 +instance of a rule by specifying facts for its premises.  Let us try
  5.1642 +it with this rule:
  5.1643 +\begin{isabelle}
  5.1644 +{\isasymlbrakk}gcd(?k,?n){=}1;\ ?k\ dvd\ (?m * ?n){\isasymrbrakk}\
  5.1645 +\isasymLongrightarrow\ ?k\ dvd\ ?m
  5.1646 +\rulename{relprime_dvd_mult}
  5.1647 +\end{isabelle}
  5.1648 +First, we
  5.1649 +prove an instance of its first premise:
  5.1650 +\begin{isabelle}
  5.1651 +\isacommand{lemma}\ relprime_20_81:\ "gcd(\#20,\#81)\ =\ 1"\isanewline
  5.1652 +\isacommand{apply}\ (simp\ add:\ gcd{.}simps)\isanewline
  5.1653 +\isacommand{done}%
  5.1654 +\end{isabelle}
  5.1655 +We have evaluated an application of the \isa{gcd} function by
  5.1656 +simplification.  Expression evaluation is not guaranteed to terminate, and it
  5.1657 +is certainly not efficient; Isabelle performs arithmetic operations by
  5.1658 +rewriting symbolic bit strings.  Here, however, the simplification takes
  5.1659 +less than one second.  We can give this new lemma to {\isa{OF}},
  5.1660 +generating an instance of \isa{relprime_dvd_mult}.  The expression
  5.1661 +\begin{isabelle}
  5.1662 +\ \ \ \ \ relprime_dvd_mult [OF relprime_20_81]
  5.1663 +\end{isabelle}
  5.1664 +yields the theorem
  5.1665 +\begin{isabelle}
  5.1666 +\ \ \ \ \ \isacharhash20\ dvd\ (?m\ \isacharasterisk\ \isacharhash81)\ \isasymLongrightarrow\ \isacharhash20\ dvd\ ?m%
  5.1667 +\end{isabelle}
  5.1668 +%
  5.1669 +{\isa{OF}} takes any number of operands.  Consider 
  5.1670 +the following facts about the divides relation: 
  5.1671 +\begin{isabelle}
  5.1672 +\isasymlbrakk?k\ dvd\ ?m;\
  5.1673 +?k\ dvd\ ?n\isasymrbrakk\
  5.1674 +\isasymLongrightarrow\ ?k\ dvd\
  5.1675 +(?m\ \isacharplus\
  5.1676 +?n)
  5.1677 +\rulename{dvd_add}\isanewline
  5.1678 +?m\ dvd\ ?m%
  5.1679 +\rulename{dvd_refl}
  5.1680 +\end{isabelle}
  5.1681 +Let us supply \isa{dvd_refl} for each of the premises of \isa{dvd_add}:
  5.1682 +\begin{isabelle}
  5.1683 +\ \ \ \ \ dvd_add [OF dvd_refl dvd_refl]
  5.1684 +\end{isabelle}
  5.1685 +Here is the theorem that this expression denotes: 
  5.1686 +\begin{isabelle}
  5.1687 +\ \ \ \ \ ?k\ dvd\ (?k\ \isacharplus\ ?k)
  5.1688 +\end{isabelle}
  5.1689 +As with \isa{of}, we can use the \isa{_} symbol to leave some positions
  5.1690 +unspecified:
  5.1691 +\begin{isabelle}
  5.1692 +\ \ \ \ \ dvd_add [OF _ dvd_refl]
  5.1693 +\end{isabelle}
  5.1694 +The result is 
  5.1695 +\begin{isabelle}
  5.1696 +\ \ \ \ \ ?k\ dvd\ ?m\ \isasymLongrightarrow\ ?k\ dvd\ (?m\ \isacharplus\ ?k)
  5.1697 +\end{isabelle}
  5.1698 +
  5.1699 +You may have noticed that {\isa{THEN}} and {\isa{OF}} are based on 
  5.1700 +the same idea, namely to combine two rules.  They differ in the
  5.1701 +order of the combination and thus in their effect.  We typically use
  5.1702 +\isa{THEN} with a destruction rule to extract a subformula of the current
  5.1703 +theorem.  We use \isa{OF} with a list of facts to generate an instance of
  5.1704 +the current theorem.
  5.1705 +
  5.1706 +
  5.1707 +Here is a summary of the primitives for forward reasoning:
  5.1708 +\begin{itemize}
  5.1709 +\item {\isa{of}} instantiates the variables of a rule to a list of terms
  5.1710 +\item {\isa{OF}} applies a rule to a list of theorems
  5.1711 +\item {\isa{THEN}} gives a theorem to a named rule and returns the
  5.1712 +conclusion 
  5.1713 +\end{itemize}
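         +
         +These primitives can be combined, the attributes being processed from left to
         +right.  As a further illustration (ours, not taken from the tutorial
         +theories), the expression
         +\begin{isabelle}
         +\ \ \ \ \ dvd_add\ [of\ k,\ OF\ dvd_refl\ dvd_refl]
         +\end{isabelle}
         +first replaces \isa{?k} by~\isa{k} and then discharges both premises, yielding
         +a theorem of the form \isa{k\ dvd\ (k\ \isacharplus\ k)}, where \isa{k} is an
         +ordinary variable rather than a schematic one.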
  5.1714 +
  5.1715 +
  5.1716 +\section{Methods for forward proof}
  5.1717 +
  5.1718 +We have seen that forward proof works well within a backward 
  5.1719 +proof.  Also in that spirit is the \isa{insert} method, which inserts a
  5.1720 +given theorem as a new assumption of the current subgoal.  This is already
  5.1721 +a forward step; moreover, we may (as always when using a theorem) apply
  5.1722 +{\isa{of}}, {\isa{THEN}} and other directives.  The new assumption can then
  5.1723 +be used to help prove the subgoal.
  5.1724 +
  5.1725 +For example, consider this theorem about the divides relation. 
  5.1726 +Only the first proof step is given; it inserts the distributive law for
  5.1727 +\isa{gcd}.  We specify its variables as shown. 
  5.1728 +\begin{isabelle}
  5.1729 +\isacommand{lemma}\
  5.1730 +relprime_dvd_mult:\isanewline
  5.1731 +\ \ \ \ \ \ \ "{\isasymlbrakk}\ gcd(k,n){=}1;\
  5.1732 +k\ dvd\ (m*n)\
  5.1733 +{\isasymrbrakk}\
  5.1734 +\isasymLongrightarrow\ k\ dvd\
  5.1735 +m"\isanewline
  5.1736 +\isacommand{apply}\ (insert\ gcd_mult_distrib2\ [of\ m\ k\
  5.1737 +n])
  5.1738 +\end{isabelle}
  5.1739 +In the resulting subgoal, note how the equation has been 
  5.1740 +inserted: 
  5.1741 +\begin{isabelle}
  5.1742 +{\isasymlbrakk}gcd\ (k,\ n)\ =\ 1;\ k\
  5.1743 +dvd\ (m\ \isacharasterisk\
  5.1744 +n){\isasymrbrakk}\ \isasymLongrightarrow\ k\ dvd\
  5.1745 +m\isanewline
  5.1746 +\ 1.\ {\isasymlbrakk}gcd\ (k,\ n)\ =\ 1;\ k\ dvd\ (m\ \isacharasterisk\ n){;}\isanewline
  5.1747 +\ \ \ \ \ m\ \isacharasterisk\ gcd\
  5.1748 +(k,\ n)\
  5.1749 +=\ gcd\ (m\ \isacharasterisk\
  5.1750 +k,\ m\ \isacharasterisk\
  5.1751 +n){\isasymrbrakk}\isanewline
  5.1752 +\ \ \ \ \isasymLongrightarrow\ k\ dvd\ m
  5.1753 +\end{isabelle}
  5.1754 +The next proof step, \isa{\isacommand{apply}(simp)}, 
  5.1755 +utilizes the assumption \isa{gcd(k,n)\ =\
  5.1756 +1}. Here is the result: 
  5.1757 +\begin{isabelle}
  5.1758 +{\isasymlbrakk}gcd\ (k,\
  5.1759 +n)\ =\ 1;\ k\
  5.1760 +dvd\ (m\ \isacharasterisk\
  5.1761 +n){\isasymrbrakk}\ \isasymLongrightarrow\ k\ dvd\
  5.1762 +m\isanewline
  5.1763 +\ 1.\ {\isasymlbrakk}gcd\ (k,\ n)\ =\ 1;\ k\ dvd\ (m\ \isacharasterisk\ n){;}\isanewline
  5.1764 +\ \ \ \ \ m\ =\ gcd\ (m\
  5.1765 +\isacharasterisk\ k,\ m\ \isacharasterisk\
  5.1766 +n){\isasymrbrakk}\isanewline
  5.1767 +\ \ \ \ \isasymLongrightarrow\ k\ dvd\ m
  5.1768 +\end{isabelle}
  5.1769 +Simplification has yielded an equation for \isa{m} that will be used to
  5.1770 +complete the proof. 
  5.1771 +
  5.1772 +\medskip
  5.1773 +Here is another proof using \isa{insert}.  \remark{Effect with unknowns?}
  5.1774 +Division  and remainder obey a well-known law: 
  5.1775 +\begin{isabelle}
  5.1776 +(?m\ div\ ?n)\ \isacharasterisk\
  5.1777 +?n\ \isacharplus\ ?m\ mod\ ?n\
  5.1778 +=\ ?m
  5.1779 +\rulename{mod_div_equality}
  5.1780 +\end{isabelle}
  5.1781 +
  5.1782 +We refer to this law explicitly in the following proof: 
  5.1783 +\begin{isabelle}
  5.1784 +\isacommand{lemma}\ div_mult_self_is_m:\ \isanewline
  5.1785 +\ \ \ \ \ \ "0{\isacharless}n\ \isasymLongrightarrow\ (m{\isacharasterisk}n)\ div\ n\ =\ (m:{:}nat)"\isanewline
  5.1786 +\isacommand{apply}\ (insert\ mod_div_equality\ [of\ "m{\isacharasterisk}n"\ n]{)}\isanewline
  5.1787 +\isacommand{apply}\ (simp)\isanewline
  5.1788 +\isacommand{done}
  5.1789 +\end{isabelle}
  5.1790 +The first step inserts the law, specifying \isa{m*n} and
  5.1791 +\isa{n} for its variables.  Notice that nontrivial expressions must be
  5.1792 +enclosed in quotation marks.  Here is the resulting 
  5.1793 +subgoal, with its new assumption: 
  5.1794 +\begin{isabelle}
  5.1795 +%0\ \isacharless\ n\ \isasymLongrightarrow\ (m\
  5.1796 +%\isacharasterisk\ n)\ div\ n\ =\ m\isanewline
  5.1797 +\ 1.\ \isasymlbrakk0\ \isacharless\
  5.1798 +n;\ \ (m\ \isacharasterisk\ n)\ div\ n\
  5.1799 +\isacharasterisk\ n\ \isacharplus\ (m\ \isacharasterisk\ n)\ mod\ n\
  5.1800 +=\ m\ \isacharasterisk\ n\isasymrbrakk\isanewline
  5.1801 +\ \ \ \ \isasymLongrightarrow\ (m\ \isacharasterisk\ n)\ div\ n\
  5.1802 +=\ m
  5.1803 +\end{isabelle}
  5.1804 +Simplification reduces \isa{(m\ \isacharasterisk\ n)\ mod\ n} to zero.
  5.1805 +Then it cancels the factor~\isa{n} on both
  5.1806 +sides of the equation, proving the theorem. 
  5.1807 +
  5.1808 +\medskip
  5.1809 +A similar method is {\isa{subgoal\_tac}}. Instead of inserting 
  5.1810 +a theorem as an assumption, it inserts an arbitrary formula. 
  5.1811 +This formula must be proved later as a separate subgoal. The 
  5.1812 +idea is to claim that the formula holds on the basis of the current 
  5.1813 +assumptions, to use this claim to complete the proof, and finally 
  5.1814 +to justify the claim. It is a valuable means of giving the proof 
  5.1815 +some structure. The explicit formula will be more readable than 
  5.1816 +proof commands that yield that formula indirectly.
  5.1817 +
  5.1818 +Look at the following example. 
  5.1819 +\begin{isabelle}
  5.1820 +\isacommand{lemma}\ "\isasymlbrakk(z::int)\ <\ \#37;\ \#66\ <\ \#2*z;\ z*z\
  5.1821 +\isasymnoteq\ \#1225;\ Q(\#34);\ Q(\#36)\isasymrbrakk\isanewline
  5.1822 +\ \ \ \ \ \ \ \ \,\isasymLongrightarrow\ Q(z)"\isanewline
  5.1823 +\isacommand{apply}\ (subgoal_tac\ "z\ =\ \#34\ \isasymor\ z\ =\
  5.1824 +\#36")\isanewline
  5.1825 +\isacommand{apply}\ blast\isanewline
  5.1826 +\isacommand{apply}\ (subgoal_tac\ "z\ \isasymnoteq\ \#35")\isanewline
  5.1827 +\isacommand{apply}\ arith\isanewline
  5.1828 +\isacommand{apply}\ force\isanewline
  5.1829 +\isacommand{done}
  5.1830 +\end{isabelle}
  5.1831 +Let us prove it informally.  The first assumption tells us 
  5.1832 +that \isa{z} is no greater than 36. The second tells us that \isa{z} 
  5.1833 +is at least 34. The third assumption tells us that \isa{z} cannot be 35, since
  5.1834 +$35\times35=1225$.  So \isa{z} is either 34 or 36, and since \isa{Q} holds for
  5.1835 +both of those  values, we have the conclusion. 
  5.1836 +
  5.1837 +The Isabelle proof closely follows this reasoning. The first 
  5.1838 +step is to claim that \isa{z} is either 34 or 36. The resulting proof 
  5.1839 +state gives us two subgoals: 
  5.1840 +\begin{isabelle}
  5.1841 +%{\isasymlbrakk}z\ <\ \#37;\ \#66\ <\ \#2\ *\ z;\ z\ *\ z\ \isasymnoteq\ \#1225;\
  5.1842 +%Q\ \#34;\ Q\ \#36\isasymrbrakk\ \isasymLongrightarrow\ Q\ z\isanewline
  5.1843 +\ 1.\ {\isasymlbrakk}z\ <\ \#37;\ \#66\ <\ \#2\ *\ z;\ z\ *\ z\ \isasymnoteq\ \#1225;\ Q\ \#34;\ Q\ \#36;\isanewline
  5.1844 +\ \ \ \ \ z\ =\ \#34\ \isasymor\ z\ =\ \#36\isasymrbrakk\isanewline
  5.1845 +\ \ \ \ \isasymLongrightarrow\ Q\ z\isanewline
  5.1846 +\ 2.\ {\isasymlbrakk}z\ <\ \#37;\ \#66\ <\ \#2\ *\ z;\ z\ *\ z\ \isasymnoteq\ \#1225;\ Q\ \#34;\ Q\ \#36\isasymrbrakk\isanewline
  5.1847 +\ \ \ \ \isasymLongrightarrow\ z\ =\ \#34\ \isasymor\ z\ =\ \#36
  5.1848 +\end{isabelle}
  5.1849 +
  5.1850 +The first subgoal is trivial, but for the second Isabelle needs help to eliminate
  5.1851 +the case
  5.1852 +\isa{z}=35.  The second invocation  of {\isa{subgoal\_tac}} leaves two
  5.1853 +subgoals: 
  5.1854 +\begin{isabelle}
  5.1855 +\ 1.\ {\isasymlbrakk}z\ <\ \#37;\ \#66\ <\ \#2\ *\ z;\ z\ *\ z\ \isasymnoteq\
  5.1856 +\#1225;\ Q\ \#34;\ Q\ \#36;\isanewline
  5.1857 +\ \ \ \ \ z\ \isasymnoteq\ \#35\isasymrbrakk\isanewline
  5.1858 +\ \ \ \ \isasymLongrightarrow\ z\ =\ \#34\ \isasymor\ z\ =\ \#36\isanewline
  5.1859 +\ 2.\ {\isasymlbrakk}z\ <\ \#37;\ \#66\ <\ \#2\ *\ z;\ z\ *\ z\ \isasymnoteq\ \#1225;\ Q\ \#34;\ Q\ \#36\isasymrbrakk\isanewline
  5.1860 +\ \ \ \ \isasymLongrightarrow\ z\ \isasymnoteq\ \#35
  5.1861 +\end{isabelle}
  5.1862 +
  5.1863 +Assuming that \isa{z} is not 35, the first subgoal follows by linear arithmetic:
  5.1864 +the method {\isa{arith}}. For the second subgoal we apply the method {\isa{force}}, 
  5.1865 +which proceeds by assuming that \isa{z}=35 and arriving at a contradiction.
  5.1866 +
  5.1867 +
  5.1868 +\medskip
  5.1869 +Summary of these methods:
  5.1870 +\begin{itemize}
  5.1871 +\item {\isa{insert}} adds a theorem as a new assumption
  5.1872 +\item {\isa{subgoal\_tac}} adds a formula as a new assumption and leaves the
  5.1873 +subgoal of proving that formula
  5.1874 +\end{itemize}
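         +
         +The theorem given to \isa{insert} may itself be modified by the attributes of
         +the previous section.  For instance (an illustration of ours, not taken from
         +the tutorial), we could have inserted the division law already re-oriented:
         +\begin{isabelle}
         +\isacommand{apply}\ (insert\ mod_div_equality\ [of\ "m{\isacharasterisk}n"\ n,\ THEN\ sym]{)}
         +\end{isabelle}
         +The new assumption would then be the same equation with its two sides
         +exchanged.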