doc-src/IsarOverview/Isar/Induction.thy
author Walther Neuper <neuper@ist.tugraz.at>
Thu, 12 Aug 2010 15:03:34 +0200
branchisac-from-Isabelle2009-2
changeset 37913 20e3616b2d9c
parent 30637 57753e0ec1d4
permissions -rw-r--r--
prepare reactivation of isac-update-Isa09-2
nipkow@25403
     1
(*<*)theory Induction imports Main begin
nipkow@25403
     2
(* itrev xs ys accumulates the elements of xs, reversed, in front of ys;
   the lemma itrev xs ys = rev xs @ ys is proved later in this theory. *)
fun itrev where
nipkow@25403
     3
"itrev [] ys = ys" |
nipkow@25403
     4
"itrev (x#xs) ys = itrev xs (x#ys)"
nipkow@25403
     5
(*>*)
kleing@13999
     6
kleing@13999
     7
section{*Case distinction and induction \label{sec:Induct}*}
kleing@13999
     8
kleing@13999
     9
text{* Computer science applications abound with inductively defined
kleing@13999
    10
structures, which is why we treat them in more detail. HOL already
kleing@13999
    11
comes with a datatype of lists with the two constructors @{text Nil}
kleing@13999
    12
and @{text Cons}. @{text Nil} is written @{term"[]"} and @{text"Cons x
kleing@13999
    13
xs"} is written @{term"x # xs"}.  *}
kleing@13999
    14
kleing@13999
    15
subsection{*Case distinction\label{sec:CaseDistinction}*}
kleing@13999
    16
kleing@13999
    17
text{* We have already met the @{text cases} method for performing
kleing@13999
    18
binary case splits. Here is another example: *}
kleing@13999
    19
(* Binary case split on A: plain "cases" abbreviates (rule case_split),
   so the order of the two cases below is fixed (see the following text). *)
lemma "\<not> A \<or> A"
kleing@13999
    20
proof cases
kleing@13999
    21
  assume "A" thus ?thesis ..
kleing@13999
    22
next
kleing@13999
    23
  assume "\<not> A" thus ?thesis ..
kleing@13999
    24
qed
kleing@13999
    25
kleing@13999
    26
text{*\noindent The two cases must come in this order because @{text
wenzelm@27115
    27
cases} merely abbreviates @{text"(rule case_split)"} where
wenzelm@27115
    28
@{thm[source] case_split} is @{thm case_split}. If we reverse
kleing@13999
    29
the order of the two cases in the proof, the first case would prove
kleing@13999
    30
@{prop"\<not> A \<Longrightarrow> \<not> A \<or> A"} which would solve the first premise of
wenzelm@27115
    31
@{thm[source] case_split}, instantiating @{text ?P} with @{term "\<not>
kleing@13999
    32
A"}, thus making the second premise @{prop"\<not> \<not> A \<Longrightarrow> \<not> A \<or> A"}.
kleing@13999
    33
Therefore the order of subgoals is not always completely arbitrary.
kleing@13999
    34
kleing@13999
    35
The above proof is appropriate if @{term A} is textually small.
kleing@13999
    36
However, if @{term A} is large, we do not want to repeat it. This can
kleing@13999
    37
be avoided by the following idiom *}
kleing@13999
    38
kleing@13999
    39
(* Same lemma, but (cases "A") instantiates ?P with A up front, so the
   True/False cases could be given in either order. *)
lemma "\<not> A \<or> A"
kleing@13999
    40
proof (cases "A")
kleing@13999
    41
  case True thus ?thesis ..
kleing@13999
    42
next
kleing@13999
    43
  case False thus ?thesis ..
kleing@13999
    44
qed
kleing@13999
    45
kleing@13999
    46
text{*\noindent which is like the previous proof but instantiates
kleing@13999
    47
@{text ?P} right away with @{term A}. Thus we could prove the two
nipkow@25412
    48
cases in any order. The phrase \isakeyword{case}~@{text True}
nipkow@25412
    49
abbreviates \isakeyword{assume}~@{text"True: A"} and analogously for
kleing@13999
    50
@{text"False"} and @{prop"\<not>A"}.
kleing@13999
    51
kleing@13999
    52
The same game can be played with other datatypes, for example lists,
kleing@13999
    53
where @{term tl} is the tail of a list, and @{text length} returns a
kleing@13999
    54
natural number (remember: $0-1=0$):
kleing@13999
    55
*}
kleing@13999
    56
(* length_tl is removed from the simpset so that the case distinction on
   xs below is actually needed; each case is then closed by simp. *)
(*<*)declare length_tl[simp del](*>*)
kleing@13999
    57
lemma "length(tl xs) = length xs - 1"
kleing@13999
    58
proof (cases xs)
kleing@13999
    59
  case Nil thus ?thesis by simp
kleing@13999
    60
next
kleing@13999
    61
  case Cons thus ?thesis by simp
kleing@13999
    62
qed
nipkow@25412
    63
text{*\noindent Here \isakeyword{case}~@{text Nil} abbreviates
nipkow@25412
    64
\isakeyword{assume}~@{text"Nil:"}~@{prop"xs = []"} and
nipkow@25412
    65
\isakeyword{case}~@{text Cons} abbreviates \isakeyword{fix}~@{text"? ??"}
nipkow@25412
    66
\isakeyword{assume}~@{text"Cons:"}~@{text"xs = ? # ??"},
kleing@13999
    67
where @{text"?"} and @{text"??"}
kleing@13999
    68
stand for variable names that have been chosen by the system.
kleing@13999
    69
Therefore we cannot refer to them.
kleing@13999
    70
Luckily, this proof is simple enough that we do not need to refer to them.
kleing@13999
    71
However, sometimes one may have to. Hence Isar offers a simple scheme for
kleing@13999
    72
naming those variables: replace the anonymous @{text Cons} by
nipkow@25412
    73
@{text"(Cons y ys)"}, which abbreviates \isakeyword{fix}~@{text"y ys"}
nipkow@25412
    74
\isakeyword{assume}~@{text"Cons:"}~@{text"xs = y # ys"}.
kleing@13999
    75
In each \isakeyword{case} the assumption can be
kleing@13999
    76
referred to inside the proof by the name of the constructor. In
kleing@13999
    77
Section~\ref{sec:full-Ind} below we will come across an example
nipkow@25403
    78
of this.
kleing@13999
    79
nipkow@25403
    80
\subsection{Structural induction}
kleing@13999
    81
nipkow@25403
    82
We start with an inductive proof where both cases are proved automatically: *}
nipkow@16522
    83
(* Gauss sum, fully automatic: "by" takes an initial method (induct n)
   and a terminal one (simp_all) closing both generated cases. *)
lemma "2 * (\<Sum>i::nat\<le>n. i) = n*(n+1)"
nipkow@25427
    84
by (induct n) simp_all
kleing@13999
    85
nipkow@15909
    86
text{*\noindent The constraint @{text"::nat"} is needed because all of
nipkow@15909
    87
the operations involved are overloaded.
nipkow@25427
    88
This proof also demonstrates that \isakeyword{by} can take two arguments,
nipkow@25427
    89
one to start and one to finish the proof --- the latter is optional.
nipkow@15909
    90
nipkow@15909
    91
If we want to expose more of the structure of the
kleing@13999
    92
proof, we can use pattern matching to avoid having to repeat the goal
kleing@13999
    93
statement: *}
nipkow@16522
    94
(* The pattern (is "?P n") binds ?P to the goal, so the two induction
   cases can state "?P 0" and "?P(Suc n)" without repeating the formula. *)
lemma "2 * (\<Sum>i::nat\<le>n. i) = n*(n+1)" (is "?P n")
kleing@13999
    95
proof (induct n)
kleing@13999
    96
  show "?P 0" by simp
kleing@13999
    97
next
kleing@13999
    98
  fix n assume "?P n"
kleing@13999
    99
  thus "?P(Suc n)" by simp
kleing@13999
   100
qed
kleing@13999
   101
kleing@13999
   102
text{* \noindent We could refine this further to show more of the equational
kleing@13999
   103
proof. Instead we explore the same avenue as for case distinctions:
kleing@13999
   104
introducing context via the \isakeyword{case} command: *}
nipkow@16522
   105
(* Same lemma via the case command: each case provides ?case (the goal of
   that case); case Suc additionally assumes the induction hypothesis. *)
lemma "2 * (\<Sum>i::nat \<le> n. i) = n*(n+1)"
kleing@13999
   106
proof (induct n)
kleing@13999
   107
  case 0 show ?case by simp
kleing@13999
   108
next
kleing@13999
   109
  case Suc thus ?case by simp
kleing@13999
   110
qed
kleing@13999
   111
kleing@13999
   112
text{* \noindent The implicitly defined @{text ?case} refers to the
kleing@13999
   113
corresponding case to be proved, i.e.\ @{text"?P 0"} in the first case and
kleing@13999
   114
@{text"?P(Suc n)"} in the second case. Context \isakeyword{case}~@{text 0} is
kleing@13999
   115
empty whereas \isakeyword{case}~@{text Suc} assumes @{text"?P n"}. Again we
kleing@13999
   116
have the same problem as with case distinctions: we cannot refer to an anonymous @{term n}
kleing@13999
   117
in the induction step because it has not been introduced via \isakeyword{fix}
kleing@13999
   118
(in contrast to the previous proof). The solution is the one outlined for
kleing@13999
   119
@{text Cons} above: replace @{term Suc} by @{text"(Suc i)"}: *}
kleing@13999
   120
(* Writing "case (Suc i)" names the induction variable, so the step case
   can state its goal "Suc i < Suc i * Suc i + 1" explicitly. *)
lemma fixes n::nat shows "n < n*n + 1"
kleing@13999
   121
proof (induct n)
kleing@13999
   122
  case 0 show ?case by simp
kleing@13999
   123
next
kleing@13999
   124
  case (Suc i) thus "Suc i < Suc i * Suc i + 1" by simp
kleing@13999
   125
qed
kleing@13999
   126
kleing@13999
   127
text{* \noindent Of course we could again have written
kleing@13999
   128
\isakeyword{thus}~@{text ?case} instead of giving the term explicitly
nipkow@25403
   129
but we wanted to use @{term i} somewhere.
kleing@13999
   130
nipkow@25403
   131
\subsection{Generalization via @{text arbitrary}}
kleing@13999
   132
nipkow@25403
   133
It is frequently necessary to generalize a claim before it becomes
nipkow@25403
   134
provable by induction. The tutorial~\cite{LNCS2283} demonstrates this
nipkow@25403
   135
with @{prop"itrev xs ys = rev xs @ ys"}, where @{text ys}
nipkow@25403
   136
needs to be universally quantified before induction succeeds.\footnote{@{thm rev.simps(1)},\quad @{thm rev.simps(2)[no_vars]},\\ @{thm itrev.simps(1)[no_vars]},\quad @{thm itrev.simps(2)[no_vars]}} But
nipkow@25403
   137
strictly speaking, this quantification step is already part of the
nipkow@25403
   138
proof and the quantifiers should not clutter the original claim. This
nipkow@25403
   139
is how the quantification step can be combined with induction: *}
nipkow@25403
   140
(* arbitrary: ys universally quantifies ys before the induction on xs,
   which is required for the induction to go through. *)
lemma "itrev xs ys = rev xs @ ys"
nipkow@25427
   141
by (induct xs arbitrary: ys) simp_all
nipkow@25403
   142
text{*\noindent The annotation @{text"arbitrary:"}~\emph{vars}
nipkow@25403
   143
universally quantifies all \emph{vars} before the induction.  Hence
nipkow@25403
   144
they can be replaced by \emph{arbitrary} values in the proof.
nipkow@25403
   145
nipkow@30637
   146
Generalization via @{text"arbitrary"} is particularly convenient
nipkow@30637
   147
if the induction step is a structured proof as opposed to the automatic
nipkow@30637
   148
example above. Then the claim is available in unquantified form but
nipkow@25403
   149
with the generalized variables replaced by @{text"?"}-variables, ready
nipkow@30637
   150
for instantiation. In the above example, in the @{const[source] Cons} case the
nipkow@30637
   151
induction hypothesis is @{text"itrev xs ?ys = rev xs @ ?ys"} (available
nipkow@30637
   152
under the name @{const[source] Cons}).
nipkow@25403
   153
nipkow@25403
   154
nipkow@25403
   155
\subsection{Inductive proofs of conditional formulae}
nipkow@25412
   156
\label{sec:full-Ind}
nipkow@25403
   157
nipkow@25403
   158
Induction also copes well with formulae involving @{text"\<Longrightarrow>"}, for example
nipkow@25403
   159
*}
nipkow@25403
   160
nipkow@25403
   161
(* Induction applied directly to a goal containing ==>, without first
   rephrasing the premise with the object-level -->. *)
lemma "xs \<noteq> [] \<Longrightarrow> hd(rev xs) = last xs"
nipkow@25427
   162
by (induct xs) simp_all
nipkow@25403
   163
nipkow@25403
   164
text{*\noindent This is an improvement over the style that the
nipkow@25403
   165
tutorial~\cite{LNCS2283} advises, which requires @{text"\<longrightarrow>"}.
nipkow@25403
   166
A further improvement is shown in the following proof:
nipkow@25403
   167
*}
nipkow@25403
   168
nipkow@25403
   169
(* Induction on ys with xs generalized; the step case renames the two
   assumptions provided under the name Cons to Asm, to avoid a clash with
   the inner "case (Cons x xs')" of the nested case distinction on xs. *)
lemma  "map f xs = map f ys \<Longrightarrow> length xs = length ys"
nipkow@25403
   170
proof (induct ys arbitrary: xs)
nipkow@25403
   171
  case Nil thus ?case by simp
nipkow@25403
   172
next
nipkow@25403
   173
  case (Cons y ys)  note Asm = Cons
nipkow@25403
   174
  show ?case
nipkow@25403
   175
  proof (cases xs)
nipkow@25403
   176
    case Nil
nipkow@25403
   177
    hence False using Asm(2) by simp
nipkow@25403
   178
    thus ?thesis ..
nipkow@25403
   179
  next
nipkow@25403
   180
    case (Cons x xs')
nipkow@25427
   181
    with Asm(2) have "map f xs' = map f ys" by simp
nipkow@25403
   182
    from Asm(1)[OF this] `xs = x#xs'` show ?thesis by simp
nipkow@25403
   183
  qed
nipkow@25403
   184
qed
nipkow@25403
   185
nipkow@25403
   186
text{*\noindent
nipkow@25403
   187
The base case is trivial. In the step case Isar assumes
nipkow@25403
   188
(under the name @{text Cons}) two propositions:
nipkow@25403
   189
\begin{center}
nipkow@25403
   190
\begin{tabular}{l}
nipkow@25403
   191
@{text"map f ?xs = map f ys \<Longrightarrow> length ?xs = length ys"}\\
nipkow@25403
   192
@{prop"map f xs = map f (y # ys)"}
nipkow@25403
   193
\end{tabular}
nipkow@25403
   194
\end{center}
nipkow@25403
   195
The first is the induction hypothesis, the second, and this is new,
nipkow@25403
   196
is the premise of the induction step. The actual goal at this point is merely
nipkow@25403
   197
@{prop"length xs = length (y#ys)"}. The assumptions are given the new name
nipkow@25403
   198
@{text Asm} to avoid a name clash further down. The proof proceeds with a case distinction on @{text xs}. In the case @{prop"xs = []"}, the second of our two
nipkow@25403
   199
assumptions (@{text"Asm(2)"}) implies the contradiction @{text"0 = Suc(\<dots>)"}.
nipkow@25403
   200
 In the case @{prop"xs = x#xs'"}, we first obtain
nipkow@25403
   201
@{prop"map f xs' = map f ys"}, from which a forward step with the first assumption (@{text"Asm(1)[OF this]"}) yields @{prop"length xs' = length ys"}. Together
nipkow@25403
   202
with @{prop"xs = x#xs'"} this yields the goal
nipkow@25403
   203
@{prop"length xs = length (y#ys)"}.
nipkow@25403
   204
nipkow@25403
   205
nipkow@25403
   206
\subsection{Induction formulae involving @{text"\<And>"} or @{text"\<Longrightarrow>"}}
nipkow@25403
   207
nipkow@25403
   208
Let us now consider abstractly the situation where the goal to be proved
nipkow@25403
   209
contains both @{text"\<And>"} and @{text"\<Longrightarrow>"}, say @{prop"\<And>x. P x \<Longrightarrow> Q x"}.
nipkow@25403
   210
This means that in each case of the induction,
kleing@13999
   211
@{text ?case} would be of the form @{prop"\<And>x. P' x \<Longrightarrow> Q' x"}.  Thus the
kleing@13999
   212
first proof steps will be the canonical ones, fixing @{text x} and assuming
nipkow@25403
   213
@{prop"P' x"}. To avoid this tedium, induction performs the canonical steps
nipkow@25403
   214
automatically: in each step case, the assumptions contain both the
nipkow@25403
   215
usual induction hypothesis and @{prop"P' x"}, whereas @{text ?case} is only
nipkow@25403
   216
@{prop"Q' x"}.
kleing@13999
   217
nipkow@25403
   218
\subsection{Rule induction}
kleing@13999
   219
nipkow@25403
   220
HOL also supports inductively defined sets. See \cite{LNCS2283}
kleing@13999
   221
for details. As an example we define our own version of the reflexive
kleing@13999
   222
transitive closure of a relation --- HOL provides a predefined one as well.*}
berghofe@23733
   223
(* Reflexive transitive closure of a binary relation r, defined as an
   inductive set with postfix syntax r*; rules refl and step generate the
   rule-induction principle used in the proofs below. *)
inductive_set
berghofe@23733
   224
  rtc :: "('a \<times> 'a)set \<Rightarrow> ('a \<times> 'a)set"   ("_*" [1000] 999)
berghofe@23733
   225
  for r :: "('a \<times> 'a)set"
berghofe@23733
   226
where
berghofe@23733
   227
  refl:  "(x,x) \<in> r*"
berghofe@23733
   228
| step:  "\<lbrakk> (x,y) \<in> r; (y,z) \<in> r* \<rbrakk> \<Longrightarrow> (x,z) \<in> r*"
kleing@13999
   229
kleing@13999
   230
text{* \noindent
kleing@13999
   231
First the constant is declared as a function on binary
kleing@13999
   232
relations (with concrete syntax @{term"r*"} instead of @{text"rtc
kleing@13999
   233
r"}), then the defining clauses are given. We will now prove that
kleing@13999
   234
@{term"r*"} is indeed transitive: *}
kleing@13999
   235
kleing@13999
   236
(* Transitivity of r*: rule induction on the fact A piped in via "using";
   one case per introduction rule, named after that rule. *)
lemma assumes A: "(x,y) \<in> r*" shows "(y,z) \<in> r* \<Longrightarrow> (x,z) \<in> r*"
kleing@13999
   237
using A
kleing@13999
   238
proof induct
kleing@13999
   239
  case refl thus ?case .
kleing@13999
   240
next
kleing@13999
   241
  case step thus ?case by(blast intro: rtc.step)
kleing@13999
   242
qed
kleing@13999
   243
text{*\noindent Rule induction is triggered by a fact $(x_1,\dots,x_n)
kleing@13999
   244
\in R$ piped into the proof, here \isakeyword{using}~@{text A}. The
kleing@13999
   245
proof itself follows the inductive definition very
kleing@13999
   246
closely: there is one case for each rule, and it has the same name as
kleing@13999
   247
the rule, analogous to structural induction.
kleing@13999
   248
kleing@13999
   249
However, this proof is rather terse. Here is a more readable version:
kleing@13999
   250
*}
kleing@13999
   251
nipkow@25403
   252
(* Same transitivity result, spelled out with explicit fix/assume instead
   of the case command; both assumptions are fed in, but only the first is
   consumed by the induction, leaving "(y,z) : r* ==> ..." to prove. *)
lemma assumes "(x,y) \<in> r*" and "(y,z) \<in> r*" shows "(x,z) \<in> r*"
nipkow@25403
   253
using assms
nipkow@25403
   254
proof induct
nipkow@25403
   255
  fix x assume "(x,z) \<in> r*"  -- {*@{text B}[@{text y} := @{text x}]*}
nipkow@25403
   256
  thus "(x,z) \<in> r*" .
nipkow@25403
   257
next
nipkow@25403
   258
  fix x' x y
nipkow@25403
   259
  assume 1: "(x',x) \<in> r" and
nipkow@25403
   260
         IH: "(y,z) \<in> r* \<Longrightarrow> (x,z) \<in> r*" and
nipkow@25403
   261
         B:  "(y,z) \<in> r*"
nipkow@25403
   262
  from 1 IH[OF B] show "(x',z) \<in> r*" by(rule rtc.step)
kleing@13999
   263
qed
nipkow@25403
   264
text{*\noindent
nipkow@25403
   265
This time, merely for a change, we start the proof by feeding both
nipkow@25403
   266
assumptions into the inductive proof. Only the first assumption is
nipkow@25403
   267
``consumed'' by the induction.
nipkow@25403
   268
Since the second one is left over we don't just prove @{text ?thesis} but
nipkow@25403
   269
@{text"(y,z) \<in> r* \<Longrightarrow> ?thesis"}, just as in the previous proof.
nipkow@25403
   270
The base case is trivial. In the assumptions for the induction step we can
kleing@13999
   271
see very clearly how things fit together and permit ourselves the
kleing@13999
   272
obvious forward step @{text"IH[OF B]"}.
kleing@13999
   273
nipkow@25403
   274
The notation \isakeyword{case}~\isa{(}\emph{constructor} \emph{vars}\isa{)}
nipkow@25403
   275
is also supported for inductive definitions. The \emph{constructor} is the
nipkow@25403
   276
name of the rule and the \emph{vars} fix the free variables in the
kleing@13999
   277
rule; the order of the \emph{vars} must correspond to the
nipkow@25403
   278
left-to-right order of the variables as they appear in the rule.
kleing@13999
   279
For example, we could start the above detailed proof of the induction
nipkow@25403
   280
with \isakeyword{case}~\isa{(step x' x y)}. In that case we don't need
nipkow@25403
   281
to spell out the assumptions but can refer to them by @{text"step(.)"},
nipkow@25403
   282
although the resulting text will be quite cryptic.
kleing@13999
   283
nipkow@25403
   284
\subsection{More induction}
kleing@13999
   285
nipkow@25403
   286
We close the section by demonstrating how arbitrary induction
kleing@13999
   287
rules are applied. As a simple example we have chosen recursion
kleing@13999
   288
induction, i.e.\ induction based on a recursive function
kleing@13999
   289
definition. However, most of what we show works for induction in
kleing@13999
   290
general.
kleing@13999
   291
kleing@13999
   292
The example is an unusual definition of rotation: *}
kleing@13999
   293
nipkow@25403
   294
(* rot rotates a list one position to the left (proved below:
   rot xs = tl xs @ [hd xs] for nonempty xs); its three defining
   equations yield the recursion-induction rule rot.induct. *)
fun rot :: "'a list \<Rightarrow> 'a list" where
nipkow@25403
   295
"rot [] = []" |
nipkow@25403
   296
"rot [x] = [x]" |
kleing@13999
   297
"rot (x#y#zs) = y # rot(x#zs)"
kleing@13999
   298
text{*\noindent This yields, among other things, the induction rule
kleing@13999
   299
@{thm[source]rot.induct}: @{thm[display]rot.induct[no_vars]}
nipkow@25403
   300
The following proof relies on a default naming scheme for cases: they are
kleing@13999
   301
called 1, 2, etc, unless they have been named explicitly. The latter happens
nipkow@25403
   302
only with datatypes and inductively defined sets, but (usually)
nipkow@25403
   303
not with recursive functions. *}
kleing@13999
   304
kleing@13999
   305
(* Recursion induction with rot.induct: the cases carry the default
   numeric names 1, 2, 3; case 3 names its variables via (3 a b cs) and
   is shown as a chain of equations. *)
lemma "xs \<noteq> [] \<Longrightarrow> rot xs = tl xs @ [hd xs]"
kleing@13999
   306
proof (induct xs rule: rot.induct)
kleing@13999
   307
  case 1 thus ?case by simp
kleing@13999
   308
next
kleing@13999
   309
  case 2 show ?case by simp
kleing@13999
   310
next
kleing@13999
   311
  case (3 a b cs)
kleing@13999
   312
  have "rot (a # b # cs) = b # rot(a # cs)" by simp
kleing@13999
   313
  also have "\<dots> = b # tl(a # cs) @ [hd(a # cs)]" by(simp add:3)
kleing@13999
   314
  also have "\<dots> = tl (a # b # cs) @ [hd (a # b # cs)]" by simp
kleing@13999
   315
  finally show ?case .
kleing@13999
   316
qed
kleing@13999
   317
kleing@13999
   318
text{*\noindent
kleing@13999
   319
The third case is only shown in gory detail (see \cite{BauerW-TPHOLs01}
kleing@13999
   320
for how to reason with chains of equations) to demonstrate that the
nipkow@25403
   321
\isakeyword{case}~\isa{(}\emph{constructor} \emph{vars}\isa{)} notation also
kleing@13999
   322
works for arbitrary induction theorems with numbered cases. The order
kleing@13999
   323
of the \emph{vars} corresponds to the order of the
kleing@13999
   324
@{text"\<And>"}-quantified variables in each case of the induction
nipkow@25403
   325
theorem. For induction theorems produced by \isakeyword{fun} it is
kleing@13999
   326
the order in which the variables appear on the left-hand side of the
kleing@13999
   327
equation.
kleing@13999
   328
kleing@13999
   329
The proof is so simple that it can be condensed to
kleing@13999
   330
*}
kleing@13999
   331
kleing@13999
   332
(* The same rot lemma, condensed to a single by step. *)
(*<*)lemma "xs \<noteq> [] \<Longrightarrow> rot xs = tl xs @ [hd xs]"(*>*)
nipkow@25412
   333
by (induct xs rule: rot.induct) simp_all
kleing@13999
   334
kleing@13999
   335
(*<*)end(*>*)
nipkow@25403
   336
(*
nipkow@25403
   337
lemma assumes A: "(\<And>n. (\<And>m. m < n \<Longrightarrow> P m) \<Longrightarrow> P n)"
nipkow@25403
   338
  shows "P(n::nat)"
nipkow@25403
   339
proof (rule A)
nipkow@25403
   340
  show "\<And>m. m < n \<Longrightarrow> P m"
nipkow@25403
   341
  proof (induct n)
nipkow@25403
   342
    case 0 thus ?case by simp
nipkow@25403
   343
  next
nipkow@25403
   344
    case (Suc n)   -- {*\isakeyword{fix} @{term m} \isakeyword{assume} @{text Suc}: @{text[source]"?m < n \<Longrightarrow> P ?m"} @{prop[source]"m < Suc n"}*}
nipkow@25403
   345
    show ?case    -- {*@{term ?case}*}
nipkow@25403
   346
    proof cases
nipkow@25403
   347
      assume eq: "m = n"
nipkow@25403
   348
      from Suc and A have "P n" by blast
nipkow@25403
   349
      with eq show "P m" by simp
nipkow@25403
   350
    next
nipkow@25403
   351
      assume "m \<noteq> n"
nipkow@25403
   352
      with Suc have "m < n" by arith
nipkow@25403
   353
      thus "P m" by(rule Suc)
nipkow@25403
   354
    qed
nipkow@25403
   355
  qed
nipkow@25403
   356
qed
nipkow@25403
   357
*)