# HG changeset patch
# User ballarin
# Date 1230631801 -3600
# Node ID ea97aa6aeba2b790c69d6a3b1ba1e6913a3519ca
# Parent 8f84a608883d24a8ede1a0052e696c40cedd2a81
# Parent 7dc7a75033ea53c879f5c3e26709d974f4e92ba8
Merged.

diff -r 8f84a608883d -r ea97aa6aeba2 Admin/MacOS/README
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/Admin/MacOS/README Tue Dec 30 11:10:01 2008 +0100
@@ -0,0 +1,17 @@
+Isabelle application bundle for MacOS
+=====================================
+
+Requirements:
+
+* CocoaDialog http://cocoadialog.sourceforge.net/
+
+* Platypus http://www.sveinbjorn.org/platypus
+
+* AppHack 1.1 http://www.sveinbjorn.org/apphack
+
+  Manual setup:
+    File type: "Isabelle theory"
+    Icon: "theory.icns"
+    "Editor"
+    Suffixes: "thy"
+
diff -r 8f84a608883d -r ea97aa6aeba2 Admin/MacOS/isabelle.icns
Binary file Admin/MacOS/isabelle.icns has changed
diff -r 8f84a608883d -r ea97aa6aeba2 Admin/MacOS/mk
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/Admin/MacOS/mk Tue Dec 30 11:10:01 2008 +0100
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Make Isabelle application bundle
+
+THIS="$(cd "$(dirname "$0")"; pwd)"
+
+PLATYPUS_APP="/Applications/Platypus-4.0/Platypus.app"
+COCOADIALOG_APP="/Applications/CocoaDialog.app"
+
+"$PLATYPUS_APP/Contents/Resources/platypus" \
+  -a Isabelle -u Isabelle \
+  -I "de.tum.in.isabelle" \
+  -i "$THIS/isabelle.icns" \
+  -D -X thy \
+  -p /bin/bash \
+  -c "$THIS/script" \
+  -o None \
+  -f "$COCOADIALOG_APP" \
+  "$PWD/Isabelle.app"
diff -r 8f84a608883d -r ea97aa6aeba2 Admin/MacOS/script
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/Admin/MacOS/script Tue Dec 30 11:10:01 2008 +0100
@@ -0,0 +1,78 @@
+#!/bin/bash
+#
+# Author: Makarius
+#
+# Isabelle application wrapper
+
+THIS="$(cd "$(dirname "$0")"; pwd)"
+THIS_APP="$(cd "$THIS/../.."; pwd)"
+SUPER_APP="$(cd "$THIS/../../.."; pwd)"
+
+
+# sane environment defaults
+cd "$HOME"
+PATH="$PATH:/opt/local/bin"
+
+
+# settings support
+
+function choosefrom ()
+{
+  local RESULT=""
+  local FILE=""
+
+  for FILE in "$@"
+  do
+    [ -z "$RESULT" -a -e "$FILE" ] && RESULT="$FILE"
+  done
+
+  [ -z "$RESULT" ] && RESULT="$FILE"
+  echo "$RESULT"
+}
+
+
+# Isabelle
+
+ISABELLE_TOOL="$(choosefrom \
+  "$THIS/Isabelle/bin/isabelle" \
+  "$SUPER_APP/Isabelle/bin/isabelle" \
+  "$HOME/bin/isabelle" \
+  isabelle)"
+
+
+# Proof General / Emacs
+
+PROOFGENERAL_EMACS="$(choosefrom \
+  "$THIS/Emacs.app/Contents/MacOS/Emacs" \
+  "$SUPER_APP/Emacs.app/Contents/MacOS/Emacs" \
+  /Applications/Emacs.app/Contents/MacOS/Emacs \
+  "")"
+
+if [ -n "$PROOFGENERAL_EMACS" ]; then
+  PROOFGENERAL_OPTIONS="-p $PROOFGENERAL_EMACS $PROOFGENERAL_OPTIONS"
+fi
+
+
+# run interface with error feedback
+
+OUTPUT="/tmp/isabelle$$.out"
+
+( "$HOME/bin/isabelle" emacs "$@" ) > "$OUTPUT" 2>&1
+RC=$?
+
+if [ "$RC" != 0 ]; then
+  echo >> "$OUTPUT"
+  echo "Return code: $RC" >> "$OUTPUT"
+fi
+
+if [ $(stat -f "%z" "$OUTPUT") != 0 ]; then
+  "$THIS/CocoaDialog.app/Contents/MacOS/CocoaDialog" textbox \
+    --title "Isabelle" \
+    --informative-text "Isabelle output" \
+    --text-from-file "$OUTPUT" \
+    --button1 "OK"
+fi
+
+rm -f "$OUTPUT"
+
+exit "$RC"
diff -r 8f84a608883d -r ea97aa6aeba2 Admin/MacOS/theory.icns
Binary file Admin/MacOS/theory.icns has changed
diff -r 8f84a608883d -r ea97aa6aeba2 Admin/Mercurial/isabelle-style.diff
--- a/Admin/Mercurial/isabelle-style.diff Tue Dec 30 08:18:54 2008 +0100
+++ b/Admin/Mercurial/isabelle-style.diff Tue Dec 30 11:10:01 2008 +0100
@@ -13,23 +13,22 @@
 >
 > #files#
 >
-Only in isabelle: filelog.tmpl~
+diff -r gitweb/changeset.tmpl isabelle/changeset.tmpl
+19c19
+< #desc|strip|escape|firstline# {inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}
+---
+> #desc|strip|escape# {inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}
 diff -r gitweb/map isabelle/map
-56,57c56,57
+29c29
+< annotateline = '#author|user#@#rev# #linenumber# #line|escape#'
+---
+> annotateline = '#author|user#@#rev# #linenumber# #line|escape#'
+59,60c59,60
 < shortlogentry = '#date|age# ago#author|person##desc|strip|firstline|escape# {inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}changeset | files'
 < filelogentry = '#date|age# ago#desc|strip|firstline|escape#file | diff | annotate #rename%filelogrename#'
 ---
 > shortlogentry = '#date|age# ago#date|shortdate##author|person##desc|strip|escape# {inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}changeset | files'
 > filelogentry = '#date|age# ago#date|shortdate##author|person##desc|strip|escape#file | diff | annotate #rename%filelogrename#'
-Only in isabelle: map~
 diff -r gitweb/summary.tmpl isabelle/summary.tmpl
-33d32
+34d33
 < owner#owner|obfuscate#
-49,55d47
-< branches
-<
-< {branches%branchentry}
-<
-<
-<
-< ...
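
A minimal stand-alone sketch (not part of this changeset) of the "choosefrom" helper defined in Admin/MacOS/script above: it returns the first candidate that exists on disk and falls back to the last argument. The Emacs paths below are illustrative assumptions only.

  #!/bin/bash
  # pick the first existing candidate, otherwise fall back to the last argument
  function choosefrom ()
  {
    local RESULT=""
    local FILE=""
    for FILE in "$@"
    do
      [ -z "$RESULT" -a -e "$FILE" ] && RESULT="$FILE"
    done
    [ -z "$RESULT" ] && RESULT="$FILE"
    echo "$RESULT"
  }

  # e.g. prefer a bundled Emacs, else a system-wide one, else empty
  EMACS="$(choosefrom \
    "$HOME/Emacs.app/Contents/MacOS/Emacs" \
    /Applications/Emacs.app/Contents/MacOS/Emacs \
    "")"
  echo "Selected Emacs: ${EMACS:-none}"
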
diff -r 8f84a608883d -r ea97aa6aeba2 Admin/Mercurial/misc.diff
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/Admin/Mercurial/misc.diff Tue Dec 30 11:10:01 2008 +0100
@@ -0,0 +1,20 @@
+diff -r hgweb/webcommands.py hgweb/webcommands.py
+653c653
+< desc = templatefilters.firstline(ctx.description())
+---
+> desc = ctx.description()
+diff -r templates/atom/changelogentry.tmpl templates/atom/changelogentry.tmpl
+2c2
+< #desc|strip|firstline|strip|escape#
+---
+> #desc|strip|escape#
+diff -r templates/rss/changelogentry.tmpl templates/rss/changelogentry.tmpl
+2c2
+< #desc|strip|firstline|strip|escape#
+---
+> #desc|strip|escape#
+diff -r templates/rss/filelogentry.tmpl templates/rss/filelogentry.tmpl
+2c2
+< #desc|strip|firstline|strip|escape#
+---
+> #desc|strip|escape#
diff -r 8f84a608883d -r ea97aa6aeba2 Admin/build
--- a/Admin/build Tue Dec 30 08:18:54 2008 +0100
+++ b/Admin/build Tue Dec 30 11:10:01 2008 +0100
@@ -7,7 +7,7 @@
 
 #paranoia setting for sunbroy
 PATH="/usr/local/dist/DIR/j2sdk1.5.0/bin:$PATH"
-PATH="/home/scala/scala/bin:$PATH"
+PATH="/home/scala/current/bin:$PATH"
 
 
 ## directory layout
@@ -101,15 +101,6 @@
   pushd "$ISABELLE_HOME/src/Pure" >/dev/null
   "$ISABELLE_TOOL" make jar || fail "Failed to build Pure.jar!"
   popd >/dev/null
-
-  if [ -d "$HOME/lib/jedit/current" ]; then
-    pushd "$ISABELLE_HOME/lib/jedit/plugin" >/dev/null
-    ./mk
-    [ -f ../isabelle.jar ] || fail "Failed to build jEdit plugin!"
-    popd >/dev/null
-  else
-    echo "Warning: skipping jedit plugin"
-  fi
 }
 
diff -r 8f84a608883d -r ea97aa6aeba2 Admin/isatest/settings/at-mac-poly-5.1-para
--- a/Admin/isatest/settings/at-mac-poly-5.1-para Tue Dec 30 08:18:54 2008 +0100
+++ b/Admin/isatest/settings/at-mac-poly-5.1-para Tue Dec 30 11:10:01 2008 +0100
@@ -4,7 +4,7 @@
   ML_SYSTEM="polyml-5.2.1"
   ML_PLATFORM="x86-darwin"
   ML_HOME="$POLYML_HOME/$ML_PLATFORM"
-  ML_OPTIONS="-H 2000"
+  ML_OPTIONS="--immutable 800 --mutable 1200"
 
 ISABELLE_HOME_USER=~/isabelle-at-mac-poly-e
 
diff -r 8f84a608883d -r ea97aa6aeba2 CONTRIBUTORS
--- a/CONTRIBUTORS Tue Dec 30 08:18:54 2008 +0100
+++ b/CONTRIBUTORS Tue Dec 30 11:10:01 2008 +0100
@@ -7,6 +7,9 @@
 Contributions to this Isabelle version
 --------------------------------------
 
+* December 2008: Armin Heller, TUM and Alexander Krauss, TUM
+  Method "sizechange" for advanced termination proofs.
+
 * November 2008: Timothy Bourke, NICTA
   Performance improvement (factor 50) for find_theorems.
 
@@ -204,5 +207,3 @@
 
 * 2004/2005: Tjark Weber, TUM
   SAT solver method using zChaff. Improved version of HOL/refute.
-
-$Id$
diff -r 8f84a608883d -r ea97aa6aeba2 INSTALL
--- a/INSTALL Tue Dec 30 08:18:54 2008 +0100
+++ b/INSTALL Tue Dec 30 11:10:01 2008 +0100
@@ -85,6 +85,3 @@
 Note that the site-wide Isabelle installation may already provide
 Isabelle executables in some global bin directory (such as
 /usr/local/bin).
-
-
-$Id$
diff -r 8f84a608883d -r ea97aa6aeba2 NEWS
--- a/NEWS Tue Dec 30 08:18:54 2008 +0100
+++ b/NEWS Tue Dec 30 11:10:01 2008 +0100
@@ -42,6 +42,11 @@
 ISABELLE_HOME_USER can be changed in Isabelle/etc/settings of any
 Isabelle distribution.
 
+* Proofs of fully specified statements are run in parallel on
+multi-core systems. A speedup factor of 2-3 can be expected on a
+regular 4-core machine, if the initial heap space is made reasonably
+large (cf. Poly/ML option -H). [Poly/ML 5.2.1 or later]
+
 * The Isabelle System Manual (system) has been updated, with formally
 checked references as hyperlinks.
 
@@ -55,8 +60,8 @@
 * Removed exotic 'token_translation' command. INCOMPATIBILITY, use
 ML interface instead.
 
-* There is a new lexical item "float" with syntax ["-"] digit+ "." digit+,
-without spaces.
+* There is a new syntactic category "float_const" for signed decimal
+fractions (e.g. 123.45 or -123.45).
 
 
 *** Pure ***
@@ -152,11 +157,12 @@
 
 *** HOL ***
 
-* Made repository layout more coherent with logical
-distribution structure:
+* Made source layout more coherent with logical distribution
+structure:
 
     src/HOL/Library/RType.thy ~> src/HOL/Typerep.thy
     src/HOL/Library/Code_Message.thy ~> src/HOL/
+    src/HOL/Library/Dense_Linear_Order.thy ~> src/HOL/
    src/HOL/Library/GCD.thy ~> src/HOL/
    src/HOL/Library/Order_Relation.thy ~> src/HOL/
    src/HOL/Library/Parity.thy ~> src/HOL/
@@ -172,6 +178,7 @@
    src/HOL/Complex/Complex_Main.thy ~> src/HOL/
    src/HOL/Complex/Complex.thy ~> src/HOL/
    src/HOL/Complex/FrechetDeriv.thy ~> src/HOL/
+    src/HOL/Complex/Fundamental_Theorem_Algebra.thy ~> src/HOL/
    src/HOL/Hyperreal/Deriv.thy ~> src/HOL/
    src/HOL/Hyperreal/Fact.thy ~> src/HOL/
    src/HOL/Hyperreal/Integration.thy ~> src/HOL/
@@ -181,9 +188,12 @@
    src/HOL/Hyperreal/MacLaurin.thy ~> src/HOL/
    src/HOL/Hyperreal/NthRoot.thy ~> src/HOL/
    src/HOL/Hyperreal/Series.thy ~> src/HOL/
+    src/HOL/Hyperreal/SEQ.thy ~> src/HOL/
    src/HOL/Hyperreal/Taylor.thy ~> src/HOL/
    src/HOL/Hyperreal/Transcendental.thy ~> src/HOL/
    src/HOL/Real/Float ~> src/HOL/Library/
+    src/HOL/Real/HahnBanach ~> src/HOL/HahnBanach
+    src/HOL/Real/RealVector.thy ~> src/HOL/
    src/HOL/arith_data.ML ~> src/HOL/Tools
    src/HOL/hologic.ML ~> src/HOL/Tools
 
@@ -239,6 +249,10 @@
 mechanisms may be specified (currently, [SML], [code] or [nbe]). See
 further src/HOL/ex/Eval_Examples.thy.
 
+* New method "sizechange" to automate termination proofs using (a
+modification of) the size-change principle. Requires SAT solver. See
+src/HOL/ex/Termination.thy for examples.
+
 * HOL/Orderings: class "wellorder" moved here, with explicit induction
 rule "less_induct" as assumption. For instantiation of "wellorder" by
 means of predicate "wf", use rule wf_wellorderI. INCOMPATIBILITY.
@@ -388,6 +402,14 @@
 
 *** ML ***
 
+* High-level support for concurrent ML programming, see
+src/Pure/Concurrent. The data-oriented model of "future values" is
+particularly convenient to organize independent functional
+computations. The concept of "synchronized variables" provides a
+higher-order interface for components with shared state, avoiding the
+delicate details of mutexes and condition variables. [Poly/ML 5.2.1
+or later]
+
 * Simplified ML oracle interface Thm.add_oracle promotes 'a -> cterm
 to 'a -> thm, while results are always tagged with an authentic oracle
 name. The Isar command 'oracle' is now polymorphic, no argument type
@@ -857,8 +879,8 @@
 print_mode_active, PrintMode.setmp etc. INCOMPATIBILITY.
 
 * Functions system/system_out provide a robust way to invoke external
-shell commands, with propagation of interrupts (requires Poly/ML 5.2).
-Do not use OS.Process.system etc. from the basis library!
+shell commands, with propagation of interrupts (requires Poly/ML
+5.2.1). Do not use OS.Process.system etc. from the basis library!
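
The NEWS entries above advise a generously sized initial Poly/ML heap when relying on parallel proof checking (cf. option -H). As a hypothetical illustration, not part of this changeset, a user settings fragment in $ISABELLE_HOME_USER/etc/settings could look roughly like this; the concrete numbers are arbitrary examples.

  # hypothetical ~/.isabelle/etc/settings fragment (illustrative values only)
  ML_OPTIONS="-H 1000"   # generous initial heap, cf. the NEWS entry on parallel proofs
  # the isatest configuration touched by this changeset uses instead:
  #   ML_OPTIONS="--immutable 800 --mutable 1200"
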
 
 *** System ***
 
@@ -5953,6 +5975,3 @@
 types;
 
 :mode=text:wrap=hard:maxLineLen=72:
-
-
-$Id$
diff -r 8f84a608883d -r ea97aa6aeba2 build
--- a/build Tue Dec 30 08:18:54 2008 +0100
+++ b/build Tue Dec 30 11:10:01 2008 +0100
@@ -1,6 +1,5 @@
 #!/usr/bin/env bash
 #
-# $Id$
 # Author: Markus Wenzel, TU Muenchen
 #
 # build - compile the Isabelle system and object-logics
diff -r 8f84a608883d -r ea97aa6aeba2 doc-src/IsarAdvanced/Classes/style.sty
--- a/doc-src/IsarAdvanced/Classes/style.sty Tue Dec 30 08:18:54 2008 +0100
+++ b/doc-src/IsarAdvanced/Classes/style.sty Tue Dec 30 11:10:01 2008 +0100
@@ -30,7 +30,7 @@
 
 \pagestyle{headings}
 \binperiod
-\underscoreon
+\underscoreoff
 
 \renewcommand{\isadigit}[1]{\isamath{#1}}
 
diff -r 8f84a608883d -r ea97aa6aeba2 doc-src/IsarAdvanced/Codegen/style.sty
--- a/doc-src/IsarAdvanced/Codegen/style.sty Tue Dec 30 08:18:54 2008 +0100
+++ b/doc-src/IsarAdvanced/Codegen/style.sty Tue Dec 30 11:10:01 2008 +0100
@@ -42,7 +42,7 @@
 
 \pagestyle{headings}
 \binperiod
-\underscoreon
+\underscoreoff
 
 \renewcommand{\isadigit}[1]{\isamath{#1}}
 
diff -r 8f84a608883d -r ea97aa6aeba2 doc-src/IsarImplementation/Thy/ML.thy
--- a/doc-src/IsarImplementation/Thy/ML.thy Tue Dec 30 08:18:54 2008 +0100
+++ b/doc-src/IsarImplementation/Thy/ML.thy Tue Dec 30 11:10:01 2008 +0100
@@ -107,18 +107,23 @@
 section {* Thread-safe programming *}
 
 text {*
-  Recent versions of Poly/ML (5.2 or later) support multithreaded
-  execution based on native operating system threads of the underlying
-  platform. Thus threads will actually be executed in parallel on
-  multi-core systems. A speedup-factor of approximately 2--4 can be
-  expected for large well-structured Isabelle sessions, where theories
-  are organized as a graph with sufficiently many independent nodes.
+  Recent versions of Poly/ML (5.2.1 or later) support robust
+  multithreaded execution, based on native operating system threads of
+  the underlying platform. Thus threads will actually be executed in
+  parallel on multi-core systems. A speedup-factor of approximately
+  1.5--3 can be expected on a regular 4-core machine.\footnote{There
+  is some inherent limitation of the speedup factor due to garbage
+  collection, which is still sequential. It helps to provide initial
+  heap space generously, using the \texttt{-H} option of Poly/ML.}
+  Threads also help to organize advanced operations of the system,
+  with explicit communication between sub-components, real-time
+  conditions, time-outs etc.
 
-  Threads lack the memory protection of separate processes, but
+  Threads lack the memory protection of separate processes, and
   operate concurrently on shared heap memory. This has the advantage
   that results of independent computations are immediately available
-  to other threads, without requiring explicit communication,
-  reloading, or even recoding of data.
+  to other threads, without requiring untyped character streams,
+  awkward serialization etc.
 
   On the other hand, some programming guidelines need to be observed
   in order to make unprotected parallelism work out smoothly. While
@@ -143,27 +148,29 @@
 
   \end{itemize}
 
-  Note that ML bindings within the toplevel environment (@{verbatim
-  "type"}, @{verbatim val}, @{verbatim "structure"} etc.) due to
-  run-time invocation of the compiler are non-critical, because
-  Isabelle/Isar incorporates such bindings within the theory or proof
-  context.
- The majority of tools implemented within the Isabelle/Isar framework will not require any of these critical elements: nothing special needs to be observed when staying in the purely functional fragment of ML. Note that output via the official Isabelle channels does not - even count as direct I/O in the above sense, so the operations @{ML - "writeln"}, @{ML "warning"}, @{ML "tracing"} etc.\ are safe. + count as direct I/O, so the operations @{ML "writeln"}, @{ML + "warning"}, @{ML "tracing"} etc.\ are safe. - \paragraph{Multithreading in Isabelle/Isar.} Our parallel execution - model is centered around the theory loader. Whenever a given - subgraph of theories needs to be updated, the system schedules a - number of threads to process the sources as required, while - observing their dependencies. Thus concurrency is limited to - independent nodes according to the theory import relation. + Moreover, ML bindings within the toplevel environment (@{verbatim + "type"}, @{verbatim val}, @{verbatim "structure"} etc.) due to + run-time invocation of the compiler are also safe, because + Isabelle/Isar manages this as part of the theory or proof context. - Any user-code that works relatively to the present background theory + \paragraph{Multithreading in Isabelle/Isar.} The theory loader + automatically exploits the overall parallelism of independent nodes + in the development graph, as well as the inherent irrelevance of + proofs for goals being fully specified in advance. This means, + checking of individual Isar proofs is parallelized by default. + Beyond that, very sophisticated proof tools may use local + parallelism internally, via the general programming model of + ``future values'' (see also @{"file" + "~~/src/Pure/Concurrent/future.ML"}). + + Any ML code that works relatively to the present background theory is already safe. Contextual data may be easily stored within the theory or proof context, thanks to the generic data concept of Isabelle/Isar (see \secref{sec:context-data}). This greatly @@ -179,9 +186,13 @@ quickly, otherwise parallel execution performance may degrade significantly. - Despite this potential bottle-neck, we refrain from fine-grained - locking mechanism within user-code: the restriction to a single lock - prevents deadlocks without demanding special precautions. + Despite this potential bottle-neck, centralized locking is + convenient, because it prevents deadlocks without demanding special + precautions. Explicit communication demands other means, though. + The high-level abstraction of synchronized variables @{"file" + "~~/src/Pure/Concurrent/synchronized.ML"} enables parallel + components to communicate via shared state; see also @{"file" + "~~/src/Pure/Concurrent/mailbox.ML"} as canonical example. \paragraph{Good conduct of impure programs.} The following guidelines enable non-functional programs to participate in diff -r 8f84a608883d -r ea97aa6aeba2 doc-src/IsarImplementation/Thy/document/ML.tex --- a/doc-src/IsarImplementation/Thy/document/ML.tex Tue Dec 30 08:18:54 2008 +0100 +++ b/doc-src/IsarImplementation/Thy/document/ML.tex Tue Dec 30 11:10:01 2008 +0100 @@ -128,18 +128,23 @@ \isamarkuptrue% % \begin{isamarkuptext}% -Recent versions of Poly/ML (5.2 or later) support multithreaded - execution based on native operating system threads of the underlying - platform. Thus threads will actually be executed in parallel on - multi-core systems. 
A speedup-factor of approximately 2--4 can be - expected for large well-structured Isabelle sessions, where theories - are organized as a graph with sufficiently many independent nodes. +Recent versions of Poly/ML (5.2.1 or later) support robust + multithreaded execution, based on native operating system threads of + the underlying platform. Thus threads will actually be executed in + parallel on multi-core systems. A speedup-factor of approximately + 1.5--3 can be expected on a regular 4-core machine.\footnote{There + is some inherent limitation of the speedup factor due to garbage + collection, which is still sequential. It helps to provide initial + heap space generously, using the \texttt{-H} option of Poly/ML.} + Threads also help to organize advanced operations of the system, + with explicit communication between sub-components, real-time + conditions, time-outs etc. - Threads lack the memory protection of separate processes, but + Threads lack the memory protection of separate processes, and operate concurrently on shared heap memory. This has the advantage that results of independent computations are immediately available - to other threads, without requiring explicit communication, - reloading, or even recoding of data. + to other threads, without requiring untyped character streams, + awkward serialization etc. On the other hand, some programming guidelines need to be observed in order to make unprotected parallelism work out smoothly. While @@ -163,25 +168,26 @@ \end{itemize} - Note that ML bindings within the toplevel environment (\verb|type|, \verb|val|, \verb|structure| etc.) due to - run-time invocation of the compiler are non-critical, because - Isabelle/Isar incorporates such bindings within the theory or proof - context. - The majority of tools implemented within the Isabelle/Isar framework will not require any of these critical elements: nothing special needs to be observed when staying in the purely functional fragment of ML. Note that output via the official Isabelle channels does not - even count as direct I/O in the above sense, so the operations \verb|writeln|, \verb|warning|, \verb|tracing| etc.\ are safe. + count as direct I/O, so the operations \verb|writeln|, \verb|warning|, \verb|tracing| etc.\ are safe. - \paragraph{Multithreading in Isabelle/Isar.} Our parallel execution - model is centered around the theory loader. Whenever a given - subgraph of theories needs to be updated, the system schedules a - number of threads to process the sources as required, while - observing their dependencies. Thus concurrency is limited to - independent nodes according to the theory import relation. + Moreover, ML bindings within the toplevel environment (\verb|type|, \verb|val|, \verb|structure| etc.) due to + run-time invocation of the compiler are also safe, because + Isabelle/Isar manages this as part of the theory or proof context. - Any user-code that works relatively to the present background theory + \paragraph{Multithreading in Isabelle/Isar.} The theory loader + automatically exploits the overall parallelism of independent nodes + in the development graph, as well as the inherent irrelevance of + proofs for goals being fully specified in advance. This means, + checking of individual Isar proofs is parallelized by default. 
+ Beyond that, very sophisticated proof tools may use local + parallelism internally, via the general programming model of + ``future values'' (see also \hyperlink{file.~~/src/Pure/Concurrent/future.ML}{\mbox{\isa{\isatt{{\isachartilde}{\isachartilde}{\isacharslash}src{\isacharslash}Pure{\isacharslash}Concurrent{\isacharslash}future{\isachardot}ML}}}}). + + Any ML code that works relatively to the present background theory is already safe. Contextual data may be easily stored within the theory or proof context, thanks to the generic data concept of Isabelle/Isar (see \secref{sec:context-data}). This greatly @@ -197,9 +203,11 @@ quickly, otherwise parallel execution performance may degrade significantly. - Despite this potential bottle-neck, we refrain from fine-grained - locking mechanism within user-code: the restriction to a single lock - prevents deadlocks without demanding special precautions. + Despite this potential bottle-neck, centralized locking is + convenient, because it prevents deadlocks without demanding special + precautions. Explicit communication demands other means, though. + The high-level abstraction of synchronized variables \hyperlink{file.~~/src/Pure/Concurrent/synchronized.ML}{\mbox{\isa{\isatt{{\isachartilde}{\isachartilde}{\isacharslash}src{\isacharslash}Pure{\isacharslash}Concurrent{\isacharslash}synchronized{\isachardot}ML}}}} enables parallel + components to communicate via shared state; see also \hyperlink{file.~~/src/Pure/Concurrent/mailbox.ML}{\mbox{\isa{\isatt{{\isachartilde}{\isachartilde}{\isacharslash}src{\isacharslash}Pure{\isacharslash}Concurrent{\isacharslash}mailbox{\isachardot}ML}}}} as canonical example. \paragraph{Good conduct of impure programs.} The following guidelines enable non-functional programs to participate in diff -r 8f84a608883d -r ea97aa6aeba2 doc-src/IsarRef/Thy/HOL_Specific.thy --- a/doc-src/IsarRef/Thy/HOL_Specific.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/doc-src/IsarRef/Thy/HOL_Specific.thy Tue Dec 30 11:10:01 2008 +0100 @@ -804,12 +804,15 @@ @{command_def (HOL) "print_atps"}@{text "\<^sup>*"} & : & @{text "context \"} \\ @{command_def (HOL) "atp_info"}@{text "\<^sup>*"} & : & @{text "any \"} \\ @{command_def (HOL) "atp_kill"}@{text "\<^sup>*"} & : & @{text "any \"} \\ + @{command_def (HOL) "atp_messages"}@{text "\<^sup>*"} & : & @{text "any \"} \\ @{method_def (HOL) metis} & : & @{text method} \\ \end{matharray} \begin{rail} 'sledgehammer' (nameref *) ; + 'atp\_messages' ('(' nat ')')? + ; 'metis' thmrefs ; @@ -842,6 +845,12 @@ \item @{command (HOL) atp_kill} terminates all presently running provers. + \item @{command (HOL) atp_messages} displays recent messages issued + by automated theorem provers. This allows to examine results that + might have got lost due to the asynchronous nature of default + @{command (HOL) sledgehammer} output. An optional message limit may + be specified (default 5). + \item @{method (HOL) metis}~@{text "facts"} invokes the Metis prover with the given facts. 
Metis is an automated proof tool of medium strength, but is fully integrated into Isabelle/HOL, with explicit diff -r 8f84a608883d -r ea97aa6aeba2 doc-src/IsarRef/Thy/Inner_Syntax.thy --- a/doc-src/IsarRef/Thy/Inner_Syntax.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/doc-src/IsarRef/Thy/Inner_Syntax.thy Tue Dec 30 11:10:01 2008 +0100 @@ -683,17 +683,23 @@ @{syntax_def (inner) tid} & = & @{syntax_ref typefree} \\ @{syntax_def (inner) tvar} & = & @{syntax_ref typevar} \\ @{syntax_def (inner) num} & = & @{syntax_ref nat}@{text " | "}@{verbatim "-"}@{syntax_ref nat} \\ + @{syntax_def (inner) float_token} & = & @{syntax_ref nat}@{verbatim "."}@{syntax_ref nat}@{text " | "}@{verbatim "-"}@{syntax_ref nat}@{verbatim "."}@{syntax_ref nat} \\ @{syntax_def (inner) xnum} & = & @{verbatim "#"}@{syntax_ref nat}@{text " | "}@{verbatim "#-"}@{syntax_ref nat} \\ @{syntax_def (inner) xstr} & = & @{verbatim "''"} @{text "\"} @{verbatim "''"} \\ \end{supertabular} \end{center} - The token categories @{syntax_ref (inner) num}, @{syntax_ref (inner) - xnum}, and @{syntax_ref (inner) xstr} are not used in Pure. - Object-logics may implement numerals and string constants by adding - appropriate syntax declarations, together with some translation - functions (e.g.\ see Isabelle/HOL). + The token categories @{syntax (inner) num}, @{syntax (inner) + float_token}, @{syntax (inner) xnum}, and @{syntax (inner) xstr} are + not used in Pure. Object-logics may implement numerals and string + constants by adding appropriate syntax declarations, together with + some translation functions (e.g.\ see Isabelle/HOL). + + The derived categories @{syntax_def (inner) num_const} and + @{syntax_def (inner) float_const} provide robust access to @{syntax + (inner) num}, and @{syntax (inner) float_token}, respectively: the + syntax tree holds a syntactic constant instead of a free variable. 
*} diff -r 8f84a608883d -r ea97aa6aeba2 doc-src/IsarRef/Thy/document/HOL_Specific.tex --- a/doc-src/IsarRef/Thy/document/HOL_Specific.tex Tue Dec 30 08:18:54 2008 +0100 +++ b/doc-src/IsarRef/Thy/document/HOL_Specific.tex Tue Dec 30 11:10:01 2008 +0100 @@ -814,12 +814,15 @@ \indexdef{HOL}{command}{print\_atps}\hypertarget{command.HOL.print-atps}{\hyperlink{command.HOL.print-atps}{\mbox{\isa{\isacommand{print{\isacharunderscore}atps}}}}}\isa{{\isachardoublequote}\isactrlsup {\isacharasterisk}{\isachardoublequote}} & : & \isa{{\isachardoublequote}context\ {\isasymrightarrow}{\isachardoublequote}} \\ \indexdef{HOL}{command}{atp\_info}\hypertarget{command.HOL.atp-info}{\hyperlink{command.HOL.atp-info}{\mbox{\isa{\isacommand{atp{\isacharunderscore}info}}}}}\isa{{\isachardoublequote}\isactrlsup {\isacharasterisk}{\isachardoublequote}} & : & \isa{{\isachardoublequote}any\ {\isasymrightarrow}{\isachardoublequote}} \\ \indexdef{HOL}{command}{atp\_kill}\hypertarget{command.HOL.atp-kill}{\hyperlink{command.HOL.atp-kill}{\mbox{\isa{\isacommand{atp{\isacharunderscore}kill}}}}}\isa{{\isachardoublequote}\isactrlsup {\isacharasterisk}{\isachardoublequote}} & : & \isa{{\isachardoublequote}any\ {\isasymrightarrow}{\isachardoublequote}} \\ + \indexdef{HOL}{command}{atp\_messages}\hypertarget{command.HOL.atp-messages}{\hyperlink{command.HOL.atp-messages}{\mbox{\isa{\isacommand{atp{\isacharunderscore}messages}}}}}\isa{{\isachardoublequote}\isactrlsup {\isacharasterisk}{\isachardoublequote}} & : & \isa{{\isachardoublequote}any\ {\isasymrightarrow}{\isachardoublequote}} \\ \indexdef{HOL}{method}{metis}\hypertarget{method.HOL.metis}{\hyperlink{method.HOL.metis}{\mbox{\isa{metis}}}} & : & \isa{method} \\ \end{matharray} \begin{rail} 'sledgehammer' (nameref *) ; + 'atp\_messages' ('(' nat ')')? + ; 'metis' thmrefs ; @@ -850,6 +853,12 @@ \item \hyperlink{command.HOL.atp-kill}{\mbox{\isa{\isacommand{atp{\isacharunderscore}kill}}}} terminates all presently running provers. + \item \hyperlink{command.HOL.atp-messages}{\mbox{\isa{\isacommand{atp{\isacharunderscore}messages}}}} displays recent messages issued + by automated theorem provers. This allows to examine results that + might have got lost due to the asynchronous nature of default + \hyperlink{command.HOL.sledgehammer}{\mbox{\isa{\isacommand{sledgehammer}}}} output. An optional message limit may + be specified (default 5). + \item \hyperlink{method.HOL.metis}{\mbox{\isa{metis}}}~\isa{{\isachardoublequote}facts{\isachardoublequote}} invokes the Metis prover with the given facts. 
Metis is an automated proof tool of medium strength, but is fully integrated into Isabelle/HOL, with explicit diff -r 8f84a608883d -r ea97aa6aeba2 doc-src/IsarRef/Thy/document/Inner_Syntax.tex --- a/doc-src/IsarRef/Thy/document/Inner_Syntax.tex Tue Dec 30 08:18:54 2008 +0100 +++ b/doc-src/IsarRef/Thy/document/Inner_Syntax.tex Tue Dec 30 11:10:01 2008 +0100 @@ -702,16 +702,21 @@ \indexdef{inner}{syntax}{tid}\hypertarget{syntax.inner.tid}{\hyperlink{syntax.inner.tid}{\mbox{\isa{tid}}}} & = & \indexref{}{syntax}{typefree}\hyperlink{syntax.typefree}{\mbox{\isa{typefree}}} \\ \indexdef{inner}{syntax}{tvar}\hypertarget{syntax.inner.tvar}{\hyperlink{syntax.inner.tvar}{\mbox{\isa{tvar}}}} & = & \indexref{}{syntax}{typevar}\hyperlink{syntax.typevar}{\mbox{\isa{typevar}}} \\ \indexdef{inner}{syntax}{num}\hypertarget{syntax.inner.num}{\hyperlink{syntax.inner.num}{\mbox{\isa{num}}}} & = & \indexref{}{syntax}{nat}\hyperlink{syntax.nat}{\mbox{\isa{nat}}}\isa{{\isachardoublequote}\ \ {\isacharbar}\ \ {\isachardoublequote}}\verb|-|\indexref{}{syntax}{nat}\hyperlink{syntax.nat}{\mbox{\isa{nat}}} \\ + \indexdef{inner}{syntax}{float\_token}\hypertarget{syntax.inner.float-token}{\hyperlink{syntax.inner.float-token}{\mbox{\isa{float{\isacharunderscore}token}}}} & = & \indexref{}{syntax}{nat}\hyperlink{syntax.nat}{\mbox{\isa{nat}}}\verb|.|\indexref{}{syntax}{nat}\hyperlink{syntax.nat}{\mbox{\isa{nat}}}\isa{{\isachardoublequote}\ \ {\isacharbar}\ \ {\isachardoublequote}}\verb|-|\indexref{}{syntax}{nat}\hyperlink{syntax.nat}{\mbox{\isa{nat}}}\verb|.|\indexref{}{syntax}{nat}\hyperlink{syntax.nat}{\mbox{\isa{nat}}} \\ \indexdef{inner}{syntax}{xnum}\hypertarget{syntax.inner.xnum}{\hyperlink{syntax.inner.xnum}{\mbox{\isa{xnum}}}} & = & \verb|#|\indexref{}{syntax}{nat}\hyperlink{syntax.nat}{\mbox{\isa{nat}}}\isa{{\isachardoublequote}\ \ {\isacharbar}\ \ {\isachardoublequote}}\verb|#-|\indexref{}{syntax}{nat}\hyperlink{syntax.nat}{\mbox{\isa{nat}}} \\ \indexdef{inner}{syntax}{xstr}\hypertarget{syntax.inner.xstr}{\hyperlink{syntax.inner.xstr}{\mbox{\isa{xstr}}}} & = & \verb|''| \isa{{\isachardoublequote}{\isasymdots}{\isachardoublequote}} \verb|''| \\ \end{supertabular} \end{center} - The token categories \indexref{inner}{syntax}{num}\hyperlink{syntax.inner.num}{\mbox{\isa{num}}}, \indexref{inner}{syntax}{xnum}\hyperlink{syntax.inner.xnum}{\mbox{\isa{xnum}}}, and \indexref{inner}{syntax}{xstr}\hyperlink{syntax.inner.xstr}{\mbox{\isa{xstr}}} are not used in Pure. - Object-logics may implement numerals and string constants by adding - appropriate syntax declarations, together with some translation - functions (e.g.\ see Isabelle/HOL).% + The token categories \hyperlink{syntax.inner.num}{\mbox{\isa{num}}}, \hyperlink{syntax.inner.float-token}{\mbox{\isa{float{\isacharunderscore}token}}}, \hyperlink{syntax.inner.xnum}{\mbox{\isa{xnum}}}, and \hyperlink{syntax.inner.xstr}{\mbox{\isa{xstr}}} are + not used in Pure. Object-logics may implement numerals and string + constants by adding appropriate syntax declarations, together with + some translation functions (e.g.\ see Isabelle/HOL). 
+ + The derived categories \indexdef{inner}{syntax}{num\_const}\hypertarget{syntax.inner.num-const}{\hyperlink{syntax.inner.num-const}{\mbox{\isa{num{\isacharunderscore}const}}}} and + \indexdef{inner}{syntax}{float\_const}\hypertarget{syntax.inner.float-const}{\hyperlink{syntax.inner.float-const}{\mbox{\isa{float{\isacharunderscore}const}}}} provide robust access to \hyperlink{syntax.inner.num}{\mbox{\isa{num}}}, and \hyperlink{syntax.inner.float-token}{\mbox{\isa{float{\isacharunderscore}token}}}, respectively: the + syntax tree holds a syntactic constant instead of a free variable.% \end{isamarkuptext}% \isamarkuptrue% % diff -r 8f84a608883d -r ea97aa6aeba2 etc/isar-keywords-ZF.el --- a/etc/isar-keywords-ZF.el Tue Dec 30 08:18:54 2008 +0100 +++ b/etc/isar-keywords-ZF.el Tue Dec 30 11:10:01 2008 +0100 @@ -200,7 +200,6 @@ "use" "use_thy" "using" - "value" "welcome" "with" "{" @@ -323,7 +322,6 @@ "typ" "unused_thms" "use_thy" - "value" "welcome")) (defconst isar-keywords-theory-begin diff -r 8f84a608883d -r ea97aa6aeba2 etc/isar-keywords.el --- a/etc/isar-keywords.el Tue Dec 30 08:18:54 2008 +0100 +++ b/etc/isar-keywords.el Tue Dec 30 11:10:01 2008 +0100 @@ -32,6 +32,7 @@ "atom_decl" "atp_info" "atp_kill" + "atp_messages" "automaton" "ax_specification" "axclass" @@ -334,6 +335,7 @@ "ML_val" "atp_info" "atp_kill" + "atp_messages" "cd" "class_deps" "code_deps" diff -r 8f84a608883d -r ea97aa6aeba2 etc/proofgeneral-settings.el --- a/etc/proofgeneral-settings.el Tue Dec 30 08:18:54 2008 +0100 +++ b/etc/proofgeneral-settings.el Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,3 @@ -;;; -;;; $Id$ -;;; ;;; Options for Proof General ;; Examples for sensible settings: diff -r 8f84a608883d -r ea97aa6aeba2 etc/settings --- a/etc/settings Tue Dec 30 08:18:54 2008 +0100 +++ b/etc/settings Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ # -*- shell-script -*- :mode=shellscript: -# $Id$ # # Isabelle settings -- site defaults. 
# @@ -202,9 +201,8 @@ "/opt/ProofGeneral" \ "") -PROOFGENERAL_EMACS=$(choosefrom /Applications/Emacs.app/Contents/MacOS/Emacs emacs22) -PROOFGENERAL_OPTIONS="-p $PROOFGENERAL_EMACS" -#PROOFGENERAL_OPTIONS="-m no_brackets -m no_type_brackets -x true -p $PROOFGENERAL_EMACS" +PROOFGENERAL_OPTIONS="" +#PROOFGENERAL_OPTIONS="-m no_brackets -m no_type_brackets" # Automatic setup of remote fonts #XSYMBOL_INSTALLFONTS="xset fp+ tcp/isafonts.informatik.tu-muenchen.de:7200" diff -r 8f84a608883d -r ea97aa6aeba2 etc/symbols --- a/etc/symbols Tue Dec 30 08:18:54 2008 +0100 +++ b/etc/symbols Tue Dec 30 11:10:01 2008 +0100 @@ -1,4 +1,3 @@ -# $Id$ # Default interpretation of some Isabelle symbols \ code: 0x01d7ec font: Isabelle diff -r 8f84a608883d -r ea97aa6aeba2 etc/user-settings.sample --- a/etc/user-settings.sample Tue Dec 30 08:18:54 2008 +0100 +++ b/etc/user-settings.sample Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ # -*- shell-script -*- -# $Id$ # # Isabelle user settings sample -- for use in ~/.isabelle/etc/settings diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/browser --- a/lib/Tools/browser Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/browser Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # DESCRIPTION: Isabelle graph browser diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/codegen --- a/lib/Tools/codegen Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/codegen Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Florian Haftmann, TUM # # DESCRIPTION: issue code generation from shell diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/dimacs2hol --- a/lib/Tools/dimacs2hol Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/dimacs2hol Tue Dec 30 11:10:01 2008 +0100 @@ -1,8 +1,6 @@ #!/usr/bin/env bash # -# $Id$ # Author: Tjark Weber -# Copyright 2004 # # DESCRIPTION: convert DIMACS CNF files into Isabelle/HOL theories diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/display --- a/lib/Tools/display Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/display Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # DESCRIPTION: display document (in DVI or PDF format) diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/doc --- a/lib/Tools/doc Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/doc Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # DESCRIPTION: view Isabelle documentation diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/document --- a/lib/Tools/document Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/document Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # DESCRIPTION: prepare theory session document diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/emacs --- a/lib/Tools/emacs Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/emacs Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Makarius # # DESCRIPTION: Proof General / Emacs interface wrapper diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/env --- a/lib/Tools/env Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/env Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # DESCRIPTION: run a program in a modified environment diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/findlogics --- a/lib/Tools/findlogics Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/findlogics Tue Dec 30 
11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # DESCRIPTION: collect heap names from ISABELLE_PATH diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/getenv --- a/lib/Tools/getenv Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/getenv Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # DESCRIPTION: get values from Isabelle settings environment diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/install --- a/lib/Tools/install Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/install Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # DESCRIPTION: install standalone Isabelle executables diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/java --- a/lib/Tools/java Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/java Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Makarius # # DESCRIPTION: invoke Java within the Isabelle environment diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/jedit --- a/lib/Tools/jedit Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/jedit Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Makarius # # DESCRIPTION: Isabelle/jEdit interface wrapper diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/keywords --- a/lib/Tools/keywords Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/keywords Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Makarius # # DESCRIPTION: generate outer syntax keyword files from session logs diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/latex --- a/lib/Tools/latex Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/latex Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # DESCRIPTION: run LaTeX (and related tools) diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/logo --- a/lib/Tools/logo Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/logo Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # DESCRIPTION: create an instance of the Isabelle logo diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/make --- a/lib/Tools/make Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/make Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # DESCRIPTION: Isabelle make utility diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/makeall --- a/lib/Tools/makeall Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/makeall Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # DESCRIPTION: apply make utility to all logics diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/mkdir --- a/lib/Tools/mkdir Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/mkdir Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # DESCRIPTION: prepare logic session directory diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/mkfifo --- a/lib/Tools/mkfifo Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/mkfifo Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Makarius # # DESCRIPTION: create named pipe diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/mkproject --- a/lib/Tools/mkproject Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/mkproject Tue Dec 30 11:10:01 2008 +0100 @@ -1,7 +1,6 @@ #!/usr/bin/env bash # 
-# $Id$ -# Author: David Aspinall and Makarius Wenzel +# Author: David Aspinall # # DESCRIPTION: prepare a session directory for PG-Eclipse diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/print --- a/lib/Tools/print Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/print Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # DESCRIPTION: print document diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/rmfifo --- a/lib/Tools/rmfifo Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/rmfifo Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Makarius # # DESCRIPTION: remove named pipe diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/scala --- a/lib/Tools/scala Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/scala Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Makarius # # DESCRIPTION: invoke Scala within the Isabelle environment diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/tty --- a/lib/Tools/tty Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/tty Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # DESCRIPTION: run Isabelle process with plain tty interaction diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/unsymbolize --- a/lib/Tools/unsymbolize Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/unsymbolize Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # DESCRIPTION: remove unreadable symbol names from sources diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/usedir --- a/lib/Tools/usedir Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/usedir Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # DESCRIPTION: build object-logic or run examples @@ -40,6 +39,11 @@ echo " ISABELLE_USEDIR_OPTIONS=$ISABELLE_USEDIR_OPTIONS" echo " HOL_USEDIR_OPTIONS=$HOL_USEDIR_OPTIONS" echo + echo " ML_PLATFORM=$ML_PLATFORM" + echo " ML_HOME=$ML_HOME" + echo " ML_SYSTEM=$ML_SYSTEM" + echo " ML_OPTIONS=$ML_OPTIONS" + echo exit 1 } diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/version --- a/lib/Tools/version Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/version Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Stefan Berghofer, TU Muenchen # # DESCRIPTION: display Isabelle version diff -r 8f84a608883d -r ea97aa6aeba2 lib/Tools/yxml --- a/lib/Tools/yxml Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/Tools/yxml Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Makarius # # DESCRIPTION: simple XML to YXML converter diff -r 8f84a608883d -r ea97aa6aeba2 lib/jedit/isabelle.xml --- a/lib/jedit/isabelle.xml Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/jedit/isabelle.xml Tue Dec 30 11:10:01 2008 +0100 @@ -56,6 +56,7 @@ atom_decl + attach automaton avoids @@ -154,7 +155,6 @@ if imports in - includes induction inductive inductive_cases @@ -286,6 +286,7 @@ statespace structure subclass + sublocale subsect subsection subsubsect diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/dimacs2hol.pl --- a/lib/scripts/dimacs2hol.pl Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/scripts/dimacs2hol.pl Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,3 @@ -# -# $Id$ # # dimacs2hol.pl - convert files in DIMACS CNF format [1] into Isabelle/HOL # theories diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/feeder --- a/lib/scripts/feeder Tue Dec 30 08:18:54 2008 +0100 +++ 
b/lib/scripts/feeder Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # feeder - feed isabelle session diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/feeder.pl --- a/lib/scripts/feeder.pl Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/scripts/feeder.pl Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # feeder.pl - feed isabelle session diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/fileident --- a/lib/scripts/fileident Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/scripts/fileident Tue Dec 30 11:10:01 2008 +0100 @@ -1,7 +1,5 @@ #!/usr/bin/env bash # -# $Id$ -# # fileident --- produce file identification based FILE="$1" diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/getsettings --- a/lib/scripts/getsettings Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/scripts/getsettings Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,5 @@ # -*- shell-script -*- :mode=shellscript: -# $Id$ +# # Author: Markus Wenzel, TU Muenchen # # getsettings - bash source script to augment current env. diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/keywords.pl --- a/lib/scripts/keywords.pl Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/scripts/keywords.pl Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ # -# $Id$ # Author: Makarius # # keywords.pl - generate outer syntax keyword files from session logs @@ -79,8 +78,6 @@ print ";; Generated from ${sessions}.\n"; print ";; *** DO NOT EDIT *** DO NOT EDIT *** DO NOT EDIT ***\n"; print ";;\n"; - print ";; \$", "Id\$\n"; - print ";;\n"; for my $kind (@kinds) { my @names; @@ -154,7 +151,6 @@ EOF print "\n"; print "\n"; - print "\n"; print <<'EOF'; diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/polyml-platform --- a/lib/scripts/polyml-platform Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/scripts/polyml-platform Tue Dec 30 11:10:01 2008 +0100 @@ -1,7 +1,5 @@ #!/usr/bin/env bash # -# $Id$ -# # polyml-platform --- determine Poly/ML's idea of current hardware and # operating system type # diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/polyml-version --- a/lib/scripts/polyml-version Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/scripts/polyml-version Tue Dec 30 11:10:01 2008 +0100 @@ -1,7 +1,5 @@ #!/usr/bin/env bash # -# $Id$ -# # polyml-version --- determine Poly/ML runtime system version echo -n polyml diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/run-mosml --- a/lib/scripts/run-mosml Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/scripts/run-mosml Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # Moscow ML 2.00 startup script diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/run-polyml --- a/lib/scripts/run-polyml Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/scripts/run-polyml Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Makarius # # Poly/ML 5.1/5.2 startup script. diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/run-polyml-4.1.3 --- a/lib/scripts/run-polyml-4.1.3 Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/scripts/run-polyml-4.1.3 Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # Poly/ML 4.x startup script. diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/run-polyml-4.1.4 --- a/lib/scripts/run-polyml-4.1.4 Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/scripts/run-polyml-4.1.4 Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # Poly/ML 4.x startup script. 
diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/run-polyml-4.2.0 --- a/lib/scripts/run-polyml-4.2.0 Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/scripts/run-polyml-4.2.0 Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # Poly/ML 4.x startup script. diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/run-polyml-5.0 --- a/lib/scripts/run-polyml-5.0 Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/scripts/run-polyml-5.0 Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Makarius # # Poly/ML 5.0 startup script. diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/run-smlnj --- a/lib/scripts/run-smlnj Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/scripts/run-smlnj Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,5 @@ #!/usr/bin/env bash # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # SML/NJ startup script (for 110 or later). diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/system.pl --- a/lib/scripts/system.pl Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/scripts/system.pl Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ # -# $Id$ # Author: Makarius # # system.pl - invoke shell command line (with robust signal handling) diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/timestart.bash --- a/lib/scripts/timestart.bash Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/scripts/timestart.bash Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,5 @@ # -*- shell-script -*- -# $Id$ +# # Author: Makarius # # timestart - setup bash environment for timing. diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/timestop.bash --- a/lib/scripts/timestop.bash Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/scripts/timestop.bash Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,5 @@ # -*- shell-script -*- -# $Id$ +# # Author: Makarius # # timestop - report timing based on environment (cf. 
timestart.bash) diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/unsymbolize.pl --- a/lib/scripts/unsymbolize.pl Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/scripts/unsymbolize.pl Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ # -# $Id$ # Author: Markus Wenzel, TU Muenchen # # unsymbolize.pl - remove unreadable symbol names from sources diff -r 8f84a608883d -r ea97aa6aeba2 lib/scripts/yxml.pl --- a/lib/scripts/yxml.pl Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/scripts/yxml.pl Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ # -# $Id$ # Author: Makarius # # yxml.pl - simple XML to YXML converter diff -r 8f84a608883d -r ea97aa6aeba2 lib/texinputs/draft.tex --- a/lib/texinputs/draft.tex Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/texinputs/draft.tex Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,3 @@ -%% -%% $Id$ %% %% root for draft documents %% diff -r 8f84a608883d -r ea97aa6aeba2 lib/texinputs/isabelle.sty --- a/lib/texinputs/isabelle.sty Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/texinputs/isabelle.sty Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,3 @@ -%% -%% $Id$ %% %% macros for Isabelle generated LaTeX output %% diff -r 8f84a608883d -r ea97aa6aeba2 lib/texinputs/isabellesym.sty --- a/lib/texinputs/isabellesym.sty Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/texinputs/isabellesym.sty Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,3 @@ -%% -%% $Id$ %% %% definitions of standard Isabelle symbols %% diff -r 8f84a608883d -r ea97aa6aeba2 lib/texinputs/pdfsetup.sty --- a/lib/texinputs/pdfsetup.sty Tue Dec 30 08:18:54 2008 +0100 +++ b/lib/texinputs/pdfsetup.sty Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,3 @@ -%% -%% $Id$ %% %% default hyperref setup (both for pdf and dvi output) %% diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Code_Setup.thy --- a/src/HOL/Code_Setup.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Code_Setup.thy Tue Dec 30 11:10:01 2008 +0100 @@ -198,6 +198,10 @@ subsection {* Evaluation and normalization by evaluation *} +setup {* + Value.add_evaluator ("SML", Codegen.eval_term o ProofContext.theory_of) +*} + ML {* structure Eval_Method = struct @@ -240,6 +244,10 @@ subsection {* Quickcheck *} +setup {* + Quickcheck.add_generator ("SML", Codegen.test_term) +*} + quickcheck_params [size = 5, iterations = 50] end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Complex/Fundamental_Theorem_Algebra.thy --- a/src/HOL/Complex/Fundamental_Theorem_Algebra.thy Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1329 +0,0 @@ -(* Title: Fundamental_Theorem_Algebra.thy - Author: Amine Chaieb -*) - -header{*Fundamental Theorem of Algebra*} - -theory Fundamental_Theorem_Algebra -imports "~~/src/HOL/Univ_Poly" "~~/src/HOL/Library/Dense_Linear_Order" "~~/src/HOL/Complex" -begin - -subsection {* Square root of complex numbers *} -definition csqrt :: "complex \ complex" where -"csqrt z = (if Im z = 0 then - if 0 \ Re z then Complex (sqrt(Re z)) 0 - else Complex 0 (sqrt(- Re z)) - else Complex (sqrt((cmod z + Re z) /2)) - ((Im z / abs(Im z)) * sqrt((cmod z - Re z) /2)))" - -lemma csqrt[algebra]: "csqrt z ^ 2 = z" -proof- - obtain x y where xy: "z = Complex x y" by (cases z, simp_all) - {assume y0: "y = 0" - {assume x0: "x \ 0" - then have ?thesis using y0 xy real_sqrt_pow2[OF x0] - by (simp add: csqrt_def power2_eq_square)} - moreover - {assume "\ x \ 0" hence x0: "- x \ 0" by arith - then have ?thesis using y0 xy real_sqrt_pow2[OF x0] - by (simp add: csqrt_def power2_eq_square) } - ultimately have ?thesis by blast} - moreover - {assume y0: "y\0" - {fix x y - let ?z = "Complex x y" - from 
abs_Re_le_cmod[of ?z] have tha: "abs x \ cmod ?z" by auto - hence "cmod ?z - x \ 0" "cmod ?z + x \ 0" by arith+ - hence "(sqrt (x * x + y * y) + x) / 2 \ 0" "(sqrt (x * x + y * y) - x) / 2 \ 0" by (simp_all add: power2_eq_square) } - note th = this - have sq4: "\x::real. x^2 / 4 = (x / 2) ^ 2" - by (simp add: power2_eq_square) - from th[of x y] - have sq4': "sqrt (((sqrt (x * x + y * y) + x)^2 / 4)) = (sqrt (x * x + y * y) + x) / 2" "sqrt (((sqrt (x * x + y * y) - x)^2 / 4)) = (sqrt (x * x + y * y) - x) / 2" unfolding sq4 by simp_all - then have th1: "sqrt ((sqrt (x * x + y * y) + x) * (sqrt (x * x + y * y) + x) / 4) - sqrt ((sqrt (x * x + y * y) - x) * (sqrt (x * x + y * y) - x) / 4) = x" - unfolding power2_eq_square by simp - have "sqrt 4 = sqrt (2^2)" by simp - hence sqrt4: "sqrt 4 = 2" by (simp only: real_sqrt_abs) - have th2: "2 *(y * sqrt ((sqrt (x * x + y * y) - x) * (sqrt (x * x + y * y) + x) / 4)) / \y\ = y" - using iffD2[OF real_sqrt_pow2_iff sum_power2_ge_zero[of x y]] y0 - unfolding power2_eq_square - by (simp add: ring_simps real_sqrt_divide sqrt4) - from y0 xy have ?thesis apply (simp add: csqrt_def power2_eq_square) - apply (simp add: real_sqrt_sum_squares_mult_ge_zero[of x y] real_sqrt_pow2[OF th(1)[of x y], unfolded power2_eq_square] real_sqrt_pow2[OF th(2)[of x y], unfolded power2_eq_square] real_sqrt_mult[symmetric]) - using th1 th2 ..} - ultimately show ?thesis by blast -qed - - -subsection{* More lemmas about module of complex numbers *} - -lemma complex_of_real_power: "complex_of_real x ^ n = complex_of_real (x^n)" - by (rule of_real_power [symmetric]) - -lemma real_down2: "(0::real) < d1 \ 0 < d2 ==> EX e. 0 < e & e < d1 & e < d2" - apply ferrack apply arith done - -text{* The triangle inequality for cmod *} -lemma complex_mod_triangle_sub: "cmod w \ cmod (w + z) + norm z" - using complex_mod_triangle_ineq2[of "w + z" "-z"] by auto - -subsection{* Basic lemmas about complex polynomials *} - -lemma poly_bound_exists: - shows "\m. m > 0 \ (\z. cmod z <= r \ cmod (poly p z) \ m)" -proof(induct p) - case Nil thus ?case by (rule exI[where x=1], simp) -next - case (Cons c cs) - from Cons.hyps obtain m where m: "\z. cmod z \ r \ cmod (poly cs z) \ m" - by blast - let ?k = " 1 + cmod c + \r * m\" - have kp: "?k > 0" using abs_ge_zero[of "r*m"] norm_ge_zero[of c] by arith - {fix z - assume H: "cmod z \ r" - from m H have th: "cmod (poly cs z) \ m" by blast - from H have rp: "r \ 0" using norm_ge_zero[of z] by arith - have "cmod (poly (c # cs) z) \ cmod c + cmod (z* poly cs z)" - using norm_triangle_ineq[of c "z* poly cs z"] by simp - also have "\ \ cmod c + r*m" using mult_mono[OF H th rp norm_ge_zero[of "poly cs z"]] by (simp add: norm_mult) - also have "\ \ ?k" by simp - finally have "cmod (poly (c # cs) z) \ ?k" .} - with kp show ?case by blast -qed - - -text{* Offsetting the variable in a polynomial gives another of same degree *} - (* FIXME : Lemma holds also in locale --- fix it later *) -lemma poly_offset_lemma: - shows "\b q. (length q = length p) \ (\x. poly (b#q) (x::complex) = (a + x) * poly p x)" -proof(induct p) - case Nil thus ?case by simp -next - case (Cons c cs) - from Cons.hyps obtain b q where - bq: "length q = length cs" "\x. 
poly (b # q) x = (a + x) * poly cs x" - by blast - let ?b = "a*c" - let ?q = "(b+c)#q" - have lg: "length ?q = length (c#cs)" using bq(1) by simp - {fix x - from bq(2)[rule_format, of x] - have "x*poly (b # q) x = x*((a + x) * poly cs x)" by simp - hence "poly (?b# ?q) x = (a + x) * poly (c # cs) x" - by (simp add: ring_simps)} - with lg show ?case by blast -qed - - (* FIXME : This one too*) -lemma poly_offset: "\ q. length q = length p \ (\x. poly q (x::complex) = poly p (a + x))" -proof (induct p) - case Nil thus ?case by simp -next - case (Cons c cs) - from Cons.hyps obtain q where q: "length q = length cs" "\x. poly q x = poly cs (a + x)" by blast - from poly_offset_lemma[of q a] obtain b p where - bp: "length p = length q" "\x. poly (b # p) x = (a + x) * poly q x" - by blast - thus ?case using q bp by - (rule exI[where x="(c + b)#p"], simp) -qed - -text{* An alternative useful formulation of completeness of the reals *} -lemma real_sup_exists: assumes ex: "\x. P x" and bz: "\z. \x. P x \ x < z" - shows "\(s::real). \y. (\x. P x \ y < x) \ y < s" -proof- - from ex bz obtain x Y where x: "P x" and Y: "\x. P x \ x < Y" by blast - from ex have thx:"\x. x \ Collect P" by blast - from bz have thY: "\Y. isUb UNIV (Collect P) Y" - by(auto simp add: isUb_def isLub_def setge_def setle_def leastP_def Ball_def order_le_less) - from reals_complete[OF thx thY] obtain L where L: "isLub UNIV (Collect P) L" - by blast - from Y[OF x] have xY: "x < Y" . - from L have L': "\x. P x \ x \ L" by (auto simp add: isUb_def isLub_def setge_def setle_def leastP_def Ball_def) - from Y have Y': "\x. P x \ x \ Y" - apply (clarsimp, atomize (full)) by auto - from L Y' have "L \ Y" by (auto simp add: isUb_def isLub_def setge_def setle_def leastP_def Ball_def) - {fix y - {fix z assume z: "P z" "y < z" - from L' z have "y < L" by auto } - moreover - {assume yL: "y < L" "\z. P z \ \ y < z" - hence nox: "\z. P z \ y \ z" by auto - from nox L have "y \ L" by (auto simp add: isUb_def isLub_def setge_def setle_def leastP_def Ball_def) - with yL(1) have False by arith} - ultimately have "(\x. P x \ y < x) \ y < L" by blast} - thus ?thesis by blast -qed - - -subsection{* Some theorems about Sequences*} -text{* Given a binary function @{text "f:: nat \ 'a \ 'a"}, its values are uniquely determined by a function g *} - -lemma num_Axiom: "EX! g. g 0 = e \ (\n. g (Suc n) = f n (g n))" - unfolding Ex1_def - apply (rule_tac x="nat_rec e f" in exI) - apply (rule conjI)+ -apply (rule def_nat_rec_0, simp) -apply (rule allI, rule def_nat_rec_Suc, simp) -apply (rule allI, rule impI, rule ext) -apply (erule conjE) -apply (induct_tac x) -apply (simp add: nat_rec_0) -apply (erule_tac x="n" in allE) -apply (simp) -done - - text{* An equivalent formulation of monotony -- Not used here, but might be useful *} -lemma mono_Suc: "mono f = (\n. (f n :: 'a :: order) \ f (Suc n))" -unfolding mono_def -proof auto - fix A B :: nat - assume H: "\n. f n \ f (Suc n)" "A \ B" - hence "\k. B = A + k" apply - apply (thin_tac "\n. f n \ f (Suc n)") - by presburger - then obtain k where k: "B = A + k" by blast - {fix a k - have "f a \ f (a + k)" - proof (induct k) - case 0 thus ?case by simp - next - case (Suc k) - from Suc.hyps H(1)[rule_format, of "a + k"] show ?case by simp - qed} - with k show "f A \ f B" by blast -qed - -text{* for any sequence, there is a mootonic subsequence *} -lemma seq_monosub: "\f. subseq f \ monoseq (\ n. (s (f n)))" -proof- - {assume H: "\n. \p >n. \ m\p. s m \ s p" - let ?P = "\ p n. p > n \ (\m \ p. 
s m \ s p)" - from num_Axiom[of "SOME p. ?P p 0" "\p n. SOME p. ?P p n"] - obtain f where f: "f 0 = (SOME p. ?P p 0)" "\n. f (Suc n) = (SOME p. ?P p (f n))" by blast - have "?P (f 0) 0" unfolding f(1) some_eq_ex[of "\p. ?P p 0"] - using H apply - - apply (erule allE[where x=0], erule exE, rule_tac x="p" in exI) - unfolding order_le_less by blast - hence f0: "f 0 > 0" "\m \ f 0. s m \ s (f 0)" by blast+ - {fix n - have "?P (f (Suc n)) (f n)" - unfolding f(2)[rule_format, of n] some_eq_ex[of "\p. ?P p (f n)"] - using H apply - - apply (erule allE[where x="f n"], erule exE, rule_tac x="p" in exI) - unfolding order_le_less by blast - hence "f (Suc n) > f n" "\m \ f (Suc n). s m \ s (f (Suc n))" by blast+} - note fSuc = this - {fix p q assume pq: "p \ f q" - have "s p \ s(f(q))" using f0(2)[rule_format, of p] pq fSuc - by (cases q, simp_all) } - note pqth = this - {fix q - have "f (Suc q) > f q" apply (induct q) - using f0(1) fSuc(1)[of 0] apply simp by (rule fSuc(1))} - note fss = this - from fss have th1: "subseq f" unfolding subseq_Suc_iff .. - {fix a b - have "f a \ f (a + b)" - proof(induct b) - case 0 thus ?case by simp - next - case (Suc b) - from fSuc(1)[of "a + b"] Suc.hyps show ?case by simp - qed} - note fmon0 = this - have "monoseq (\n. s (f n))" - proof- - {fix n - have "s (f n) \ s (f (Suc n))" - proof(cases n) - case 0 - assume n0: "n = 0" - from fSuc(1)[of 0] have th0: "f 0 \ f (Suc 0)" by simp - from f0(2)[rule_format, OF th0] show ?thesis using n0 by simp - next - case (Suc m) - assume m: "n = Suc m" - from fSuc(1)[of n] m have th0: "f (Suc m) \ f (Suc (Suc m))" by simp - from m fSuc(2)[rule_format, OF th0] show ?thesis by simp - qed} - thus "monoseq (\n. s (f n))" unfolding monoseq_Suc by blast - qed - with th1 have ?thesis by blast} - moreover - {fix N assume N: "\p >N. \ m\p. s m > s p" - {fix p assume p: "p \ Suc N" - hence pN: "p > N" by arith with N obtain m where m: "m \ p" "s m > s p" by blast - have "m \ p" using m(2) by auto - with m have "\m>p. s p < s m" by - (rule exI[where x=m], auto)} - note th0 = this - let ?P = "\m x. m > x \ s x < s m" - from num_Axiom[of "SOME x. ?P x (Suc N)" "\m x. SOME y. ?P y x"] - obtain f where f: "f 0 = (SOME x. ?P x (Suc N))" - "\n. f (Suc n) = (SOME m. ?P m (f n))" by blast - have "?P (f 0) (Suc N)" unfolding f(1) some_eq_ex[of "\p. ?P p (Suc N)"] - using N apply - - apply (erule allE[where x="Suc N"], clarsimp) - apply (rule_tac x="m" in exI) - apply auto - apply (subgoal_tac "Suc N \ m") - apply simp - apply (rule ccontr, simp) - done - hence f0: "f 0 > Suc N" "s (Suc N) < s (f 0)" by blast+ - {fix n - have "f n > N \ ?P (f (Suc n)) (f n)" - unfolding f(2)[rule_format, of n] some_eq_ex[of "\p. ?P p (f n)"] - proof (induct n) - case 0 thus ?case - using f0 N apply auto - apply (erule allE[where x="f 0"], clarsimp) - apply (rule_tac x="m" in exI, simp) - by (subgoal_tac "f 0 \ m", auto) - next - case (Suc n) - from Suc.hyps have Nfn: "N < f n" by blast - from Suc.hyps obtain m where m: "m > f n" "s (f n) < s m" by blast - with Nfn have mN: "m > N" by arith - note key = Suc.hyps[unfolded some_eq_ex[of "\p. 
?P p (f n)", symmetric] f(2)[rule_format, of n, symmetric]] - - from key have th0: "f (Suc n) > N" by simp - from N[rule_format, OF th0] - obtain m' where m': "m' \ f (Suc n)" "s (f (Suc n)) < s m'" by blast - have "m' \ f (Suc (n))" apply (rule ccontr) using m'(2) by auto - hence "m' > f (Suc n)" using m'(1) by simp - with key m'(2) show ?case by auto - qed} - note fSuc = this - {fix n - have "f n \ Suc N \ f(Suc n) > f n \ s(f n) < s(f(Suc n))" using fSuc[of n] by auto - hence "f n \ Suc N" "f(Suc n) > f n" "s(f n) < s(f(Suc n))" by blast+} - note thf = this - have sqf: "subseq f" unfolding subseq_Suc_iff using thf by simp - have "monoseq (\n. s (f n))" unfolding monoseq_Suc using thf - apply - - apply (rule disjI1) - apply auto - apply (rule order_less_imp_le) - apply blast - done - then have ?thesis using sqf by blast} - ultimately show ?thesis unfolding linorder_not_less[symmetric] by blast -qed - -lemma seq_suble: assumes sf: "subseq f" shows "n \ f n" -proof(induct n) - case 0 thus ?case by simp -next - case (Suc n) - from sf[unfolded subseq_Suc_iff, rule_format, of n] Suc.hyps - have "n < f (Suc n)" by arith - thus ?case by arith -qed - -subsection {* Fundamental theorem of algebra *} -lemma unimodular_reduce_norm: - assumes md: "cmod z = 1" - shows "cmod (z + 1) < 1 \ cmod (z - 1) < 1 \ cmod (z + ii) < 1 \ cmod (z - ii) < 1" -proof- - obtain x y where z: "z = Complex x y " by (cases z, auto) - from md z have xy: "x^2 + y^2 = 1" by (simp add: cmod_def) - {assume C: "cmod (z + 1) \ 1" "cmod (z - 1) \ 1" "cmod (z + ii) \ 1" "cmod (z - ii) \ 1" - from C z xy have "2*x \ 1" "2*x \ -1" "2*y \ 1" "2*y \ -1" - by (simp_all add: cmod_def power2_eq_square ring_simps) - hence "abs (2*x) \ 1" "abs (2*y) \ 1" by simp_all - hence "(abs (2 * x))^2 <= 1^2" "(abs (2 * y)) ^2 <= 1^2" - by - (rule power_mono, simp, simp)+ - hence th0: "4*x^2 \ 1" "4*y^2 \ 1" - by (simp_all add: power2_abs power_mult_distrib) - from add_mono[OF th0] xy have False by simp } - thus ?thesis unfolding linorder_not_le[symmetric] by blast -qed - -text{* Hence we can always reduce modulus of @{text "1 + b z^n"} if nonzero *} -lemma reduce_poly_simple: - assumes b: "b \ 0" and n: "n\0" - shows "\z. cmod (1 + b * z^n) < 1" -using n -proof(induct n rule: nat_less_induct) - fix n - assume IH: "\m 0 \ (\z. cmod (1 + b * z ^ m) < 1)" and n: "n \ 0" - let ?P = "\z n. cmod (1 + b * z ^ n) < 1" - {assume e: "even n" - hence "\m. n = 2*m" by presburger - then obtain m where m: "n = 2*m" by blast - from n m have "m\0" "m < n" by presburger+ - with IH[rule_format, of m] obtain z where z: "?P z m" by blast - from z have "?P (csqrt z) n" by (simp add: m power_mult csqrt) - hence "\z. 
?P z n" ..} - moreover - {assume o: "odd n" - from b have b': "b^2 \ 0" unfolding power2_eq_square by simp - have "Im (inverse b) * (Im (inverse b) * \Im b * Im b + Re b * Re b\) + - Re (inverse b) * (Re (inverse b) * \Im b * Im b + Re b * Re b\) = - ((Re (inverse b))^2 + (Im (inverse b))^2) * \Im b * Im b + Re b * Re b\" by algebra - also have "\ = cmod (inverse b) ^2 * cmod b ^ 2" - apply (simp add: cmod_def) using realpow_two_le_add_order[of "Re b" "Im b"] - by (simp add: power2_eq_square) - finally - have th0: "Im (inverse b) * (Im (inverse b) * \Im b * Im b + Re b * Re b\) + - Re (inverse b) * (Re (inverse b) * \Im b * Im b + Re b * Re b\) = - 1" - apply (simp add: power2_eq_square norm_mult[symmetric] norm_inverse[symmetric]) - using right_inverse[OF b'] - by (simp add: power2_eq_square[symmetric] power_inverse[symmetric] ring_simps) - have th0: "cmod (complex_of_real (cmod b) / b) = 1" - apply (simp add: complex_Re_mult cmod_def power2_eq_square Re_complex_of_real Im_complex_of_real divide_inverse ring_simps ) - by (simp add: real_sqrt_mult[symmetric] th0) - from o have "\m. n = Suc (2*m)" by presburger+ - then obtain m where m: "n = Suc (2*m)" by blast - from unimodular_reduce_norm[OF th0] o - have "\v. cmod (complex_of_real (cmod b) / b + v^n) < 1" - apply (cases "cmod (complex_of_real (cmod b) / b + 1) < 1", rule_tac x="1" in exI, simp) - apply (cases "cmod (complex_of_real (cmod b) / b - 1) < 1", rule_tac x="-1" in exI, simp add: diff_def) - apply (cases "cmod (complex_of_real (cmod b) / b + ii) < 1") - apply (cases "even m", rule_tac x="ii" in exI, simp add: m power_mult) - apply (rule_tac x="- ii" in exI, simp add: m power_mult) - apply (cases "even m", rule_tac x="- ii" in exI, simp add: m power_mult diff_def) - apply (rule_tac x="ii" in exI, simp add: m power_mult diff_def) - done - then obtain v where v: "cmod (complex_of_real (cmod b) / b + v^n) < 1" by blast - let ?w = "v / complex_of_real (root n (cmod b))" - from odd_real_root_pow[OF o, of "cmod b"] - have th1: "?w ^ n = v^n / complex_of_real (cmod b)" - by (simp add: power_divide complex_of_real_power) - have th2:"cmod (complex_of_real (cmod b) / b) = 1" using b by (simp add: norm_divide) - hence th3: "cmod (complex_of_real (cmod b) / b) \ 0" by simp - have th4: "cmod (complex_of_real (cmod b) / b) * - cmod (1 + b * (v ^ n / complex_of_real (cmod b))) - < cmod (complex_of_real (cmod b) / b) * 1" - apply (simp only: norm_mult[symmetric] right_distrib) - using b v by (simp add: th2) - - from mult_less_imp_less_left[OF th4 th3] - have "?P ?w n" unfolding th1 . - hence "\z. ?P z n" .. } - ultimately show "\z. ?P z n" by blast -qed - - -text{* Bolzano-Weierstrass type property for closed disc in complex plane. *} - -lemma metric_bound_lemma: "cmod (x - y) <= \Re x - Re y\ + \Im x - Im y\" - using real_sqrt_sum_squares_triangle_ineq[of "Re x - Re y" 0 0 "Im x - Im y" ] - unfolding cmod_def by simp - -lemma bolzano_weierstrass_complex_disc: - assumes r: "\n. cmod (s n) \ r" - shows "\f z. subseq f \ (\e >0. \N. \n \ N. cmod (s (f n) - z) < e)" -proof- - from seq_monosub[of "Re o s"] - obtain f g where f: "subseq f" "monoseq (\n. Re (s (f n)))" - unfolding o_def by blast - from seq_monosub[of "Im o s o f"] - obtain g where g: "subseq g" "monoseq (\n. Im (s(f(g n))))" unfolding o_def by blast - let ?h = "f o g" - from r[rule_format, of 0] have rp: "r \ 0" using norm_ge_zero[of "s 0"] by arith - have th:"\n. 
r + 1 \ \ Re (s n)\" - proof - fix n - from abs_Re_le_cmod[of "s n"] r[rule_format, of n] show "\Re (s n)\ \ r + 1" by arith - qed - have conv1: "convergent (\n. Re (s ( f n)))" - apply (rule Bseq_monoseq_convergent) - apply (simp add: Bseq_def) - apply (rule exI[where x= "r + 1"]) - using th rp apply simp - using f(2) . - have th:"\n. r + 1 \ \ Im (s n)\" - proof - fix n - from abs_Im_le_cmod[of "s n"] r[rule_format, of n] show "\Im (s n)\ \ r + 1" by arith - qed - - have conv2: "convergent (\n. Im (s (f (g n))))" - apply (rule Bseq_monoseq_convergent) - apply (simp add: Bseq_def) - apply (rule exI[where x= "r + 1"]) - using th rp apply simp - using g(2) . - - from conv1[unfolded convergent_def] obtain x where "LIMSEQ (\n. Re (s (f n))) x" - by blast - hence x: "\r>0. \n0. \n\n0. \ Re (s (f n)) - x \ < r" - unfolding LIMSEQ_def real_norm_def . - - from conv2[unfolded convergent_def] obtain y where "LIMSEQ (\n. Im (s (f (g n)))) y" - by blast - hence y: "\r>0. \n0. \n\n0. \ Im (s (f (g n))) - y \ < r" - unfolding LIMSEQ_def real_norm_def . - let ?w = "Complex x y" - from f(1) g(1) have hs: "subseq ?h" unfolding subseq_def by auto - {fix e assume ep: "e > (0::real)" - hence e2: "e/2 > 0" by simp - from x[rule_format, OF e2] y[rule_format, OF e2] - obtain N1 N2 where N1: "\n\N1. \Re (s (f n)) - x\ < e / 2" and N2: "\n\N2. \Im (s (f (g n))) - y\ < e / 2" by blast - {fix n assume nN12: "n \ N1 + N2" - hence nN1: "g n \ N1" and nN2: "n \ N2" using seq_suble[OF g(1), of n] by arith+ - from add_strict_mono[OF N1[rule_format, OF nN1] N2[rule_format, OF nN2]] - have "cmod (s (?h n) - ?w) < e" - using metric_bound_lemma[of "s (f (g n))" ?w] by simp } - hence "\N. \n\N. cmod (s (?h n) - ?w) < e" by blast } - with hs show ?thesis by blast -qed - -text{* Polynomial is continuous. *} - -lemma poly_cont: - assumes ep: "e > 0" - shows "\d >0. \w. 0 < cmod (w - z) \ cmod (w - z) < d \ cmod (poly p w - poly p z) < e" -proof- - from poly_offset[of p z] obtain q where q: "length q = length p" "\x. poly q x = poly p (z + x)" by blast - {fix w - note q(2)[of "w - z", simplified]} - note th = this - show ?thesis unfolding th[symmetric] - proof(induct q) - case Nil thus ?case using ep by auto - next - case (Cons c cs) - from poly_bound_exists[of 1 "cs"] - obtain m where m: "m > 0" "\z. cmod z \ 1 \ cmod (poly cs z) \ m" by blast - from ep m(1) have em0: "e/m > 0" by (simp add: field_simps) - have one0: "1 > (0::real)" by arith - from real_lbound_gt_zero[OF one0 em0] - obtain d where d: "d >0" "d < 1" "d < e / m" by blast - from d(1,3) m(1) have dm: "d*m > 0" "d*m < e" - by (simp_all add: field_simps real_mult_order) - show ?case - proof(rule ex_forward[OF real_lbound_gt_zero[OF one0 em0]], clarsimp simp add: norm_mult) - fix d w - assume H: "d > 0" "d < 1" "d < e/m" "w\z" "cmod (w-z) < d" - hence d1: "cmod (w-z) \ 1" "d \ 0" by simp_all - from H(3) m(1) have dme: "d*m < e" by (simp add: field_simps) - from H have th: "cmod (w-z) \ d" by simp - from mult_mono[OF th m(2)[OF d1(1)] d1(2) norm_ge_zero] dme - show "cmod (w - z) * cmod (poly cs (w - z)) < e" by simp - qed - qed -qed - -text{* Hence a polynomial attains minimum on a closed disc - in the complex plane. *} -lemma poly_minimum_modulus_disc: - "\z. \w. 
cmod w \ r \ cmod (poly p z) \ cmod (poly p w)" -proof- - {assume "\ r \ 0" hence ?thesis unfolding linorder_not_le - apply - - apply (rule exI[where x=0]) - apply auto - apply (subgoal_tac "cmod w < 0") - apply simp - apply arith - done } - moreover - {assume rp: "r \ 0" - from rp have "cmod 0 \ r \ cmod (poly p 0) = - (- cmod (poly p 0))" by simp - hence mth1: "\x z. cmod z \ r \ cmod (poly p z) = - x" by blast - {fix x z - assume H: "cmod z \ r" "cmod (poly p z) = - x" "\x < 1" - hence "- x < 0 " by arith - with H(2) norm_ge_zero[of "poly p z"] have False by simp } - then have mth2: "\z. \x. (\z. cmod z \ r \ cmod (poly p z) = - x) \ x < z" by blast - from real_sup_exists[OF mth1 mth2] obtain s where - s: "\y. (\x. (\z. cmod z \ r \ cmod (poly p z) = - x) \ y < x) \(y < s)" by blast - let ?m = "-s" - {fix y - from s[rule_format, of "-y"] have - "(\z x. cmod z \ r \ -(- cmod (poly p z)) < y) \ ?m < y" - unfolding minus_less_iff[of y ] equation_minus_iff by blast } - note s1 = this[unfolded minus_minus] - from s1[of ?m] have s1m: "\z x. cmod z \ r \ cmod (poly p z) \ ?m" - by auto - {fix n::nat - from s1[rule_format, of "?m + 1/real (Suc n)"] - have "\z. cmod z \ r \ cmod (poly p z) < - s + 1 / real (Suc n)" - by simp} - hence th: "\n. \z. cmod z \ r \ cmod (poly p z) < - s + 1 / real (Suc n)" .. - from choice[OF th] obtain g where - g: "\n. cmod (g n) \ r" "\n. cmod (poly p (g n)) e>0. \N. \n\N. cmod (g (f n) - z) < e" - by blast - {fix w - assume wr: "cmod w \ r" - let ?e = "\cmod (poly p z) - ?m\" - {assume e: "?e > 0" - hence e2: "?e/2 > 0" by simp - from poly_cont[OF e2, of z p] obtain d where - d: "d>0" "\w. 0 cmod(w - z) < d \ cmod(poly p w - poly p z) < ?e/2" by blast - {fix w assume w: "cmod (w - z) < d" - have "cmod(poly p w - poly p z) < ?e / 2" - using d(2)[rule_format, of w] w e by (cases "w=z", simp_all)} - note th1 = this - - from fz(2)[rule_format, OF d(1)] obtain N1 where - N1: "\n\N1. cmod (g (f n) - z) < d" by blast - from reals_Archimedean2[of "2/?e"] obtain N2::nat where - N2: "2/?e < real N2" by blast - have th2: "cmod(poly p (g(f(N1 + N2))) - poly p z) < ?e/2" - using N1[rule_format, of "N1 + N2"] th1 by simp - {fix a b e2 m :: real - have "a < e2 \ abs(b - m) < e2 \ 2 * e2 <= abs(b - m) + a - ==> False" by arith} - note th0 = this - have ath: - "\m x e. m <= x \ x < m + e ==> abs(x - m::real) < e" by arith - from s1m[OF g(1)[rule_format]] - have th31: "?m \ cmod(poly p (g (f (N1 + N2))))" . - from seq_suble[OF fz(1), of "N1+N2"] - have th00: "real (Suc (N1+N2)) \ real (Suc (f (N1+N2)))" by simp - have th000: "0 \ (1::real)" "(1::real) \ 1" "real (Suc (N1+N2)) > 0" - using N2 by auto - from frac_le[OF th000 th00] have th00: "?m +1 / real (Suc (f (N1 + N2))) \ ?m + 1 / real (Suc (N1 + N2))" by simp - from g(2)[rule_format, of "f (N1 + N2)"] - have th01:"cmod (poly p (g (f (N1 + N2)))) < - s + 1 / real (Suc (f (N1 + N2)))" . - from order_less_le_trans[OF th01 th00] - have th32: "cmod(poly p (g (f (N1 + N2)))) < ?m + (1/ real(Suc (N1 + N2)))" . - from N2 have "2/?e < real (Suc (N1 + N2))" by arith - with e2 less_imp_inverse_less[of "2/?e" "real (Suc (N1 + N2))"] - have "?e/2 > 1/ real (Suc (N1 + N2))" by (simp add: inverse_eq_divide) - with ath[OF th31 th32] - have thc1:"\cmod(poly p (g (f (N1 + N2)))) - ?m\< ?e/2" by arith - have ath2: "\(a::real) b c m. 
\a - b\ <= c ==> \b - m\ <= \a - m\ + c" - by arith - have th22: "\cmod (poly p (g (f (N1 + N2)))) - cmod (poly p z)\ -\ cmod (poly p (g (f (N1 + N2))) - poly p z)" - by (simp add: norm_triangle_ineq3) - from ath2[OF th22, of ?m] - have thc2: "2*(?e/2) \ \cmod(poly p (g (f (N1 + N2)))) - ?m\ + cmod (poly p (g (f (N1 + N2))) - poly p z)" by simp - from th0[OF th2 thc1 thc2] have False .} - hence "?e = 0" by auto - then have "cmod (poly p z) = ?m" by simp - with s1m[OF wr] - have "cmod (poly p z) \ cmod (poly p w)" by simp } - hence ?thesis by blast} - ultimately show ?thesis by blast -qed - -lemma "(rcis (sqrt (abs r)) (a/2)) ^ 2 = rcis (abs r) a" - unfolding power2_eq_square - apply (simp add: rcis_mult) - apply (simp add: power2_eq_square[symmetric]) - done - -lemma cispi: "cis pi = -1" - unfolding cis_def - by simp - -lemma "(rcis (sqrt (abs r)) ((pi + a)/2)) ^ 2 = rcis (- abs r) a" - unfolding power2_eq_square - apply (simp add: rcis_mult add_divide_distrib) - apply (simp add: power2_eq_square[symmetric] rcis_def cispi cis_mult[symmetric]) - done - -text {* Nonzero polynomial in z goes to infinity as z does. *} - -instance complex::idom_char_0 by (intro_classes) -instance complex :: recpower_idom_char_0 by intro_classes - -lemma poly_infinity: - assumes ex: "list_ex (\c. c \ 0) p" - shows "\r. \z. r \ cmod z \ d \ cmod (poly (a#p) z)" -using ex -proof(induct p arbitrary: a d) - case (Cons c cs a d) - {assume H: "list_ex (\c. c\0) cs" - with Cons.hyps obtain r where r: "\z. r \ cmod z \ d + cmod a \ cmod (poly (c # cs) z)" by blast - let ?r = "1 + \r\" - {fix z assume h: "1 + \r\ \ cmod z" - have r0: "r \ cmod z" using h by arith - from r[rule_format, OF r0] - have th0: "d + cmod a \ 1 * cmod(poly (c#cs) z)" by arith - from h have z1: "cmod z \ 1" by arith - from order_trans[OF th0 mult_right_mono[OF z1 norm_ge_zero[of "poly (c#cs) z"]]] - have th1: "d \ cmod(z * poly (c#cs) z) - cmod a" - unfolding norm_mult by (simp add: ring_simps) - from complex_mod_triangle_sub[of "z * poly (c#cs) z" a] - have th2: "cmod(z * poly (c#cs) z) - cmod a \ cmod (poly (a#c#cs) z)" - by (simp add: diff_le_eq ring_simps) - from th1 th2 have "d \ cmod (poly (a#c#cs) z)" by arith} - hence ?case by blast} - moreover - {assume cs0: "\ (list_ex (\c. c \ 0) cs)" - with Cons.prems have c0: "c \ 0" by simp - from cs0 have cs0': "list_all (\c. c = 0) cs" - by (auto simp add: list_all_iff list_ex_iff) - {fix z - assume h: "(\d\ + cmod a) / cmod c \ cmod z" - from c0 have "cmod c > 0" by simp - from h c0 have th0: "\d\ + cmod a \ cmod (z*c)" - by (simp add: field_simps norm_mult) - have ath: "\mzh mazh ma. mzh <= mazh + ma ==> abs(d) + ma <= mzh ==> d <= mazh" by arith - from complex_mod_triangle_sub[of "z*c" a ] - have th1: "cmod (z * c) \ cmod (a + z * c) + cmod a" - by (simp add: ring_simps) - from ath[OF th1 th0] have "d \ cmod (poly (a # c # cs) z)" - using poly_0[OF cs0'] by simp} - then have ?case by blast} - ultimately show ?case by blast -qed simp - -text {* Hence polynomial's modulus attains its minimum somewhere. *} -lemma poly_minimum_modulus: - "\z.\w. cmod (poly p z) \ cmod (poly p w)" -proof(induct p) - case (Cons c cs) - {assume cs0: "list_ex (\c. c \ 0) cs" - from poly_infinity[OF cs0, of "cmod (poly (c#cs) 0)" c] - obtain r where r: "\z. r \ cmod z \ cmod (poly (c # cs) 0) \ cmod (poly (c # cs) z)" by blast - have ath: "\z r. r \ cmod z \ cmod z \ \r\" by arith - from poly_minimum_modulus_disc[of "\r\" "c#cs"] - obtain v where v: "\w. 
cmod w \ \r\ \ cmod (poly (c # cs) v) \ cmod (poly (c # cs) w)" by blast - {fix z assume z: "r \ cmod z" - from v[of 0] r[OF z] - have "cmod (poly (c # cs) v) \ cmod (poly (c # cs) z)" - by simp } - note v0 = this - from v0 v ath[of r] have ?case by blast} - moreover - {assume cs0: "\ (list_ex (\c. c\0) cs)" - hence th:"list_all (\c. c = 0) cs" by (simp add: list_all_iff list_ex_iff) - from poly_0[OF th] Cons.hyps have ?case by simp} - ultimately show ?case by blast -qed simp - -text{* Constant function (non-syntactic characterization). *} -definition "constant f = (\x y. f x = f y)" - -lemma nonconstant_length: "\ (constant (poly p)) \ length p \ 2" - unfolding constant_def - apply (induct p, auto) - apply (unfold not_less[symmetric]) - apply simp - apply (rule ccontr) - apply auto - done - -lemma poly_replicate_append: - "poly ((replicate n 0)@p) (x::'a::{recpower, comm_ring}) = x^n * poly p x" - by(induct n, auto simp add: power_Suc ring_simps) - -text {* Decomposition of polynomial, skipping zero coefficients - after the first. *} - -lemma poly_decompose_lemma: - assumes nz: "\(\z. z\0 \ poly p z = (0::'a::{recpower,idom}))" - shows "\k a q. a\0 \ Suc (length q + k) = length p \ - (\z. poly p z = z^k * poly (a#q) z)" -using nz -proof(induct p) - case Nil thus ?case by simp -next - case (Cons c cs) - {assume c0: "c = 0" - - from Cons.hyps Cons.prems c0 have ?case apply auto - apply (rule_tac x="k+1" in exI) - apply (rule_tac x="a" in exI, clarsimp) - apply (rule_tac x="q" in exI) - by (auto simp add: power_Suc)} - moreover - {assume c0: "c\0" - hence ?case apply- - apply (rule exI[where x=0]) - apply (rule exI[where x=c], clarsimp) - apply (rule exI[where x=cs]) - apply auto - done} - ultimately show ?case by blast -qed - -lemma poly_decompose: - assumes nc: "~constant(poly p)" - shows "\k a q. a\(0::'a::{recpower,idom}) \ k\0 \ - length q + k + 1 = length p \ - (\z. poly p z = poly p 0 + z^k * poly (a#q) z)" -using nc -proof(induct p) - case Nil thus ?case by (simp add: constant_def) -next - case (Cons c cs) - {assume C:"\z. z \ 0 \ poly cs z = 0" - {fix x y - from C have "poly (c#cs) x = poly (c#cs) y" by (cases "x=0", auto)} - with Cons.prems have False by (auto simp add: constant_def)} - hence th: "\ (\z. z \ 0 \ poly cs z = 0)" .. - from poly_decompose_lemma[OF th] - show ?case - apply clarsimp - apply (rule_tac x="k+1" in exI) - apply (rule_tac x="a" in exI) - apply simp - apply (rule_tac x="q" in exI) - apply (auto simp add: power_Suc) - done -qed - -text{* Fundamental theorem of algebral *} - -lemma fundamental_theorem_of_algebra: - assumes nc: "~constant(poly p)" - shows "\z::complex. poly p z = 0" -using nc -proof(induct n\ "length p" arbitrary: p rule: nat_less_induct) - fix n fix p :: "complex list" - let ?p = "poly p" - assume H: "\mp. \ constant (poly p) \ m = length p \ (\(z::complex). poly p z = 0)" and nc: "\ constant ?p" and n: "n = length p" - let ?ths = "\z. ?p z = 0" - - from nonconstant_length[OF nc] have n2: "n\ 2" by (simp add: n) - from poly_minimum_modulus obtain c where - c: "\w. cmod (?p c) \ cmod (?p w)" by blast - {assume pc: "?p c = 0" hence ?ths by blast} - moreover - {assume pc0: "?p c \ 0" - from poly_offset[of p c] obtain q where - q: "length q = length p" "\x. poly q x = ?p (c+x)" by blast - {assume h: "constant (poly q)" - from q(2) have th: "\x. 
poly q (x - c) = ?p x" by auto - {fix x y - from th have "?p x = poly q (x - c)" by auto - also have "\ = poly q (y - c)" - using h unfolding constant_def by blast - also have "\ = ?p y" using th by auto - finally have "?p x = ?p y" .} - with nc have False unfolding constant_def by blast } - hence qnc: "\ constant (poly q)" by blast - from q(2) have pqc0: "?p c = poly q 0" by simp - from c pqc0 have cq0: "\w. cmod (poly q 0) \ cmod (?p w)" by simp - let ?a0 = "poly q 0" - from pc0 pqc0 have a00: "?a0 \ 0" by simp - from a00 - have qr: "\z. poly q z = poly (map (op * (inverse ?a0)) q) z * ?a0" - by (simp add: poly_cmult_map) - let ?r = "map (op * (inverse ?a0)) q" - have lgqr: "length q = length ?r" by simp - {assume h: "\x y. poly ?r x = poly ?r y" - {fix x y - from qr[rule_format, of x] - have "poly q x = poly ?r x * ?a0" by auto - also have "\ = poly ?r y * ?a0" using h by simp - also have "\ = poly q y" using qr[rule_format, of y] by simp - finally have "poly q x = poly q y" .} - with qnc have False unfolding constant_def by blast} - hence rnc: "\ constant (poly ?r)" unfolding constant_def by blast - from qr[rule_format, of 0] a00 have r01: "poly ?r 0 = 1" by auto - {fix w - have "cmod (poly ?r w) < 1 \ cmod (poly q w / ?a0) < 1" - using qr[rule_format, of w] a00 by simp - also have "\ \ cmod (poly q w) < cmod ?a0" - using a00 unfolding norm_divide by (simp add: field_simps) - finally have "cmod (poly ?r w) < 1 \ cmod (poly q w) < cmod ?a0" .} - note mrmq_eq = this - from poly_decompose[OF rnc] obtain k a s where - kas: "a\0" "k\0" "length s + k + 1 = length ?r" - "\z. poly ?r z = poly ?r 0 + z^k* poly (a#s) z" by blast - {assume "k + 1 = n" - with kas(3) lgqr[symmetric] q(1) n[symmetric] have s0:"s=[]" by auto - {fix w - have "cmod (poly ?r w) = cmod (1 + a * w ^ k)" - using kas(4)[rule_format, of w] s0 r01 by (simp add: ring_simps)} - note hth = this [symmetric] - from reduce_poly_simple[OF kas(1,2)] - have "\w. cmod (poly ?r w) < 1" unfolding hth by blast} - moreover - {assume kn: "k+1 \ n" - from kn kas(3) q(1) n[symmetric] have k1n: "k + 1 < n" by simp - have th01: "\ constant (poly (1#((replicate (k - 1) 0)@[a])))" - unfolding constant_def poly_Nil poly_Cons poly_replicate_append - using kas(1) apply simp - by (rule exI[where x=0], rule exI[where x=1], simp) - from kas(2) have th02: "k+1 = length (1#((replicate (k - 1) 0)@[a]))" - by simp - from H[rule_format, OF k1n th01 th02] - obtain w where w: "1 + w^k * a = 0" - unfolding poly_Nil poly_Cons poly_replicate_append - using kas(2) by (auto simp add: power_Suc[symmetric, of _ "k - Suc 0"] - mult_assoc[of _ _ a, symmetric]) - from poly_bound_exists[of "cmod w" s] obtain m where - m: "m > 0" "\z. 
cmod z \ cmod w \ cmod (poly s z) \ m" by blast - have w0: "w\0" using kas(2) w by (auto simp add: power_0_left) - from w have "(1 + w ^ k * a) - 1 = 0 - 1" by simp - then have wm1: "w^k * a = - 1" by simp - have inv0: "0 < inverse (cmod w ^ (k + 1) * m)" - using norm_ge_zero[of w] w0 m(1) - by (simp add: inverse_eq_divide zero_less_mult_iff) - with real_down2[OF zero_less_one] obtain t where - t: "t > 0" "t < 1" "t < inverse (cmod w ^ (k + 1) * m)" by blast - let ?ct = "complex_of_real t" - let ?w = "?ct * w" - have "1 + ?w^k * (a + ?w * poly s ?w) = 1 + ?ct^k * (w^k * a) + ?w^k * ?w * poly s ?w" using kas(1) by (simp add: ring_simps power_mult_distrib) - also have "\ = complex_of_real (1 - t^k) + ?w^k * ?w * poly s ?w" - unfolding wm1 by (simp) - finally have "cmod (1 + ?w^k * (a + ?w * poly s ?w)) = cmod (complex_of_real (1 - t^k) + ?w^k * ?w * poly s ?w)" - apply - - apply (rule cong[OF refl[of cmod]]) - apply assumption - done - with norm_triangle_ineq[of "complex_of_real (1 - t^k)" "?w^k * ?w * poly s ?w"] - have th11: "cmod (1 + ?w^k * (a + ?w * poly s ?w)) \ \1 - t^k\ + cmod (?w^k * ?w * poly s ?w)" unfolding norm_of_real by simp - have ath: "\x (t::real). 0\ x \ x < t \ t\1 \ \1 - t\ + x < 1" by arith - have "t *cmod w \ 1 * cmod w" apply (rule mult_mono) using t(1,2) by auto - then have tw: "cmod ?w \ cmod w" using t(1) by (simp add: norm_mult) - from t inv0 have "t* (cmod w ^ (k + 1) * m) < 1" - by (simp add: inverse_eq_divide field_simps) - with zero_less_power[OF t(1), of k] - have th30: "t^k * (t* (cmod w ^ (k + 1) * m)) < t^k * 1" - apply - apply (rule mult_strict_left_mono) by simp_all - have "cmod (?w^k * ?w * poly s ?w) = t^k * (t* (cmod w ^ (k+1) * cmod (poly s ?w)))" using w0 t(1) - by (simp add: ring_simps power_mult_distrib norm_of_real norm_power norm_mult) - then have "cmod (?w^k * ?w * poly s ?w) \ t^k * (t* (cmod w ^ (k + 1) * m))" - using t(1,2) m(2)[rule_format, OF tw] w0 - apply (simp only: ) - apply auto - apply (rule mult_mono, simp_all add: norm_ge_zero)+ - apply (simp add: zero_le_mult_iff zero_le_power) - done - with th30 have th120: "cmod (?w^k * ?w * poly s ?w) < t^k" by simp - from power_strict_mono[OF t(2), of k] t(1) kas(2) have th121: "t^k \ 1" - by auto - from ath[OF norm_ge_zero[of "?w^k * ?w * poly s ?w"] th120 th121] - have th12: "\1 - t^k\ + cmod (?w^k * ?w * poly s ?w) < 1" . - from th11 th12 - have "cmod (1 + ?w^k * (a + ?w * poly s ?w)) < 1" by arith - then have "cmod (poly ?r ?w) < 1" - unfolding kas(4)[rule_format, of ?w] r01 by simp - then have "\w. cmod (poly ?r w) < 1" by blast} - ultimately have cr0_contr: "\w. cmod (poly ?r w) < 1" by blast - from cr0_contr cq0 q(2) - have ?ths unfolding mrmq_eq not_less[symmetric] by auto} - ultimately show ?ths by blast -qed - -text {* Alternative version with a syntactic notion of constant polynomial. *} - -lemma fundamental_theorem_of_algebra_alt: - assumes nc: "~(\a l. a\ 0 \ list_all(\b. b = 0) l \ p = a#l)" - shows "\z. poly p z = (0::complex)" -using nc -proof(induct p) - case (Cons c cs) - {assume "c=0" hence ?case by auto} - moreover - {assume c0: "c\0" - {assume nc: "constant (poly (c#cs))" - from nc[unfolded constant_def, rule_format, of 0] - have "\w. w \ 0 \ poly cs w = 0" by auto - hence "list_all (\c. c=0) cs" - proof(induct cs) - case (Cons d ds) - {assume "d=0" hence ?case using Cons.prems Cons.hyps by simp} - moreover - {assume d0: "d\0" - from poly_bound_exists[of 1 ds] obtain m where - m: "m > 0" "\z. \z. 
cmod z \ 1 \ cmod (poly ds z) \ m" by blast - have dm: "cmod d / m > 0" using d0 m(1) by (simp add: field_simps) - from real_down2[OF dm zero_less_one] obtain x where - x: "x > 0" "x < cmod d / m" "x < 1" by blast - let ?x = "complex_of_real x" - from x have cx: "?x \ 0" "cmod ?x \ 1" by simp_all - from Cons.prems[rule_format, OF cx(1)] - have cth: "cmod (?x*poly ds ?x) = cmod d" by (simp add: eq_diff_eq[symmetric]) - from m(2)[rule_format, OF cx(2)] x(1) - have th0: "cmod (?x*poly ds ?x) \ x*m" - by (simp add: norm_mult) - from x(2) m(1) have "x*m < cmod d" by (simp add: field_simps) - with th0 have "cmod (?x*poly ds ?x) \ cmod d" by auto - with cth have ?case by blast} - ultimately show ?case by blast - qed simp} - then have nc: "\ constant (poly (c#cs))" using Cons.prems c0 - by blast - from fundamental_theorem_of_algebra[OF nc] have ?case .} - ultimately show ?case by blast -qed simp - -subsection{* Nullstellenstatz, degrees and divisibility of polynomials *} - -lemma nullstellensatz_lemma: - fixes p :: "complex list" - assumes "\x. poly p x = 0 \ poly q x = 0" - and "degree p = n" and "n \ 0" - shows "p divides (pexp q n)" -using prems -proof(induct n arbitrary: p q rule: nat_less_induct) - fix n::nat fix p q :: "complex list" - assume IH: "\mp q. - (\x. poly p x = (0::complex) \ poly q x = 0) \ - degree p = m \ m \ 0 \ p divides (q %^ m)" - and pq0: "\x. poly p x = 0 \ poly q x = 0" - and dpn: "degree p = n" and n0: "n \ 0" - let ?ths = "p divides (q %^ n)" - {fix a assume a: "poly p a = 0" - {assume p0: "poly p = poly []" - hence ?ths unfolding divides_def using pq0 n0 - apply - apply (rule exI[where x="[]"], rule ext) - by (auto simp add: poly_mult poly_exp)} - moreover - {assume p0: "poly p \ poly []" - and oa: "order a p \ 0" - from p0 have pne: "p \ []" by auto - let ?op = "order a p" - from p0 have ap: "([- a, 1] %^ ?op) divides p" - "\ pexp [- a, 1] (Suc ?op) divides p" using order by blast+ - note oop = order_degree[OF p0, unfolded dpn] - {assume q0: "q = []" - hence ?ths using n0 unfolding divides_def - apply simp - apply (rule exI[where x="[]"], rule ext) - by (simp add: divides_def poly_exp poly_mult)} - moreover - {assume q0: "q\[]" - from pq0[rule_format, OF a, unfolded poly_linear_divides] q0 - obtain r where r: "q = pmult [- a, 1] r" by blast - from ap[unfolded divides_def] obtain s where - s: "poly p = poly (pmult (pexp [- a, 1] ?op) s)" by blast - have s0: "poly s \ poly []" - using s p0 by (simp add: poly_entire) - hence pns0: "poly (pnormalize s) \ poly []" and sne: "s\[]" by auto - {assume ds0: "degree s = 0" - from ds0 pns0 have "\k. 
pnormalize s = [k]" unfolding degree_def - by (cases "pnormalize s", auto) - then obtain k where kpn: "pnormalize s = [k]" by blast - from pns0[unfolded poly_zero] kpn have k: "k \0" "poly s = poly [k]" - using poly_normalize[of s] by simp_all - let ?w = "pmult (pmult [1/k] (pexp [-a,1] (n - ?op))) (pexp r n)" - from k r s oop have "poly (pexp q n) = poly (pmult p ?w)" - by - (rule ext, simp add: poly_mult poly_exp poly_cmult poly_add power_add[symmetric] ring_simps power_mult_distrib[symmetric]) - hence ?ths unfolding divides_def by blast} - moreover - {assume ds0: "degree s \ 0" - from ds0 s0 dpn degree_unique[OF s, unfolded linear_pow_mul_degree] oa - have dsn: "degree s < n" by auto - {fix x assume h: "poly s x = 0" - {assume xa: "x = a" - from h[unfolded xa poly_linear_divides] sne obtain u where - u: "s = pmult [- a, 1] u" by blast - have "poly p = poly (pmult (pexp [- a, 1] (Suc ?op)) u)" - unfolding s u - apply (rule ext) - by (simp add: ring_simps power_mult_distrib[symmetric] poly_mult poly_cmult poly_add poly_exp) - with ap(2)[unfolded divides_def] have False by blast} - note xa = this - from h s have "poly p x = 0" by (simp add: poly_mult) - with pq0 have "poly q x = 0" by blast - with r xa have "poly r x = 0" - by (auto simp add: poly_mult poly_add poly_cmult eq_diff_eq[symmetric])} - note impth = this - from IH[rule_format, OF dsn, of s r] impth ds0 - have "s divides (pexp r (degree s))" by blast - then obtain u where u: "poly (pexp r (degree s)) = poly (pmult s u)" - unfolding divides_def by blast - hence u': "\x. poly s x * poly u x = poly r x ^ degree s" - by (simp add: poly_mult[symmetric] poly_exp[symmetric]) - let ?w = "pmult (pmult u (pexp [-a,1] (n - ?op))) (pexp r (n - degree s))" - from u' s r oop[of a] dsn have "poly (pexp q n) = poly (pmult p ?w)" - apply - apply (rule ext) - apply (simp only: power_mult_distrib power_add[symmetric] poly_add poly_mult poly_exp poly_cmult ring_simps) - - apply (simp add: power_mult_distrib power_add[symmetric] poly_add poly_mult poly_exp poly_cmult mult_assoc[symmetric]) - done - hence ?ths unfolding divides_def by blast} - ultimately have ?ths by blast } - ultimately have ?ths by blast} - ultimately have ?ths using a order_root by blast} - moreover - {assume exa: "\ (\a. poly p a = 0)" - from fundamental_theorem_of_algebra_alt[of p] exa obtain c cs where - ccs: "c\0" "list_all (\c. c = 0) cs" "p = c#cs" by blast - - from poly_0[OF ccs(2)] ccs(3) - have pp: "\x. poly p x = c" by simp - let ?w = "pmult [1/c] (pexp q n)" - from pp ccs(1) - have "poly (pexp q n) = poly (pmult p ?w) " - apply - apply (rule ext) - unfolding poly_mult_assoc[symmetric] by (simp add: poly_mult) - hence ?ths unfolding divides_def by blast} - ultimately show ?ths by blast -qed - -lemma nullstellensatz_univariate: - "(\x. poly p x = (0::complex) \ poly q x = 0) \ - p divides (q %^ (degree p)) \ (poly p = poly [] \ poly q = poly [])" -proof- - {assume pe: "poly p = poly []" - hence eq: "(\x. 
poly p x = (0::complex) \ poly q x = 0) \ poly q = poly []" - apply auto - by (rule ext, simp) - {assume "p divides (pexp q (degree p))" - then obtain r where r: "poly (pexp q (degree p)) = poly (pmult p r)" - unfolding divides_def by blast - from cong[OF r refl] pe degree_unique[OF pe] - have False by (simp add: poly_mult degree_def)} - with eq pe have ?thesis by blast} - moreover - {assume pe: "poly p \ poly []" - have p0: "poly [0] = poly []" by (rule ext, simp) - {assume dp: "degree p = 0" - then obtain k where "pnormalize p = [k]" using pe poly_normalize[of p] - unfolding degree_def by (cases "pnormalize p", auto) - hence k: "pnormalize p = [k]" "poly p = poly [k]" "k\0" - using pe poly_normalize[of p] by (auto simp add: p0) - hence th1: "\x. poly p x \ 0" by simp - from k(2,3) dp have "poly (pexp q (degree p)) = poly (pmult p [1/k]) " - by - (rule ext, simp add: poly_mult poly_exp) - hence th2: "p divides (pexp q (degree p))" unfolding divides_def by blast - from th1 th2 pe have ?thesis by blast} - moreover - {assume dp: "degree p \ 0" - then obtain n where n: "degree p = Suc n " by (cases "degree p", auto) - {assume "p divides (pexp q (Suc n))" - then obtain u where u: "poly (pexp q (Suc n)) = poly (pmult p u)" - unfolding divides_def by blast - hence u' :"\x. poly (pexp q (Suc n)) x = poly (pmult p u) x" by simp_all - {fix x assume h: "poly p x = 0" "poly q x \ 0" - hence "poly (pexp q (Suc n)) x \ 0" by (simp only: poly_exp) simp - hence False using u' h(1) by (simp only: poly_mult poly_exp) simp}} - with n nullstellensatz_lemma[of p q "degree p"] dp - have ?thesis by auto} - ultimately have ?thesis by blast} - ultimately show ?thesis by blast -qed - -text{* Useful lemma *} - -lemma (in idom_char_0) constant_degree: "constant (poly p) \ degree p = 0" (is "?lhs = ?rhs") -proof - assume l: ?lhs - from l[unfolded constant_def, rule_format, of _ "zero"] - have th: "poly p = poly [poly p 0]" apply - by (rule ext, simp) - from degree_unique[OF th] show ?rhs by (simp add: degree_def) -next - assume r: ?rhs - from r have "pnormalize p = [] \ (\k. pnormalize p = [k])" - unfolding degree_def by (cases "pnormalize p", auto) - then show ?lhs unfolding constant_def poly_normalize[of p, symmetric] - by (auto simp del: poly_normalize) -qed - -(* It would be nicer to prove this without using algebraic closure... *) - -lemma divides_degree_lemma: assumes dpn: "degree (p::complex list) = n" - shows "n \ degree (p *** q) \ poly (p *** q) = poly []" - using dpn -proof(induct n arbitrary: p q) - case 0 thus ?case by simp -next - case (Suc n p q) - from Suc.prems fundamental_theorem_of_algebra[of p] constant_degree[of p] - obtain a where a: "poly p a = 0" by auto - then obtain r where r: "p = pmult [-a, 1] r" unfolding poly_linear_divides - using Suc.prems by (auto simp add: degree_def) - {assume h: "poly (pmult r q) = poly []" - hence "poly (pmult p q) = poly []" using r - apply - apply (rule ext) by (auto simp add: poly_entire poly_mult poly_add poly_cmult) hence ?case by blast} - moreover - {assume h: "poly (pmult r q) \ poly []" - hence r0: "poly r \ poly []" and q0: "poly q \ poly []" - by (auto simp add: poly_entire) - have eq: "poly (pmult p q) = poly (pmult [-a, 1] (pmult r q))" - apply - apply (rule ext) - by (simp add: r poly_mult poly_add poly_cmult ring_simps) - from linear_mul_degree[OF h, of "- a"] - have dqe: "degree (pmult p q) = degree (pmult r q) + 1" - unfolding degree_unique[OF eq] . 
- from linear_mul_degree[OF r0, of "- a", unfolded r[symmetric]] r Suc.prems - have dr: "degree r = n" by auto - from Suc.hyps[OF dr, of q] have "Suc n \ degree (pmult p q)" - unfolding dqe using h by (auto simp del: poly.simps) - hence ?case by blast} - ultimately show ?case by blast -qed - -lemma divides_degree: assumes pq: "p divides (q:: complex list)" - shows "degree p \ degree q \ poly q = poly []" -using pq divides_degree_lemma[OF refl, of p] -apply (auto simp add: divides_def poly_entire) -apply atomize -apply (erule_tac x="qa" in allE, auto) -apply (subgoal_tac "degree q = degree (p *** qa)", simp) -apply (rule degree_unique, simp) -done - -(* Arithmetic operations on multivariate polynomials. *) - -lemma mpoly_base_conv: - "(0::complex) \ poly [] x" "c \ poly [c] x" "x \ poly [0,1] x" by simp_all - -lemma mpoly_norm_conv: - "poly [0] (x::complex) \ poly [] x" "poly [poly [] y] x \ poly [] x" by simp_all - -lemma mpoly_sub_conv: - "poly p (x::complex) - poly q x \ poly p x + -1 * poly q x" - by (simp add: diff_def) - -lemma poly_pad_rule: "poly p x = 0 ==> poly (0#p) x = (0::complex)" by simp - -lemma poly_cancel_eq_conv: "p = (0::complex) \ a \ 0 \ (q = 0) \ (a * q - b * p = 0)" apply (atomize (full)) by auto - -lemma resolve_eq_raw: "poly [] x \ 0" "poly [c] x \ (c::complex)" by auto -lemma resolve_eq_then: "(P \ (Q \ Q1)) \ (\P \ (Q \ Q2)) - \ Q \ P \ Q1 \ \P\ Q2" apply (atomize (full)) by blast -lemma expand_ex_beta_conv: "list_ex P [c] \ P c" by simp - -lemma poly_divides_pad_rule: - fixes p q :: "complex list" - assumes pq: "p divides q" - shows "p divides ((0::complex)#q)" -proof- - from pq obtain r where r: "poly q = poly (p *** r)" unfolding divides_def by blast - hence "poly (0#q) = poly (p *** ([0,1] *** r))" - by - (rule ext, simp add: poly_mult poly_cmult poly_add) - thus ?thesis unfolding divides_def by blast -qed - -lemma poly_divides_pad_const_rule: - fixes p q :: "complex list" - assumes pq: "p divides q" - shows "p divides (a %* q)" -proof- - from pq obtain r where r: "poly q = poly (p *** r)" unfolding divides_def by blast - hence "poly (a %* q) = poly (p *** (a %* r))" - by - (rule ext, simp add: poly_mult poly_cmult poly_add) - thus ?thesis unfolding divides_def by blast -qed - - -lemma poly_divides_conv0: - fixes p :: "complex list" - assumes lgpq: "length q < length p" and lq:"last p \ 0" - shows "p divides q \ (\ (list_ex (\c. c \ 0) q))" (is "?lhs \ ?rhs") -proof- - {assume r: ?rhs - hence eq: "poly q = poly []" unfolding poly_zero - by (simp add: list_all_iff list_ex_iff) - hence "poly q = poly (p *** [])" by - (rule ext, simp add: poly_mult) - hence ?lhs unfolding divides_def by blast} - moreover - {assume l: ?lhs - have ath: "\lq lp dq::nat. lq < lp ==> lq \ 0 \ dq <= lq - 1 ==> dq < lp - 1" - by arith - {assume q0: "length q = 0" - hence "q = []" by simp - hence ?rhs by simp} - moreover - {assume lgq0: "length q \ 0" - from pnormalize_length[of q] have dql: "degree q \ length q - 1" - unfolding degree_def by simp - from ath[OF lgpq lgq0 dql, unfolded pnormal_degree[OF lq, symmetric]] divides_degree[OF l] have "poly q = poly []" by auto - hence ?rhs unfolding poly_zero by (simp add: list_all_iff list_ex_iff)} - ultimately have ?rhs by blast } - ultimately show "?lhs \ ?rhs" by - (atomize (full), blast) -qed - -lemma poly_divides_conv1: - assumes a0: "a\ (0::complex)" and pp': "(p::complex list) divides p'" - and qrp': "\x. 
a * poly q x - poly p' x \ poly r x" - shows "p divides q \ p divides (r::complex list)" (is "?lhs \ ?rhs") -proof- - { - from pp' obtain t where t: "poly p' = poly (p *** t)" - unfolding divides_def by blast - {assume l: ?lhs - then obtain u where u: "poly q = poly (p *** u)" unfolding divides_def by blast - have "poly r = poly (p *** ((a %* u) +++ (-- t)))" - using u qrp' t - by - (rule ext, - simp add: poly_add poly_mult poly_cmult poly_minus ring_simps) - then have ?rhs unfolding divides_def by blast} - moreover - {assume r: ?rhs - then obtain u where u: "poly r = poly (p *** u)" unfolding divides_def by blast - from u t qrp' a0 have "poly q = poly (p *** ((1/a) %* (u +++ t)))" - by - (rule ext, atomize (full), simp add: poly_mult poly_add poly_cmult field_simps) - hence ?lhs unfolding divides_def by blast} - ultimately have "?lhs = ?rhs" by blast } -thus "?lhs \ ?rhs" by - (atomize(full), blast) -qed - -lemma basic_cqe_conv1: - "(\x. poly p x = 0 \ poly [] x \ 0) \ False" - "(\x. poly [] x \ 0) \ False" - "(\x. poly [c] x \ 0) \ c\0" - "(\x. poly [] x = 0) \ True" - "(\x. poly [c] x = 0) \ c = 0" by simp_all - -lemma basic_cqe_conv2: - assumes l:"last (a#b#p) \ 0" - shows "(\x. poly (a#b#p) x = (0::complex)) \ True" -proof- - {fix h t - assume h: "h\0" "list_all (\c. c=(0::complex)) t" "a#b#p = h#t" - hence "list_all (\c. c= 0) (b#p)" by simp - moreover have "last (b#p) \ set (b#p)" by simp - ultimately have "last (b#p) = 0" by (simp add: list_all_iff) - with l have False by simp} - hence th: "\ (\ h t. h\0 \ list_all (\c. c=0) t \ a#b#p = h#t)" - by blast - from fundamental_theorem_of_algebra_alt[OF th] - show "(\x. poly (a#b#p) x = (0::complex)) \ True" by auto -qed - -lemma basic_cqe_conv_2b: "(\x. poly p x \ (0::complex)) \ (list_ex (\c. c \ 0) p)" -proof- - have "\ (list_ex (\c. c \ 0) p) \ poly p = poly []" - by (simp add: poly_zero list_all_iff list_ex_iff) - also have "\ \ (\ (\x. poly p x \ 0))" by (auto intro: ext) - finally show "(\x. poly p x \ (0::complex)) \ (list_ex (\c. c \ 0) p)" - by - (atomize (full), blast) -qed - -lemma basic_cqe_conv3: - fixes p q :: "complex list" - assumes l: "last (a#p) \ 0" - shows "(\x. poly (a#p) x =0 \ poly q x \ 0) \ \ ((a#p) divides (q %^ (length p)))" -proof- - note np = pnormalize_eq[OF l] - {assume "poly (a#p) = poly []" hence False using l - unfolding poly_zero apply (auto simp add: list_all_iff del: last.simps) - apply (cases p, simp_all) done} - then have p0: "poly (a#p) \ poly []" by blast - from np have dp:"degree (a#p) = length p" by (simp add: degree_def) - from nullstellensatz_univariate[of "a#p" q] p0 dp - show "(\x. poly (a#p) x =0 \ poly q x \ 0) \ \ ((a#p) divides (q %^ (length p)))" - by - (atomize (full), auto) -qed - -lemma basic_cqe_conv4: - fixes p q :: "complex list" - assumes h: "\x. 
poly (q %^ n) x \ poly r x" - shows "p divides (q %^ n) \ p divides r" -proof- - from h have "poly (q %^ n) = poly r" by (auto intro: ext) - thus "p divides (q %^ n) \ p divides r" unfolding divides_def by simp -qed - -lemma pmult_Cons_Cons: "((a::complex)#b#p) *** q = (a %*q) +++ (0#((b#p) *** q))" - by simp - -lemma elim_neg_conv: "- z \ (-1) * (z::complex)" by simp -lemma eqT_intr: "PROP P \ (True \ PROP P )" "PROP P \ True" by blast+ -lemma negate_negate_rule: "Trueprop P \ \ P \ False" by (atomize (full), auto) -lemma last_simps: "last [x] = x" "last (x#y#ys) = last (y#ys)" by simp_all -lemma length_simps: "length [] = 0" "length (x#y#xs) = length xs + 2" "length [x] = 1" by simp_all - -lemma complex_entire: "(z::complex) \ 0 \ w \ 0 \ z*w \ 0" by simp -lemma resolve_eq_ne: "(P \ True) \ (\P \ False)" "(P \ False) \ (\P \ True)" - by (atomize (full)) simp_all -lemma cqe_conv1: "poly [] x = 0 \ True" by simp -lemma cqe_conv2: "(p \ (q \ r)) \ ((p \ q) \ (p \ r))" (is "?l \ ?r") -proof - assume "p \ q \ r" thus "p \ q \ p \ r" apply - apply (atomize (full)) by blast -next - assume "p \ q \ p \ r" "p" - thus "q \ r" apply - apply (atomize (full)) apply blast done -qed -lemma poly_const_conv: "poly [c] (x::complex) = y \ c = y" by simp - -end \ No newline at end of file diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Complex/README.html --- a/src/HOL/Complex/README.html Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,67 +0,0 @@ - - - - - - - - - HOL/Complex/README - - - - -

Complex: The Complex Numbers

This directory defines the type complex of the complex numbers, with numeric
constants and some complex analysis. The development includes nonstandard
analysis for the complex numbers. Note that the image HOL-Complex includes
theories from the directories HOL/Real and HOL/Hyperreal. They define other
types including real (the real numbers) and hypreal (the hyperreal or
non-standard reals).

  • CLim  Limits, continuous functions, and derivatives for the complex numbers
  • CSeries  Finite summation and infinite series for the complex numbers
  • CStar  Star-transforms for the complex numbers, to form non-standard extensions of sets and functions
  • Complex  The complex numbers
  • NSCA  Nonstandard complex analysis
  • NSComplex  Ultrapower construction of the nonstandard complex numbers

Real: Dedekind Cut Construction of the Real Line

  • Lubs  Definition of upper bounds, lubs and so on, to support completeness proofs
  • PReal  The positive reals constructed using Dedekind cuts
  • Rational  The rational numbers constructed as equivalence classes of integers
  • RComplete  The reals are complete: they satisfy the supremum property. They also have the Archimedean property.
  • RealDef  The real numbers, their ordering properties, and embedding of the integers and the natural numbers
  • RealPow  Real numbers raised to natural number powers

Hyperreal: Ultrafilter Construction of the Non-Standard Reals

See J. D. Fleuriot and L. C. Paulson. Mechanizing Nonstandard Real Analysis.
LMS J. Computation and Mathematics 3 (2000), 140-190.

  • Filter  Theory of Filters and Ultrafilters. Main result is a version of the Ultrafilter Theorem proved using Zorn's Lemma.
  • HLog  Non-standard logarithms
  • HSeries  Non-standard theory of finite summation and infinite series
  • HTranscendental  Non-standard extensions of transcendental functions
  • HyperDef  Ultrapower construction of the hyperreals
  • HyperNat  Ultrapower construction of the hypernaturals
  • HyperPow  Powers theory for the hyperreals
  • Integration  Gage integrals
  • Lim  Theory of limits, continuous functions, and derivatives
  • Log  Logarithms for the reals
  • MacLaurin  MacLaurin series
  • NatStar  Star-transforms for the hypernaturals, to form non-standard extensions of sets and functions involving the naturals or reals
  • NthRoot  Existence of n-th roots of real numbers
  • NSA  Theory defining sets of infinite numbers, infinitesimals, the infinitely close relation, and their various algebraic properties
  • Poly  Univariate real polynomials
  • SEQ  Convergence of sequences and series using standard and nonstandard analysis
  • Series  Finite summation and infinite series for the reals
  • Star  Nonstandard extensions of real sets and real functions
  • Transcendental  Power series and transcendental functions

Last modified $Date$ - - diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Complex/document/root.tex --- a/src/HOL/Complex/document/root.tex Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,32 +0,0 @@ - -% $Id$ - -\documentclass[11pt,a4paper]{article} -\usepackage{graphicx,isabelle,isabellesym,latexsym} -\usepackage[latin1]{inputenc} -\usepackage{pdfsetup} - -\urlstyle{rm} -\isabellestyle{it} -\pagestyle{myheadings} - -\begin{document} - -\title{Isabelle/HOL-Complex --- Higher-Order Logic with Complex Numbers} -\maketitle - -\tableofcontents - -\begin{center} - \includegraphics[width=\textwidth,height=\textheight,keepaspectratio]{session_graph} -\end{center} - -\newpage - -\renewcommand{\isamarkupheader}[1]% -{\section{\isabellecontext: #1}\markright{THEORY~``\isabellecontext''}} - -\parindent 0pt\parskip 0.5ex -\input{session} - -\end{document} diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Complex_Main.thy --- a/src/HOL/Complex_Main.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Complex_Main.thy Tue Dec 30 11:10:01 2008 +0100 @@ -9,7 +9,7 @@ imports Main Real - "~~/src/HOL/Complex/Fundamental_Theorem_Algebra" + Fundamental_Theorem_Algebra Log Ln Taylor diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Datatype.thy --- a/src/HOL/Datatype.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Datatype.thy Tue Dec 30 11:10:01 2008 +0100 @@ -578,7 +578,13 @@ lemma Sumr_inject: "Sumr f = Sumr g ==> f = g" by (unfold Sumr_def) (erule sum_case_inject) -hide (open) const Suml Sumr +primrec Projl :: "'a + 'b => 'a" +where Projl_Inl: "Projl (Inl x) = x" + +primrec Projr :: "'a + 'b => 'b" +where Projr_Inr: "Projr (Inr x) = x" + +hide (open) const Suml Sumr Projl Projr subsection {* The option datatype *} diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Dense_Linear_Order.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Dense_Linear_Order.thy Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,877 @@ +(* Author: Amine Chaieb, TU Muenchen *) + +header {* Dense linear order without endpoints + and a quantifier elimination procedure in Ferrante and Rackoff style *} + +theory Dense_Linear_Order +imports Plain Groebner_Basis +uses + "~~/src/HOL/Tools/Qelim/langford_data.ML" + "~~/src/HOL/Tools/Qelim/ferrante_rackoff_data.ML" + ("~~/src/HOL/Tools/Qelim/langford.ML") + ("~~/src/HOL/Tools/Qelim/ferrante_rackoff.ML") +begin + +setup {* Langford_Data.setup #> Ferrante_Rackoff_Data.setup *} + +context linorder +begin + +lemma less_not_permute: "\ (x < y \ y < x)" by (simp add: not_less linear) + +lemma gather_simps: + shows + "(\x. (\y \ L. y < x) \ (\y \ U. x < y) \ x < u \ P x) \ (\x. (\y \ L. y < x) \ (\y \ (insert u U). x < y) \ P x)" + and "(\x. (\y \ L. y < x) \ (\y \ U. x < y) \ l < x \ P x) \ (\x. (\y \ (insert l L). y < x) \ (\y \ U. x < y) \ P x)" + "(\x. (\y \ L. y < x) \ (\y \ U. x < y) \ x < u) \ (\x. (\y \ L. y < x) \ (\y \ (insert u U). x < y))" + and "(\x. (\y \ L. y < x) \ (\y \ U. x < y) \ l < x) \ (\x. (\y \ (insert l L). y < x) \ (\y \ U. x < y))" by auto + +lemma + gather_start: "(\x. P x) \ (\x. (\y \ {}. y < x) \ (\y\ {}. x < y) \ P x)" + by simp + +text{* Theorems for @{text "\z. \x. x < z \ (P x \ P\<^bsub>-\\<^esub>)"}*} +lemma minf_lt: "\z . \x. x < z \ (x < t \ True)" by auto +lemma minf_gt: "\z . \x. x < z \ (t < x \ False)" + by (simp add: not_less) (rule exI[where x="t"], auto simp add: less_le) + +lemma minf_le: "\z. \x. x < z \ (x \ t \ True)" by (auto simp add: less_le) +lemma minf_ge: "\z. \x. 
x < z \ (t \ x \ False)" + by (auto simp add: less_le not_less not_le) +lemma minf_eq: "\z. \x. x < z \ (x = t \ False)" by auto +lemma minf_neq: "\z. \x. x < z \ (x \ t \ True)" by auto +lemma minf_P: "\z. \x. x < z \ (P \ P)" by blast + +text{* Theorems for @{text "\z. \x. x < z \ (P x \ P\<^bsub>+\\<^esub>)"}*} +lemma pinf_gt: "\z . \x. z < x \ (t < x \ True)" by auto +lemma pinf_lt: "\z . \x. z < x \ (x < t \ False)" + by (simp add: not_less) (rule exI[where x="t"], auto simp add: less_le) + +lemma pinf_ge: "\z. \x. z < x \ (t \ x \ True)" by (auto simp add: less_le) +lemma pinf_le: "\z. \x. z < x \ (x \ t \ False)" + by (auto simp add: less_le not_less not_le) +lemma pinf_eq: "\z. \x. z < x \ (x = t \ False)" by auto +lemma pinf_neq: "\z. \x. z < x \ (x \ t \ True)" by auto +lemma pinf_P: "\z. \x. z < x \ (P \ P)" by blast + +lemma nmi_lt: "t \ U \ \x. \True \ x < t \ (\ u\ U. u \ x)" by auto +lemma nmi_gt: "t \ U \ \x. \False \ t < x \ (\ u\ U. u \ x)" + by (auto simp add: le_less) +lemma nmi_le: "t \ U \ \x. \True \ x\ t \ (\ u\ U. u \ x)" by auto +lemma nmi_ge: "t \ U \ \x. \False \ t\ x \ (\ u\ U. u \ x)" by auto +lemma nmi_eq: "t \ U \ \x. \False \ x = t \ (\ u\ U. u \ x)" by auto +lemma nmi_neq: "t \ U \\x. \True \ x \ t \ (\ u\ U. u \ x)" by auto +lemma nmi_P: "\ x. ~P \ P \ (\ u\ U. u \ x)" by auto +lemma nmi_conj: "\\x. \P1' \ P1 x \ (\ u\ U. u \ x) ; + \x. \P2' \ P2 x \ (\ u\ U. u \ x)\ \ + \x. \(P1' \ P2') \ (P1 x \ P2 x) \ (\ u\ U. u \ x)" by auto +lemma nmi_disj: "\\x. \P1' \ P1 x \ (\ u\ U. u \ x) ; + \x. \P2' \ P2 x \ (\ u\ U. u \ x)\ \ + \x. \(P1' \ P2') \ (P1 x \ P2 x) \ (\ u\ U. u \ x)" by auto + +lemma npi_lt: "t \ U \ \x. \False \ x < t \ (\ u\ U. x \ u)" by (auto simp add: le_less) +lemma npi_gt: "t \ U \ \x. \True \ t < x \ (\ u\ U. x \ u)" by auto +lemma npi_le: "t \ U \ \x. \False \ x \ t \ (\ u\ U. x \ u)" by auto +lemma npi_ge: "t \ U \ \x. \True \ t \ x \ (\ u\ U. x \ u)" by auto +lemma npi_eq: "t \ U \ \x. \False \ x = t \ (\ u\ U. x \ u)" by auto +lemma npi_neq: "t \ U \ \x. \True \ x \ t \ (\ u\ U. x \ u )" by auto +lemma npi_P: "\ x. ~P \ P \ (\ u\ U. x \ u)" by auto +lemma npi_conj: "\\x. \P1' \ P1 x \ (\ u\ U. x \ u) ; \x. \P2' \ P2 x \ (\ u\ U. x \ u)\ + \ \x. \(P1' \ P2') \ (P1 x \ P2 x) \ (\ u\ U. x \ u)" by auto +lemma npi_disj: "\\x. \P1' \ P1 x \ (\ u\ U. x \ u) ; \x. \P2' \ P2 x \ (\ u\ U. x \ u)\ + \ \x. \(P1' \ P2') \ (P1 x \ P2 x) \ (\ u\ U. x \ u)" by auto + +lemma lin_dense_lt: "t \ U \ \x l u. (\ t. l < t \ t < u \ t \ U) \ l< x \ x < u \ x < t \ (\ y. l < y \ y < u \ y < t)" +proof(clarsimp) + fix x l u y assume tU: "t \ U" and noU: "\t. l < t \ t < u \ t \ U" and lx: "l < x" + and xu: "xy" by auto + {assume H: "t < y" + from less_trans[OF lx px] less_trans[OF H yu] + have "l < t \ t < u" by simp + with tU noU have "False" by auto} + hence "\ t < y" by auto hence "y \ t" by (simp add: not_less) + thus "y < t" using tny by (simp add: less_le) +qed + +lemma lin_dense_gt: "t \ U \ \x l u. (\ t. l < t \ t< u \ t \ U) \ l < x \ x < u \ t < x \ (\ y. l < y \ y < u \ t < y)" +proof(clarsimp) + fix x l u y + assume tU: "t \ U" and noU: "\t. l < t \ t < u \ t \ U" and lx: "l < x" and xu: "xy" by auto + {assume H: "y< t" + from less_trans[OF ly H] less_trans[OF px xu] have "l < t \ t < u" by simp + with tU noU have "False" by auto} + hence "\ y y" by (auto simp add: not_less) + thus "t < y" using tny by (simp add:less_le) +qed + +lemma lin_dense_le: "t \ U \ \x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ x \ t \ (\ y. 
l < y \ y < u \ y\ t)" +proof(clarsimp) + fix x l u y + assume tU: "t \ U" and noU: "\t. l < t \ t < u \ t \ U" and lx: "l < x" and xu: "x t" and ly: "ly" by auto + {assume H: "t < y" + from less_le_trans[OF lx px] less_trans[OF H yu] + have "l < t \ t < u" by simp + with tU noU have "False" by auto} + hence "\ t < y" by auto thus "y \ t" by (simp add: not_less) +qed + +lemma lin_dense_ge: "t \ U \ \x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ t \ x \ (\ y. l < y \ y < u \ t \ y)" +proof(clarsimp) + fix x l u y + assume tU: "t \ U" and noU: "\t. l < t \ t < u \ t \ U" and lx: "l < x" and xu: "x x" and ly: "ly" by auto + {assume H: "y< t" + from less_trans[OF ly H] le_less_trans[OF px xu] + have "l < t \ t < u" by simp + with tU noU have "False" by auto} + hence "\ y y" by (simp add: not_less) +qed +lemma lin_dense_eq: "t \ U \ \x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ x = t \ (\ y. l < y \ y < u \ y= t)" by auto +lemma lin_dense_neq: "t \ U \ \x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ x \ t \ (\ y. l < y \ y < u \ y\ t)" by auto +lemma lin_dense_P: "\x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ P \ (\ y. l < y \ y < u \ P)" by auto + +lemma lin_dense_conj: + "\\x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ P1 x + \ (\ y. l < y \ y < u \ P1 y) ; + \x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ P2 x + \ (\ y. l < y \ y < u \ P2 y)\ \ + \x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ (P1 x \ P2 x) + \ (\ y. l < y \ y < u \ (P1 y \ P2 y))" + by blast +lemma lin_dense_disj: + "\\x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ P1 x + \ (\ y. l < y \ y < u \ P1 y) ; + \x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ P2 x + \ (\ y. l < y \ y < u \ P2 y)\ \ + \x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ (P1 x \ P2 x) + \ (\ y. l < y \ y < u \ (P1 y \ P2 y))" + by blast + +lemma npmibnd: "\\x. \ MP \ P x \ (\ u\ U. u \ x); \x. \PP \ P x \ (\ u\ U. x \ u)\ + \ \x. \ MP \ \PP \ P x \ (\ u\ U. \ u' \ U. u \ x \ x \ u')" +by auto + +lemma finite_set_intervals: + assumes px: "P x" and lx: "l \ x" and xu: "x \ u" and linS: "l\ S" + and uinS: "u \ S" and fS:"finite S" and lS: "\ x\ S. l \ x" and Su: "\ x\ S. x \ u" + shows "\ a \ S. \ b \ S. (\ y. a < y \ y < b \ y \ S) \ a \ x \ x \ b \ P x" +proof- + let ?Mx = "{y. y\ S \ y \ x}" + let ?xM = "{y. y\ S \ x \ y}" + let ?a = "Max ?Mx" + let ?b = "Min ?xM" + have MxS: "?Mx \ S" by blast + hence fMx: "finite ?Mx" using fS finite_subset by auto + from lx linS have linMx: "l \ ?Mx" by blast + hence Mxne: "?Mx \ {}" by blast + have xMS: "?xM \ S" by blast + hence fxM: "finite ?xM" using fS finite_subset by auto + from xu uinS have linxM: "u \ ?xM" by blast + hence xMne: "?xM \ {}" by blast + have ax:"?a \ x" using Mxne fMx by auto + have xb:"x \ ?b" using xMne fxM by auto + have "?a \ ?Mx" using Max_in[OF fMx Mxne] by simp hence ainS: "?a \ S" using MxS by blast + have "?b \ ?xM" using Min_in[OF fxM xMne] by simp hence binS: "?b \ S" using xMS by blast + have noy:"\ y. 
?a < y \ y < ?b \ y \ S" + proof(clarsimp) + fix y assume ay: "?a < y" and yb: "y < ?b" and yS: "y \ S" + from yS have "y\ ?Mx \ y\ ?xM" by (auto simp add: linear) + moreover {assume "y \ ?Mx" hence "y \ ?a" using Mxne fMx by auto with ay have "False" by (simp add: not_le[symmetric])} + moreover {assume "y \ ?xM" hence "?b \ y" using xMne fxM by auto with yb have "False" by (simp add: not_le[symmetric])} + ultimately show "False" by blast + qed + from ainS binS noy ax xb px show ?thesis by blast +qed + +lemma finite_set_intervals2: + assumes px: "P x" and lx: "l \ x" and xu: "x \ u" and linS: "l\ S" + and uinS: "u \ S" and fS:"finite S" and lS: "\ x\ S. l \ x" and Su: "\ x\ S. x \ u" + shows "(\ s\ S. P s) \ (\ a \ S. \ b \ S. (\ y. a < y \ y < b \ y \ S) \ a < x \ x < b \ P x)" +proof- + from finite_set_intervals[where P="P", OF px lx xu linS uinS fS lS Su] + obtain a and b where + as: "a\ S" and bs: "b\ S" and noS:"\y. a < y \ y < b \ y \ S" + and axb: "a \ x \ x \ b \ P x" by auto + from axb have "x= a \ x= b \ (a < x \ x < b)" by (auto simp add: le_less) + thus ?thesis using px as bs noS by blast +qed + +end + +section {* The classical QE after Langford for dense linear orders *} + +context dense_linear_order +begin + +lemma interval_empty_iff: + "{y. x < y \ y < z} = {} \ \ x < z" + by (auto dest: dense) + +lemma dlo_qe_bnds: + assumes ne: "L \ {}" and neU: "U \ {}" and fL: "finite L" and fU: "finite U" + shows "(\x. (\y \ L. y < x) \ (\y \ U. x < y)) \ (\ l \ L. \u \ U. l < u)" +proof (simp only: atomize_eq, rule iffI) + assume H: "\x. (\y\L. y < x) \ (\y\U. x < y)" + then obtain x where xL: "\y\L. y < x" and xU: "\y\U. x < y" by blast + {fix l u assume l: "l \ L" and u: "u \ U" + have "l < x" using xL l by blast + also have "x < u" using xU u by blast + finally (less_trans) have "l < u" .} + thus "\l\L. \u\U. l < u" by blast +next + assume H: "\l\L. \u\U. l < u" + let ?ML = "Max L" + let ?MU = "Min U" + from fL ne have th1: "?ML \ L" and th1': "\l\L. l \ ?ML" by auto + from fU neU have th2: "?MU \ U" and th2': "\u\U. ?MU \ u" by auto + from th1 th2 H have "?ML < ?MU" by auto + with dense obtain w where th3: "?ML < w" and th4: "w < ?MU" by blast + from th3 th1' have "\l \ L. l < w" by auto + moreover from th4 th2' have "\u \ U. w < u" by auto + ultimately show "\x. (\y\L. y < x) \ (\y\U. x < y)" by auto +qed + +lemma dlo_qe_noub: + assumes ne: "L \ {}" and fL: "finite L" + shows "(\x. (\y \ L. y < x) \ (\y \ {}. x < y)) \ True" +proof(simp add: atomize_eq) + from gt_ex[of "Max L"] obtain M where M: "Max L < M" by blast + from ne fL have "\x \ L. x \ Max L" by simp + with M have "\x\L. x < M" by (auto intro: le_less_trans) + thus "\x. \y\L. y < x" by blast +qed + +lemma dlo_qe_nolb: + assumes ne: "U \ {}" and fU: "finite U" + shows "(\x. (\y \ {}. y < x) \ (\y \ U. x < y)) \ True" +proof(simp add: atomize_eq) + from lt_ex[of "Min U"] obtain M where M: "M < Min U" by blast + from ne fU have "\x \ U. Min U \ x" by simp + with M have "\x\U. M < x" by (auto intro: less_le_trans) + thus "\x. \y\U. x < y" by blast +qed + +lemma exists_neq: "\(x::'a). x \ t" "\(x::'a). t \ x" + using gt_ex[of t] by auto + +lemmas dlo_simps = order_refl less_irrefl not_less not_le exists_neq + le_less neq_iff linear less_not_permute + +lemma axiom: "dense_linear_order (op \) (op <)" by (rule dense_linear_order_axioms) +lemma atoms: + shows "TERM (less :: 'a \ _)" + and "TERM (less_eq :: 'a \ _)" + and "TERM (op = :: 'a \ _)" . 
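Note on the Langford setup just concluded: dlo_qe_bnds reduces the existential statement "some x lies strictly above every point of the finite set L and strictly below every point of the finite set U" to the quantifier-free condition that every element of L is below every element of U, while dlo_qe_nolb and dlo_qe_noub discharge the cases where one of the bound sets is empty. The following fragment is only an illustrative sketch of that equivalence over the rationals as a concrete dense linear order; the Python helper names eliminate_exists and witness are invented here and are not part of langford.ML.

    from fractions import Fraction

    def eliminate_exists(lower, upper):
        # Decide  EX x. (ALL l in lower. l < x) AND (ALL u in upper. x < u)
        # over a dense linear order without endpoints (here: the rationals).
        # This is the right-hand side of dlo_qe_bnds, with dlo_qe_nolb and
        # dlo_qe_noub covering the cases of an empty bound set.
        if not lower or not upper:
            return True
        return all(l < u for l in lower for u in upper)

    def witness(lower, upper):
        # Produce an explicit witness when one exists, using only density
        # (a point strictly between two points) and the absence of
        # greatest/least elements.
        if not lower and not upper:
            return Fraction(0)
        if not lower:
            return min(upper) - 1
        if not upper:
            return max(lower) + 1
        if max(lower) < min(upper):
            return (max(lower) + min(upper)) / 2
        return None

    # EX x. 1 < x AND 3 < x AND x < 5  -- true, witness strictly between 3 and 5
    L, U = [Fraction(1), Fraction(3)], [Fraction(5)]
    assert eliminate_exists(L, U) and Fraction(3) < witness(L, U) < Fraction(5)
    # EX x. 3 < x AND x < 2  -- false
    assert not eliminate_exists([Fraction(3)], [Fraction(2)])

The witness construction uses exactly the two ingredients the locale provides: density, which supplies a point strictly between max L and min U, and the absence of greatest and least elements, which handles empty bound sets.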
+ +declare axiom[langford qe: dlo_qe_bnds dlo_qe_nolb dlo_qe_noub gather: gather_start gather_simps atoms: atoms] +declare dlo_simps[langfordsimp] + +end + +(* FIXME: Move to HOL -- together with the conj_aci_rule in langford.ML *) +lemma dnf: + "(P & (Q | R)) = ((P&Q) | (P&R))" + "((Q | R) & P) = ((Q&P) | (R&P))" + by blast+ + +lemmas weak_dnf_simps = simp_thms dnf + +lemma nnf_simps: + "(\(P \ Q)) = (\P \ \Q)" "(\(P \ Q)) = (\P \ \Q)" "(P \ Q) = (\P \ Q)" + "(P = Q) = ((P \ Q) \ (\P \ \ Q))" "(\ \(P)) = P" + by blast+ + +lemma ex_distrib: "(\x. P x \ Q x) \ ((\x. P x) \ (\x. Q x))" by blast + +lemmas dnf_simps = weak_dnf_simps nnf_simps ex_distrib + +use "~~/src/HOL/Tools/Qelim/langford.ML" +method_setup dlo = {* + Method.ctxt_args (Method.SIMPLE_METHOD' o LangfordQE.dlo_tac) +*} "Langford's algorithm for quantifier elimination in dense linear orders" + + +section {* Contructive dense linear orders yield QE for linear arithmetic over ordered Fields -- see @{text "Arith_Tools.thy"} *} + +text {* Linear order without upper bounds *} + +class_locale linorder_stupid_syntax = linorder +begin +notation + less_eq ("op \") and + less_eq ("(_/ \ _)" [51, 51] 50) and + less ("op \") and + less ("(_/ \ _)" [51, 51] 50) + +end + +class_locale linorder_no_ub = linorder_stupid_syntax + + assumes gt_ex: "\y. less x y" +begin +lemma ge_ex: "\y. x \ y" using gt_ex by auto + +text {* Theorems for @{text "\z. \x. z \ x \ (P x \ P\<^bsub>+\\<^esub>)"} *} +lemma pinf_conj: + assumes ex1: "\z1. \x. z1 \ x \ (P1 x \ P1')" + and ex2: "\z2. \x. z2 \ x \ (P2 x \ P2')" + shows "\z. \x. z \ x \ ((P1 x \ P2 x) \ (P1' \ P2'))" +proof- + from ex1 ex2 obtain z1 and z2 where z1: "\x. z1 \ x \ (P1 x \ P1')" + and z2: "\x. z2 \ x \ (P2 x \ P2')" by blast + from gt_ex obtain z where z:"ord.max less_eq z1 z2 \ z" by blast + from z have zz1: "z1 \ z" and zz2: "z2 \ z" by simp_all + {fix x assume H: "z \ x" + from less_trans[OF zz1 H] less_trans[OF zz2 H] + have "(P1 x \ P2 x) \ (P1' \ P2')" using z1 zz1 z2 zz2 by auto + } + thus ?thesis by blast +qed + +lemma pinf_disj: + assumes ex1: "\z1. \x. z1 \ x \ (P1 x \ P1')" + and ex2: "\z2. \x. z2 \ x \ (P2 x \ P2')" + shows "\z. \x. z \ x \ ((P1 x \ P2 x) \ (P1' \ P2'))" +proof- + from ex1 ex2 obtain z1 and z2 where z1: "\x. z1 \ x \ (P1 x \ P1')" + and z2: "\x. z2 \ x \ (P2 x \ P2')" by blast + from gt_ex obtain z where z:"ord.max less_eq z1 z2 \ z" by blast + from z have zz1: "z1 \ z" and zz2: "z2 \ z" by simp_all + {fix x assume H: "z \ x" + from less_trans[OF zz1 H] less_trans[OF zz2 H] + have "(P1 x \ P2 x) \ (P1' \ P2')" using z1 zz1 z2 zz2 by auto + } + thus ?thesis by blast +qed + +lemma pinf_ex: assumes ex:"\z. \x. z \ x \ (P x \ P1)" and p1: P1 shows "\ x. P x" +proof- + from ex obtain z where z: "\x. z \ x \ (P x \ P1)" by blast + from gt_ex obtain x where x: "z \ x" by blast + from z x p1 show ?thesis by blast +qed + +end + +text {* Linear order without upper bounds *} + +class_locale linorder_no_lb = linorder_stupid_syntax + + assumes lt_ex: "\y. less y x" +begin +lemma le_ex: "\y. y \ x" using lt_ex by auto + + +text {* Theorems for @{text "\z. \x. x \ z \ (P x \ P\<^bsub>-\\<^esub>)"} *} +lemma minf_conj: + assumes ex1: "\z1. \x. x \ z1 \ (P1 x \ P1')" + and ex2: "\z2. \x. x \ z2 \ (P2 x \ P2')" + shows "\z. \x. x \ z \ ((P1 x \ P2 x) \ (P1' \ P2'))" +proof- + from ex1 ex2 obtain z1 and z2 where z1: "\x. x \ z1 \ (P1 x \ P1')"and z2: "\x. 
x \ z2 \ (P2 x \ P2')" by blast + from lt_ex obtain z where z:"z \ ord.min less_eq z1 z2" by blast + from z have zz1: "z \ z1" and zz2: "z \ z2" by simp_all + {fix x assume H: "x \ z" + from less_trans[OF H zz1] less_trans[OF H zz2] + have "(P1 x \ P2 x) \ (P1' \ P2')" using z1 zz1 z2 zz2 by auto + } + thus ?thesis by blast +qed + +lemma minf_disj: + assumes ex1: "\z1. \x. x \ z1 \ (P1 x \ P1')" + and ex2: "\z2. \x. x \ z2 \ (P2 x \ P2')" + shows "\z. \x. x \ z \ ((P1 x \ P2 x) \ (P1' \ P2'))" +proof- + from ex1 ex2 obtain z1 and z2 where z1: "\x. x \ z1 \ (P1 x \ P1')"and z2: "\x. x \ z2 \ (P2 x \ P2')" by blast + from lt_ex obtain z where z:"z \ ord.min less_eq z1 z2" by blast + from z have zz1: "z \ z1" and zz2: "z \ z2" by simp_all + {fix x assume H: "x \ z" + from less_trans[OF H zz1] less_trans[OF H zz2] + have "(P1 x \ P2 x) \ (P1' \ P2')" using z1 zz1 z2 zz2 by auto + } + thus ?thesis by blast +qed + +lemma minf_ex: assumes ex:"\z. \x. x \ z \ (P x \ P1)" and p1: P1 shows "\ x. P x" +proof- + from ex obtain z where z: "\x. x \ z \ (P x \ P1)" by blast + from lt_ex obtain x where x: "x \ z" by blast + from z x p1 show ?thesis by blast +qed + +end + + +class_locale constr_dense_linear_order = linorder_no_lb + linorder_no_ub + + fixes between + assumes between_less: "less x y \ less x (between x y) \ less (between x y) y" + and between_same: "between x x = x" + +class_interpretation constr_dense_linear_order < dense_linear_order + apply unfold_locales + using gt_ex lt_ex between_less + by (auto, rule_tac x="between x y" in exI, simp) + +context constr_dense_linear_order +begin + +lemma rinf_U: + assumes fU: "finite U" + and lin_dense: "\x l u. (\ t. l \ t \ t\ u \ t \ U) \ l\ x \ x \ u \ P x + \ (\ y. l \ y \ y \ u \ P y )" + and nmpiU: "\x. \ MP \ \PP \ P x \ (\ u\ U. \ u' \ U. u \ x \ x \ u')" + and nmi: "\ MP" and npi: "\ PP" and ex: "\ x. P x" + shows "\ u\ U. \ u' \ U. P (between u u')" +proof- + from ex obtain x where px: "P x" by blast + from px nmi npi nmpiU have "\ u\ U. \ u' \ U. u \ x \ x \ u'" by auto + then obtain u and u' where uU:"u\ U" and uU': "u' \ U" and ux:"u \ x" and xu':"x \ u'" by auto + from uU have Une: "U \ {}" by auto + term "linorder.Min less_eq" + let ?l = "linorder.Min less_eq U" + let ?u = "linorder.Max less_eq U" + have linM: "?l \ U" using fU Une by simp + have uinM: "?u \ U" using fU Une by simp + have lM: "\ t\ U. ?l \ t" using Une fU by auto + have Mu: "\ t\ U. t \ ?u" using Une fU by auto + have th:"?l \ u" using uU Une lM by auto + from order_trans[OF th ux] have lx: "?l \ x" . + have th: "u' \ ?u" using uU' Une Mu by simp + from order_trans[OF xu' th] have xu: "x \ ?u" . + from finite_set_intervals2[where P="P",OF px lx xu linM uinM fU lM Mu] + have "(\ s\ U. P s) \ + (\ t1\ U. \ t2 \ U. (\ y. t1 \ y \ y \ t2 \ y \ U) \ t1 \ x \ x \ t2 \ P x)" . + moreover { fix u assume um: "u\U" and pu: "P u" + have "between u u = u" by (simp add: between_same) + with um pu have "P (between u u)" by simp + with um have ?thesis by blast} + moreover{ + assume "\ t1\ U. \ t2 \ U. (\ y. t1 \ y \ y \ t2 \ y \ U) \ t1 \ x \ x \ t2 \ P x" + then obtain t1 and t2 where t1M: "t1 \ U" and t2M: "t2\ U" + and noM: "\ y. t1 \ y \ y \ t2 \ y \ U" and t1x: "t1 \ x" and xt2: "x \ t2" and px: "P x" + by blast + from less_trans[OF t1x xt2] have t1t2: "t1 \ t2" . 
+ let ?u = "between t1 t2" + from between_less t1t2 have t1lu: "t1 \ ?u" and ut2: "?u \ t2" by auto + from lin_dense noM t1x xt2 px t1lu ut2 have "P ?u" by blast + with t1M t2M have ?thesis by blast} + ultimately show ?thesis by blast + qed + +theorem fr_eq: + assumes fU: "finite U" + and lin_dense: "\x l u. (\ t. l \ t \ t\ u \ t \ U) \ l\ x \ x \ u \ P x + \ (\ y. l \ y \ y \ u \ P y )" + and nmibnd: "\x. \ MP \ P x \ (\ u\ U. u \ x)" + and npibnd: "\x. \PP \ P x \ (\ u\ U. x \ u)" + and mi: "\z. \x. x \ z \ (P x = MP)" and pi: "\z. \x. z \ x \ (P x = PP)" + shows "(\ x. P x) \ (MP \ PP \ (\ u \ U. \ u'\ U. P (between u u')))" + (is "_ \ (_ \ _ \ ?F)" is "?E \ ?D") +proof- + { + assume px: "\ x. P x" + have "MP \ PP \ (\ MP \ \ PP)" by blast + moreover {assume "MP \ PP" hence "?D" by blast} + moreover {assume nmi: "\ MP" and npi: "\ PP" + from npmibnd[OF nmibnd npibnd] + have nmpiU: "\x. \ MP \ \PP \ P x \ (\ u\ U. \ u' \ U. u \ x \ x \ u')" . + from rinf_U[OF fU lin_dense nmpiU nmi npi px] have "?D" by blast} + ultimately have "?D" by blast} + moreover + { assume "?D" + moreover {assume m:"MP" from minf_ex[OF mi m] have "?E" .} + moreover {assume p: "PP" from pinf_ex[OF pi p] have "?E" . } + moreover {assume f:"?F" hence "?E" by blast} + ultimately have "?E" by blast} + ultimately have "?E = ?D" by blast thus "?E \ ?D" by simp +qed + +lemmas minf_thms = minf_conj minf_disj minf_eq minf_neq minf_lt minf_le minf_gt minf_ge minf_P +lemmas pinf_thms = pinf_conj pinf_disj pinf_eq pinf_neq pinf_lt pinf_le pinf_gt pinf_ge pinf_P + +lemmas nmi_thms = nmi_conj nmi_disj nmi_eq nmi_neq nmi_lt nmi_le nmi_gt nmi_ge nmi_P +lemmas npi_thms = npi_conj npi_disj npi_eq npi_neq npi_lt npi_le npi_gt npi_ge npi_P +lemmas lin_dense_thms = lin_dense_conj lin_dense_disj lin_dense_eq lin_dense_neq lin_dense_lt lin_dense_le lin_dense_gt lin_dense_ge lin_dense_P + +lemma ferrack_axiom: "constr_dense_linear_order less_eq less between" + by (rule constr_dense_linear_order_axioms) +lemma atoms: + shows "TERM (less :: 'a \ _)" + and "TERM (less_eq :: 'a \ _)" + and "TERM (op = :: 'a \ _)" . 
+ +declare ferrack_axiom [ferrack minf: minf_thms pinf: pinf_thms + nmi: nmi_thms npi: npi_thms lindense: + lin_dense_thms qe: fr_eq atoms: atoms] + +declaration {* +let +fun simps phi = map (Morphism.thm phi) [@{thm "not_less"}, @{thm "not_le"}] +fun generic_whatis phi = + let + val [lt, le] = map (Morphism.term phi) [@{term "op \"}, @{term "op \"}] + fun h x t = + case term_of t of + Const("op =", _)$y$z => if term_of x aconv y then Ferrante_Rackoff_Data.Eq + else Ferrante_Rackoff_Data.Nox + | @{term "Not"}$(Const("op =", _)$y$z) => if term_of x aconv y then Ferrante_Rackoff_Data.NEq + else Ferrante_Rackoff_Data.Nox + | b$y$z => if Term.could_unify (b, lt) then + if term_of x aconv y then Ferrante_Rackoff_Data.Lt + else if term_of x aconv z then Ferrante_Rackoff_Data.Gt + else Ferrante_Rackoff_Data.Nox + else if Term.could_unify (b, le) then + if term_of x aconv y then Ferrante_Rackoff_Data.Le + else if term_of x aconv z then Ferrante_Rackoff_Data.Ge + else Ferrante_Rackoff_Data.Nox + else Ferrante_Rackoff_Data.Nox + | _ => Ferrante_Rackoff_Data.Nox + in h end + fun ss phi = HOL_ss addsimps (simps phi) +in + Ferrante_Rackoff_Data.funs @{thm "ferrack_axiom"} + {isolate_conv = K (K (K Thm.reflexive)), whatis = generic_whatis, simpset = ss} +end +*} + +end + +use "~~/src/HOL/Tools/Qelim/ferrante_rackoff.ML" + +method_setup ferrack = {* + Method.ctxt_args (Method.SIMPLE_METHOD' o FerranteRackoff.dlo_tac) +*} "Ferrante and Rackoff's algorithm for quantifier elimination in dense linear orders" + +subsection {* Ferrante and Rackoff algorithm over ordered fields *} + +lemma neg_prod_lt:"(c\'a\ordered_field) < 0 \ ((c*x < 0) == (x > 0))" +proof- + assume H: "c < 0" + have "c*x < 0 = (0/c < x)" by (simp only: neg_divide_less_eq[OF H] ring_simps) + also have "\ = (0 < x)" by simp + finally show "(c*x < 0) == (x > 0)" by simp +qed + +lemma pos_prod_lt:"(c\'a\ordered_field) > 0 \ ((c*x < 0) == (x < 0))" +proof- + assume H: "c > 0" + hence "c*x < 0 = (0/c > x)" by (simp only: pos_less_divide_eq[OF H] ring_simps) + also have "\ = (0 > x)" by simp + finally show "(c*x < 0) == (x < 0)" by simp +qed + +lemma neg_prod_sum_lt: "(c\'a\ordered_field) < 0 \ ((c*x + t< 0) == (x > (- 1/c)*t))" +proof- + assume H: "c < 0" + have "c*x + t< 0 = (c*x < -t)" by (subst less_iff_diff_less_0 [of "c*x" "-t"], simp) + also have "\ = (-t/c < x)" by (simp only: neg_divide_less_eq[OF H] ring_simps) + also have "\ = ((- 1/c)*t < x)" by simp + finally show "(c*x + t < 0) == (x > (- 1/c)*t)" by simp +qed + +lemma pos_prod_sum_lt:"(c\'a\ordered_field) > 0 \ ((c*x + t < 0) == (x < (- 1/c)*t))" +proof- + assume H: "c > 0" + have "c*x + t< 0 = (c*x < -t)" by (subst less_iff_diff_less_0 [of "c*x" "-t"], simp) + also have "\ = (-t/c > x)" by (simp only: pos_less_divide_eq[OF H] ring_simps) + also have "\ = ((- 1/c)*t > x)" by simp + finally show "(c*x + t < 0) == (x < (- 1/c)*t)" by simp +qed + +lemma sum_lt:"((x::'a::pordered_ab_group_add) + t < 0) == (x < - t)" + using less_diff_eq[where a= x and b=t and c=0] by simp + +lemma neg_prod_le:"(c\'a\ordered_field) < 0 \ ((c*x <= 0) == (x >= 0))" +proof- + assume H: "c < 0" + have "c*x <= 0 = (0/c <= x)" by (simp only: neg_divide_le_eq[OF H] ring_simps) + also have "\ = (0 <= x)" by simp + finally show "(c*x <= 0) == (x >= 0)" by simp +qed + +lemma pos_prod_le:"(c\'a\ordered_field) > 0 \ ((c*x <= 0) == (x <= 0))" +proof- + assume H: "c > 0" + hence "c*x <= 0 = (0/c >= x)" by (simp only: pos_le_divide_eq[OF H] ring_simps) + also have "\ = (0 >= x)" by simp + finally show "(c*x <= 0) == (x 
<= 0)" by simp +qed + +lemma neg_prod_sum_le: "(c\'a\ordered_field) < 0 \ ((c*x + t <= 0) == (x >= (- 1/c)*t))" +proof- + assume H: "c < 0" + have "c*x + t <= 0 = (c*x <= -t)" by (subst le_iff_diff_le_0 [of "c*x" "-t"], simp) + also have "\ = (-t/c <= x)" by (simp only: neg_divide_le_eq[OF H] ring_simps) + also have "\ = ((- 1/c)*t <= x)" by simp + finally show "(c*x + t <= 0) == (x >= (- 1/c)*t)" by simp +qed + +lemma pos_prod_sum_le:"(c\'a\ordered_field) > 0 \ ((c*x + t <= 0) == (x <= (- 1/c)*t))" +proof- + assume H: "c > 0" + have "c*x + t <= 0 = (c*x <= -t)" by (subst le_iff_diff_le_0 [of "c*x" "-t"], simp) + also have "\ = (-t/c >= x)" by (simp only: pos_le_divide_eq[OF H] ring_simps) + also have "\ = ((- 1/c)*t >= x)" by simp + finally show "(c*x + t <= 0) == (x <= (- 1/c)*t)" by simp +qed + +lemma sum_le:"((x::'a::pordered_ab_group_add) + t <= 0) == (x <= - t)" + using le_diff_eq[where a= x and b=t and c=0] by simp + +lemma nz_prod_eq:"(c\'a\ordered_field) \ 0 \ ((c*x = 0) == (x = 0))" by simp +lemma nz_prod_sum_eq: "(c\'a\ordered_field) \ 0 \ ((c*x + t = 0) == (x = (- 1/c)*t))" +proof- + assume H: "c \ 0" + have "c*x + t = 0 = (c*x = -t)" by (subst eq_iff_diff_eq_0 [of "c*x" "-t"], simp) + also have "\ = (x = -t/c)" by (simp only: nonzero_eq_divide_eq[OF H] ring_simps) + finally show "(c*x + t = 0) == (x = (- 1/c)*t)" by simp +qed +lemma sum_eq:"((x::'a::pordered_ab_group_add) + t = 0) == (x = - t)" + using eq_diff_eq[where a= x and b=t and c=0] by simp + + +class_interpretation class_ordered_field_dense_linear_order: constr_dense_linear_order + ["op <=" "op <" + "\ x y. 1/2 * ((x::'a::{ordered_field,recpower,number_ring}) + y)"] +proof (unfold_locales, dlo, dlo, auto) + fix x y::'a assume lt: "x < y" + from less_half_sum[OF lt] show "x < (x + y) /2" by simp +next + fix x y::'a assume lt: "x < y" + from gt_half_sum[OF lt] show "(x + y) /2 < y" by simp +qed + +declaration{* +let +fun earlier [] x y = false + | earlier (h::t) x y = + if h aconvc y then false else if h aconvc x then true else earlier t x y; + +fun dest_frac ct = case term_of ct of + Const (@{const_name "HOL.divide"},_) $ a $ b=> + Rat.rat_of_quotient (snd (HOLogic.dest_number a), snd (HOLogic.dest_number b)) + | t => Rat.rat_of_int (snd (HOLogic.dest_number t)) + +fun mk_frac phi cT x = + let val (a, b) = Rat.quotient_of_rat x + in if b = 1 then Numeral.mk_cnumber cT a + else Thm.capply + (Thm.capply (Drule.cterm_rule (instantiate' [SOME cT] []) @{cpat "op /"}) + (Numeral.mk_cnumber cT a)) + (Numeral.mk_cnumber cT b) + end + +fun whatis x ct = case term_of ct of + Const(@{const_name "HOL.plus"}, _)$(Const(@{const_name "HOL.times"},_)$_$y)$_ => + if y aconv term_of x then ("c*x+t",[(funpow 2 Thm.dest_arg1) ct, Thm.dest_arg ct]) + else ("Nox",[]) +| Const(@{const_name "HOL.plus"}, _)$y$_ => + if y aconv term_of x then ("x+t",[Thm.dest_arg ct]) + else ("Nox",[]) +| Const(@{const_name "HOL.times"}, _)$_$y => + if y aconv term_of x then ("c*x",[Thm.dest_arg1 ct]) + else ("Nox",[]) +| t => if t aconv term_of x then ("x",[]) else ("Nox",[]); + +fun xnormalize_conv ctxt [] ct = reflexive ct +| xnormalize_conv ctxt (vs as (x::_)) ct = + case term_of ct of + Const(@{const_name HOL.less},_)$_$Const(@{const_name "HOL.zero"},_) => + (case whatis x (Thm.dest_arg1 ct) of + ("c*x+t",[c,t]) => + let + val cr = dest_frac c + val clt = Thm.dest_fun2 ct + val cz = Thm.dest_arg ct + val neg = cr + let + val T = ctyp_of_term x + val th = instantiate' [SOME T] [SOME x, SOME t] @{thm "sum_lt"} + val rth = Conv.fconv_rule (Conv.arg_conv 
(Conv.binop_conv + (Normalizer.semiring_normalize_ord_conv ctxt (earlier vs)))) th + in rth end + | ("c*x",[c]) => + let + val cr = dest_frac c + val clt = Thm.dest_fun2 ct + val cz = Thm.dest_arg ct + val neg = cr reflexive ct) + + +| Const(@{const_name HOL.less_eq},_)$_$Const(@{const_name "HOL.zero"},_) => + (case whatis x (Thm.dest_arg1 ct) of + ("c*x+t",[c,t]) => + let + val T = ctyp_of_term x + val cr = dest_frac c + val clt = Drule.cterm_rule (instantiate' [SOME T] []) @{cpat "op <"} + val cz = Thm.dest_arg ct + val neg = cr + let + val T = ctyp_of_term x + val th = instantiate' [SOME T] [SOME x, SOME t] @{thm "sum_le"} + val rth = Conv.fconv_rule (Conv.arg_conv (Conv.binop_conv + (Normalizer.semiring_normalize_ord_conv ctxt (earlier vs)))) th + in rth end + | ("c*x",[c]) => + let + val T = ctyp_of_term x + val cr = dest_frac c + val clt = Drule.cterm_rule (instantiate' [SOME T] []) @{cpat "op <"} + val cz = Thm.dest_arg ct + val neg = cr reflexive ct) + +| Const("op =",_)$_$Const(@{const_name "HOL.zero"},_) => + (case whatis x (Thm.dest_arg1 ct) of + ("c*x+t",[c,t]) => + let + val T = ctyp_of_term x + val cr = dest_frac c + val ceq = Thm.dest_fun2 ct + val cz = Thm.dest_arg ct + val cthp = Simplifier.rewrite (local_simpset_of ctxt) + (Thm.capply @{cterm "Trueprop"} + (Thm.capply @{cterm "Not"} (Thm.capply (Thm.capply ceq c) cz))) + val cth = equal_elim (symmetric cthp) TrueI + val th = implies_elim + (instantiate' [SOME T] (map SOME [c,x,t]) @{thm nz_prod_sum_eq}) cth + val rth = Conv.fconv_rule (Conv.arg_conv (Conv.binop_conv + (Normalizer.semiring_normalize_ord_conv ctxt (earlier vs)))) th + in rth end + | ("x+t",[t]) => + let + val T = ctyp_of_term x + val th = instantiate' [SOME T] [SOME x, SOME t] @{thm "sum_eq"} + val rth = Conv.fconv_rule (Conv.arg_conv (Conv.binop_conv + (Normalizer.semiring_normalize_ord_conv ctxt (earlier vs)))) th + in rth end + | ("c*x",[c]) => + let + val T = ctyp_of_term x + val cr = dest_frac c + val ceq = Thm.dest_fun2 ct + val cz = Thm.dest_arg ct + val cthp = Simplifier.rewrite (local_simpset_of ctxt) + (Thm.capply @{cterm "Trueprop"} + (Thm.capply @{cterm "Not"} (Thm.capply (Thm.capply ceq c) cz))) + val cth = equal_elim (symmetric cthp) TrueI + val rth = implies_elim + (instantiate' [SOME T] (map SOME [c,x]) @{thm nz_prod_eq}) cth + in rth end + | _ => reflexive ct); + +local + val less_iff_diff_less_0 = mk_meta_eq @{thm "less_iff_diff_less_0"} + val le_iff_diff_le_0 = mk_meta_eq @{thm "le_iff_diff_le_0"} + val eq_iff_diff_eq_0 = mk_meta_eq @{thm "eq_iff_diff_eq_0"} +in +fun field_isolate_conv phi ctxt vs ct = case term_of ct of + Const(@{const_name HOL.less},_)$a$b => + let val (ca,cb) = Thm.dest_binop ct + val T = ctyp_of_term ca + val th = instantiate' [SOME T] [SOME ca, SOME cb] less_iff_diff_less_0 + val nth = Conv.fconv_rule + (Conv.arg_conv (Conv.arg1_conv + (Normalizer.semiring_normalize_ord_conv @{context} (earlier vs)))) th + val rth = transitive nth (xnormalize_conv ctxt vs (Thm.rhs_of nth)) + in rth end +| Const(@{const_name HOL.less_eq},_)$a$b => + let val (ca,cb) = Thm.dest_binop ct + val T = ctyp_of_term ca + val th = instantiate' [SOME T] [SOME ca, SOME cb] le_iff_diff_le_0 + val nth = Conv.fconv_rule + (Conv.arg_conv (Conv.arg1_conv + (Normalizer.semiring_normalize_ord_conv @{context} (earlier vs)))) th + val rth = transitive nth (xnormalize_conv ctxt vs (Thm.rhs_of nth)) + in rth end + +| Const("op =",_)$a$b => + let val (ca,cb) = Thm.dest_binop ct + val T = ctyp_of_term ca + val th = instantiate' [SOME T] [SOME ca, SOME cb] 
eq_iff_diff_eq_0 + val nth = Conv.fconv_rule + (Conv.arg_conv (Conv.arg1_conv + (Normalizer.semiring_normalize_ord_conv @{context} (earlier vs)))) th + val rth = transitive nth (xnormalize_conv ctxt vs (Thm.rhs_of nth)) + in rth end +| @{term "Not"} $(Const("op =",_)$a$b) => Conv.arg_conv (field_isolate_conv phi ctxt vs) ct +| _ => reflexive ct +end; + +fun classfield_whatis phi = + let + fun h x t = + case term_of t of + Const("op =", _)$y$z => if term_of x aconv y then Ferrante_Rackoff_Data.Eq + else Ferrante_Rackoff_Data.Nox + | @{term "Not"}$(Const("op =", _)$y$z) => if term_of x aconv y then Ferrante_Rackoff_Data.NEq + else Ferrante_Rackoff_Data.Nox + | Const(@{const_name HOL.less},_)$y$z => + if term_of x aconv y then Ferrante_Rackoff_Data.Lt + else if term_of x aconv z then Ferrante_Rackoff_Data.Gt + else Ferrante_Rackoff_Data.Nox + | Const (@{const_name HOL.less_eq},_)$y$z => + if term_of x aconv y then Ferrante_Rackoff_Data.Le + else if term_of x aconv z then Ferrante_Rackoff_Data.Ge + else Ferrante_Rackoff_Data.Nox + | _ => Ferrante_Rackoff_Data.Nox + in h end; +fun class_field_ss phi = + HOL_basic_ss addsimps ([@{thm "linorder_not_less"}, @{thm "linorder_not_le"}]) + addsplits [@{thm "abs_split"},@{thm "split_max"}, @{thm "split_min"}] + +in +Ferrante_Rackoff_Data.funs @{thm "class_ordered_field_dense_linear_order.ferrack_axiom"} + {isolate_conv = field_isolate_conv, whatis = classfield_whatis, simpset = class_field_ss} +end +*} + + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Deriv.thy --- a/src/HOL/Deriv.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Deriv.thy Tue Dec 30 11:10:01 2008 +0100 @@ -20,12 +20,6 @@ ("(DERIV (_)/ (_)/ :> (_))" [1000, 1000, 60] 60) where "DERIV f x :> D = ((%h. (f(x + h) - f x) / h) -- 0 --> D)" -definition - differentiable :: "['a::real_normed_field \ 'a, 'a] \ bool" - (infixl "differentiable" 60) where - "f differentiable x = (\D. DERIV f x :> D)" - - consts Bolzano_bisect :: "[real*real=>bool, real, real, nat] => (real*real)" primrec @@ -316,63 +310,104 @@ subsection {* Differentiability predicate *} +definition + differentiable :: "['a::real_normed_field \ 'a, 'a] \ bool" + (infixl "differentiable" 60) where + "f differentiable x = (\D. DERIV f x :> D)" + +lemma differentiableE [elim?]: + assumes "f differentiable x" + obtains df where "DERIV f x :> df" + using prems unfolding differentiable_def .. + lemma differentiableD: "f differentiable x ==> \D. DERIV f x :> D" by (simp add: differentiable_def) lemma differentiableI: "DERIV f x :> D ==> f differentiable x" by (force simp add: differentiable_def) -lemma differentiable_const: "(\z. a) differentiable x" - apply (unfold differentiable_def) - apply (rule_tac x=0 in exI) - apply simp - done +lemma differentiable_ident [simp]: "(\x. x) differentiable x" + by (rule DERIV_ident [THEN differentiableI]) -lemma differentiable_sum: +lemma differentiable_const [simp]: "(\z. a) differentiable x" + by (rule DERIV_const [THEN differentiableI]) + +lemma differentiable_compose: + assumes f: "f differentiable (g x)" + assumes g: "g differentiable x" + shows "(\x. f (g x)) differentiable x" +proof - + from `f differentiable (g x)` obtain df where "DERIV f (g x) :> df" .. + moreover + from `g differentiable x` obtain dg where "DERIV g x :> dg" .. + ultimately + have "DERIV (\x. f (g x)) x :> df * dg" by (rule DERIV_chain2) + thus ?thesis by (rule differentiableI) +qed + +lemma differentiable_sum [simp]: assumes "f differentiable x" and "g differentiable x" shows "(\x. 
f x + g x) differentiable x" proof - - from prems have "\D. DERIV f x :> D" by (unfold differentiable_def) - then obtain df where "DERIV f x :> df" .. - moreover from prems have "\D. DERIV g x :> D" by (unfold differentiable_def) - then obtain dg where "DERIV g x :> dg" .. - ultimately have "DERIV (\x. f x + g x) x :> df + dg" by (rule DERIV_add) - hence "\D. DERIV (\x. f x + g x) x :> D" by auto - thus ?thesis by (fold differentiable_def) + from `f differentiable x` obtain df where "DERIV f x :> df" .. + moreover + from `g differentiable x` obtain dg where "DERIV g x :> dg" .. + ultimately + have "DERIV (\x. f x + g x) x :> df + dg" by (rule DERIV_add) + thus ?thesis by (rule differentiableI) qed -lemma differentiable_diff: +lemma differentiable_minus [simp]: assumes "f differentiable x" - and "g differentiable x" - shows "(\x. f x - g x) differentiable x" + shows "(\x. - f x) differentiable x" proof - - from prems have "f differentiable x" by simp - moreover - from prems have "\D. DERIV g x :> D" by (unfold differentiable_def) - then obtain dg where "DERIV g x :> dg" .. - then have "DERIV (\x. - g x) x :> -dg" by (rule DERIV_minus) - hence "\D. DERIV (\x. - g x) x :> D" by auto - hence "(\x. - g x) differentiable x" by (fold differentiable_def) - ultimately - show ?thesis - by (auto simp: diff_def dest: differentiable_sum) + from `f differentiable x` obtain df where "DERIV f x :> df" .. + hence "DERIV (\x. - f x) x :> - df" by (rule DERIV_minus) + thus ?thesis by (rule differentiableI) qed -lemma differentiable_mult: +lemma differentiable_diff [simp]: assumes "f differentiable x" - and "g differentiable x" + assumes "g differentiable x" + shows "(\x. f x - g x) differentiable x" + unfolding diff_minus using prems by simp + +lemma differentiable_mult [simp]: + assumes "f differentiable x" + assumes "g differentiable x" shows "(\x. f x * g x) differentiable x" proof - - from prems have "\D. DERIV f x :> D" by (unfold differentiable_def) - then obtain df where "DERIV f x :> df" .. - moreover from prems have "\D. DERIV g x :> D" by (unfold differentiable_def) - then obtain dg where "DERIV g x :> dg" .. - ultimately have "DERIV (\x. f x * g x) x :> df * g x + dg * f x" by (simp add: DERIV_mult) - hence "\D. DERIV (\x. f x * g x) x :> D" by auto - thus ?thesis by (fold differentiable_def) + from `f differentiable x` obtain df where "DERIV f x :> df" .. + moreover + from `g differentiable x` obtain dg where "DERIV g x :> dg" .. + ultimately + have "DERIV (\x. f x * g x) x :> df * g x + dg * f x" by (rule DERIV_mult) + thus ?thesis by (rule differentiableI) qed +lemma differentiable_inverse [simp]: + assumes "f differentiable x" and "f x \ 0" + shows "(\x. inverse (f x)) differentiable x" +proof - + from `f differentiable x` obtain df where "DERIV f x :> df" .. + hence "DERIV (\x. inverse (f x)) x :> - (inverse (f x) * df * inverse (f x))" + using `f x \ 0` by (rule DERIV_inverse') + thus ?thesis by (rule differentiableI) +qed + +lemma differentiable_divide [simp]: + assumes "f differentiable x" + assumes "g differentiable x" and "g x \ 0" + shows "(\x. f x / g x) differentiable x" + unfolding divide_inverse using prems by simp + +lemma differentiable_power [simp]: + fixes f :: "'a::{recpower,real_normed_field} \ 'a" + assumes "f differentiable x" + shows "(\x. 
f x ^ n) differentiable x" + by (induct n, simp, simp add: power_Suc prems) + subsection {* Nested Intervals and Bisection *} @@ -1722,4 +1757,60 @@ apply (simp add: poly_entire del: pmult_Cons) done + +subsection {* Theorems about Limits *} + +(* need to rename second isCont_inverse *) + +lemma isCont_inv_fun: + fixes f g :: "real \ real" + shows "[| 0 < d; \z. \z - x\ \ d --> g(f(z)) = z; + \z. \z - x\ \ d --> isCont f z |] + ==> isCont g (f x)" +by (rule isCont_inverse_function) + +lemma isCont_inv_fun_inv: + fixes f g :: "real \ real" + shows "[| 0 < d; + \z. \z - x\ \ d --> g(f(z)) = z; + \z. \z - x\ \ d --> isCont f z |] + ==> \e. 0 < e & + (\y. 0 < \y - f(x)\ & \y - f(x)\ < e --> f(g(y)) = y)" +apply (drule isCont_inj_range) +prefer 2 apply (assumption, assumption, auto) +apply (rule_tac x = e in exI, auto) +apply (rotate_tac 2) +apply (drule_tac x = y in spec, auto) +done + + +text{*Bartle/Sherbert: Introduction to Real Analysis, Theorem 4.2.9, p. 110*} +lemma LIM_fun_gt_zero: + "[| f -- c --> (l::real); 0 < l |] + ==> \r. 0 < r & (\x::real. x \ c & \c - x\ < r --> 0 < f x)" +apply (auto simp add: LIM_def) +apply (drule_tac x = "l/2" in spec, safe, force) +apply (rule_tac x = s in exI) +apply (auto simp only: abs_less_iff) +done + +lemma LIM_fun_less_zero: + "[| f -- c --> (l::real); l < 0 |] + ==> \r. 0 < r & (\x::real. x \ c & \c - x\ < r --> f x < 0)" +apply (auto simp add: LIM_def) +apply (drule_tac x = "-l/2" in spec, safe, force) +apply (rule_tac x = s in exI) +apply (auto simp only: abs_less_iff) +done + + +lemma LIM_fun_not_zero: + "[| f -- c --> (l::real); l \ 0 |] + ==> \r. 0 < r & (\x::real. x \ c & \c - x\ < r --> f x \ 0)" +apply (cut_tac x = l and y = 0 in linorder_less_linear, auto) +apply (drule LIM_fun_less_zero) +apply (drule_tac [3] LIM_fun_gt_zero) +apply force+ +done + end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Divides.thy --- a/src/HOL/Divides.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Divides.thy Tue Dec 30 11:10:01 2008 +0100 @@ -127,7 +127,7 @@ note that ultimately show thesis by blast qed -lemma dvd_eq_mod_eq_0 [code]: "a dvd b \ b mod a = 0" +lemma dvd_eq_mod_eq_0 [code unfold]: "a dvd b \ b mod a = 0" proof assume "b mod a = 0" with mod_div_equality [of b a] have "b div a * a = b" by simp diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/FunDef.thy --- a/src/HOL/FunDef.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/FunDef.thy Tue Dec 30 11:10:01 2008 +0100 @@ -3,11 +3,13 @@ Author: Alexander Krauss, TU Muenchen *) -header {* General recursive function definitions *} +header {* Function Definitions and Termination Proofs *} theory FunDef imports Wellfounded uses + "Tools/prop_logic.ML" + "Tools/sat_solver.ML" ("Tools/function_package/fundef_lib.ML") ("Tools/function_package/fundef_common.ML") ("Tools/function_package/inductive_wrap.ML") @@ -22,9 +24,14 @@ ("Tools/function_package/lexicographic_order.ML") ("Tools/function_package/fundef_datatype.ML") ("Tools/function_package/induction_scheme.ML") + ("Tools/function_package/termination.ML") + ("Tools/function_package/decompose.ML") + ("Tools/function_package/descent.ML") + ("Tools/function_package/scnp_solve.ML") + ("Tools/function_package/scnp_reconstruct.ML") begin -text {* Definitions with default value. *} +subsection {* Definitions with default value. 
*} definition THE_default :: "'a \ ('a \ bool) \ 'a" where @@ -97,9 +104,6 @@ "wf R \ wfP (in_rel R)" by (simp add: wfP_def) -inductive is_measure :: "('a \ nat) \ bool" -where is_measure_trivial: "is_measure f" - use "Tools/function_package/fundef_lib.ML" use "Tools/function_package/fundef_common.ML" use "Tools/function_package/inductive_wrap.ML" @@ -110,19 +114,37 @@ use "Tools/function_package/pattern_split.ML" use "Tools/function_package/auto_term.ML" use "Tools/function_package/fundef_package.ML" -use "Tools/function_package/measure_functions.ML" -use "Tools/function_package/lexicographic_order.ML" use "Tools/function_package/fundef_datatype.ML" use "Tools/function_package/induction_scheme.ML" setup {* FundefPackage.setup + #> FundefDatatype.setup #> InductionScheme.setup - #> MeasureFunctions.setup - #> LexicographicOrder.setup - #> FundefDatatype.setup *} +subsection {* Measure Functions *} + +inductive is_measure :: "('a \ nat) \ bool" +where is_measure_trivial: "is_measure f" + +use "Tools/function_package/measure_functions.ML" +setup MeasureFunctions.setup + +lemma measure_size[measure_function]: "is_measure size" +by (rule is_measure_trivial) + +lemma measure_fst[measure_function]: "is_measure f \ is_measure (\p. f (fst p))" +by (rule is_measure_trivial) +lemma measure_snd[measure_function]: "is_measure f \ is_measure (\p. f (snd p))" +by (rule is_measure_trivial) + +use "Tools/function_package/lexicographic_order.ML" +setup LexicographicOrder.setup + + +subsection {* Congruence Rules *} + lemma let_cong [fundef_cong]: "M = N \ (\x. x = N \ f x = g x) \ Let M f = Let N g" unfolding Let_def by blast @@ -140,17 +162,7 @@ "f (g x) = f' (g' x') \ (f o g) x = (f' o g') x'" unfolding o_apply . -subsection {* Setup for termination proofs *} - -text {* Rules for generating measure functions *} - -lemma [measure_function]: "is_measure size" -by (rule is_measure_trivial) - -lemma [measure_function]: "is_measure f \ is_measure (\p. f (fst p))" -by (rule is_measure_trivial) -lemma [measure_function]: "is_measure f \ is_measure (\p. f (snd p))" -by (rule is_measure_trivial) +subsection {* Simp rules for termination proofs *} lemma termination_basic_simps[termination_simp]: "x < (y::nat) \ x < y + z" @@ -166,5 +178,150 @@ "prod_size f g p = f (fst p) + g (snd p) + Suc 0" by (induct p) auto +subsection {* Decomposition *} + +lemma less_by_empty: + "A = {} \ A \ B" +and union_comp_emptyL: + "\ A O C = {}; B O C = {} \ \ (A \ B) O C = {}" +and union_comp_emptyR: + "\ A O B = {}; A O C = {} \ \ A O (B \ C) = {}" +and wf_no_loop: + "R O R = {} \ wf R" +by (auto simp add: wf_comp_self[of R]) + + +subsection {* Reduction Pairs *} + +definition + "reduction_pair P = (wf (fst P) \ snd P O fst P \ fst P)" + +lemma reduction_pairI[intro]: "wf R \ S O R \ R \ reduction_pair (R, S)" +unfolding reduction_pair_def by auto + +lemma reduction_pair_lemma: + assumes rp: "reduction_pair P" + assumes "R \ fst P" + assumes "S \ snd P" + assumes "wf S" + shows "wf (R \ S)" +proof - + from rp `S \ snd P` have "wf (fst P)" "S O fst P \ fst P" + unfolding reduction_pair_def by auto + with `wf S` have "wf (fst P \ S)" + by (auto intro: wf_union_compatible) + moreover from `R \ fst P` have "R \ S \ fst P \ S" by auto + ultimately show ?thesis by (rule wf_subset) +qed + +definition + "rp_inv_image = (\(R,S) f. 
(inv_image R f, inv_image S f))" + +lemma rp_inv_image_rp: + "reduction_pair P \ reduction_pair (rp_inv_image P f)" + unfolding reduction_pair_def rp_inv_image_def split_def + by force + + +subsection {* Concrete orders for SCNP termination proofs *} + +definition "pair_less = less_than <*lex*> less_than" +definition "pair_leq = pair_less^=" +definition "max_strict = max_ext pair_less" +definition "max_weak = max_ext pair_leq \ {({}, {})}" +definition "min_strict = min_ext pair_less" +definition "min_weak = min_ext pair_leq \ {({}, {})}" + +lemma wf_pair_less[simp]: "wf pair_less" + by (auto simp: pair_less_def) + +text {* Introduction rules for @{text pair_less}/@{text pair_leq} *} +lemma pair_leqI1: "a < b \ ((a, s), (b, t)) \ pair_leq" + and pair_leqI2: "a \ b \ s \ t \ ((a, s), (b, t)) \ pair_leq" + and pair_lessI1: "a < b \ ((a, s), (b, t)) \ pair_less" + and pair_lessI2: "a \ b \ s < t \ ((a, s), (b, t)) \ pair_less" + unfolding pair_leq_def pair_less_def by auto + +text {* Introduction rules for max *} +lemma smax_emptyI: + "finite Y \ Y \ {} \ ({}, Y) \ max_strict" + and smax_insertI: + "\y \ Y; (x, y) \ pair_less; (X, Y) \ max_strict\ \ (insert x X, Y) \ max_strict" + and wmax_emptyI: + "finite X \ ({}, X) \ max_weak" + and wmax_insertI: + "\y \ YS; (x, y) \ pair_leq; (XS, YS) \ max_weak\ \ (insert x XS, YS) \ max_weak" +unfolding max_strict_def max_weak_def by (auto elim!: max_ext.cases) + +text {* Introduction rules for min *} +lemma smin_emptyI: + "X \ {} \ (X, {}) \ min_strict" + and smin_insertI: + "\x \ XS; (x, y) \ pair_less; (XS, YS) \ min_strict\ \ (XS, insert y YS) \ min_strict" + and wmin_emptyI: + "(X, {}) \ min_weak" + and wmin_insertI: + "\x \ XS; (x, y) \ pair_leq; (XS, YS) \ min_weak\ \ (XS, insert y YS) \ min_weak" +by (auto simp: min_strict_def min_weak_def min_ext_def) + +text {* Reduction Pairs *} + +lemma max_ext_compat: + assumes "S O R \ R" + shows "(max_ext S \ {({},{})}) O max_ext R \ max_ext R" +using assms +apply auto +apply (elim max_ext.cases) +apply rule +apply auto[3] +apply (drule_tac x=xa in meta_spec) +apply simp +apply (erule bexE) +apply (drule_tac x=xb in meta_spec) +by auto + +lemma max_rpair_set: "reduction_pair (max_strict, max_weak)" + unfolding max_strict_def max_weak_def +apply (intro reduction_pairI max_ext_wf) +apply simp +apply (rule max_ext_compat) +by (auto simp: pair_less_def pair_leq_def) + +lemma min_ext_compat: + assumes "S O R \ R" + shows "(min_ext S \ {({},{})}) O min_ext R \ min_ext R" +using assms +apply (auto simp: min_ext_def) +apply (drule_tac x=ya in bspec, assumption) +apply (erule bexE) +apply (drule_tac x=xc in bspec) +apply assumption +by auto + +lemma min_rpair_set: "reduction_pair (min_strict, min_weak)" + unfolding min_strict_def min_weak_def +apply (intro reduction_pairI min_ext_wf) +apply simp +apply (rule min_ext_compat) +by (auto simp: pair_less_def pair_leq_def) + + +subsection {* Tool setup *} + +use "Tools/function_package/termination.ML" +use "Tools/function_package/decompose.ML" +use "Tools/function_package/descent.ML" +use "Tools/function_package/scnp_solve.ML" +use "Tools/function_package/scnp_reconstruct.ML" + +setup {* ScnpReconstruct.setup *} +(* +setup {* + Context.theory_map (FundefCommon.set_termination_prover (ScnpReconstruct.decomp_scnp + [ScnpSolve.MAX, ScnpSolve.MIN, ScnpSolve.MS])) +*} +*) + + end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Fundamental_Theorem_Algebra.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Fundamental_Theorem_Algebra.thy Tue Dec 30 11:10:01 2008 +0100 
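Note on the new theory added below: Fundamental_Theorem_Algebra.thy formalises the fundamental theorem of algebra, starting from an explicit complex square root csqrt together with lemma csqrt z ^ 2 = z, followed by a Bolzano-Weierstrass property for the closed complex disc and continuity and minimum-modulus facts about polynomials. The branch chosen by the csqrt definition can be checked numerically with the Python sketch below; this csqrt is only a hand-written stand-in mirroring that definition, verified up to floating-point rounding, and is no substitute for the Isabelle proof.

    import cmath
    import math

    def csqrt(z: complex) -> complex:
        # Same branch as the csqrt definition in the theory: for a non-real z,
        # take the real part from sqrt((|z| + Re z)/2) and give the imaginary
        # part sqrt((|z| - Re z)/2) the sign of Im z.  Real z splits on the
        # sign of Re z, exactly as the definition below does.
        x, y = z.real, z.imag
        if y == 0:
            return complex(math.sqrt(x), 0) if x >= 0 else complex(0, math.sqrt(-x))
        r = abs(z)                                   # cmod z
        return complex(math.sqrt((r + x) / 2),
                       (y / abs(y)) * math.sqrt((r - x) / 2))

    # Numerical counterpart of  lemma csqrt:  csqrt z ^ 2 = z  (up to rounding).
    for z in (3 + 4j, -9 + 0j, -1 - 1j, 2.5 - 7j):
        w = csqrt(z)
        assert abs(w * w - z) < 1e-9
        # For these inputs the result agrees with the principal square root.
        assert abs(w - cmath.sqrt(z)) < 1e-9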
@@ -0,0 +1,1327 @@ +(* Author: Amine Chaieb, TU Muenchen *) + +header{*Fundamental Theorem of Algebra*} + +theory Fundamental_Theorem_Algebra +imports Univ_Poly Dense_Linear_Order Complex +begin + +subsection {* Square root of complex numbers *} +definition csqrt :: "complex \ complex" where +"csqrt z = (if Im z = 0 then + if 0 \ Re z then Complex (sqrt(Re z)) 0 + else Complex 0 (sqrt(- Re z)) + else Complex (sqrt((cmod z + Re z) /2)) + ((Im z / abs(Im z)) * sqrt((cmod z - Re z) /2)))" + +lemma csqrt[algebra]: "csqrt z ^ 2 = z" +proof- + obtain x y where xy: "z = Complex x y" by (cases z, simp_all) + {assume y0: "y = 0" + {assume x0: "x \ 0" + then have ?thesis using y0 xy real_sqrt_pow2[OF x0] + by (simp add: csqrt_def power2_eq_square)} + moreover + {assume "\ x \ 0" hence x0: "- x \ 0" by arith + then have ?thesis using y0 xy real_sqrt_pow2[OF x0] + by (simp add: csqrt_def power2_eq_square) } + ultimately have ?thesis by blast} + moreover + {assume y0: "y\0" + {fix x y + let ?z = "Complex x y" + from abs_Re_le_cmod[of ?z] have tha: "abs x \ cmod ?z" by auto + hence "cmod ?z - x \ 0" "cmod ?z + x \ 0" by arith+ + hence "(sqrt (x * x + y * y) + x) / 2 \ 0" "(sqrt (x * x + y * y) - x) / 2 \ 0" by (simp_all add: power2_eq_square) } + note th = this + have sq4: "\x::real. x^2 / 4 = (x / 2) ^ 2" + by (simp add: power2_eq_square) + from th[of x y] + have sq4': "sqrt (((sqrt (x * x + y * y) + x)^2 / 4)) = (sqrt (x * x + y * y) + x) / 2" "sqrt (((sqrt (x * x + y * y) - x)^2 / 4)) = (sqrt (x * x + y * y) - x) / 2" unfolding sq4 by simp_all + then have th1: "sqrt ((sqrt (x * x + y * y) + x) * (sqrt (x * x + y * y) + x) / 4) - sqrt ((sqrt (x * x + y * y) - x) * (sqrt (x * x + y * y) - x) / 4) = x" + unfolding power2_eq_square by simp + have "sqrt 4 = sqrt (2^2)" by simp + hence sqrt4: "sqrt 4 = 2" by (simp only: real_sqrt_abs) + have th2: "2 *(y * sqrt ((sqrt (x * x + y * y) - x) * (sqrt (x * x + y * y) + x) / 4)) / \y\ = y" + using iffD2[OF real_sqrt_pow2_iff sum_power2_ge_zero[of x y]] y0 + unfolding power2_eq_square + by (simp add: ring_simps real_sqrt_divide sqrt4) + from y0 xy have ?thesis apply (simp add: csqrt_def power2_eq_square) + apply (simp add: real_sqrt_sum_squares_mult_ge_zero[of x y] real_sqrt_pow2[OF th(1)[of x y], unfolded power2_eq_square] real_sqrt_pow2[OF th(2)[of x y], unfolded power2_eq_square] real_sqrt_mult[symmetric]) + using th1 th2 ..} + ultimately show ?thesis by blast +qed + + +subsection{* More lemmas about module of complex numbers *} + +lemma complex_of_real_power: "complex_of_real x ^ n = complex_of_real (x^n)" + by (rule of_real_power [symmetric]) + +lemma real_down2: "(0::real) < d1 \ 0 < d2 ==> EX e. 0 < e & e < d1 & e < d2" + apply ferrack apply arith done + +text{* The triangle inequality for cmod *} +lemma complex_mod_triangle_sub: "cmod w \ cmod (w + z) + norm z" + using complex_mod_triangle_ineq2[of "w + z" "-z"] by auto + +subsection{* Basic lemmas about complex polynomials *} + +lemma poly_bound_exists: + shows "\m. m > 0 \ (\z. cmod z <= r \ cmod (poly p z) \ m)" +proof(induct p) + case Nil thus ?case by (rule exI[where x=1], simp) +next + case (Cons c cs) + from Cons.hyps obtain m where m: "\z. 
cmod z \ r \ cmod (poly cs z) \ m" + by blast + let ?k = " 1 + cmod c + \r * m\" + have kp: "?k > 0" using abs_ge_zero[of "r*m"] norm_ge_zero[of c] by arith + {fix z + assume H: "cmod z \ r" + from m H have th: "cmod (poly cs z) \ m" by blast + from H have rp: "r \ 0" using norm_ge_zero[of z] by arith + have "cmod (poly (c # cs) z) \ cmod c + cmod (z* poly cs z)" + using norm_triangle_ineq[of c "z* poly cs z"] by simp + also have "\ \ cmod c + r*m" using mult_mono[OF H th rp norm_ge_zero[of "poly cs z"]] by (simp add: norm_mult) + also have "\ \ ?k" by simp + finally have "cmod (poly (c # cs) z) \ ?k" .} + with kp show ?case by blast +qed + + +text{* Offsetting the variable in a polynomial gives another of same degree *} + (* FIXME : Lemma holds also in locale --- fix it later *) +lemma poly_offset_lemma: + shows "\b q. (length q = length p) \ (\x. poly (b#q) (x::complex) = (a + x) * poly p x)" +proof(induct p) + case Nil thus ?case by simp +next + case (Cons c cs) + from Cons.hyps obtain b q where + bq: "length q = length cs" "\x. poly (b # q) x = (a + x) * poly cs x" + by blast + let ?b = "a*c" + let ?q = "(b+c)#q" + have lg: "length ?q = length (c#cs)" using bq(1) by simp + {fix x + from bq(2)[rule_format, of x] + have "x*poly (b # q) x = x*((a + x) * poly cs x)" by simp + hence "poly (?b# ?q) x = (a + x) * poly (c # cs) x" + by (simp add: ring_simps)} + with lg show ?case by blast +qed + + (* FIXME : This one too*) +lemma poly_offset: "\ q. length q = length p \ (\x. poly q (x::complex) = poly p (a + x))" +proof (induct p) + case Nil thus ?case by simp +next + case (Cons c cs) + from Cons.hyps obtain q where q: "length q = length cs" "\x. poly q x = poly cs (a + x)" by blast + from poly_offset_lemma[of q a] obtain b p where + bp: "length p = length q" "\x. poly (b # p) x = (a + x) * poly q x" + by blast + thus ?case using q bp by - (rule exI[where x="(c + b)#p"], simp) +qed + +text{* An alternative useful formulation of completeness of the reals *} +lemma real_sup_exists: assumes ex: "\x. P x" and bz: "\z. \x. P x \ x < z" + shows "\(s::real). \y. (\x. P x \ y < x) \ y < s" +proof- + from ex bz obtain x Y where x: "P x" and Y: "\x. P x \ x < Y" by blast + from ex have thx:"\x. x \ Collect P" by blast + from bz have thY: "\Y. isUb UNIV (Collect P) Y" + by(auto simp add: isUb_def isLub_def setge_def setle_def leastP_def Ball_def order_le_less) + from reals_complete[OF thx thY] obtain L where L: "isLub UNIV (Collect P) L" + by blast + from Y[OF x] have xY: "x < Y" . + from L have L': "\x. P x \ x \ L" by (auto simp add: isUb_def isLub_def setge_def setle_def leastP_def Ball_def) + from Y have Y': "\x. P x \ x \ Y" + apply (clarsimp, atomize (full)) by auto + from L Y' have "L \ Y" by (auto simp add: isUb_def isLub_def setge_def setle_def leastP_def Ball_def) + {fix y + {fix z assume z: "P z" "y < z" + from L' z have "y < L" by auto } + moreover + {assume yL: "y < L" "\z. P z \ \ y < z" + hence nox: "\z. P z \ y \ z" by auto + from nox L have "y \ L" by (auto simp add: isUb_def isLub_def setge_def setle_def leastP_def Ball_def) + with yL(1) have False by arith} + ultimately have "(\x. P x \ y < x) \ y < L" by blast} + thus ?thesis by blast +qed + + +subsection{* Some theorems about Sequences*} +text{* Given a binary function @{text "f:: nat \ 'a \ 'a"}, its values are uniquely determined by a function g *} + +lemma num_Axiom: "EX! g. g 0 = e \ (\n. 
g (Suc n) = f n (g n))" + unfolding Ex1_def + apply (rule_tac x="nat_rec e f" in exI) + apply (rule conjI)+ +apply (rule def_nat_rec_0, simp) +apply (rule allI, rule def_nat_rec_Suc, simp) +apply (rule allI, rule impI, rule ext) +apply (erule conjE) +apply (induct_tac x) +apply (simp add: nat_rec_0) +apply (erule_tac x="n" in allE) +apply (simp) +done + + text{* An equivalent formulation of monotony -- Not used here, but might be useful *} +lemma mono_Suc: "mono f = (\n. (f n :: 'a :: order) \ f (Suc n))" +unfolding mono_def +proof auto + fix A B :: nat + assume H: "\n. f n \ f (Suc n)" "A \ B" + hence "\k. B = A + k" apply - apply (thin_tac "\n. f n \ f (Suc n)") + by presburger + then obtain k where k: "B = A + k" by blast + {fix a k + have "f a \ f (a + k)" + proof (induct k) + case 0 thus ?case by simp + next + case (Suc k) + from Suc.hyps H(1)[rule_format, of "a + k"] show ?case by simp + qed} + with k show "f A \ f B" by blast +qed + +text{* for any sequence, there is a mootonic subsequence *} +lemma seq_monosub: "\f. subseq f \ monoseq (\ n. (s (f n)))" +proof- + {assume H: "\n. \p >n. \ m\p. s m \ s p" + let ?P = "\ p n. p > n \ (\m \ p. s m \ s p)" + from num_Axiom[of "SOME p. ?P p 0" "\p n. SOME p. ?P p n"] + obtain f where f: "f 0 = (SOME p. ?P p 0)" "\n. f (Suc n) = (SOME p. ?P p (f n))" by blast + have "?P (f 0) 0" unfolding f(1) some_eq_ex[of "\p. ?P p 0"] + using H apply - + apply (erule allE[where x=0], erule exE, rule_tac x="p" in exI) + unfolding order_le_less by blast + hence f0: "f 0 > 0" "\m \ f 0. s m \ s (f 0)" by blast+ + {fix n + have "?P (f (Suc n)) (f n)" + unfolding f(2)[rule_format, of n] some_eq_ex[of "\p. ?P p (f n)"] + using H apply - + apply (erule allE[where x="f n"], erule exE, rule_tac x="p" in exI) + unfolding order_le_less by blast + hence "f (Suc n) > f n" "\m \ f (Suc n). s m \ s (f (Suc n))" by blast+} + note fSuc = this + {fix p q assume pq: "p \ f q" + have "s p \ s(f(q))" using f0(2)[rule_format, of p] pq fSuc + by (cases q, simp_all) } + note pqth = this + {fix q + have "f (Suc q) > f q" apply (induct q) + using f0(1) fSuc(1)[of 0] apply simp by (rule fSuc(1))} + note fss = this + from fss have th1: "subseq f" unfolding subseq_Suc_iff .. + {fix a b + have "f a \ f (a + b)" + proof(induct b) + case 0 thus ?case by simp + next + case (Suc b) + from fSuc(1)[of "a + b"] Suc.hyps show ?case by simp + qed} + note fmon0 = this + have "monoseq (\n. s (f n))" + proof- + {fix n + have "s (f n) \ s (f (Suc n))" + proof(cases n) + case 0 + assume n0: "n = 0" + from fSuc(1)[of 0] have th0: "f 0 \ f (Suc 0)" by simp + from f0(2)[rule_format, OF th0] show ?thesis using n0 by simp + next + case (Suc m) + assume m: "n = Suc m" + from fSuc(1)[of n] m have th0: "f (Suc m) \ f (Suc (Suc m))" by simp + from m fSuc(2)[rule_format, OF th0] show ?thesis by simp + qed} + thus "monoseq (\n. s (f n))" unfolding monoseq_Suc by blast + qed + with th1 have ?thesis by blast} + moreover + {fix N assume N: "\p >N. \ m\p. s m > s p" + {fix p assume p: "p \ Suc N" + hence pN: "p > N" by arith with N obtain m where m: "m \ p" "s m > s p" by blast + have "m \ p" using m(2) by auto + with m have "\m>p. s p < s m" by - (rule exI[where x=m], auto)} + note th0 = this + let ?P = "\m x. m > x \ s x < s m" + from num_Axiom[of "SOME x. ?P x (Suc N)" "\m x. SOME y. ?P y x"] + obtain f where f: "f 0 = (SOME x. ?P x (Suc N))" + "\n. f (Suc n) = (SOME m. ?P m (f n))" by blast + have "?P (f 0) (Suc N)" unfolding f(1) some_eq_ex[of "\p. 
?P p (Suc N)"] + using N apply - + apply (erule allE[where x="Suc N"], clarsimp) + apply (rule_tac x="m" in exI) + apply auto + apply (subgoal_tac "Suc N \ m") + apply simp + apply (rule ccontr, simp) + done + hence f0: "f 0 > Suc N" "s (Suc N) < s (f 0)" by blast+ + {fix n + have "f n > N \ ?P (f (Suc n)) (f n)" + unfolding f(2)[rule_format, of n] some_eq_ex[of "\p. ?P p (f n)"] + proof (induct n) + case 0 thus ?case + using f0 N apply auto + apply (erule allE[where x="f 0"], clarsimp) + apply (rule_tac x="m" in exI, simp) + by (subgoal_tac "f 0 \ m", auto) + next + case (Suc n) + from Suc.hyps have Nfn: "N < f n" by blast + from Suc.hyps obtain m where m: "m > f n" "s (f n) < s m" by blast + with Nfn have mN: "m > N" by arith + note key = Suc.hyps[unfolded some_eq_ex[of "\p. ?P p (f n)", symmetric] f(2)[rule_format, of n, symmetric]] + + from key have th0: "f (Suc n) > N" by simp + from N[rule_format, OF th0] + obtain m' where m': "m' \ f (Suc n)" "s (f (Suc n)) < s m'" by blast + have "m' \ f (Suc (n))" apply (rule ccontr) using m'(2) by auto + hence "m' > f (Suc n)" using m'(1) by simp + with key m'(2) show ?case by auto + qed} + note fSuc = this + {fix n + have "f n \ Suc N \ f(Suc n) > f n \ s(f n) < s(f(Suc n))" using fSuc[of n] by auto + hence "f n \ Suc N" "f(Suc n) > f n" "s(f n) < s(f(Suc n))" by blast+} + note thf = this + have sqf: "subseq f" unfolding subseq_Suc_iff using thf by simp + have "monoseq (\n. s (f n))" unfolding monoseq_Suc using thf + apply - + apply (rule disjI1) + apply auto + apply (rule order_less_imp_le) + apply blast + done + then have ?thesis using sqf by blast} + ultimately show ?thesis unfolding linorder_not_less[symmetric] by blast +qed + +lemma seq_suble: assumes sf: "subseq f" shows "n \ f n" +proof(induct n) + case 0 thus ?case by simp +next + case (Suc n) + from sf[unfolded subseq_Suc_iff, rule_format, of n] Suc.hyps + have "n < f (Suc n)" by arith + thus ?case by arith +qed + +subsection {* Fundamental theorem of algebra *} +lemma unimodular_reduce_norm: + assumes md: "cmod z = 1" + shows "cmod (z + 1) < 1 \ cmod (z - 1) < 1 \ cmod (z + ii) < 1 \ cmod (z - ii) < 1" +proof- + obtain x y where z: "z = Complex x y " by (cases z, auto) + from md z have xy: "x^2 + y^2 = 1" by (simp add: cmod_def) + {assume C: "cmod (z + 1) \ 1" "cmod (z - 1) \ 1" "cmod (z + ii) \ 1" "cmod (z - ii) \ 1" + from C z xy have "2*x \ 1" "2*x \ -1" "2*y \ 1" "2*y \ -1" + by (simp_all add: cmod_def power2_eq_square ring_simps) + hence "abs (2*x) \ 1" "abs (2*y) \ 1" by simp_all + hence "(abs (2 * x))^2 <= 1^2" "(abs (2 * y)) ^2 <= 1^2" + by - (rule power_mono, simp, simp)+ + hence th0: "4*x^2 \ 1" "4*y^2 \ 1" + by (simp_all add: power2_abs power_mult_distrib) + from add_mono[OF th0] xy have False by simp } + thus ?thesis unfolding linorder_not_le[symmetric] by blast +qed + +text{* Hence we can always reduce modulus of @{text "1 + b z^n"} if nonzero *} +lemma reduce_poly_simple: + assumes b: "b \ 0" and n: "n\0" + shows "\z. cmod (1 + b * z^n) < 1" +using n +proof(induct n rule: nat_less_induct) + fix n + assume IH: "\m 0 \ (\z. cmod (1 + b * z ^ m) < 1)" and n: "n \ 0" + let ?P = "\z n. cmod (1 + b * z ^ n) < 1" + {assume e: "even n" + hence "\m. n = 2*m" by presburger + then obtain m where m: "n = 2*m" by blast + from n m have "m\0" "m < n" by presburger+ + with IH[rule_format, of m] obtain z where z: "?P z m" by blast + from z have "?P (csqrt z) n" by (simp add: m power_mult csqrt) + hence "\z. 
?P z n" ..} + moreover + {assume o: "odd n" + from b have b': "b^2 \ 0" unfolding power2_eq_square by simp + have "Im (inverse b) * (Im (inverse b) * \Im b * Im b + Re b * Re b\) + + Re (inverse b) * (Re (inverse b) * \Im b * Im b + Re b * Re b\) = + ((Re (inverse b))^2 + (Im (inverse b))^2) * \Im b * Im b + Re b * Re b\" by algebra + also have "\ = cmod (inverse b) ^2 * cmod b ^ 2" + apply (simp add: cmod_def) using realpow_two_le_add_order[of "Re b" "Im b"] + by (simp add: power2_eq_square) + finally + have th0: "Im (inverse b) * (Im (inverse b) * \Im b * Im b + Re b * Re b\) + + Re (inverse b) * (Re (inverse b) * \Im b * Im b + Re b * Re b\) = + 1" + apply (simp add: power2_eq_square norm_mult[symmetric] norm_inverse[symmetric]) + using right_inverse[OF b'] + by (simp add: power2_eq_square[symmetric] power_inverse[symmetric] ring_simps) + have th0: "cmod (complex_of_real (cmod b) / b) = 1" + apply (simp add: complex_Re_mult cmod_def power2_eq_square Re_complex_of_real Im_complex_of_real divide_inverse ring_simps ) + by (simp add: real_sqrt_mult[symmetric] th0) + from o have "\m. n = Suc (2*m)" by presburger+ + then obtain m where m: "n = Suc (2*m)" by blast + from unimodular_reduce_norm[OF th0] o + have "\v. cmod (complex_of_real (cmod b) / b + v^n) < 1" + apply (cases "cmod (complex_of_real (cmod b) / b + 1) < 1", rule_tac x="1" in exI, simp) + apply (cases "cmod (complex_of_real (cmod b) / b - 1) < 1", rule_tac x="-1" in exI, simp add: diff_def) + apply (cases "cmod (complex_of_real (cmod b) / b + ii) < 1") + apply (cases "even m", rule_tac x="ii" in exI, simp add: m power_mult) + apply (rule_tac x="- ii" in exI, simp add: m power_mult) + apply (cases "even m", rule_tac x="- ii" in exI, simp add: m power_mult diff_def) + apply (rule_tac x="ii" in exI, simp add: m power_mult diff_def) + done + then obtain v where v: "cmod (complex_of_real (cmod b) / b + v^n) < 1" by blast + let ?w = "v / complex_of_real (root n (cmod b))" + from odd_real_root_pow[OF o, of "cmod b"] + have th1: "?w ^ n = v^n / complex_of_real (cmod b)" + by (simp add: power_divide complex_of_real_power) + have th2:"cmod (complex_of_real (cmod b) / b) = 1" using b by (simp add: norm_divide) + hence th3: "cmod (complex_of_real (cmod b) / b) \ 0" by simp + have th4: "cmod (complex_of_real (cmod b) / b) * + cmod (1 + b * (v ^ n / complex_of_real (cmod b))) + < cmod (complex_of_real (cmod b) / b) * 1" + apply (simp only: norm_mult[symmetric] right_distrib) + using b v by (simp add: th2) + + from mult_less_imp_less_left[OF th4 th3] + have "?P ?w n" unfolding th1 . + hence "\z. ?P z n" .. } + ultimately show "\z. ?P z n" by blast +qed + + +text{* Bolzano-Weierstrass type property for closed disc in complex plane. *} + +lemma metric_bound_lemma: "cmod (x - y) <= \Re x - Re y\ + \Im x - Im y\" + using real_sqrt_sum_squares_triangle_ineq[of "Re x - Re y" 0 0 "Im x - Im y" ] + unfolding cmod_def by simp + +lemma bolzano_weierstrass_complex_disc: + assumes r: "\n. cmod (s n) \ r" + shows "\f z. subseq f \ (\e >0. \N. \n \ N. cmod (s (f n) - z) < e)" +proof- + from seq_monosub[of "Re o s"] + obtain f g where f: "subseq f" "monoseq (\n. Re (s (f n)))" + unfolding o_def by blast + from seq_monosub[of "Im o s o f"] + obtain g where g: "subseq g" "monoseq (\n. Im (s(f(g n))))" unfolding o_def by blast + let ?h = "f o g" + from r[rule_format, of 0] have rp: "r \ 0" using norm_ge_zero[of "s 0"] by arith + have th:"\n. 
r + 1 \ \ Re (s n)\" + proof + fix n + from abs_Re_le_cmod[of "s n"] r[rule_format, of n] show "\Re (s n)\ \ r + 1" by arith + qed + have conv1: "convergent (\n. Re (s ( f n)))" + apply (rule Bseq_monoseq_convergent) + apply (simp add: Bseq_def) + apply (rule exI[where x= "r + 1"]) + using th rp apply simp + using f(2) . + have th:"\n. r + 1 \ \ Im (s n)\" + proof + fix n + from abs_Im_le_cmod[of "s n"] r[rule_format, of n] show "\Im (s n)\ \ r + 1" by arith + qed + + have conv2: "convergent (\n. Im (s (f (g n))))" + apply (rule Bseq_monoseq_convergent) + apply (simp add: Bseq_def) + apply (rule exI[where x= "r + 1"]) + using th rp apply simp + using g(2) . + + from conv1[unfolded convergent_def] obtain x where "LIMSEQ (\n. Re (s (f n))) x" + by blast + hence x: "\r>0. \n0. \n\n0. \ Re (s (f n)) - x \ < r" + unfolding LIMSEQ_def real_norm_def . + + from conv2[unfolded convergent_def] obtain y where "LIMSEQ (\n. Im (s (f (g n)))) y" + by blast + hence y: "\r>0. \n0. \n\n0. \ Im (s (f (g n))) - y \ < r" + unfolding LIMSEQ_def real_norm_def . + let ?w = "Complex x y" + from f(1) g(1) have hs: "subseq ?h" unfolding subseq_def by auto + {fix e assume ep: "e > (0::real)" + hence e2: "e/2 > 0" by simp + from x[rule_format, OF e2] y[rule_format, OF e2] + obtain N1 N2 where N1: "\n\N1. \Re (s (f n)) - x\ < e / 2" and N2: "\n\N2. \Im (s (f (g n))) - y\ < e / 2" by blast + {fix n assume nN12: "n \ N1 + N2" + hence nN1: "g n \ N1" and nN2: "n \ N2" using seq_suble[OF g(1), of n] by arith+ + from add_strict_mono[OF N1[rule_format, OF nN1] N2[rule_format, OF nN2]] + have "cmod (s (?h n) - ?w) < e" + using metric_bound_lemma[of "s (f (g n))" ?w] by simp } + hence "\N. \n\N. cmod (s (?h n) - ?w) < e" by blast } + with hs show ?thesis by blast +qed + +text{* Polynomial is continuous. *} + +lemma poly_cont: + assumes ep: "e > 0" + shows "\d >0. \w. 0 < cmod (w - z) \ cmod (w - z) < d \ cmod (poly p w - poly p z) < e" +proof- + from poly_offset[of p z] obtain q where q: "length q = length p" "\x. poly q x = poly p (z + x)" by blast + {fix w + note q(2)[of "w - z", simplified]} + note th = this + show ?thesis unfolding th[symmetric] + proof(induct q) + case Nil thus ?case using ep by auto + next + case (Cons c cs) + from poly_bound_exists[of 1 "cs"] + obtain m where m: "m > 0" "\z. cmod z \ 1 \ cmod (poly cs z) \ m" by blast + from ep m(1) have em0: "e/m > 0" by (simp add: field_simps) + have one0: "1 > (0::real)" by arith + from real_lbound_gt_zero[OF one0 em0] + obtain d where d: "d >0" "d < 1" "d < e / m" by blast + from d(1,3) m(1) have dm: "d*m > 0" "d*m < e" + by (simp_all add: field_simps real_mult_order) + show ?case + proof(rule ex_forward[OF real_lbound_gt_zero[OF one0 em0]], clarsimp simp add: norm_mult) + fix d w + assume H: "d > 0" "d < 1" "d < e/m" "w\z" "cmod (w-z) < d" + hence d1: "cmod (w-z) \ 1" "d \ 0" by simp_all + from H(3) m(1) have dme: "d*m < e" by (simp add: field_simps) + from H have th: "cmod (w-z) \ d" by simp + from mult_mono[OF th m(2)[OF d1(1)] d1(2) norm_ge_zero] dme + show "cmod (w - z) * cmod (poly cs (w - z)) < e" by simp + qed + qed +qed + +text{* Hence a polynomial attains minimum on a closed disc + in the complex plane. *} +lemma poly_minimum_modulus_disc: + "\z. \w. 
cmod w \ r \ cmod (poly p z) \ cmod (poly p w)" +proof- + {assume "\ r \ 0" hence ?thesis unfolding linorder_not_le + apply - + apply (rule exI[where x=0]) + apply auto + apply (subgoal_tac "cmod w < 0") + apply simp + apply arith + done } + moreover + {assume rp: "r \ 0" + from rp have "cmod 0 \ r \ cmod (poly p 0) = - (- cmod (poly p 0))" by simp + hence mth1: "\x z. cmod z \ r \ cmod (poly p z) = - x" by blast + {fix x z + assume H: "cmod z \ r" "cmod (poly p z) = - x" "\x < 1" + hence "- x < 0 " by arith + with H(2) norm_ge_zero[of "poly p z"] have False by simp } + then have mth2: "\z. \x. (\z. cmod z \ r \ cmod (poly p z) = - x) \ x < z" by blast + from real_sup_exists[OF mth1 mth2] obtain s where + s: "\y. (\x. (\z. cmod z \ r \ cmod (poly p z) = - x) \ y < x) \(y < s)" by blast + let ?m = "-s" + {fix y + from s[rule_format, of "-y"] have + "(\z x. cmod z \ r \ -(- cmod (poly p z)) < y) \ ?m < y" + unfolding minus_less_iff[of y ] equation_minus_iff by blast } + note s1 = this[unfolded minus_minus] + from s1[of ?m] have s1m: "\z x. cmod z \ r \ cmod (poly p z) \ ?m" + by auto + {fix n::nat + from s1[rule_format, of "?m + 1/real (Suc n)"] + have "\z. cmod z \ r \ cmod (poly p z) < - s + 1 / real (Suc n)" + by simp} + hence th: "\n. \z. cmod z \ r \ cmod (poly p z) < - s + 1 / real (Suc n)" .. + from choice[OF th] obtain g where + g: "\n. cmod (g n) \ r" "\n. cmod (poly p (g n)) e>0. \N. \n\N. cmod (g (f n) - z) < e" + by blast + {fix w + assume wr: "cmod w \ r" + let ?e = "\cmod (poly p z) - ?m\" + {assume e: "?e > 0" + hence e2: "?e/2 > 0" by simp + from poly_cont[OF e2, of z p] obtain d where + d: "d>0" "\w. 0 cmod(w - z) < d \ cmod(poly p w - poly p z) < ?e/2" by blast + {fix w assume w: "cmod (w - z) < d" + have "cmod(poly p w - poly p z) < ?e / 2" + using d(2)[rule_format, of w] w e by (cases "w=z", simp_all)} + note th1 = this + + from fz(2)[rule_format, OF d(1)] obtain N1 where + N1: "\n\N1. cmod (g (f n) - z) < d" by blast + from reals_Archimedean2[of "2/?e"] obtain N2::nat where + N2: "2/?e < real N2" by blast + have th2: "cmod(poly p (g(f(N1 + N2))) - poly p z) < ?e/2" + using N1[rule_format, of "N1 + N2"] th1 by simp + {fix a b e2 m :: real + have "a < e2 \ abs(b - m) < e2 \ 2 * e2 <= abs(b - m) + a + ==> False" by arith} + note th0 = this + have ath: + "\m x e. m <= x \ x < m + e ==> abs(x - m::real) < e" by arith + from s1m[OF g(1)[rule_format]] + have th31: "?m \ cmod(poly p (g (f (N1 + N2))))" . + from seq_suble[OF fz(1), of "N1+N2"] + have th00: "real (Suc (N1+N2)) \ real (Suc (f (N1+N2)))" by simp + have th000: "0 \ (1::real)" "(1::real) \ 1" "real (Suc (N1+N2)) > 0" + using N2 by auto + from frac_le[OF th000 th00] have th00: "?m +1 / real (Suc (f (N1 + N2))) \ ?m + 1 / real (Suc (N1 + N2))" by simp + from g(2)[rule_format, of "f (N1 + N2)"] + have th01:"cmod (poly p (g (f (N1 + N2)))) < - s + 1 / real (Suc (f (N1 + N2)))" . + from order_less_le_trans[OF th01 th00] + have th32: "cmod(poly p (g (f (N1 + N2)))) < ?m + (1/ real(Suc (N1 + N2)))" . + from N2 have "2/?e < real (Suc (N1 + N2))" by arith + with e2 less_imp_inverse_less[of "2/?e" "real (Suc (N1 + N2))"] + have "?e/2 > 1/ real (Suc (N1 + N2))" by (simp add: inverse_eq_divide) + with ath[OF th31 th32] + have thc1:"\cmod(poly p (g (f (N1 + N2)))) - ?m\< ?e/2" by arith + have ath2: "\(a::real) b c m. 
\a - b\ <= c ==> \b - m\ <= \a - m\ + c" + by arith + have th22: "\cmod (poly p (g (f (N1 + N2)))) - cmod (poly p z)\ +\ cmod (poly p (g (f (N1 + N2))) - poly p z)" + by (simp add: norm_triangle_ineq3) + from ath2[OF th22, of ?m] + have thc2: "2*(?e/2) \ \cmod(poly p (g (f (N1 + N2)))) - ?m\ + cmod (poly p (g (f (N1 + N2))) - poly p z)" by simp + from th0[OF th2 thc1 thc2] have False .} + hence "?e = 0" by auto + then have "cmod (poly p z) = ?m" by simp + with s1m[OF wr] + have "cmod (poly p z) \ cmod (poly p w)" by simp } + hence ?thesis by blast} + ultimately show ?thesis by blast +qed + +lemma "(rcis (sqrt (abs r)) (a/2)) ^ 2 = rcis (abs r) a" + unfolding power2_eq_square + apply (simp add: rcis_mult) + apply (simp add: power2_eq_square[symmetric]) + done + +lemma cispi: "cis pi = -1" + unfolding cis_def + by simp + +lemma "(rcis (sqrt (abs r)) ((pi + a)/2)) ^ 2 = rcis (- abs r) a" + unfolding power2_eq_square + apply (simp add: rcis_mult add_divide_distrib) + apply (simp add: power2_eq_square[symmetric] rcis_def cispi cis_mult[symmetric]) + done + +text {* Nonzero polynomial in z goes to infinity as z does. *} + +instance complex::idom_char_0 by (intro_classes) +instance complex :: recpower_idom_char_0 by intro_classes + +lemma poly_infinity: + assumes ex: "list_ex (\c. c \ 0) p" + shows "\r. \z. r \ cmod z \ d \ cmod (poly (a#p) z)" +using ex +proof(induct p arbitrary: a d) + case (Cons c cs a d) + {assume H: "list_ex (\c. c\0) cs" + with Cons.hyps obtain r where r: "\z. r \ cmod z \ d + cmod a \ cmod (poly (c # cs) z)" by blast + let ?r = "1 + \r\" + {fix z assume h: "1 + \r\ \ cmod z" + have r0: "r \ cmod z" using h by arith + from r[rule_format, OF r0] + have th0: "d + cmod a \ 1 * cmod(poly (c#cs) z)" by arith + from h have z1: "cmod z \ 1" by arith + from order_trans[OF th0 mult_right_mono[OF z1 norm_ge_zero[of "poly (c#cs) z"]]] + have th1: "d \ cmod(z * poly (c#cs) z) - cmod a" + unfolding norm_mult by (simp add: ring_simps) + from complex_mod_triangle_sub[of "z * poly (c#cs) z" a] + have th2: "cmod(z * poly (c#cs) z) - cmod a \ cmod (poly (a#c#cs) z)" + by (simp add: diff_le_eq ring_simps) + from th1 th2 have "d \ cmod (poly (a#c#cs) z)" by arith} + hence ?case by blast} + moreover + {assume cs0: "\ (list_ex (\c. c \ 0) cs)" + with Cons.prems have c0: "c \ 0" by simp + from cs0 have cs0': "list_all (\c. c = 0) cs" + by (auto simp add: list_all_iff list_ex_iff) + {fix z + assume h: "(\d\ + cmod a) / cmod c \ cmod z" + from c0 have "cmod c > 0" by simp + from h c0 have th0: "\d\ + cmod a \ cmod (z*c)" + by (simp add: field_simps norm_mult) + have ath: "\mzh mazh ma. mzh <= mazh + ma ==> abs(d) + ma <= mzh ==> d <= mazh" by arith + from complex_mod_triangle_sub[of "z*c" a ] + have th1: "cmod (z * c) \ cmod (a + z * c) + cmod a" + by (simp add: ring_simps) + from ath[OF th1 th0] have "d \ cmod (poly (a # c # cs) z)" + using poly_0[OF cs0'] by simp} + then have ?case by blast} + ultimately show ?case by blast +qed simp + +text {* Hence polynomial's modulus attains its minimum somewhere. *} +lemma poly_minimum_modulus: + "\z.\w. cmod (poly p z) \ cmod (poly p w)" +proof(induct p) + case (Cons c cs) + {assume cs0: "list_ex (\c. c \ 0) cs" + from poly_infinity[OF cs0, of "cmod (poly (c#cs) 0)" c] + obtain r where r: "\z. r \ cmod z \ cmod (poly (c # cs) 0) \ cmod (poly (c # cs) z)" by blast + have ath: "\z r. r \ cmod z \ cmod z \ \r\" by arith + from poly_minimum_modulus_disc[of "\r\" "c#cs"] + obtain v where v: "\w. 
cmod w \ \r\ \ cmod (poly (c # cs) v) \ cmod (poly (c # cs) w)" by blast + {fix z assume z: "r \ cmod z" + from v[of 0] r[OF z] + have "cmod (poly (c # cs) v) \ cmod (poly (c # cs) z)" + by simp } + note v0 = this + from v0 v ath[of r] have ?case by blast} + moreover + {assume cs0: "\ (list_ex (\c. c\0) cs)" + hence th:"list_all (\c. c = 0) cs" by (simp add: list_all_iff list_ex_iff) + from poly_0[OF th] Cons.hyps have ?case by simp} + ultimately show ?case by blast +qed simp + +text{* Constant function (non-syntactic characterization). *} +definition "constant f = (\x y. f x = f y)" + +lemma nonconstant_length: "\ (constant (poly p)) \ length p \ 2" + unfolding constant_def + apply (induct p, auto) + apply (unfold not_less[symmetric]) + apply simp + apply (rule ccontr) + apply auto + done + +lemma poly_replicate_append: + "poly ((replicate n 0)@p) (x::'a::{recpower, comm_ring}) = x^n * poly p x" + by(induct n, auto simp add: power_Suc ring_simps) + +text {* Decomposition of polynomial, skipping zero coefficients + after the first. *} + +lemma poly_decompose_lemma: + assumes nz: "\(\z. z\0 \ poly p z = (0::'a::{recpower,idom}))" + shows "\k a q. a\0 \ Suc (length q + k) = length p \ + (\z. poly p z = z^k * poly (a#q) z)" +using nz +proof(induct p) + case Nil thus ?case by simp +next + case (Cons c cs) + {assume c0: "c = 0" + + from Cons.hyps Cons.prems c0 have ?case apply auto + apply (rule_tac x="k+1" in exI) + apply (rule_tac x="a" in exI, clarsimp) + apply (rule_tac x="q" in exI) + by (auto simp add: power_Suc)} + moreover + {assume c0: "c\0" + hence ?case apply- + apply (rule exI[where x=0]) + apply (rule exI[where x=c], clarsimp) + apply (rule exI[where x=cs]) + apply auto + done} + ultimately show ?case by blast +qed + +lemma poly_decompose: + assumes nc: "~constant(poly p)" + shows "\k a q. a\(0::'a::{recpower,idom}) \ k\0 \ + length q + k + 1 = length p \ + (\z. poly p z = poly p 0 + z^k * poly (a#q) z)" +using nc +proof(induct p) + case Nil thus ?case by (simp add: constant_def) +next + case (Cons c cs) + {assume C:"\z. z \ 0 \ poly cs z = 0" + {fix x y + from C have "poly (c#cs) x = poly (c#cs) y" by (cases "x=0", auto)} + with Cons.prems have False by (auto simp add: constant_def)} + hence th: "\ (\z. z \ 0 \ poly cs z = 0)" .. + from poly_decompose_lemma[OF th] + show ?case + apply clarsimp + apply (rule_tac x="k+1" in exI) + apply (rule_tac x="a" in exI) + apply simp + apply (rule_tac x="q" in exI) + apply (auto simp add: power_Suc) + done +qed + +text{* Fundamental theorem of algebral *} + +lemma fundamental_theorem_of_algebra: + assumes nc: "~constant(poly p)" + shows "\z::complex. poly p z = 0" +using nc +proof(induct n\ "length p" arbitrary: p rule: nat_less_induct) + fix n fix p :: "complex list" + let ?p = "poly p" + assume H: "\mp. \ constant (poly p) \ m = length p \ (\(z::complex). poly p z = 0)" and nc: "\ constant ?p" and n: "n = length p" + let ?ths = "\z. ?p z = 0" + + from nonconstant_length[OF nc] have n2: "n\ 2" by (simp add: n) + from poly_minimum_modulus obtain c where + c: "\w. cmod (?p c) \ cmod (?p w)" by blast + {assume pc: "?p c = 0" hence ?ths by blast} + moreover + {assume pc0: "?p c \ 0" + from poly_offset[of p c] obtain q where + q: "length q = length p" "\x. poly q x = ?p (c+x)" by blast + {assume h: "constant (poly q)" + from q(2) have th: "\x. 
poly q (x - c) = ?p x" by auto + {fix x y + from th have "?p x = poly q (x - c)" by auto + also have "\ = poly q (y - c)" + using h unfolding constant_def by blast + also have "\ = ?p y" using th by auto + finally have "?p x = ?p y" .} + with nc have False unfolding constant_def by blast } + hence qnc: "\ constant (poly q)" by blast + from q(2) have pqc0: "?p c = poly q 0" by simp + from c pqc0 have cq0: "\w. cmod (poly q 0) \ cmod (?p w)" by simp + let ?a0 = "poly q 0" + from pc0 pqc0 have a00: "?a0 \ 0" by simp + from a00 + have qr: "\z. poly q z = poly (map (op * (inverse ?a0)) q) z * ?a0" + by (simp add: poly_cmult_map) + let ?r = "map (op * (inverse ?a0)) q" + have lgqr: "length q = length ?r" by simp + {assume h: "\x y. poly ?r x = poly ?r y" + {fix x y + from qr[rule_format, of x] + have "poly q x = poly ?r x * ?a0" by auto + also have "\ = poly ?r y * ?a0" using h by simp + also have "\ = poly q y" using qr[rule_format, of y] by simp + finally have "poly q x = poly q y" .} + with qnc have False unfolding constant_def by blast} + hence rnc: "\ constant (poly ?r)" unfolding constant_def by blast + from qr[rule_format, of 0] a00 have r01: "poly ?r 0 = 1" by auto + {fix w + have "cmod (poly ?r w) < 1 \ cmod (poly q w / ?a0) < 1" + using qr[rule_format, of w] a00 by simp + also have "\ \ cmod (poly q w) < cmod ?a0" + using a00 unfolding norm_divide by (simp add: field_simps) + finally have "cmod (poly ?r w) < 1 \ cmod (poly q w) < cmod ?a0" .} + note mrmq_eq = this + from poly_decompose[OF rnc] obtain k a s where + kas: "a\0" "k\0" "length s + k + 1 = length ?r" + "\z. poly ?r z = poly ?r 0 + z^k* poly (a#s) z" by blast + {assume "k + 1 = n" + with kas(3) lgqr[symmetric] q(1) n[symmetric] have s0:"s=[]" by auto + {fix w + have "cmod (poly ?r w) = cmod (1 + a * w ^ k)" + using kas(4)[rule_format, of w] s0 r01 by (simp add: ring_simps)} + note hth = this [symmetric] + from reduce_poly_simple[OF kas(1,2)] + have "\w. cmod (poly ?r w) < 1" unfolding hth by blast} + moreover + {assume kn: "k+1 \ n" + from kn kas(3) q(1) n[symmetric] have k1n: "k + 1 < n" by simp + have th01: "\ constant (poly (1#((replicate (k - 1) 0)@[a])))" + unfolding constant_def poly_Nil poly_Cons poly_replicate_append + using kas(1) apply simp + by (rule exI[where x=0], rule exI[where x=1], simp) + from kas(2) have th02: "k+1 = length (1#((replicate (k - 1) 0)@[a]))" + by simp + from H[rule_format, OF k1n th01 th02] + obtain w where w: "1 + w^k * a = 0" + unfolding poly_Nil poly_Cons poly_replicate_append + using kas(2) by (auto simp add: power_Suc[symmetric, of _ "k - Suc 0"] + mult_assoc[of _ _ a, symmetric]) + from poly_bound_exists[of "cmod w" s] obtain m where + m: "m > 0" "\z. 
cmod z \ cmod w \ cmod (poly s z) \ m" by blast + have w0: "w\0" using kas(2) w by (auto simp add: power_0_left) + from w have "(1 + w ^ k * a) - 1 = 0 - 1" by simp + then have wm1: "w^k * a = - 1" by simp + have inv0: "0 < inverse (cmod w ^ (k + 1) * m)" + using norm_ge_zero[of w] w0 m(1) + by (simp add: inverse_eq_divide zero_less_mult_iff) + with real_down2[OF zero_less_one] obtain t where + t: "t > 0" "t < 1" "t < inverse (cmod w ^ (k + 1) * m)" by blast + let ?ct = "complex_of_real t" + let ?w = "?ct * w" + have "1 + ?w^k * (a + ?w * poly s ?w) = 1 + ?ct^k * (w^k * a) + ?w^k * ?w * poly s ?w" using kas(1) by (simp add: ring_simps power_mult_distrib) + also have "\ = complex_of_real (1 - t^k) + ?w^k * ?w * poly s ?w" + unfolding wm1 by (simp) + finally have "cmod (1 + ?w^k * (a + ?w * poly s ?w)) = cmod (complex_of_real (1 - t^k) + ?w^k * ?w * poly s ?w)" + apply - + apply (rule cong[OF refl[of cmod]]) + apply assumption + done + with norm_triangle_ineq[of "complex_of_real (1 - t^k)" "?w^k * ?w * poly s ?w"] + have th11: "cmod (1 + ?w^k * (a + ?w * poly s ?w)) \ \1 - t^k\ + cmod (?w^k * ?w * poly s ?w)" unfolding norm_of_real by simp + have ath: "\x (t::real). 0\ x \ x < t \ t\1 \ \1 - t\ + x < 1" by arith + have "t *cmod w \ 1 * cmod w" apply (rule mult_mono) using t(1,2) by auto + then have tw: "cmod ?w \ cmod w" using t(1) by (simp add: norm_mult) + from t inv0 have "t* (cmod w ^ (k + 1) * m) < 1" + by (simp add: inverse_eq_divide field_simps) + with zero_less_power[OF t(1), of k] + have th30: "t^k * (t* (cmod w ^ (k + 1) * m)) < t^k * 1" + apply - apply (rule mult_strict_left_mono) by simp_all + have "cmod (?w^k * ?w * poly s ?w) = t^k * (t* (cmod w ^ (k+1) * cmod (poly s ?w)))" using w0 t(1) + by (simp add: ring_simps power_mult_distrib norm_of_real norm_power norm_mult) + then have "cmod (?w^k * ?w * poly s ?w) \ t^k * (t* (cmod w ^ (k + 1) * m))" + using t(1,2) m(2)[rule_format, OF tw] w0 + apply (simp only: ) + apply auto + apply (rule mult_mono, simp_all add: norm_ge_zero)+ + apply (simp add: zero_le_mult_iff zero_le_power) + done + with th30 have th120: "cmod (?w^k * ?w * poly s ?w) < t^k" by simp + from power_strict_mono[OF t(2), of k] t(1) kas(2) have th121: "t^k \ 1" + by auto + from ath[OF norm_ge_zero[of "?w^k * ?w * poly s ?w"] th120 th121] + have th12: "\1 - t^k\ + cmod (?w^k * ?w * poly s ?w) < 1" . + from th11 th12 + have "cmod (1 + ?w^k * (a + ?w * poly s ?w)) < 1" by arith + then have "cmod (poly ?r ?w) < 1" + unfolding kas(4)[rule_format, of ?w] r01 by simp + then have "\w. cmod (poly ?r w) < 1" by blast} + ultimately have cr0_contr: "\w. cmod (poly ?r w) < 1" by blast + from cr0_contr cq0 q(2) + have ?ths unfolding mrmq_eq not_less[symmetric] by auto} + ultimately show ?ths by blast +qed + +text {* Alternative version with a syntactic notion of constant polynomial. *} + +lemma fundamental_theorem_of_algebra_alt: + assumes nc: "~(\a l. a\ 0 \ list_all(\b. b = 0) l \ p = a#l)" + shows "\z. poly p z = (0::complex)" +using nc +proof(induct p) + case (Cons c cs) + {assume "c=0" hence ?case by auto} + moreover + {assume c0: "c\0" + {assume nc: "constant (poly (c#cs))" + from nc[unfolded constant_def, rule_format, of 0] + have "\w. w \ 0 \ poly cs w = 0" by auto + hence "list_all (\c. c=0) cs" + proof(induct cs) + case (Cons d ds) + {assume "d=0" hence ?case using Cons.prems Cons.hyps by simp} + moreover + {assume d0: "d\0" + from poly_bound_exists[of 1 ds] obtain m where + m: "m > 0" "\z. \z. 
cmod z \ 1 \ cmod (poly ds z) \ m" by blast + have dm: "cmod d / m > 0" using d0 m(1) by (simp add: field_simps) + from real_down2[OF dm zero_less_one] obtain x where + x: "x > 0" "x < cmod d / m" "x < 1" by blast + let ?x = "complex_of_real x" + from x have cx: "?x \ 0" "cmod ?x \ 1" by simp_all + from Cons.prems[rule_format, OF cx(1)] + have cth: "cmod (?x*poly ds ?x) = cmod d" by (simp add: eq_diff_eq[symmetric]) + from m(2)[rule_format, OF cx(2)] x(1) + have th0: "cmod (?x*poly ds ?x) \ x*m" + by (simp add: norm_mult) + from x(2) m(1) have "x*m < cmod d" by (simp add: field_simps) + with th0 have "cmod (?x*poly ds ?x) \ cmod d" by auto + with cth have ?case by blast} + ultimately show ?case by blast + qed simp} + then have nc: "\ constant (poly (c#cs))" using Cons.prems c0 + by blast + from fundamental_theorem_of_algebra[OF nc] have ?case .} + ultimately show ?case by blast +qed simp + +subsection{* Nullstellenstatz, degrees and divisibility of polynomials *} + +lemma nullstellensatz_lemma: + fixes p :: "complex list" + assumes "\x. poly p x = 0 \ poly q x = 0" + and "degree p = n" and "n \ 0" + shows "p divides (pexp q n)" +using prems +proof(induct n arbitrary: p q rule: nat_less_induct) + fix n::nat fix p q :: "complex list" + assume IH: "\mp q. + (\x. poly p x = (0::complex) \ poly q x = 0) \ + degree p = m \ m \ 0 \ p divides (q %^ m)" + and pq0: "\x. poly p x = 0 \ poly q x = 0" + and dpn: "degree p = n" and n0: "n \ 0" + let ?ths = "p divides (q %^ n)" + {fix a assume a: "poly p a = 0" + {assume p0: "poly p = poly []" + hence ?ths unfolding divides_def using pq0 n0 + apply - apply (rule exI[where x="[]"], rule ext) + by (auto simp add: poly_mult poly_exp)} + moreover + {assume p0: "poly p \ poly []" + and oa: "order a p \ 0" + from p0 have pne: "p \ []" by auto + let ?op = "order a p" + from p0 have ap: "([- a, 1] %^ ?op) divides p" + "\ pexp [- a, 1] (Suc ?op) divides p" using order by blast+ + note oop = order_degree[OF p0, unfolded dpn] + {assume q0: "q = []" + hence ?ths using n0 unfolding divides_def + apply simp + apply (rule exI[where x="[]"], rule ext) + by (simp add: divides_def poly_exp poly_mult)} + moreover + {assume q0: "q\[]" + from pq0[rule_format, OF a, unfolded poly_linear_divides] q0 + obtain r where r: "q = pmult [- a, 1] r" by blast + from ap[unfolded divides_def] obtain s where + s: "poly p = poly (pmult (pexp [- a, 1] ?op) s)" by blast + have s0: "poly s \ poly []" + using s p0 by (simp add: poly_entire) + hence pns0: "poly (pnormalize s) \ poly []" and sne: "s\[]" by auto + {assume ds0: "degree s = 0" + from ds0 pns0 have "\k. 
pnormalize s = [k]" unfolding degree_def + by (cases "pnormalize s", auto) + then obtain k where kpn: "pnormalize s = [k]" by blast + from pns0[unfolded poly_zero] kpn have k: "k \0" "poly s = poly [k]" + using poly_normalize[of s] by simp_all + let ?w = "pmult (pmult [1/k] (pexp [-a,1] (n - ?op))) (pexp r n)" + from k r s oop have "poly (pexp q n) = poly (pmult p ?w)" + by - (rule ext, simp add: poly_mult poly_exp poly_cmult poly_add power_add[symmetric] ring_simps power_mult_distrib[symmetric]) + hence ?ths unfolding divides_def by blast} + moreover + {assume ds0: "degree s \ 0" + from ds0 s0 dpn degree_unique[OF s, unfolded linear_pow_mul_degree] oa + have dsn: "degree s < n" by auto + {fix x assume h: "poly s x = 0" + {assume xa: "x = a" + from h[unfolded xa poly_linear_divides] sne obtain u where + u: "s = pmult [- a, 1] u" by blast + have "poly p = poly (pmult (pexp [- a, 1] (Suc ?op)) u)" + unfolding s u + apply (rule ext) + by (simp add: ring_simps power_mult_distrib[symmetric] poly_mult poly_cmult poly_add poly_exp) + with ap(2)[unfolded divides_def] have False by blast} + note xa = this + from h s have "poly p x = 0" by (simp add: poly_mult) + with pq0 have "poly q x = 0" by blast + with r xa have "poly r x = 0" + by (auto simp add: poly_mult poly_add poly_cmult eq_diff_eq[symmetric])} + note impth = this + from IH[rule_format, OF dsn, of s r] impth ds0 + have "s divides (pexp r (degree s))" by blast + then obtain u where u: "poly (pexp r (degree s)) = poly (pmult s u)" + unfolding divides_def by blast + hence u': "\x. poly s x * poly u x = poly r x ^ degree s" + by (simp add: poly_mult[symmetric] poly_exp[symmetric]) + let ?w = "pmult (pmult u (pexp [-a,1] (n - ?op))) (pexp r (n - degree s))" + from u' s r oop[of a] dsn have "poly (pexp q n) = poly (pmult p ?w)" + apply - apply (rule ext) + apply (simp only: power_mult_distrib power_add[symmetric] poly_add poly_mult poly_exp poly_cmult ring_simps) + + apply (simp add: power_mult_distrib power_add[symmetric] poly_add poly_mult poly_exp poly_cmult mult_assoc[symmetric]) + done + hence ?ths unfolding divides_def by blast} + ultimately have ?ths by blast } + ultimately have ?ths by blast} + ultimately have ?ths using a order_root by blast} + moreover + {assume exa: "\ (\a. poly p a = 0)" + from fundamental_theorem_of_algebra_alt[of p] exa obtain c cs where + ccs: "c\0" "list_all (\c. c = 0) cs" "p = c#cs" by blast + + from poly_0[OF ccs(2)] ccs(3) + have pp: "\x. poly p x = c" by simp + let ?w = "pmult [1/c] (pexp q n)" + from pp ccs(1) + have "poly (pexp q n) = poly (pmult p ?w) " + apply - apply (rule ext) + unfolding poly_mult_assoc[symmetric] by (simp add: poly_mult) + hence ?ths unfolding divides_def by blast} + ultimately show ?ths by blast +qed + +lemma nullstellensatz_univariate: + "(\x. poly p x = (0::complex) \ poly q x = 0) \ + p divides (q %^ (degree p)) \ (poly p = poly [] \ poly q = poly [])" +proof- + {assume pe: "poly p = poly []" + hence eq: "(\x. 
poly p x = (0::complex) \ poly q x = 0) \ poly q = poly []" + apply auto + by (rule ext, simp) + {assume "p divides (pexp q (degree p))" + then obtain r where r: "poly (pexp q (degree p)) = poly (pmult p r)" + unfolding divides_def by blast + from cong[OF r refl] pe degree_unique[OF pe] + have False by (simp add: poly_mult degree_def)} + with eq pe have ?thesis by blast} + moreover + {assume pe: "poly p \ poly []" + have p0: "poly [0] = poly []" by (rule ext, simp) + {assume dp: "degree p = 0" + then obtain k where "pnormalize p = [k]" using pe poly_normalize[of p] + unfolding degree_def by (cases "pnormalize p", auto) + hence k: "pnormalize p = [k]" "poly p = poly [k]" "k\0" + using pe poly_normalize[of p] by (auto simp add: p0) + hence th1: "\x. poly p x \ 0" by simp + from k(2,3) dp have "poly (pexp q (degree p)) = poly (pmult p [1/k]) " + by - (rule ext, simp add: poly_mult poly_exp) + hence th2: "p divides (pexp q (degree p))" unfolding divides_def by blast + from th1 th2 pe have ?thesis by blast} + moreover + {assume dp: "degree p \ 0" + then obtain n where n: "degree p = Suc n " by (cases "degree p", auto) + {assume "p divides (pexp q (Suc n))" + then obtain u where u: "poly (pexp q (Suc n)) = poly (pmult p u)" + unfolding divides_def by blast + hence u' :"\x. poly (pexp q (Suc n)) x = poly (pmult p u) x" by simp_all + {fix x assume h: "poly p x = 0" "poly q x \ 0" + hence "poly (pexp q (Suc n)) x \ 0" by (simp only: poly_exp) simp + hence False using u' h(1) by (simp only: poly_mult poly_exp) simp}} + with n nullstellensatz_lemma[of p q "degree p"] dp + have ?thesis by auto} + ultimately have ?thesis by blast} + ultimately show ?thesis by blast +qed + +text{* Useful lemma *} + +lemma (in idom_char_0) constant_degree: "constant (poly p) \ degree p = 0" (is "?lhs = ?rhs") +proof + assume l: ?lhs + from l[unfolded constant_def, rule_format, of _ "zero"] + have th: "poly p = poly [poly p 0]" apply - by (rule ext, simp) + from degree_unique[OF th] show ?rhs by (simp add: degree_def) +next + assume r: ?rhs + from r have "pnormalize p = [] \ (\k. pnormalize p = [k])" + unfolding degree_def by (cases "pnormalize p", auto) + then show ?lhs unfolding constant_def poly_normalize[of p, symmetric] + by (auto simp del: poly_normalize) +qed + +(* It would be nicer to prove this without using algebraic closure... *) + +lemma divides_degree_lemma: assumes dpn: "degree (p::complex list) = n" + shows "n \ degree (p *** q) \ poly (p *** q) = poly []" + using dpn +proof(induct n arbitrary: p q) + case 0 thus ?case by simp +next + case (Suc n p q) + from Suc.prems fundamental_theorem_of_algebra[of p] constant_degree[of p] + obtain a where a: "poly p a = 0" by auto + then obtain r where r: "p = pmult [-a, 1] r" unfolding poly_linear_divides + using Suc.prems by (auto simp add: degree_def) + {assume h: "poly (pmult r q) = poly []" + hence "poly (pmult p q) = poly []" using r + apply - apply (rule ext) by (auto simp add: poly_entire poly_mult poly_add poly_cmult) hence ?case by blast} + moreover + {assume h: "poly (pmult r q) \ poly []" + hence r0: "poly r \ poly []" and q0: "poly q \ poly []" + by (auto simp add: poly_entire) + have eq: "poly (pmult p q) = poly (pmult [-a, 1] (pmult r q))" + apply - apply (rule ext) + by (simp add: r poly_mult poly_add poly_cmult ring_simps) + from linear_mul_degree[OF h, of "- a"] + have dqe: "degree (pmult p q) = degree (pmult r q) + 1" + unfolding degree_unique[OF eq] . 
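+    (* Informally: p = [-a, 1] *** r, and multiplying by the linear factor
+       [-a, 1] raises the degree by one, so degree p = Suc n forces
+       degree r = n.  The induction hypothesis for r and q then gives
+       n <= degree (r *** q), the degenerate case poly (r *** q) = poly []
+       being excluded by h, and with dqe this yields
+       Suc n <= degree (r *** q) + 1 = degree (p *** q). *)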
+ from linear_mul_degree[OF r0, of "- a", unfolded r[symmetric]] r Suc.prems + have dr: "degree r = n" by auto + from Suc.hyps[OF dr, of q] have "Suc n \ degree (pmult p q)" + unfolding dqe using h by (auto simp del: poly.simps) + hence ?case by blast} + ultimately show ?case by blast +qed + +lemma divides_degree: assumes pq: "p divides (q:: complex list)" + shows "degree p \ degree q \ poly q = poly []" +using pq divides_degree_lemma[OF refl, of p] +apply (auto simp add: divides_def poly_entire) +apply atomize +apply (erule_tac x="qa" in allE, auto) +apply (subgoal_tac "degree q = degree (p *** qa)", simp) +apply (rule degree_unique, simp) +done + +(* Arithmetic operations on multivariate polynomials. *) + +lemma mpoly_base_conv: + "(0::complex) \ poly [] x" "c \ poly [c] x" "x \ poly [0,1] x" by simp_all + +lemma mpoly_norm_conv: + "poly [0] (x::complex) \ poly [] x" "poly [poly [] y] x \ poly [] x" by simp_all + +lemma mpoly_sub_conv: + "poly p (x::complex) - poly q x \ poly p x + -1 * poly q x" + by (simp add: diff_def) + +lemma poly_pad_rule: "poly p x = 0 ==> poly (0#p) x = (0::complex)" by simp + +lemma poly_cancel_eq_conv: "p = (0::complex) \ a \ 0 \ (q = 0) \ (a * q - b * p = 0)" apply (atomize (full)) by auto + +lemma resolve_eq_raw: "poly [] x \ 0" "poly [c] x \ (c::complex)" by auto +lemma resolve_eq_then: "(P \ (Q \ Q1)) \ (\P \ (Q \ Q2)) + \ Q \ P \ Q1 \ \P\ Q2" apply (atomize (full)) by blast +lemma expand_ex_beta_conv: "list_ex P [c] \ P c" by simp + +lemma poly_divides_pad_rule: + fixes p q :: "complex list" + assumes pq: "p divides q" + shows "p divides ((0::complex)#q)" +proof- + from pq obtain r where r: "poly q = poly (p *** r)" unfolding divides_def by blast + hence "poly (0#q) = poly (p *** ([0,1] *** r))" + by - (rule ext, simp add: poly_mult poly_cmult poly_add) + thus ?thesis unfolding divides_def by blast +qed + +lemma poly_divides_pad_const_rule: + fixes p q :: "complex list" + assumes pq: "p divides q" + shows "p divides (a %* q)" +proof- + from pq obtain r where r: "poly q = poly (p *** r)" unfolding divides_def by blast + hence "poly (a %* q) = poly (p *** (a %* r))" + by - (rule ext, simp add: poly_mult poly_cmult poly_add) + thus ?thesis unfolding divides_def by blast +qed + + +lemma poly_divides_conv0: + fixes p :: "complex list" + assumes lgpq: "length q < length p" and lq:"last p \ 0" + shows "p divides q \ (\ (list_ex (\c. c \ 0) q))" (is "?lhs \ ?rhs") +proof- + {assume r: ?rhs + hence eq: "poly q = poly []" unfolding poly_zero + by (simp add: list_all_iff list_ex_iff) + hence "poly q = poly (p *** [])" by - (rule ext, simp add: poly_mult) + hence ?lhs unfolding divides_def by blast} + moreover + {assume l: ?lhs + have ath: "\lq lp dq::nat. lq < lp ==> lq \ 0 \ dq <= lq - 1 ==> dq < lp - 1" + by arith + {assume q0: "length q = 0" + hence "q = []" by simp + hence ?rhs by simp} + moreover + {assume lgq0: "length q \ 0" + from pnormalize_length[of q] have dql: "degree q \ length q - 1" + unfolding degree_def by simp + from ath[OF lgpq lgq0 dql, unfolded pnormal_degree[OF lq, symmetric]] divides_degree[OF l] have "poly q = poly []" by auto + hence ?rhs unfolding poly_zero by (simp add: list_all_iff list_ex_iff)} + ultimately have ?rhs by blast } + ultimately show "?lhs \ ?rhs" by - (atomize (full), blast) +qed + +lemma poly_divides_conv1: + assumes a0: "a\ (0::complex)" and pp': "(p::complex list) divides p'" + and qrp': "\x. 
a * poly q x - poly p' x \ poly r x" + shows "p divides q \ p divides (r::complex list)" (is "?lhs \ ?rhs") +proof- + { + from pp' obtain t where t: "poly p' = poly (p *** t)" + unfolding divides_def by blast + {assume l: ?lhs + then obtain u where u: "poly q = poly (p *** u)" unfolding divides_def by blast + have "poly r = poly (p *** ((a %* u) +++ (-- t)))" + using u qrp' t + by - (rule ext, + simp add: poly_add poly_mult poly_cmult poly_minus ring_simps) + then have ?rhs unfolding divides_def by blast} + moreover + {assume r: ?rhs + then obtain u where u: "poly r = poly (p *** u)" unfolding divides_def by blast + from u t qrp' a0 have "poly q = poly (p *** ((1/a) %* (u +++ t)))" + by - (rule ext, atomize (full), simp add: poly_mult poly_add poly_cmult field_simps) + hence ?lhs unfolding divides_def by blast} + ultimately have "?lhs = ?rhs" by blast } +thus "?lhs \ ?rhs" by - (atomize(full), blast) +qed + +lemma basic_cqe_conv1: + "(\x. poly p x = 0 \ poly [] x \ 0) \ False" + "(\x. poly [] x \ 0) \ False" + "(\x. poly [c] x \ 0) \ c\0" + "(\x. poly [] x = 0) \ True" + "(\x. poly [c] x = 0) \ c = 0" by simp_all + +lemma basic_cqe_conv2: + assumes l:"last (a#b#p) \ 0" + shows "(\x. poly (a#b#p) x = (0::complex)) \ True" +proof- + {fix h t + assume h: "h\0" "list_all (\c. c=(0::complex)) t" "a#b#p = h#t" + hence "list_all (\c. c= 0) (b#p)" by simp + moreover have "last (b#p) \ set (b#p)" by simp + ultimately have "last (b#p) = 0" by (simp add: list_all_iff) + with l have False by simp} + hence th: "\ (\ h t. h\0 \ list_all (\c. c=0) t \ a#b#p = h#t)" + by blast + from fundamental_theorem_of_algebra_alt[OF th] + show "(\x. poly (a#b#p) x = (0::complex)) \ True" by auto +qed + +lemma basic_cqe_conv_2b: "(\x. poly p x \ (0::complex)) \ (list_ex (\c. c \ 0) p)" +proof- + have "\ (list_ex (\c. c \ 0) p) \ poly p = poly []" + by (simp add: poly_zero list_all_iff list_ex_iff) + also have "\ \ (\ (\x. poly p x \ 0))" by (auto intro: ext) + finally show "(\x. poly p x \ (0::complex)) \ (list_ex (\c. c \ 0) p)" + by - (atomize (full), blast) +qed + +lemma basic_cqe_conv3: + fixes p q :: "complex list" + assumes l: "last (a#p) \ 0" + shows "(\x. poly (a#p) x =0 \ poly q x \ 0) \ \ ((a#p) divides (q %^ (length p)))" +proof- + note np = pnormalize_eq[OF l] + {assume "poly (a#p) = poly []" hence False using l + unfolding poly_zero apply (auto simp add: list_all_iff del: last.simps) + apply (cases p, simp_all) done} + then have p0: "poly (a#p) \ poly []" by blast + from np have dp:"degree (a#p) = length p" by (simp add: degree_def) + from nullstellensatz_univariate[of "a#p" q] p0 dp + show "(\x. poly (a#p) x =0 \ poly q x \ 0) \ \ ((a#p) divides (q %^ (length p)))" + by - (atomize (full), auto) +qed + +lemma basic_cqe_conv4: + fixes p q :: "complex list" + assumes h: "\x. 
poly (q %^ n) x \ poly r x" + shows "p divides (q %^ n) \ p divides r" +proof- + from h have "poly (q %^ n) = poly r" by (auto intro: ext) + thus "p divides (q %^ n) \ p divides r" unfolding divides_def by simp +qed + +lemma pmult_Cons_Cons: "((a::complex)#b#p) *** q = (a %*q) +++ (0#((b#p) *** q))" + by simp + +lemma elim_neg_conv: "- z \ (-1) * (z::complex)" by simp +lemma eqT_intr: "PROP P \ (True \ PROP P )" "PROP P \ True" by blast+ +lemma negate_negate_rule: "Trueprop P \ \ P \ False" by (atomize (full), auto) +lemma last_simps: "last [x] = x" "last (x#y#ys) = last (y#ys)" by simp_all +lemma length_simps: "length [] = 0" "length (x#y#xs) = length xs + 2" "length [x] = 1" by simp_all + +lemma complex_entire: "(z::complex) \ 0 \ w \ 0 \ z*w \ 0" by simp +lemma resolve_eq_ne: "(P \ True) \ (\P \ False)" "(P \ False) \ (\P \ True)" + by (atomize (full)) simp_all +lemma cqe_conv1: "poly [] x = 0 \ True" by simp +lemma cqe_conv2: "(p \ (q \ r)) \ ((p \ q) \ (p \ r))" (is "?l \ ?r") +proof + assume "p \ q \ r" thus "p \ q \ p \ r" apply - apply (atomize (full)) by blast +next + assume "p \ q \ p \ r" "p" + thus "q \ r" apply - apply (atomize (full)) apply blast done +qed +lemma poly_const_conv: "poly [c] (x::complex) = y \ c = y" by simp + +end \ No newline at end of file diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/HOL.thy --- a/src/HOL/HOL.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/HOL.thy Tue Dec 30 11:10:01 2008 +0100 @@ -26,6 +26,7 @@ "~~/src/Tools/atomize_elim.ML" "~~/src/Tools/induct.ML" ("~~/src/Tools/induct_tacs.ML") + "~~/src/Tools/value.ML" "~~/src/Tools/code/code_name.ML" "~~/src/Tools/code/code_funcgr.ML" "~~/src/Tools/code/code_thingol.ML" diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/HahnBanach/Bounds.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/HahnBanach/Bounds.thy Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,82 @@ +(* Title: HOL/Real/HahnBanach/Bounds.thy + Author: Gertrud Bauer, TU Munich +*) + +header {* Bounds *} + +theory Bounds +imports Main ContNotDenum +begin + +locale lub = + fixes A and x + assumes least [intro?]: "(\a. a \ A \ a \ b) \ x \ b" + and upper [intro?]: "a \ A \ a \ x" + +lemmas [elim?] = lub.least lub.upper + +definition + the_lub :: "'a::order set \ 'a" where + "the_lub A = The (lub A)" + +notation (xsymbols) + the_lub ("\_" [90] 90) + +lemma the_lub_equality [elim?]: + assumes "lub A x" + shows "\A = (x::'a::order)" +proof - + interpret lub A x by fact + show ?thesis + proof (unfold the_lub_def) + from `lub A x` show "The (lub A) = x" + proof + fix x' assume lub': "lub A x'" + show "x' = x" + proof (rule order_antisym) + from lub' show "x' \ x" + proof + fix a assume "a \ A" + then show "a \ x" .. + qed + show "x \ x'" + proof + fix a assume "a \ A" + with lub' show "a \ x'" .. + qed + qed + qed + qed +qed + +lemma the_lubI_ex: + assumes ex: "\x. lub A x" + shows "lub A (\A)" +proof - + from ex obtain x where x: "lub A x" .. + also from x have [symmetric]: "\A = x" .. + finally show ?thesis . +qed + +lemma lub_compat: "lub A x = isLub UNIV A x" +proof - + have "isUb UNIV A = (\x. A *<= x \ x \ UNIV)" + by (rule ext) (simp only: isUb_def) + then show ?thesis + by (simp only: lub_def isLub_def leastP_def setge_def setle_def) blast +qed + +lemma real_complete: + fixes A :: "real set" + assumes nonempty: "\a. a \ A" + and ex_upper: "\y. \a \ A. a \ y" + shows "\x. lub A x" +proof - + from ex_upper have "\y. isUb UNIV A y" + unfolding isUb_def setle_def by blast + with nonempty have "\x. 
isLub UNIV A x" + by (rule reals_complete) + then show ?thesis by (simp only: lub_compat) +qed + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/HahnBanach/FunctionNorm.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/HahnBanach/FunctionNorm.thy Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,278 @@ +(* Title: HOL/Real/HahnBanach/FunctionNorm.thy + Author: Gertrud Bauer, TU Munich +*) + +header {* The norm of a function *} + +theory FunctionNorm +imports NormedSpace FunctionOrder +begin + +subsection {* Continuous linear forms*} + +text {* + A linear form @{text f} on a normed vector space @{text "(V, \\\)"} + is \emph{continuous}, iff it is bounded, i.e. + \begin{center} + @{text "\c \ R. \x \ V. \f x\ \ c \ \x\"} + \end{center} + In our application no other functions than linear forms are + considered, so we can define continuous linear forms as bounded + linear forms: +*} + +locale continuous = var_V + norm_syntax + linearform + + assumes bounded: "\c. \x \ V. \f x\ \ c * \x\" + +declare continuous.intro [intro?] continuous_axioms.intro [intro?] + +lemma continuousI [intro]: + fixes norm :: "_ \ real" ("\_\") + assumes "linearform V f" + assumes r: "\x. x \ V \ \f x\ \ c * \x\" + shows "continuous V norm f" +proof + show "linearform V f" by fact + from r have "\c. \x\V. \f x\ \ c * \x\" by blast + then show "continuous_axioms V norm f" .. +qed + + +subsection {* The norm of a linear form *} + +text {* + The least real number @{text c} for which holds + \begin{center} + @{text "\x \ V. \f x\ \ c \ \x\"} + \end{center} + is called the \emph{norm} of @{text f}. + + For non-trivial vector spaces @{text "V \ {0}"} the norm can be + defined as + \begin{center} + @{text "\f\ = \x \ 0. \f x\ / \x\"} + \end{center} + + For the case @{text "V = {0}"} the supremum would be taken from an + empty set. Since @{text \} is unbounded, there would be no supremum. + To avoid this situation it must be guaranteed that there is an + element in this set. This element must be @{text "{} \ 0"} so that + @{text fn_norm} has the norm properties. Furthermore it does not + have to change the norm in all other cases, so it must be @{text 0}, + as all other elements are @{text "{} \ 0"}. + + Thus we define the set @{text B} where the supremum is taken from as + follows: + \begin{center} + @{text "{0} \ {\f x\ / \x\. x \ 0 \ x \ F}"} + \end{center} + + @{text fn_norm} is equal to the supremum of @{text B}, if the + supremum exists (otherwise it is undefined). +*} + +locale fn_norm = norm_syntax + + fixes B defines "B V f \ {0} \ {\f x\ / \x\ | x. x \ 0 \ x \ V}" + fixes fn_norm ("\_\\_" [0, 1000] 999) + defines "\f\\V \ \(B V f)" + +locale normed_vectorspace_with_fn_norm = normed_vectorspace + fn_norm + +lemma (in fn_norm) B_not_empty [intro]: "0 \ B V f" + by (simp add: B_def) + +text {* + The following lemma states that every continuous linear form on a + normed space @{text "(V, \\\)"} has a function norm. +*} + +lemma (in normed_vectorspace_with_fn_norm) fn_norm_works: + assumes "continuous V norm f" + shows "lub (B V f) (\f\\V)" +proof - + interpret continuous V norm f by fact + txt {* The existence of the supremum is shown using the + completeness of the reals. Completeness means, that every + non-empty bounded set of reals has a supremum. *} + have "\a. lub (B V f) a" + proof (rule real_complete) + txt {* First we have to show that @{text B} is non-empty: *} + have "0 \ B V f" .. + then show "\x. x \ B V f" .. + + txt {* Then we have to show that @{text B} is bounded: *} + show "\c. \y \ B V f. 
y \ c" + proof - + txt {* We know that @{text f} is bounded by some value @{text c}. *} + from bounded obtain c where c: "\x \ V. \f x\ \ c * \x\" .. + + txt {* To prove the thesis, we have to show that there is some + @{text b}, such that @{text "y \ b"} for all @{text "y \ + B"}. Due to the definition of @{text B} there are two cases. *} + + def b \ "max c 0" + have "\y \ B V f. y \ b" + proof + fix y assume y: "y \ B V f" + show "y \ b" + proof cases + assume "y = 0" + then show ?thesis unfolding b_def by arith + next + txt {* The second case is @{text "y = \f x\ / \x\"} for some + @{text "x \ V"} with @{text "x \ 0"}. *} + assume "y \ 0" + with y obtain x where y_rep: "y = \f x\ * inverse \x\" + and x: "x \ V" and neq: "x \ 0" + by (auto simp add: B_def real_divide_def) + from x neq have gt: "0 < \x\" .. + + txt {* The thesis follows by a short calculation using the + fact that @{text f} is bounded. *} + + note y_rep + also have "\f x\ * inverse \x\ \ (c * \x\) * inverse \x\" + proof (rule mult_right_mono) + from c x show "\f x\ \ c * \x\" .. + from gt have "0 < inverse \x\" + by (rule positive_imp_inverse_positive) + then show "0 \ inverse \x\" by (rule order_less_imp_le) + qed + also have "\ = c * (\x\ * inverse \x\)" + by (rule real_mult_assoc) + also + from gt have "\x\ \ 0" by simp + then have "\x\ * inverse \x\ = 1" by simp + also have "c * 1 \ b" by (simp add: b_def le_maxI1) + finally show "y \ b" . + qed + qed + then show ?thesis .. + qed + qed + then show ?thesis unfolding fn_norm_def by (rule the_lubI_ex) +qed + +lemma (in normed_vectorspace_with_fn_norm) fn_norm_ub [iff?]: + assumes "continuous V norm f" + assumes b: "b \ B V f" + shows "b \ \f\\V" +proof - + interpret continuous V norm f by fact + have "lub (B V f) (\f\\V)" + using `continuous V norm f` by (rule fn_norm_works) + from this and b show ?thesis .. +qed + +lemma (in normed_vectorspace_with_fn_norm) fn_norm_leastB: + assumes "continuous V norm f" + assumes b: "\b. b \ B V f \ b \ y" + shows "\f\\V \ y" +proof - + interpret continuous V norm f by fact + have "lub (B V f) (\f\\V)" + using `continuous V norm f` by (rule fn_norm_works) + from this and b show ?thesis .. +qed + +text {* The norm of a continuous function is always @{text "\ 0"}. *} + +lemma (in normed_vectorspace_with_fn_norm) fn_norm_ge_zero [iff]: + assumes "continuous V norm f" + shows "0 \ \f\\V" +proof - + interpret continuous V norm f by fact + txt {* The function norm is defined as the supremum of @{text B}. + So it is @{text "\ 0"} if all elements in @{text B} are @{text "\ + 0"}, provided the supremum exists and @{text B} is not empty. *} + have "lub (B V f) (\f\\V)" + using `continuous V norm f` by (rule fn_norm_works) + moreover have "0 \ B V f" .. + ultimately show ?thesis .. +qed + +text {* + \medskip The fundamental property of function norms is: + \begin{center} + @{text "\f x\ \ \f\ \ \x\"} + \end{center} +*} + +lemma (in normed_vectorspace_with_fn_norm) fn_norm_le_cong: + assumes "continuous V norm f" "linearform V f" + assumes x: "x \ V" + shows "\f x\ \ \f\\V * \x\" +proof - + interpret continuous V norm f by fact + interpret linearform V f . + show ?thesis + proof cases + assume "x = 0" + then have "\f x\ = \f 0\" by simp + also have "f 0 = 0" by rule unfold_locales + also have "\\\ = 0" by simp + also have a: "0 \ \f\\V" + using `continuous V norm f` by (rule fn_norm_ge_zero) + from x have "0 \ norm x" .. + with a have "0 \ \f\\V * \x\" by (simp add: zero_le_mult_iff) + finally show "\f x\ \ \f\\V * \x\" . 
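+      (* Informally, the remaining case runs as follows: from x ~= 0 we get
+         norm x ~= 0, so |f x| = (|f x| * inverse (norm x)) * norm x; the
+         quotient |f x| * inverse (norm x) is an element of B V f and hence
+         bounded by the supremum (rule fn_norm_ub), and multiplying that
+         bound by norm x >= 0 (mult_right_mono) gives the claim. *)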
+ next + assume "x \ 0" + with x have neq: "\x\ \ 0" by simp + then have "\f x\ = (\f x\ * inverse \x\) * \x\" by simp + also have "\ \ \f\\V * \x\" + proof (rule mult_right_mono) + from x show "0 \ \x\" .. + from x and neq have "\f x\ * inverse \x\ \ B V f" + by (auto simp add: B_def real_divide_def) + with `continuous V norm f` show "\f x\ * inverse \x\ \ \f\\V" + by (rule fn_norm_ub) + qed + finally show ?thesis . + qed +qed + +text {* + \medskip The function norm is the least positive real number for + which the following inequation holds: + \begin{center} + @{text "\f x\ \ c \ \x\"} + \end{center} +*} + +lemma (in normed_vectorspace_with_fn_norm) fn_norm_least [intro?]: + assumes "continuous V norm f" + assumes ineq: "\x \ V. \f x\ \ c * \x\" and ge: "0 \ c" + shows "\f\\V \ c" +proof - + interpret continuous V norm f by fact + show ?thesis + proof (rule fn_norm_leastB [folded B_def fn_norm_def]) + fix b assume b: "b \ B V f" + show "b \ c" + proof cases + assume "b = 0" + with ge show ?thesis by simp + next + assume "b \ 0" + with b obtain x where b_rep: "b = \f x\ * inverse \x\" + and x_neq: "x \ 0" and x: "x \ V" + by (auto simp add: B_def real_divide_def) + note b_rep + also have "\f x\ * inverse \x\ \ (c * \x\) * inverse \x\" + proof (rule mult_right_mono) + have "0 < \x\" using x x_neq .. + then show "0 \ inverse \x\" by simp + from ineq and x show "\f x\ \ c * \x\" .. + qed + also have "\ = c" + proof - + from x_neq and x have "\x\ \ 0" by simp + then show ?thesis by simp + qed + finally show ?thesis . + qed + qed (insert `continuous V norm f`, simp_all add: continuous_def) +qed + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/HahnBanach/FunctionOrder.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/HahnBanach/FunctionOrder.thy Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,142 @@ +(* Title: HOL/Real/HahnBanach/FunctionOrder.thy + ID: $Id$ + Author: Gertrud Bauer, TU Munich +*) + +header {* An order on functions *} + +theory FunctionOrder +imports Subspace Linearform +begin + +subsection {* The graph of a function *} + +text {* + We define the \emph{graph} of a (real) function @{text f} with + domain @{text F} as the set + \begin{center} + @{text "{(x, f x). x \ F}"} + \end{center} + So we are modeling partial functions by specifying the domain and + the mapping function. We use the term ``function'' also for its + graph. +*} + +types 'a graph = "('a \ real) set" + +definition + graph :: "'a set \ ('a \ real) \ 'a graph" where + "graph F f = {(x, f x) | x. x \ F}" + +lemma graphI [intro]: "x \ F \ (x, f x) \ graph F f" + unfolding graph_def by blast + +lemma graphI2 [intro?]: "x \ F \ \t \ graph F f. t = (x, f x)" + unfolding graph_def by blast + +lemma graphE [elim?]: + "(x, y) \ graph F f \ (x \ F \ y = f x \ C) \ C" + unfolding graph_def by blast + + +subsection {* Functions ordered by domain extension *} + +text {* + A function @{text h'} is an extension of @{text h}, iff the graph of + @{text h} is a subset of the graph of @{text h'}. +*} + +lemma graph_extI: + "(\x. x \ H \ h x = h' x) \ H \ H' + \ graph H h \ graph H' h'" + unfolding graph_def by blast + +lemma graph_extD1 [dest?]: + "graph H h \ graph H' h' \ x \ H \ h x = h' x" + unfolding graph_def by blast + +lemma graph_extD2 [dest?]: + "graph H h \ graph H' h' \ H \ H'" + unfolding graph_def by blast + + +subsection {* Domain and function of a graph *} + +text {* + The inverse functions to @{text graph} are @{text domain} and @{text + funct}. 
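+  Informally, @{text "domain (graph F f) = F"} and @{text "funct (graph F f)"}
+  agrees with @{text f} on @{text F}; the precise statement proved below is
+  @{text "graph (domain g) (funct g) = g"} for single-valued @{text g}.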
+*} + +definition + "domain" :: "'a graph \ 'a set" where + "domain g = {x. \y. (x, y) \ g}" + +definition + funct :: "'a graph \ ('a \ real)" where + "funct g = (\x. (SOME y. (x, y) \ g))" + +text {* + The following lemma states that @{text g} is the graph of a function + if the relation induced by @{text g} is unique. +*} + +lemma graph_domain_funct: + assumes uniq: "\x y z. (x, y) \ g \ (x, z) \ g \ z = y" + shows "graph (domain g) (funct g) = g" + unfolding domain_def funct_def graph_def +proof auto (* FIXME !? *) + fix a b assume g: "(a, b) \ g" + from g show "(a, SOME y. (a, y) \ g) \ g" by (rule someI2) + from g show "\y. (a, y) \ g" .. + from g show "b = (SOME y. (a, y) \ g)" + proof (rule some_equality [symmetric]) + fix y assume "(a, y) \ g" + with g show "y = b" by (rule uniq) + qed +qed + + +subsection {* Norm-preserving extensions of a function *} + +text {* + Given a linear form @{text f} on the space @{text F} and a seminorm + @{text p} on @{text E}. The set of all linear extensions of @{text + f}, to superspaces @{text H} of @{text F}, which are bounded by + @{text p}, is defined as follows. +*} + +definition + norm_pres_extensions :: + "'a::{plus, minus, uminus, zero} set \ ('a \ real) \ 'a set \ ('a \ real) + \ 'a graph set" where + "norm_pres_extensions E p F f + = {g. \H h. g = graph H h + \ linearform H h + \ H \ E + \ F \ H + \ graph F f \ graph H h + \ (\x \ H. h x \ p x)}" + +lemma norm_pres_extensionE [elim]: + "g \ norm_pres_extensions E p F f + \ (\H h. g = graph H h \ linearform H h + \ H \ E \ F \ H \ graph F f \ graph H h + \ \x \ H. h x \ p x \ C) \ C" + unfolding norm_pres_extensions_def by blast + +lemma norm_pres_extensionI2 [intro]: + "linearform H h \ H \ E \ F \ H + \ graph F f \ graph H h \ \x \ H. h x \ p x + \ graph H h \ norm_pres_extensions E p F f" + unfolding norm_pres_extensions_def by blast + +lemma norm_pres_extensionI: (* FIXME ? *) + "\H h. g = graph H h + \ linearform H h + \ H \ E + \ F \ H + \ graph F f \ graph H h + \ (\x \ H. h x \ p x) \ g \ norm_pres_extensions E p F f" + unfolding norm_pres_extensions_def by blast + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/HahnBanach/HahnBanach.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/HahnBanach/HahnBanach.thy Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,509 @@ +(* Title: HOL/Real/HahnBanach/HahnBanach.thy + Author: Gertrud Bauer, TU Munich +*) + +header {* The Hahn-Banach Theorem *} + +theory HahnBanach +imports HahnBanachLemmas +begin + +text {* + We present the proof of two different versions of the Hahn-Banach + Theorem, closely following \cite[\S36]{Heuser:1986}. +*} + +subsection {* The Hahn-Banach Theorem for vector spaces *} + +text {* + \textbf{Hahn-Banach Theorem.} Let @{text F} be a subspace of a real + vector space @{text E}, let @{text p} be a semi-norm on @{text E}, + and @{text f} be a linear form defined on @{text F} such that @{text + f} is bounded by @{text p}, i.e. @{text "\x \ F. f x \ p x"}. Then + @{text f} can be extended to a linear form @{text h} on @{text E} + such that @{text h} is norm-preserving, i.e. @{text h} is also + bounded by @{text p}. + + \bigskip + \textbf{Proof Sketch.} + \begin{enumerate} + + \item Define @{text M} as the set of norm-preserving extensions of + @{text f} to subspaces of @{text E}. The linear forms in @{text M} + are ordered by domain extension. + + \item We show that every non-empty chain in @{text M} has an upper + bound in @{text M}. 
+ + \item With Zorn's Lemma we conclude that there is a maximal function + @{text g} in @{text M}. + + \item The domain @{text H} of @{text g} is the whole space @{text + E}, as shown by classical contradiction: + + \begin{itemize} + + \item Assuming @{text g} is not defined on whole @{text E}, it can + still be extended in a norm-preserving way to a super-space @{text + H'} of @{text H}. + + \item Thus @{text g} can not be maximal. Contradiction! + + \end{itemize} + \end{enumerate} +*} + +theorem HahnBanach: + assumes E: "vectorspace E" and "subspace F E" + and "seminorm E p" and "linearform F f" + assumes fp: "\x \ F. f x \ p x" + shows "\h. linearform E h \ (\x \ F. h x = f x) \ (\x \ E. h x \ p x)" + -- {* Let @{text E} be a vector space, @{text F} a subspace of @{text E}, @{text p} a seminorm on @{text E}, *} + -- {* and @{text f} a linear form on @{text F} such that @{text f} is bounded by @{text p}, *} + -- {* then @{text f} can be extended to a linear form @{text h} on @{text E} in a norm-preserving way. \skp *} +proof - + interpret vectorspace E by fact + interpret subspace F E by fact + interpret seminorm E p by fact + interpret linearform F f by fact + def M \ "norm_pres_extensions E p F f" + then have M: "M = \" by (simp only:) + from E have F: "vectorspace F" .. + note FE = `F \ E` + { + fix c assume cM: "c \ chain M" and ex: "\x. x \ c" + have "\c \ M" + -- {* Show that every non-empty chain @{text c} of @{text M} has an upper bound in @{text M}: *} + -- {* @{text "\c"} is greater than any element of the chain @{text c}, so it suffices to show @{text "\c \ M"}. *} + unfolding M_def + proof (rule norm_pres_extensionI) + let ?H = "domain (\c)" + let ?h = "funct (\c)" + + have a: "graph ?H ?h = \c" + proof (rule graph_domain_funct) + fix x y z assume "(x, y) \ \c" and "(x, z) \ \c" + with M_def cM show "z = y" by (rule sup_definite) + qed + moreover from M cM a have "linearform ?H ?h" + by (rule sup_lf) + moreover from a M cM ex FE E have "?H \ E" + by (rule sup_subE) + moreover from a M cM ex FE have "F \ ?H" + by (rule sup_supF) + moreover from a M cM ex have "graph F f \ graph ?H ?h" + by (rule sup_ext) + moreover from a M cM have "\x \ ?H. ?h x \ p x" + by (rule sup_norm_pres) + ultimately show "\H h. \c = graph H h + \ linearform H h + \ H \ E + \ F \ H + \ graph F f \ graph H h + \ (\x \ H. h x \ p x)" by blast + qed + } + then have "\g \ M. \x \ M. g \ x \ g = x" + -- {* With Zorn's Lemma we can conclude that there is a maximal element in @{text M}. \skp *} + proof (rule Zorn's_Lemma) + -- {* We show that @{text M} is non-empty: *} + show "graph F f \ M" + unfolding M_def + proof (rule norm_pres_extensionI2) + show "linearform F f" by fact + show "F \ E" by fact + from F show "F \ F" by (rule vectorspace.subspace_refl) + show "graph F f \ graph F f" .. + show "\x\F. f x \ p x" by fact + qed + qed + then obtain g where gM: "g \ M" and gx: "\x \ M. g \ x \ g = x" + by blast + from gM obtain H h where + g_rep: "g = graph H h" + and linearform: "linearform H h" + and HE: "H \ E" and FH: "F \ H" + and graphs: "graph F f \ graph H h" + and hp: "\x \ H. h x \ p x" unfolding M_def .. + -- {* @{text g} is a norm-preserving extension of @{text f}, in other words: *} + -- {* @{text g} is the graph of some linear form @{text h} defined on a subspace @{text H} of @{text E}, *} + -- {* and @{text h} is an extension of @{text f} that is again bounded by @{text p}. 
\skp *} + from HE E have H: "vectorspace H" + by (rule subspace.vectorspace) + + have HE_eq: "H = E" + -- {* We show that @{text h} is defined on whole @{text E} by classical contradiction. \skp *} + proof (rule classical) + assume neq: "H \ E" + -- {* Assume @{text h} is not defined on whole @{text E}. Then show that @{text h} can be extended *} + -- {* in a norm-preserving way to a function @{text h'} with the graph @{text g'}. \skp *} + have "\g' \ M. g \ g' \ g \ g'" + proof - + from HE have "H \ E" .. + with neq obtain x' where x'E: "x' \ E" and "x' \ H" by blast + obtain x': "x' \ 0" + proof + show "x' \ 0" + proof + assume "x' = 0" + with H have "x' \ H" by (simp only: vectorspace.zero) + with `x' \ H` show False by contradiction + qed + qed + + def H' \ "H + lin x'" + -- {* Define @{text H'} as the direct sum of @{text H} and the linear closure of @{text x'}. \skp *} + have HH': "H \ H'" + proof (unfold H'_def) + from x'E have "vectorspace (lin x')" .. + with H show "H \ H + lin x'" .. + qed + + obtain xi where + xi: "\y \ H. - p (y + x') - h y \ xi + \ xi \ p (y + x') - h y" + -- {* Pick a real number @{text \} that fulfills certain inequations; this will *} + -- {* be used to establish that @{text h'} is a norm-preserving extension of @{text h}. + \label{ex-xi-use}\skp *} + proof - + from H have "\xi. \y \ H. - p (y + x') - h y \ xi + \ xi \ p (y + x') - h y" + proof (rule ex_xi) + fix u v assume u: "u \ H" and v: "v \ H" + with HE have uE: "u \ E" and vE: "v \ E" by auto + from H u v linearform have "h v - h u = h (v - u)" + by (simp add: linearform.diff) + also from hp and H u v have "\ \ p (v - u)" + by (simp only: vectorspace.diff_closed) + also from x'E uE vE have "v - u = x' + - x' + v + - u" + by (simp add: diff_eq1) + also from x'E uE vE have "\ = v + x' + - (u + x')" + by (simp add: add_ac) + also from x'E uE vE have "\ = (v + x') - (u + x')" + by (simp add: diff_eq1) + also from x'E uE vE E have "p \ \ p (v + x') + p (u + x')" + by (simp add: diff_subadditive) + finally have "h v - h u \ p (v + x') + p (u + x')" . + then show "- p (u + x') - h u \ p (v + x') - h v" by simp + qed + then show thesis by (blast intro: that) + qed + + def h' \ "\x. let (y, a) = + SOME (y, a). x = y + a \ x' \ y \ H in h y + a * xi" + -- {* Define the extension @{text h'} of @{text h} to @{text H'} using @{text \}. \skp *} + + have "g \ graph H' h' \ g \ graph H' h'" + -- {* @{text h'} is an extension of @{text h} \dots \skp *} + proof + show "g \ graph H' h'" + proof - + have "graph H h \ graph H' h'" + proof (rule graph_extI) + fix t assume t: "t \ H" + from E HE t have "(SOME (y, a). t = y + a \ x' \ y \ H) = (t, 0)" + using `x' \ H` `x' \ E` `x' \ 0` by (rule decomp_H'_H) + with h'_def show "h t = h' t" by (simp add: Let_def) + next + from HH' show "H \ H'" .. + qed + with g_rep show ?thesis by (simp only:) + qed + + show "g \ graph H' h'" + proof - + have "graph H h \ graph H' h'" + proof + assume eq: "graph H h = graph H' h'" + have "x' \ H'" + unfolding H'_def + proof + from H show "0 \ H" by (rule vectorspace.zero) + from x'E show "x' \ lin x'" by (rule x_lin_x) + from x'E show "x' = 0 + x'" by simp + qed + then have "(x', h' x') \ graph H' h'" .. + with eq have "(x', h' x') \ graph H h" by (simp only:) + then have "x' \ H" .. + with `x' \ H` show False by contradiction + qed + with g_rep show ?thesis by simp + qed + qed + moreover have "graph H' h' \ M" + -- {* and @{text h'} is norm-preserving. 
\skp *} + proof (unfold M_def) + show "graph H' h' \ norm_pres_extensions E p F f" + proof (rule norm_pres_extensionI2) + show "linearform H' h'" + using h'_def H'_def HE linearform `x' \ H` `x' \ E` `x' \ 0` E + by (rule h'_lf) + show "H' \ E" + unfolding H'_def + proof + show "H \ E" by fact + show "vectorspace E" by fact + from x'E show "lin x' \ E" .. + qed + from H `F \ H` HH' show FH': "F \ H'" + by (rule vectorspace.subspace_trans) + show "graph F f \ graph H' h'" + proof (rule graph_extI) + fix x assume x: "x \ F" + with graphs have "f x = h x" .. + also have "\ = h x + 0 * xi" by simp + also have "\ = (let (y, a) = (x, 0) in h y + a * xi)" + by (simp add: Let_def) + also have "(x, 0) = + (SOME (y, a). x = y + a \ x' \ y \ H)" + using E HE + proof (rule decomp_H'_H [symmetric]) + from FH x show "x \ H" .. + from x' show "x' \ 0" . + show "x' \ H" by fact + show "x' \ E" by fact + qed + also have + "(let (y, a) = (SOME (y, a). x = y + a \ x' \ y \ H) + in h y + a * xi) = h' x" by (simp only: h'_def) + finally show "f x = h' x" . + next + from FH' show "F \ H'" .. + qed + show "\x \ H'. h' x \ p x" + using h'_def H'_def `x' \ H` `x' \ E` `x' \ 0` E HE + `seminorm E p` linearform and hp xi + by (rule h'_norm_pres) + qed + qed + ultimately show ?thesis .. + qed + then have "\ (\x \ M. g \ x \ g = x)" by simp + -- {* So the graph @{text g} of @{text h} cannot be maximal. Contradiction! \skp *} + with gx show "H = E" by contradiction + qed + + from HE_eq and linearform have "linearform E h" + by (simp only:) + moreover have "\x \ F. h x = f x" + proof + fix x assume "x \ F" + with graphs have "f x = h x" .. + then show "h x = f x" .. + qed + moreover from HE_eq and hp have "\x \ E. h x \ p x" + by (simp only:) + ultimately show ?thesis by blast +qed + + +subsection {* Alternative formulation *} + +text {* + The following alternative formulation of the Hahn-Banach + Theorem\label{abs-HahnBanach} uses the fact that for a real linear + form @{text f} and a seminorm @{text p} the following inequations + are equivalent:\footnote{This was shown in lemma @{thm [source] + abs_ineq_iff} (see page \pageref{abs-ineq-iff}).} + \begin{center} + \begin{tabular}{lll} + @{text "\x \ H. \h x\ \ p x"} & and & + @{text "\x \ H. h x \ p x"} \\ + \end{tabular} + \end{center} +*} + +theorem abs_HahnBanach: + assumes E: "vectorspace E" and FE: "subspace F E" + and lf: "linearform F f" and sn: "seminorm E p" + assumes fp: "\x \ F. \f x\ \ p x" + shows "\g. linearform E g + \ (\x \ F. g x = f x) + \ (\x \ E. \g x\ \ p x)" +proof - + interpret vectorspace E by fact + interpret subspace F E by fact + interpret linearform F f by fact + interpret seminorm E p by fact + have "\g. linearform E g \ (\x \ F. g x = f x) \ (\x \ E. g x \ p x)" + using E FE sn lf + proof (rule HahnBanach) + show "\x \ F. f x \ p x" + using FE E sn lf and fp by (rule abs_ineq_iff [THEN iffD1]) + qed + then obtain g where lg: "linearform E g" and *: "\x \ F. g x = f x" + and **: "\x \ E. g x \ p x" by blast + have "\x \ E. \g x\ \ p x" + using _ E sn lg ** + proof (rule abs_ineq_iff [THEN iffD2]) + show "E \ E" .. + qed + with lg * show ?thesis by blast +qed + + +subsection {* The Hahn-Banach Theorem for normed spaces *} + +text {* + Every continuous linear form @{text f} on a subspace @{text F} of a + norm space @{text E}, can be extended to a continuous linear form + @{text g} on @{text E} such that @{text "\f\ = \g\"}. +*} + +theorem norm_HahnBanach: + fixes V and norm ("\_\") + fixes B defines "\V f. B V f \ {0} \ {\f x\ / \x\ | x. 
x \ 0 \ x \ V}" + fixes fn_norm ("\_\\_" [0, 1000] 999) + defines "\V f. \f\\V \ \(B V f)" + assumes E_norm: "normed_vectorspace E norm" and FE: "subspace F E" + and linearform: "linearform F f" and "continuous F norm f" + shows "\g. linearform E g + \ continuous E norm g + \ (\x \ F. g x = f x) + \ \g\\E = \f\\F" +proof - + interpret normed_vectorspace E norm by fact + interpret normed_vectorspace_with_fn_norm E norm B fn_norm + by (auto simp: B_def fn_norm_def) intro_locales + interpret subspace F E by fact + interpret linearform F f by fact + interpret continuous F norm f by fact + have E: "vectorspace E" by intro_locales + have F: "vectorspace F" by rule intro_locales + have F_norm: "normed_vectorspace F norm" + using FE E_norm by (rule subspace_normed_vs) + have ge_zero: "0 \ \f\\F" + by (rule normed_vectorspace_with_fn_norm.fn_norm_ge_zero + [OF normed_vectorspace_with_fn_norm.intro, + OF F_norm `continuous F norm f` , folded B_def fn_norm_def]) + txt {* We define a function @{text p} on @{text E} as follows: + @{text "p x = \f\ \ \x\"} *} + def p \ "\x. \f\\F * \x\" + + txt {* @{text p} is a seminorm on @{text E}: *} + have q: "seminorm E p" + proof + fix x y a assume x: "x \ E" and y: "y \ E" + + txt {* @{text p} is positive definite: *} + have "0 \ \f\\F" by (rule ge_zero) + moreover from x have "0 \ \x\" .. + ultimately show "0 \ p x" + by (simp add: p_def zero_le_mult_iff) + + txt {* @{text p} is absolutely homogenous: *} + + show "p (a \ x) = \a\ * p x" + proof - + have "p (a \ x) = \f\\F * \a \ x\" by (simp only: p_def) + also from x have "\a \ x\ = \a\ * \x\" by (rule abs_homogenous) + also have "\f\\F * (\a\ * \x\) = \a\ * (\f\\F * \x\)" by simp + also have "\ = \a\ * p x" by (simp only: p_def) + finally show ?thesis . + qed + + txt {* Furthermore, @{text p} is subadditive: *} + + show "p (x + y) \ p x + p y" + proof - + have "p (x + y) = \f\\F * \x + y\" by (simp only: p_def) + also have a: "0 \ \f\\F" by (rule ge_zero) + from x y have "\x + y\ \ \x\ + \y\" .. + with a have " \f\\F * \x + y\ \ \f\\F * (\x\ + \y\)" + by (simp add: mult_left_mono) + also have "\ = \f\\F * \x\ + \f\\F * \y\" by (simp only: right_distrib) + also have "\ = p x + p y" by (simp only: p_def) + finally show ?thesis . + qed + qed + + txt {* @{text f} is bounded by @{text p}. *} + + have "\x \ F. \f x\ \ p x" + proof + fix x assume "x \ F" + with `continuous F norm f` and linearform + show "\f x\ \ p x" + unfolding p_def by (rule normed_vectorspace_with_fn_norm.fn_norm_le_cong + [OF normed_vectorspace_with_fn_norm.intro, + OF F_norm, folded B_def fn_norm_def]) + qed + + txt {* Using the fact that @{text p} is a seminorm and @{text f} is bounded + by @{text p} we can apply the Hahn-Banach Theorem for real vector + spaces. So @{text f} can be extended in a norm-preserving way to + some function @{text g} on the whole vector space @{text E}. *} + + with E FE linearform q obtain g where + linearformE: "linearform E g" + and a: "\x \ F. g x = f x" + and b: "\x \ E. \g x\ \ p x" + by (rule abs_HahnBanach [elim_format]) iprover + + txt {* We furthermore have to show that @{text g} is also continuous: *} + + have g_cont: "continuous E norm g" using linearformE + proof + fix x assume "x \ E" + with b show "\g x\ \ \f\\F * \x\" + by (simp only: p_def) + qed + + txt {* To complete the proof, we show that @{text "\g\ = \f\"}. *} + + have "\g\\E = \f\\F" + proof (rule order_antisym) + txt {* + First we show @{text "\g\ \ \f\"}. 
The function norm @{text + "\g\"} is defined as the smallest @{text "c \ \"} such that + \begin{center} + \begin{tabular}{l} + @{text "\x \ E. \g x\ \ c \ \x\"} + \end{tabular} + \end{center} + \noindent Furthermore holds + \begin{center} + \begin{tabular}{l} + @{text "\x \ E. \g x\ \ \f\ \ \x\"} + \end{tabular} + \end{center} + *} + + have "\x \ E. \g x\ \ \f\\F * \x\" + proof + fix x assume "x \ E" + with b show "\g x\ \ \f\\F * \x\" + by (simp only: p_def) + qed + from g_cont this ge_zero + show "\g\\E \ \f\\F" + by (rule fn_norm_least [of g, folded B_def fn_norm_def]) + + txt {* The other direction is achieved by a similar argument. *} + + show "\f\\F \ \g\\E" + proof (rule normed_vectorspace_with_fn_norm.fn_norm_least + [OF normed_vectorspace_with_fn_norm.intro, + OF F_norm, folded B_def fn_norm_def]) + show "\x \ F. \f x\ \ \g\\E * \x\" + proof + fix x assume x: "x \ F" + from a x have "g x = f x" .. + then have "\f x\ = \g x\" by (simp only:) + also from g_cont + have "\ \ \g\\E * \x\" + proof (rule fn_norm_le_cong [of g, folded B_def fn_norm_def]) + from FE x show "x \ E" .. + qed + finally show "\f x\ \ \g\\E * \x\" . + qed + show "0 \ \g\\E" + using g_cont + by (rule fn_norm_ge_zero [of g, folded B_def fn_norm_def]) + show "continuous F norm f" by fact + qed + qed + with linearformE a g_cont show ?thesis by blast +qed + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/HahnBanach/HahnBanachExtLemmas.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/HahnBanach/HahnBanachExtLemmas.thy Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,280 @@ +(* Title: HOL/Real/HahnBanach/HahnBanachExtLemmas.thy + Author: Gertrud Bauer, TU Munich +*) + +header {* Extending non-maximal functions *} + +theory HahnBanachExtLemmas +imports FunctionNorm +begin + +text {* + In this section the following context is presumed. Let @{text E} be + a real vector space with a seminorm @{text q} on @{text E}. @{text + F} is a subspace of @{text E} and @{text f} a linear function on + @{text F}. We consider a subspace @{text H} of @{text E} that is a + superspace of @{text F} and a linear form @{text h} on @{text + H}. @{text H} is a not equal to @{text E} and @{text "x\<^sub>0"} is + an element in @{text "E - H"}. @{text H} is extended to the direct + sum @{text "H' = H + lin x\<^sub>0"}, so for any @{text "x \ H'"} + the decomposition of @{text "x = y + a \ x"} with @{text "y \ H"} is + unique. @{text h'} is defined on @{text H'} by @{text "h' x = h y + + a \ \"} for a certain @{text \}. + + Subsequently we show some properties of this extension @{text h'} of + @{text h}. + + \medskip This lemma will be used to show the existence of a linear + extension of @{text f} (see page \pageref{ex-xi-use}). It is a + consequence of the completeness of @{text \}. To show + \begin{center} + \begin{tabular}{l} + @{text "\\. \y \ F. a y \ \ \ \ \ b y"} + \end{tabular} + \end{center} + \noindent it suffices to show that + \begin{center} + \begin{tabular}{l} + @{text "\u \ F. \v \ F. a u \ b v"} + \end{tabular} + \end{center} +*} + +lemma ex_xi: + assumes "vectorspace F" + assumes r: "\u v. u \ F \ v \ F \ a u \ b v" + shows "\xi::real. \y \ F. a y \ xi \ xi \ b y" +proof - + interpret vectorspace F by fact + txt {* From the completeness of the reals follows: + The set @{text "S = {a u. u \ F}"} has a supremum, if it is + non-empty and has an upper bound. *} + + let ?S = "{a u | u. u \ F}" + have "\xi. lub ?S xi" + proof (rule real_complete) + have "a 0 \ ?S" by blast + then show "\X. X \ ?S" .. + have "\y \ ?S. 
y \ b 0" + proof + fix y assume y: "y \ ?S" + then obtain u where u: "u \ F" and y: "y = a u" by blast + from u and zero have "a u \ b 0" by (rule r) + with y show "y \ b 0" by (simp only:) + qed + then show "\u. \y \ ?S. y \ u" .. + qed + then obtain xi where xi: "lub ?S xi" .. + { + fix y assume "y \ F" + then have "a y \ ?S" by blast + with xi have "a y \ xi" by (rule lub.upper) + } moreover { + fix y assume y: "y \ F" + from xi have "xi \ b y" + proof (rule lub.least) + fix au assume "au \ ?S" + then obtain u where u: "u \ F" and au: "au = a u" by blast + from u y have "a u \ b y" by (rule r) + with au show "au \ b y" by (simp only:) + qed + } ultimately show "\xi. \y \ F. a y \ xi \ xi \ b y" by blast +qed + +text {* + \medskip The function @{text h'} is defined as a @{text "h' x = h y + + a \ \"} where @{text "x = y + a \ \"} is a linear extension of + @{text h} to @{text H'}. +*} + +lemma h'_lf: + assumes h'_def: "h' \ \x. let (y, a) = + SOME (y, a). x = y + a \ x0 \ y \ H in h y + a * xi" + and H'_def: "H' \ H + lin x0" + and HE: "H \ E" + assumes "linearform H h" + assumes x0: "x0 \ H" "x0 \ E" "x0 \ 0" + assumes E: "vectorspace E" + shows "linearform H' h'" +proof - + interpret linearform H h by fact + interpret vectorspace E by fact + show ?thesis + proof + note E = `vectorspace E` + have H': "vectorspace H'" + proof (unfold H'_def) + from `x0 \ E` + have "lin x0 \ E" .. + with HE show "vectorspace (H + lin x0)" using E .. + qed + { + fix x1 x2 assume x1: "x1 \ H'" and x2: "x2 \ H'" + show "h' (x1 + x2) = h' x1 + h' x2" + proof - + from H' x1 x2 have "x1 + x2 \ H'" + by (rule vectorspace.add_closed) + with x1 x2 obtain y y1 y2 a a1 a2 where + x1x2: "x1 + x2 = y + a \ x0" and y: "y \ H" + and x1_rep: "x1 = y1 + a1 \ x0" and y1: "y1 \ H" + and x2_rep: "x2 = y2 + a2 \ x0" and y2: "y2 \ H" + unfolding H'_def sum_def lin_def by blast + + have ya: "y1 + y2 = y \ a1 + a2 = a" using E HE _ y x0 + proof (rule decomp_H') txt_raw {* \label{decomp-H-use} *} + from HE y1 y2 show "y1 + y2 \ H" + by (rule subspace.add_closed) + from x0 and HE y y1 y2 + have "x0 \ E" "y \ E" "y1 \ E" "y2 \ E" by auto + with x1_rep x2_rep have "(y1 + y2) + (a1 + a2) \ x0 = x1 + x2" + by (simp add: add_ac add_mult_distrib2) + also note x1x2 + finally show "(y1 + y2) + (a1 + a2) \ x0 = y + a \ x0" . + qed + + from h'_def x1x2 E HE y x0 + have "h' (x1 + x2) = h y + a * xi" + by (rule h'_definite) + also have "\ = h (y1 + y2) + (a1 + a2) * xi" + by (simp only: ya) + also from y1 y2 have "h (y1 + y2) = h y1 + h y2" + by simp + also have "\ + (a1 + a2) * xi = (h y1 + a1 * xi) + (h y2 + a2 * xi)" + by (simp add: left_distrib) + also from h'_def x1_rep E HE y1 x0 + have "h y1 + a1 * xi = h' x1" + by (rule h'_definite [symmetric]) + also from h'_def x2_rep E HE y2 x0 + have "h y2 + a2 * xi = h' x2" + by (rule h'_definite [symmetric]) + finally show ?thesis . 
+ qed + next + fix x1 c assume x1: "x1 \ H'" + show "h' (c \ x1) = c * (h' x1)" + proof - + from H' x1 have ax1: "c \ x1 \ H'" + by (rule vectorspace.mult_closed) + with x1 obtain y a y1 a1 where + cx1_rep: "c \ x1 = y + a \ x0" and y: "y \ H" + and x1_rep: "x1 = y1 + a1 \ x0" and y1: "y1 \ H" + unfolding H'_def sum_def lin_def by blast + + have ya: "c \ y1 = y \ c * a1 = a" using E HE _ y x0 + proof (rule decomp_H') + from HE y1 show "c \ y1 \ H" + by (rule subspace.mult_closed) + from x0 and HE y y1 + have "x0 \ E" "y \ E" "y1 \ E" by auto + with x1_rep have "c \ y1 + (c * a1) \ x0 = c \ x1" + by (simp add: mult_assoc add_mult_distrib1) + also note cx1_rep + finally show "c \ y1 + (c * a1) \ x0 = y + a \ x0" . + qed + + from h'_def cx1_rep E HE y x0 have "h' (c \ x1) = h y + a * xi" + by (rule h'_definite) + also have "\ = h (c \ y1) + (c * a1) * xi" + by (simp only: ya) + also from y1 have "h (c \ y1) = c * h y1" + by simp + also have "\ + (c * a1) * xi = c * (h y1 + a1 * xi)" + by (simp only: right_distrib) + also from h'_def x1_rep E HE y1 x0 have "h y1 + a1 * xi = h' x1" + by (rule h'_definite [symmetric]) + finally show ?thesis . + qed + } + qed +qed + +text {* \medskip The linear extension @{text h'} of @{text h} + is bounded by the seminorm @{text p}. *} + +lemma h'_norm_pres: + assumes h'_def: "h' \ \x. let (y, a) = + SOME (y, a). x = y + a \ x0 \ y \ H in h y + a * xi" + and H'_def: "H' \ H + lin x0" + and x0: "x0 \ H" "x0 \ E" "x0 \ 0" + assumes E: "vectorspace E" and HE: "subspace H E" + and "seminorm E p" and "linearform H h" + assumes a: "\y \ H. h y \ p y" + and a': "\y \ H. - p (y + x0) - h y \ xi \ xi \ p (y + x0) - h y" + shows "\x \ H'. h' x \ p x" +proof - + interpret vectorspace E by fact + interpret subspace H E by fact + interpret seminorm E p by fact + interpret linearform H h by fact + show ?thesis + proof + fix x assume x': "x \ H'" + show "h' x \ p x" + proof - + from a' have a1: "\ya \ H. - p (ya + x0) - h ya \ xi" + and a2: "\ya \ H. xi \ p (ya + x0) - h ya" by auto + from x' obtain y a where + x_rep: "x = y + a \ x0" and y: "y \ H" + unfolding H'_def sum_def lin_def by blast + from y have y': "y \ E" .. + from y have ay: "inverse a \ y \ H" by simp + + from h'_def x_rep E HE y x0 have "h' x = h y + a * xi" + by (rule h'_definite) + also have "\ \ p (y + a \ x0)" + proof (rule linorder_cases) + assume z: "a = 0" + then have "h y + a * xi = h y" by simp + also from a y have "\ \ p y" .. + also from x0 y' z have "p y = p (y + a \ x0)" by simp + finally show ?thesis . + next + txt {* In the case @{text "a < 0"}, we use @{text "a\<^sub>1"} + with @{text ya} taken as @{text "y / a"}: *} + assume lz: "a < 0" then have nz: "a \ 0" by simp + from a1 ay + have "- p (inverse a \ y + x0) - h (inverse a \ y) \ xi" .. + with lz have "a * xi \ + a * (- p (inverse a \ y + x0) - h (inverse a \ y))" + by (simp add: mult_left_mono_neg order_less_imp_le) + + also have "\ = + - a * (p (inverse a \ y + x0)) - a * (h (inverse a \ y))" + by (simp add: right_diff_distrib) + also from lz x0 y' have "- a * (p (inverse a \ y + x0)) = + p (a \ (inverse a \ y + x0))" + by (simp add: abs_homogenous) + also from nz x0 y' have "\ = p (y + a \ x0)" + by (simp add: add_mult_distrib1 mult_assoc [symmetric]) + also from nz y have "a * (h (inverse a \ y)) = h y" + by simp + finally have "a * xi \ p (y + a \ x0) - h y" . 
+ then show ?thesis by simp + next + txt {* In the case @{text "a > 0"}, we use @{text "a\<^sub>2"} + with @{text ya} taken as @{text "y / a"}: *} + assume gz: "0 < a" then have nz: "a \ 0" by simp + from a2 ay + have "xi \ p (inverse a \ y + x0) - h (inverse a \ y)" .. + with gz have "a * xi \ + a * (p (inverse a \ y + x0) - h (inverse a \ y))" + by simp + also have "\ = a * p (inverse a \ y + x0) - a * h (inverse a \ y)" + by (simp add: right_diff_distrib) + also from gz x0 y' + have "a * p (inverse a \ y + x0) = p (a \ (inverse a \ y + x0))" + by (simp add: abs_homogenous) + also from nz x0 y' have "\ = p (y + a \ x0)" + by (simp add: add_mult_distrib1 mult_assoc [symmetric]) + also from nz y have "a * h (inverse a \ y) = h y" + by simp + finally have "a * xi \ p (y + a \ x0) - h y" . + then show ?thesis by simp + qed + also from x_rep have "\ = p x" by (simp only:) + finally show ?thesis . + qed + qed +qed + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/HahnBanach/HahnBanachLemmas.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/HahnBanach/HahnBanachLemmas.thy Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,4 @@ +(*<*) +theory HahnBanachLemmas imports HahnBanachSupLemmas HahnBanachExtLemmas begin +end +(*>*) \ No newline at end of file diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/HahnBanach/HahnBanachSupLemmas.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/HahnBanach/HahnBanachSupLemmas.thy Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,446 @@ +(* Title: HOL/Real/HahnBanach/HahnBanachSupLemmas.thy + ID: $Id$ + Author: Gertrud Bauer, TU Munich +*) + +header {* The supremum w.r.t.~the function order *} + +theory HahnBanachSupLemmas +imports FunctionNorm ZornLemma +begin + +text {* + This section contains some lemmas that will be used in the proof of + the Hahn-Banach Theorem. In this section the following context is + presumed. Let @{text E} be a real vector space with a seminorm + @{text p} on @{text E}. @{text F} is a subspace of @{text E} and + @{text f} a linear form on @{text F}. We consider a chain @{text c} + of norm-preserving extensions of @{text f}, such that @{text "\c = + graph H h"}. We will show some properties about the limit function + @{text h}, i.e.\ the supremum of the chain @{text c}. + + \medskip Let @{text c} be a chain of norm-preserving extensions of + the function @{text f} and let @{text "graph H h"} be the supremum + of @{text c}. Every element in @{text H} is member of one of the + elements of the chain. +*} +lemmas [dest?] = chainD +lemmas chainE2 [elim?] = chainD2 [elim_format, standard] + +lemma some_H'h't: + assumes M: "M = norm_pres_extensions E p F f" + and cM: "c \ chain M" + and u: "graph H h = \c" + and x: "x \ H" + shows "\H' h'. graph H' h' \ c + \ (x, h x) \ graph H' h' + \ linearform H' h' \ H' \ E + \ F \ H' \ graph F f \ graph H' h' + \ (\x \ H'. h' x \ p x)" +proof - + from x have "(x, h x) \ graph H h" .. + also from u have "\ = \c" . + finally obtain g where gc: "g \ c" and gh: "(x, h x) \ g" by blast + + from cM have "c \ M" .. + with gc have "g \ M" .. + also from M have "\ = norm_pres_extensions E p F f" . + finally obtain H' and h' where g: "g = graph H' h'" + and * : "linearform H' h'" "H' \ E" "F \ H'" + "graph F f \ graph H' h'" "\x \ H'. h' x \ p x" .. 
+ + from gc and g have "graph H' h' \ c" by (simp only:) + moreover from gh and g have "(x, h x) \ graph H' h'" by (simp only:) + ultimately show ?thesis using * by blast +qed + +text {* + \medskip Let @{text c} be a chain of norm-preserving extensions of + the function @{text f} and let @{text "graph H h"} be the supremum + of @{text c}. Every element in the domain @{text H} of the supremum + function is member of the domain @{text H'} of some function @{text + h'}, such that @{text h} extends @{text h'}. +*} + +lemma some_H'h': + assumes M: "M = norm_pres_extensions E p F f" + and cM: "c \ chain M" + and u: "graph H h = \c" + and x: "x \ H" + shows "\H' h'. x \ H' \ graph H' h' \ graph H h + \ linearform H' h' \ H' \ E \ F \ H' + \ graph F f \ graph H' h' \ (\x \ H'. h' x \ p x)" +proof - + from M cM u x obtain H' h' where + x_hx: "(x, h x) \ graph H' h'" + and c: "graph H' h' \ c" + and * : "linearform H' h'" "H' \ E" "F \ H'" + "graph F f \ graph H' h'" "\x \ H'. h' x \ p x" + by (rule some_H'h't [elim_format]) blast + from x_hx have "x \ H'" .. + moreover from cM u c have "graph H' h' \ graph H h" + by (simp only: chain_ball_Union_upper) + ultimately show ?thesis using * by blast +qed + +text {* + \medskip Any two elements @{text x} and @{text y} in the domain + @{text H} of the supremum function @{text h} are both in the domain + @{text H'} of some function @{text h'}, such that @{text h} extends + @{text h'}. +*} + +lemma some_H'h'2: + assumes M: "M = norm_pres_extensions E p F f" + and cM: "c \ chain M" + and u: "graph H h = \c" + and x: "x \ H" + and y: "y \ H" + shows "\H' h'. x \ H' \ y \ H' + \ graph H' h' \ graph H h + \ linearform H' h' \ H' \ E \ F \ H' + \ graph F f \ graph H' h' \ (\x \ H'. h' x \ p x)" +proof - + txt {* @{text y} is in the domain @{text H''} of some function @{text h''}, + such that @{text h} extends @{text h''}. *} + + from M cM u and y obtain H' h' where + y_hy: "(y, h y) \ graph H' h'" + and c': "graph H' h' \ c" + and * : + "linearform H' h'" "H' \ E" "F \ H'" + "graph F f \ graph H' h'" "\x \ H'. h' x \ p x" + by (rule some_H'h't [elim_format]) blast + + txt {* @{text x} is in the domain @{text H'} of some function @{text h'}, + such that @{text h} extends @{text h'}. *} + + from M cM u and x obtain H'' h'' where + x_hx: "(x, h x) \ graph H'' h''" + and c'': "graph H'' h'' \ c" + and ** : + "linearform H'' h''" "H'' \ E" "F \ H''" + "graph F f \ graph H'' h''" "\x \ H''. h'' x \ p x" + by (rule some_H'h't [elim_format]) blast + + txt {* Since both @{text h'} and @{text h''} are elements of the chain, + @{text h''} is an extension of @{text h'} or vice versa. Thus both + @{text x} and @{text y} are contained in the greater + one. \label{cases1}*} + + from cM c'' c' have "graph H'' h'' \ graph H' h' \ graph H' h' \ graph H'' h''" + (is "?case1 \ ?case2") .. + then show ?thesis + proof + assume ?case1 + have "(x, h x) \ graph H'' h''" by fact + also have "\ \ graph H' h'" by fact + finally have xh:"(x, h x) \ graph H' h'" . + then have "x \ H'" .. + moreover from y_hy have "y \ H'" .. + moreover from cM u and c' have "graph H' h' \ graph H h" + by (simp only: chain_ball_Union_upper) + ultimately show ?thesis using * by blast + next + assume ?case2 + from x_hx have "x \ H''" .. + moreover { + have "(y, h y) \ graph H' h'" by (rule y_hy) + also have "\ \ graph H'' h''" by fact + finally have "(y, h y) \ graph H'' h''" . + } then have "y \ H''" .. 
+ moreover from cM u and c'' have "graph H'' h'' \ graph H h" + by (simp only: chain_ball_Union_upper) + ultimately show ?thesis using ** by blast + qed +qed + +text {* + \medskip The relation induced by the graph of the supremum of a + chain @{text c} is definite, i.~e.~t is the graph of a function. +*} + +lemma sup_definite: + assumes M_def: "M \ norm_pres_extensions E p F f" + and cM: "c \ chain M" + and xy: "(x, y) \ \c" + and xz: "(x, z) \ \c" + shows "z = y" +proof - + from cM have c: "c \ M" .. + from xy obtain G1 where xy': "(x, y) \ G1" and G1: "G1 \ c" .. + from xz obtain G2 where xz': "(x, z) \ G2" and G2: "G2 \ c" .. + + from G1 c have "G1 \ M" .. + then obtain H1 h1 where G1_rep: "G1 = graph H1 h1" + unfolding M_def by blast + + from G2 c have "G2 \ M" .. + then obtain H2 h2 where G2_rep: "G2 = graph H2 h2" + unfolding M_def by blast + + txt {* @{text "G\<^sub>1"} is contained in @{text "G\<^sub>2"} + or vice versa, since both @{text "G\<^sub>1"} and @{text + "G\<^sub>2"} are members of @{text c}. \label{cases2}*} + + from cM G1 G2 have "G1 \ G2 \ G2 \ G1" (is "?case1 \ ?case2") .. + then show ?thesis + proof + assume ?case1 + with xy' G2_rep have "(x, y) \ graph H2 h2" by blast + then have "y = h2 x" .. + also + from xz' G2_rep have "(x, z) \ graph H2 h2" by (simp only:) + then have "z = h2 x" .. + finally show ?thesis . + next + assume ?case2 + with xz' G1_rep have "(x, z) \ graph H1 h1" by blast + then have "z = h1 x" .. + also + from xy' G1_rep have "(x, y) \ graph H1 h1" by (simp only:) + then have "y = h1 x" .. + finally show ?thesis .. + qed +qed + +text {* + \medskip The limit function @{text h} is linear. Every element + @{text x} in the domain of @{text h} is in the domain of a function + @{text h'} in the chain of norm preserving extensions. Furthermore, + @{text h} is an extension of @{text h'} so the function values of + @{text x} are identical for @{text h'} and @{text h}. Finally, the + function @{text h'} is linear by construction of @{text M}. +*} + +lemma sup_lf: + assumes M: "M = norm_pres_extensions E p F f" + and cM: "c \ chain M" + and u: "graph H h = \c" + shows "linearform H h" +proof + fix x y assume x: "x \ H" and y: "y \ H" + with M cM u obtain H' h' where + x': "x \ H'" and y': "y \ H'" + and b: "graph H' h' \ graph H h" + and linearform: "linearform H' h'" + and subspace: "H' \ E" + by (rule some_H'h'2 [elim_format]) blast + + show "h (x + y) = h x + h y" + proof - + from linearform x' y' have "h' (x + y) = h' x + h' y" + by (rule linearform.add) + also from b x' have "h' x = h x" .. + also from b y' have "h' y = h y" .. + also from subspace x' y' have "x + y \ H'" + by (rule subspace.add_closed) + with b have "h' (x + y) = h (x + y)" .. + finally show ?thesis . + qed +next + fix x a assume x: "x \ H" + with M cM u obtain H' h' where + x': "x \ H'" + and b: "graph H' h' \ graph H h" + and linearform: "linearform H' h'" + and subspace: "H' \ E" + by (rule some_H'h' [elim_format]) blast + + show "h (a \ x) = a * h x" + proof - + from linearform x' have "h' (a \ x) = a * h' x" + by (rule linearform.mult) + also from b x' have "h' x = h x" .. + also from subspace x' have "a \ x \ H'" + by (rule subspace.mult_closed) + with b have "h' (a \ x) = h (a \ x)" .. + finally show ?thesis . + qed +qed + +text {* + \medskip The limit of a non-empty chain of norm preserving + extensions of @{text f} is an extension of @{text f}, since every + element of the chain is an extension of @{text f} and the supremum + is an extension for every element of the chain. 
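+  In a compact form (a sketch in plain LaTeX; the precise statement is the
+  lemma below): for any element $x$ of the non-empty chain $c$,
+  \begin{center}
+  $\mathrm{graph}\, F\, f \;\subseteq\; x \;\subseteq\; \bigcup c \;=\; \mathrm{graph}\, H\, h$.
+  \end{center}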
+*} + +lemma sup_ext: + assumes graph: "graph H h = \c" + and M: "M = norm_pres_extensions E p F f" + and cM: "c \ chain M" + and ex: "\x. x \ c" + shows "graph F f \ graph H h" +proof - + from ex obtain x where xc: "x \ c" .. + from cM have "c \ M" .. + with xc have "x \ M" .. + with M have "x \ norm_pres_extensions E p F f" + by (simp only:) + then obtain G g where "x = graph G g" and "graph F f \ graph G g" .. + then have "graph F f \ x" by (simp only:) + also from xc have "\ \ \c" by blast + also from graph have "\ = graph H h" .. + finally show ?thesis . +qed + +text {* + \medskip The domain @{text H} of the limit function is a superspace + of @{text F}, since @{text F} is a subset of @{text H}. The + existence of the @{text 0} element in @{text F} and the closure + properties follow from the fact that @{text F} is a vector space. +*} + +lemma sup_supF: + assumes graph: "graph H h = \c" + and M: "M = norm_pres_extensions E p F f" + and cM: "c \ chain M" + and ex: "\x. x \ c" + and FE: "F \ E" + shows "F \ H" +proof + from FE show "F \ {}" by (rule subspace.non_empty) + from graph M cM ex have "graph F f \ graph H h" by (rule sup_ext) + then show "F \ H" .. + fix x y assume "x \ F" and "y \ F" + with FE show "x + y \ F" by (rule subspace.add_closed) +next + fix x a assume "x \ F" + with FE show "a \ x \ F" by (rule subspace.mult_closed) +qed + +text {* + \medskip The domain @{text H} of the limit function is a subspace of + @{text E}. +*} + +lemma sup_subE: + assumes graph: "graph H h = \c" + and M: "M = norm_pres_extensions E p F f" + and cM: "c \ chain M" + and ex: "\x. x \ c" + and FE: "F \ E" + and E: "vectorspace E" + shows "H \ E" +proof + show "H \ {}" + proof - + from FE E have "0 \ F" by (rule subspace.zero) + also from graph M cM ex FE have "F \ H" by (rule sup_supF) + then have "F \ H" .. + finally show ?thesis by blast + qed + show "H \ E" + proof + fix x assume "x \ H" + with M cM graph + obtain H' h' where x: "x \ H'" and H'E: "H' \ E" + by (rule some_H'h' [elim_format]) blast + from H'E have "H' \ E" .. + with x show "x \ E" .. + qed + fix x y assume x: "x \ H" and y: "y \ H" + show "x + y \ H" + proof - + from M cM graph x y obtain H' h' where + x': "x \ H'" and y': "y \ H'" and H'E: "H' \ E" + and graphs: "graph H' h' \ graph H h" + by (rule some_H'h'2 [elim_format]) blast + from H'E x' y' have "x + y \ H'" + by (rule subspace.add_closed) + also from graphs have "H' \ H" .. + finally show ?thesis . + qed +next + fix x a assume x: "x \ H" + show "a \ x \ H" + proof - + from M cM graph x + obtain H' h' where x': "x \ H'" and H'E: "H' \ E" + and graphs: "graph H' h' \ graph H h" + by (rule some_H'h' [elim_format]) blast + from H'E x' have "a \ x \ H'" by (rule subspace.mult_closed) + also from graphs have "H' \ H" .. + finally show ?thesis . + qed +qed + +text {* + \medskip The limit function is bounded by the norm @{text p} as + well, since all elements in the chain are bounded by @{text p}. +*} + +lemma sup_norm_pres: + assumes graph: "graph H h = \c" + and M: "M = norm_pres_extensions E p F f" + and cM: "c \ chain M" + shows "\x \ H. h x \ p x" +proof + fix x assume "x \ H" + with M cM graph obtain H' h' where x': "x \ H'" + and graphs: "graph H' h' \ graph H h" + and a: "\x \ H'. h' x \ p x" + by (rule some_H'h' [elim_format]) blast + from graphs x' have [symmetric]: "h' x = h x" .. + also from a x' have "h' x \ p x " .. + finally show "h x \ p x" . +qed + +text {* + \medskip The following lemma is a property of linear forms on real + vector spaces. 
It will be used for the lemma @{text abs_HahnBanach} + (see page \pageref{abs-HahnBanach}). \label{abs-ineq-iff} For real + vector spaces the following inequations are equivalent: + \begin{center} + \begin{tabular}{lll} + @{text "\x \ H. \h x\ \ p x"} & and & + @{text "\x \ H. h x \ p x"} \\ + \end{tabular} + \end{center} +*} + +lemma abs_ineq_iff: + assumes "subspace H E" and "vectorspace E" and "seminorm E p" + and "linearform H h" + shows "(\x \ H. \h x\ \ p x) = (\x \ H. h x \ p x)" (is "?L = ?R") +proof + interpret subspace H E by fact + interpret vectorspace E by fact + interpret seminorm E p by fact + interpret linearform H h by fact + have H: "vectorspace H" using `vectorspace E` .. + { + assume l: ?L + show ?R + proof + fix x assume x: "x \ H" + have "h x \ \h x\" by arith + also from l x have "\ \ p x" .. + finally show "h x \ p x" . + qed + next + assume r: ?R + show ?L + proof + fix x assume x: "x \ H" + show "\a b :: real. - a \ b \ b \ a \ \b\ \ a" + by arith + from `linearform H h` and H x + have "- h x = h (- x)" by (rule linearform.neg [symmetric]) + also + from H x have "- x \ H" by (rule vectorspace.neg_closed) + with r have "h (- x) \ p (- x)" .. + also have "\ = p x" + using `seminorm E p` `vectorspace E` + proof (rule seminorm.minus) + from x show "x \ E" .. + qed + finally have "- h x \ p x" . + then show "- p x \ h x" by simp + from r x show "h x \ p x" .. + qed + } +qed + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/HahnBanach/Linearform.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/HahnBanach/Linearform.thy Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,60 @@ +(* Title: HOL/Real/HahnBanach/Linearform.thy + Author: Gertrud Bauer, TU Munich +*) + +header {* Linearforms *} + +theory Linearform +imports VectorSpace +begin + +text {* + A \emph{linear form} is a function on a vector space into the reals + that is additive and multiplicative. +*} + +locale linearform = + fixes V :: "'a\{minus, plus, zero, uminus} set" and f + assumes add [iff]: "x \ V \ y \ V \ f (x + y) = f x + f y" + and mult [iff]: "x \ V \ f (a \ x) = a * f x" + +declare linearform.intro [intro?] + +lemma (in linearform) neg [iff]: + assumes "vectorspace V" + shows "x \ V \ f (- x) = - f x" +proof - + interpret vectorspace V by fact + assume x: "x \ V" + then have "f (- x) = f ((- 1) \ x)" by (simp add: negate_eq1) + also from x have "\ = (- 1) * (f x)" by (rule mult) + also from x have "\ = - (f x)" by simp + finally show ?thesis . +qed + +lemma (in linearform) diff [iff]: + assumes "vectorspace V" + shows "x \ V \ y \ V \ f (x - y) = f x - f y" +proof - + interpret vectorspace V by fact + assume x: "x \ V" and y: "y \ V" + then have "x - y = x + - y" by (rule diff_eq1) + also have "f \ = f x + f (- y)" by (rule add) (simp_all add: x y) + also have "f (- y) = - f y" using `vectorspace V` y by (rule neg) + finally show ?thesis by simp +qed + +text {* Every linear form yields @{text 0} for the @{text 0} vector. *} + +lemma (in linearform) zero [iff]: + assumes "vectorspace V" + shows "f 0 = 0" +proof - + interpret vectorspace V by fact + have "f 0 = f (0 - 0)" by simp + also have "\ = f 0 - f 0" using `vectorspace V` by (rule diff) simp_all + also have "\ = 0" by simp + finally show ?thesis . 
+qed + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/HahnBanach/NormedSpace.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/HahnBanach/NormedSpace.thy Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,117 @@ +(* Title: HOL/Real/HahnBanach/NormedSpace.thy + Author: Gertrud Bauer, TU Munich +*) + +header {* Normed vector spaces *} + +theory NormedSpace +imports Subspace +begin + +subsection {* Quasinorms *} + +text {* + A \emph{seminorm} @{text "\\\"} is a function on a real vector space + into the reals that has the following properties: it is positive + definite, absolute homogenous and subadditive. +*} + +locale norm_syntax = + fixes norm :: "'a \ real" ("\_\") + +locale seminorm = var_V + norm_syntax + + constrains V :: "'a\{minus, plus, zero, uminus} set" + assumes ge_zero [iff?]: "x \ V \ 0 \ \x\" + and abs_homogenous [iff?]: "x \ V \ \a \ x\ = \a\ * \x\" + and subadditive [iff?]: "x \ V \ y \ V \ \x + y\ \ \x\ + \y\" + +declare seminorm.intro [intro?] + +lemma (in seminorm) diff_subadditive: + assumes "vectorspace V" + shows "x \ V \ y \ V \ \x - y\ \ \x\ + \y\" +proof - + interpret vectorspace V by fact + assume x: "x \ V" and y: "y \ V" + then have "x - y = x + - 1 \ y" + by (simp add: diff_eq2 negate_eq2a) + also from x y have "\\\ \ \x\ + \- 1 \ y\" + by (simp add: subadditive) + also from y have "\- 1 \ y\ = \- 1\ * \y\" + by (rule abs_homogenous) + also have "\ = \y\" by simp + finally show ?thesis . +qed + +lemma (in seminorm) minus: + assumes "vectorspace V" + shows "x \ V \ \- x\ = \x\" +proof - + interpret vectorspace V by fact + assume x: "x \ V" + then have "- x = - 1 \ x" by (simp only: negate_eq1) + also from x have "\\\ = \- 1\ * \x\" + by (rule abs_homogenous) + also have "\ = \x\" by simp + finally show ?thesis . +qed + + +subsection {* Norms *} + +text {* + A \emph{norm} @{text "\\\"} is a seminorm that maps only the + @{text 0} vector to @{text 0}. +*} + +locale norm = seminorm + + assumes zero_iff [iff]: "x \ V \ (\x\ = 0) = (x = 0)" + + +subsection {* Normed vector spaces *} + +text {* + A vector space together with a norm is called a \emph{normed + space}. +*} + +locale normed_vectorspace = vectorspace + norm + +declare normed_vectorspace.intro [intro?] + +lemma (in normed_vectorspace) gt_zero [intro?]: + "x \ V \ x \ 0 \ 0 < \x\" +proof - + assume x: "x \ V" and neq: "x \ 0" + from x have "0 \ \x\" .. + also have [symmetric]: "\ \ 0" + proof + assume "\x\ = 0" + with x have "x = 0" by simp + with neq show False by contradiction + qed + finally show ?thesis . +qed + +text {* + Any subspace of a normed vector space is again a normed vectorspace. +*} + +lemma subspace_normed_vs [intro?]: + fixes F E norm + assumes "subspace F E" "normed_vectorspace E norm" + shows "normed_vectorspace F norm" +proof - + interpret subspace F E by fact + interpret normed_vectorspace E norm by fact + show ?thesis + proof + show "vectorspace F" by (rule vectorspace) unfold_locales + next + have "NormedSpace.norm E norm" .. + with subset show "NormedSpace.norm F norm" + by (simp add: norm_def seminorm_def norm_axioms_def) + qed +qed + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/HahnBanach/README.html --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/HahnBanach/README.html Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,38 @@ + + + + + + + + + HOL/Real/HahnBanach/README + + + + +

The Hahn-Banach Theorem for Real Vector Spaces (Isabelle/Isar)

+ +Author: Gertrud Bauer, Technische Universität München

+ +This directory contains the proof of the Hahn-Banach theorem for real vector spaces, +following H. Heuser, Funktionalanalysis, pp. 228-232. +The Hahn-Banach theorem is one of the fundamental theorems of functional analysis. +It is a consequence of Zorn's lemma.

+ +Two different formulations of the theorem are presented: one for general real vector spaces, +and its application to normed vector spaces.

+ +The theorem states that every continuous linear form, defined on arbitrary subspaces +(not only one-dimensional subspaces), can be extended to a continuous linear form on +the whole vector space. + + +
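+ +In symbols (a rough sketch; see HahnBanach.thy for the precise statement):
+for a subspace F of a normed vector space E and a continuous linear form f on F,
+the normed-space version of the theorem provides a linear form g with
+\[
+  g \text{ linear and continuous on } E, \qquad
+  \forall x \in F.\; g\,x = f\,x, \qquad
+  \lVert g \rVert = \lVert f \rVert .
+\]
+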


+ +
+bauerg@in.tum.de +
+ + + diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/HahnBanach/ROOT.ML --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/HahnBanach/ROOT.ML Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,8 @@ +(* Title: HOL/Real/HahnBanach/ROOT.ML + ID: $Id$ + Author: Gertrud Bauer, TU Munich + +The Hahn-Banach theorem for real vector spaces (Isabelle/Isar). +*) + +time_use_thy "HahnBanach"; diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/HahnBanach/Subspace.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/HahnBanach/Subspace.thy Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,513 @@ +(* Title: HOL/Real/HahnBanach/Subspace.thy + Author: Gertrud Bauer, TU Munich +*) + +header {* Subspaces *} + +theory Subspace +imports VectorSpace +begin + +subsection {* Definition *} + +text {* + A non-empty subset @{text U} of a vector space @{text V} is a + \emph{subspace} of @{text V}, iff @{text U} is closed under addition + and scalar multiplication. +*} + +locale subspace = + fixes U :: "'a\{minus, plus, zero, uminus} set" and V + assumes non_empty [iff, intro]: "U \ {}" + and subset [iff]: "U \ V" + and add_closed [iff]: "x \ U \ y \ U \ x + y \ U" + and mult_closed [iff]: "x \ U \ a \ x \ U" + +notation (symbols) + subspace (infix "\" 50) + +declare vectorspace.intro [intro?] subspace.intro [intro?] + +lemma subspace_subset [elim]: "U \ V \ U \ V" + by (rule subspace.subset) + +lemma (in subspace) subsetD [iff]: "x \ U \ x \ V" + using subset by blast + +lemma subspaceD [elim]: "U \ V \ x \ U \ x \ V" + by (rule subspace.subsetD) + +lemma rev_subspaceD [elim?]: "x \ U \ U \ V \ x \ V" + by (rule subspace.subsetD) + +lemma (in subspace) diff_closed [iff]: + assumes "vectorspace V" + assumes x: "x \ U" and y: "y \ U" + shows "x - y \ U" +proof - + interpret vectorspace V by fact + from x y show ?thesis by (simp add: diff_eq1 negate_eq1) +qed + +text {* + \medskip Similar as for linear spaces, the existence of the zero + element in every subspace follows from the non-emptiness of the + carrier set and by vector space laws. +*} + +lemma (in subspace) zero [intro]: + assumes "vectorspace V" + shows "0 \ U" +proof - + interpret V!: vectorspace V by fact + have "U \ {}" by (rule non_empty) + then obtain x where x: "x \ U" by blast + then have "x \ V" .. then have "0 = x - x" by simp + also from `vectorspace V` x x have "\ \ U" by (rule diff_closed) + finally show ?thesis . +qed + +lemma (in subspace) neg_closed [iff]: + assumes "vectorspace V" + assumes x: "x \ U" + shows "- x \ U" +proof - + interpret vectorspace V by fact + from x show ?thesis by (simp add: negate_eq1) +qed + +text {* \medskip Further derived laws: every subspace is a vector space. *} + +lemma (in subspace) vectorspace [iff]: + assumes "vectorspace V" + shows "vectorspace U" +proof - + interpret vectorspace V by fact + show ?thesis + proof + show "U \ {}" .. 
+ fix x y z assume x: "x \ U" and y: "y \ U" and z: "z \ U" + fix a b :: real + from x y show "x + y \ U" by simp + from x show "a \ x \ U" by simp + from x y z show "(x + y) + z = x + (y + z)" by (simp add: add_ac) + from x y show "x + y = y + x" by (simp add: add_ac) + from x show "x - x = 0" by simp + from x show "0 + x = x" by simp + from x y show "a \ (x + y) = a \ x + a \ y" by (simp add: distrib) + from x show "(a + b) \ x = a \ x + b \ x" by (simp add: distrib) + from x show "(a * b) \ x = a \ b \ x" by (simp add: mult_assoc) + from x show "1 \ x = x" by simp + from x show "- x = - 1 \ x" by (simp add: negate_eq1) + from x y show "x - y = x + - y" by (simp add: diff_eq1) + qed +qed + + +text {* The subspace relation is reflexive. *} + +lemma (in vectorspace) subspace_refl [intro]: "V \ V" +proof + show "V \ {}" .. + show "V \ V" .. + fix x y assume x: "x \ V" and y: "y \ V" + fix a :: real + from x y show "x + y \ V" by simp + from x show "a \ x \ V" by simp +qed + +text {* The subspace relation is transitive. *} + +lemma (in vectorspace) subspace_trans [trans]: + "U \ V \ V \ W \ U \ W" +proof + assume uv: "U \ V" and vw: "V \ W" + from uv show "U \ {}" by (rule subspace.non_empty) + show "U \ W" + proof - + from uv have "U \ V" by (rule subspace.subset) + also from vw have "V \ W" by (rule subspace.subset) + finally show ?thesis . + qed + fix x y assume x: "x \ U" and y: "y \ U" + from uv and x y show "x + y \ U" by (rule subspace.add_closed) + from uv and x show "\a. a \ x \ U" by (rule subspace.mult_closed) +qed + + +subsection {* Linear closure *} + +text {* + The \emph{linear closure} of a vector @{text x} is the set of all + scalar multiples of @{text x}. +*} + +definition + lin :: "('a::{minus, plus, zero}) \ 'a set" where + "lin x = {a \ x | a. True}" + +lemma linI [intro]: "y = a \ x \ y \ lin x" + unfolding lin_def by blast + +lemma linI' [iff]: "a \ x \ lin x" + unfolding lin_def by blast + +lemma linE [elim]: "x \ lin v \ (\a::real. x = a \ v \ C) \ C" + unfolding lin_def by blast + + +text {* Every vector is contained in its linear closure. *} + +lemma (in vectorspace) x_lin_x [iff]: "x \ V \ x \ lin x" +proof - + assume "x \ V" + then have "x = 1 \ x" by simp + also have "\ \ lin x" .. + finally show ?thesis . +qed + +lemma (in vectorspace) "0_lin_x" [iff]: "x \ V \ 0 \ lin x" +proof + assume "x \ V" + then show "0 = 0 \ x" by simp +qed + +text {* Any linear closure is a subspace. *} + +lemma (in vectorspace) lin_subspace [intro]: + "x \ V \ lin x \ V" +proof + assume x: "x \ V" + then show "lin x \ {}" by (auto simp add: x_lin_x) + show "lin x \ V" + proof + fix x' assume "x' \ lin x" + then obtain a where "x' = a \ x" .. + with x show "x' \ V" by simp + qed + fix x' x'' assume x': "x' \ lin x" and x'': "x'' \ lin x" + show "x' + x'' \ lin x" + proof - + from x' obtain a' where "x' = a' \ x" .. + moreover from x'' obtain a'' where "x'' = a'' \ x" .. + ultimately have "x' + x'' = (a' + a'') \ x" + using x by (simp add: distrib) + also have "\ \ lin x" .. + finally show ?thesis . + qed + fix a :: real + show "a \ x' \ lin x" + proof - + from x' obtain a' where "x' = a' \ x" .. + with x have "a \ x' = (a * a') \ x" by (simp add: mult_assoc) + also have "\ \ lin x" .. + finally show ?thesis . + qed +qed + + +text {* Any linear closure is a vector space. 
*} + +lemma (in vectorspace) lin_vectorspace [intro]: + assumes "x \ V" + shows "vectorspace (lin x)" +proof - + from `x \ V` have "subspace (lin x) V" + by (rule lin_subspace) + from this and vectorspace_axioms show ?thesis + by (rule subspace.vectorspace) +qed + + +subsection {* Sum of two vectorspaces *} + +text {* + The \emph{sum} of two vectorspaces @{text U} and @{text V} is the + set of all sums of elements from @{text U} and @{text V}. +*} + +instantiation "fun" :: (type, type) plus +begin + +definition + sum_def: "plus_fun U V = {u + v | u v. u \ U \ v \ V}" (* FIXME not fully general!? *) + +instance .. + +end + +lemma sumE [elim]: + "x \ U + V \ (\u v. x = u + v \ u \ U \ v \ V \ C) \ C" + unfolding sum_def by blast + +lemma sumI [intro]: + "u \ U \ v \ V \ x = u + v \ x \ U + V" + unfolding sum_def by blast + +lemma sumI' [intro]: + "u \ U \ v \ V \ u + v \ U + V" + unfolding sum_def by blast + +text {* @{text U} is a subspace of @{text "U + V"}. *} + +lemma subspace_sum1 [iff]: + assumes "vectorspace U" "vectorspace V" + shows "U \ U + V" +proof - + interpret vectorspace U by fact + interpret vectorspace V by fact + show ?thesis + proof + show "U \ {}" .. + show "U \ U + V" + proof + fix x assume x: "x \ U" + moreover have "0 \ V" .. + ultimately have "x + 0 \ U + V" .. + with x show "x \ U + V" by simp + qed + fix x y assume x: "x \ U" and "y \ U" + then show "x + y \ U" by simp + from x show "\a. a \ x \ U" by simp + qed +qed + +text {* The sum of two subspaces is again a subspace. *} + +lemma sum_subspace [intro?]: + assumes "subspace U E" "vectorspace E" "subspace V E" + shows "U + V \ E" +proof - + interpret subspace U E by fact + interpret vectorspace E by fact + interpret subspace V E by fact + show ?thesis + proof + have "0 \ U + V" + proof + show "0 \ U" using `vectorspace E` .. + show "0 \ V" using `vectorspace E` .. + show "(0::'a) = 0 + 0" by simp + qed + then show "U + V \ {}" by blast + show "U + V \ E" + proof + fix x assume "x \ U + V" + then obtain u v where "x = u + v" and + "u \ U" and "v \ V" .. + then show "x \ E" by simp + qed + fix x y assume x: "x \ U + V" and y: "y \ U + V" + show "x + y \ U + V" + proof - + from x obtain ux vx where "x = ux + vx" and "ux \ U" and "vx \ V" .. + moreover + from y obtain uy vy where "y = uy + vy" and "uy \ U" and "vy \ V" .. + ultimately + have "ux + uy \ U" + and "vx + vy \ V" + and "x + y = (ux + uy) + (vx + vy)" + using x y by (simp_all add: add_ac) + then show ?thesis .. + qed + fix a show "a \ x \ U + V" + proof - + from x obtain u v where "x = u + v" and "u \ U" and "v \ V" .. + then have "a \ u \ U" and "a \ v \ V" + and "a \ x = (a \ u) + (a \ v)" by (simp_all add: distrib) + then show ?thesis .. + qed + qed +qed + +text{* The sum of two subspaces is a vectorspace. *} + +lemma sum_vs [intro?]: + "U \ E \ V \ E \ vectorspace E \ vectorspace (U + V)" + by (rule subspace.vectorspace) (rule sum_subspace) + + +subsection {* Direct sums *} + +text {* + The sum of @{text U} and @{text V} is called \emph{direct}, iff the + zero element is the only common element of @{text U} and @{text + V}. For every element @{text x} of the direct sum of @{text U} and + @{text V} the decomposition in @{text "x = u + v"} with + @{text "u \ U"} and @{text "v \ V"} is unique. 
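+  A sketch of why this decomposition is unique (made precise in the lemma
+  below, in plain LaTeX rather than antiquotations): if $u_1 + v_1 = u_2 + v_2$
+  with $u_1, u_2 \in U$ and $v_1, v_2 \in V$, then
+  \begin{center}
+  $u_1 - u_2 \;=\; v_2 - v_1 \;\in\; U \cap V \;=\; \{0\}$,
+  \end{center}
+  so $u_1 = u_2$ and $v_1 = v_2$.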
+*} + +lemma decomp: + assumes "vectorspace E" "subspace U E" "subspace V E" + assumes direct: "U \ V = {0}" + and u1: "u1 \ U" and u2: "u2 \ U" + and v1: "v1 \ V" and v2: "v2 \ V" + and sum: "u1 + v1 = u2 + v2" + shows "u1 = u2 \ v1 = v2" +proof - + interpret vectorspace E by fact + interpret subspace U E by fact + interpret subspace V E by fact + show ?thesis + proof + have U: "vectorspace U" (* FIXME: use interpret *) + using `subspace U E` `vectorspace E` by (rule subspace.vectorspace) + have V: "vectorspace V" + using `subspace V E` `vectorspace E` by (rule subspace.vectorspace) + from u1 u2 v1 v2 and sum have eq: "u1 - u2 = v2 - v1" + by (simp add: add_diff_swap) + from u1 u2 have u: "u1 - u2 \ U" + by (rule vectorspace.diff_closed [OF U]) + with eq have v': "v2 - v1 \ U" by (simp only:) + from v2 v1 have v: "v2 - v1 \ V" + by (rule vectorspace.diff_closed [OF V]) + with eq have u': " u1 - u2 \ V" by (simp only:) + + show "u1 = u2" + proof (rule add_minus_eq) + from u1 show "u1 \ E" .. + from u2 show "u2 \ E" .. + from u u' and direct show "u1 - u2 = 0" by blast + qed + show "v1 = v2" + proof (rule add_minus_eq [symmetric]) + from v1 show "v1 \ E" .. + from v2 show "v2 \ E" .. + from v v' and direct show "v2 - v1 = 0" by blast + qed + qed +qed + +text {* + An application of the previous lemma will be used in the proof of + the Hahn-Banach Theorem (see page \pageref{decomp-H-use}): for any + element @{text "y + a \ x\<^sub>0"} of the direct sum of a + vectorspace @{text H} and the linear closure of @{text "x\<^sub>0"} + the components @{text "y \ H"} and @{text a} are uniquely + determined. +*} + +lemma decomp_H': + assumes "vectorspace E" "subspace H E" + assumes y1: "y1 \ H" and y2: "y2 \ H" + and x': "x' \ H" "x' \ E" "x' \ 0" + and eq: "y1 + a1 \ x' = y2 + a2 \ x'" + shows "y1 = y2 \ a1 = a2" +proof - + interpret vectorspace E by fact + interpret subspace H E by fact + show ?thesis + proof + have c: "y1 = y2 \ a1 \ x' = a2 \ x'" + proof (rule decomp) + show "a1 \ x' \ lin x'" .. + show "a2 \ x' \ lin x'" .. + show "H \ lin x' = {0}" + proof + show "H \ lin x' \ {0}" + proof + fix x assume x: "x \ H \ lin x'" + then obtain a where xx': "x = a \ x'" + by blast + have "x = 0" + proof cases + assume "a = 0" + with xx' and x' show ?thesis by simp + next + assume a: "a \ 0" + from x have "x \ H" .. + with xx' have "inverse a \ a \ x' \ H" by simp + with a and x' have "x' \ H" by (simp add: mult_assoc2) + with `x' \ H` show ?thesis by contradiction + qed + then show "x \ {0}" .. + qed + show "{0} \ H \ lin x'" + proof - + have "0 \ H" using `vectorspace E` .. + moreover have "0 \ lin x'" using `x' \ E` .. + ultimately show ?thesis by blast + qed + qed + show "lin x' \ E" using `x' \ E` .. + qed (rule `vectorspace E`, rule `subspace H E`, rule y1, rule y2, rule eq) + then show "y1 = y2" .. + from c have "a1 \ x' = a2 \ x'" .. + with x' show "a1 = a2" by (simp add: mult_right_cancel) + qed +qed + +text {* + Since for any element @{text "y + a \ x'"} of the direct sum of a + vectorspace @{text H} and the linear closure of @{text x'} the + components @{text "y \ H"} and @{text a} are unique, it follows from + @{text "y \ H"} that @{text "a = 0"}. +*} + +lemma decomp_H'_H: + assumes "vectorspace E" "subspace H E" + assumes t: "t \ H" + and x': "x' \ H" "x' \ E" "x' \ 0" + shows "(SOME (y, a). 
t = y + a \ x' \ y \ H) = (t, 0)" +proof - + interpret vectorspace E by fact + interpret subspace H E by fact + show ?thesis + proof (rule, simp_all only: split_paired_all split_conv) + from t x' show "t = t + 0 \ x' \ t \ H" by simp + fix y and a assume ya: "t = y + a \ x' \ y \ H" + have "y = t \ a = 0" + proof (rule decomp_H') + from ya x' show "y + a \ x' = t + 0 \ x'" by simp + from ya show "y \ H" .. + qed (rule `vectorspace E`, rule `subspace H E`, rule t, (rule x')+) + with t x' show "(y, a) = (y + a \ x', 0)" by simp + qed +qed + +text {* + The components @{text "y \ H"} and @{text a} in @{text "y + a \ x'"} + are unique, so the function @{text h'} defined by + @{text "h' (y + a \ x') = h y + a \ \"} is definite. +*} + +lemma h'_definite: + fixes H + assumes h'_def: + "h' \ (\x. let (y, a) = SOME (y, a). (x = y + a \ x' \ y \ H) + in (h y) + a * xi)" + and x: "x = y + a \ x'" + assumes "vectorspace E" "subspace H E" + assumes y: "y \ H" + and x': "x' \ H" "x' \ E" "x' \ 0" + shows "h' x = h y + a * xi" +proof - + interpret vectorspace E by fact + interpret subspace H E by fact + from x y x' have "x \ H + lin x'" by auto + have "\!p. (\(y, a). x = y + a \ x' \ y \ H) p" (is "\!p. ?P p") + proof (rule ex_ex1I) + from x y show "\p. ?P p" by blast + fix p q assume p: "?P p" and q: "?P q" + show "p = q" + proof - + from p have xp: "x = fst p + snd p \ x' \ fst p \ H" + by (cases p) simp + from q have xq: "x = fst q + snd q \ x' \ fst q \ H" + by (cases q) simp + have "fst p = fst q \ snd p = snd q" + proof (rule decomp_H') + from xp show "fst p \ H" .. + from xq show "fst q \ H" .. + from xp and xq show "fst p + snd p \ x' = fst q + snd q \ x'" + by simp + qed (rule `vectorspace E`, rule `subspace H E`, (rule x')+) + then show ?thesis by (cases p, cases q) simp + qed + qed + then have eq: "(SOME (y, a). x = y + a \ x' \ y \ H) = (y, a)" + by (rule some1_equality) (simp add: x y) + with h'_def show "h' x = h y + a * xi" by (simp add: Let_def) +qed + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/HahnBanach/VectorSpace.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/HahnBanach/VectorSpace.thy Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,419 @@ +(* Title: HOL/Real/HahnBanach/VectorSpace.thy + ID: $Id$ + Author: Gertrud Bauer, TU Munich +*) + +header {* Vector spaces *} + +theory VectorSpace +imports Real Bounds Zorn +begin + +subsection {* Signature *} + +text {* + For the definition of real vector spaces a type @{typ 'a} of the + sort @{text "{plus, minus, zero}"} is considered, on which a real + scalar multiplication @{text \} is declared. +*} + +consts + prod :: "real \ 'a::{plus, minus, zero} \ 'a" (infixr "'(*')" 70) + +notation (xsymbols) + prod (infixr "\" 70) +notation (HTML output) + prod (infixr "\" 70) + + +subsection {* Vector space laws *} + +text {* + A \emph{vector space} is a non-empty set @{text V} of elements from + @{typ 'a} with the following vector space laws: The set @{text V} is + closed under addition and scalar multiplication, addition is + associative and commutative; @{text "- x"} is the inverse of @{text + x} w.~r.~t.~addition and @{text 0} is the neutral element of + addition. Addition and multiplication are distributive; scalar + multiplication is associative and the real number @{text "1"} is + the neutral element of scalar multiplication. 
+*} + +locale var_V = fixes V + +locale vectorspace = var_V + + assumes non_empty [iff, intro?]: "V \ {}" + and add_closed [iff]: "x \ V \ y \ V \ x + y \ V" + and mult_closed [iff]: "x \ V \ a \ x \ V" + and add_assoc: "x \ V \ y \ V \ z \ V \ (x + y) + z = x + (y + z)" + and add_commute: "x \ V \ y \ V \ x + y = y + x" + and diff_self [simp]: "x \ V \ x - x = 0" + and add_zero_left [simp]: "x \ V \ 0 + x = x" + and add_mult_distrib1: "x \ V \ y \ V \ a \ (x + y) = a \ x + a \ y" + and add_mult_distrib2: "x \ V \ (a + b) \ x = a \ x + b \ x" + and mult_assoc: "x \ V \ (a * b) \ x = a \ (b \ x)" + and mult_1 [simp]: "x \ V \ 1 \ x = x" + and negate_eq1: "x \ V \ - x = (- 1) \ x" + and diff_eq1: "x \ V \ y \ V \ x - y = x + - y" + +lemma (in vectorspace) negate_eq2: "x \ V \ (- 1) \ x = - x" + by (rule negate_eq1 [symmetric]) + +lemma (in vectorspace) negate_eq2a: "x \ V \ -1 \ x = - x" + by (simp add: negate_eq1) + +lemma (in vectorspace) diff_eq2: "x \ V \ y \ V \ x + - y = x - y" + by (rule diff_eq1 [symmetric]) + +lemma (in vectorspace) diff_closed [iff]: "x \ V \ y \ V \ x - y \ V" + by (simp add: diff_eq1 negate_eq1) + +lemma (in vectorspace) neg_closed [iff]: "x \ V \ - x \ V" + by (simp add: negate_eq1) + +lemma (in vectorspace) add_left_commute: + "x \ V \ y \ V \ z \ V \ x + (y + z) = y + (x + z)" +proof - + assume xyz: "x \ V" "y \ V" "z \ V" + then have "x + (y + z) = (x + y) + z" + by (simp only: add_assoc) + also from xyz have "\ = (y + x) + z" by (simp only: add_commute) + also from xyz have "\ = y + (x + z)" by (simp only: add_assoc) + finally show ?thesis . +qed + +theorems (in vectorspace) add_ac = + add_assoc add_commute add_left_commute + + +text {* The existence of the zero element of a vector space + follows from the non-emptiness of carrier set. *} + +lemma (in vectorspace) zero [iff]: "0 \ V" +proof - + from non_empty obtain x where x: "x \ V" by blast + then have "0 = x - x" by (rule diff_self [symmetric]) + also from x x have "\ \ V" by (rule diff_closed) + finally show ?thesis . +qed + +lemma (in vectorspace) add_zero_right [simp]: + "x \ V \ x + 0 = x" +proof - + assume x: "x \ V" + from this and zero have "x + 0 = 0 + x" by (rule add_commute) + also from x have "\ = x" by (rule add_zero_left) + finally show ?thesis . +qed + +lemma (in vectorspace) mult_assoc2: + "x \ V \ a \ b \ x = (a * b) \ x" + by (simp only: mult_assoc) + +lemma (in vectorspace) diff_mult_distrib1: + "x \ V \ y \ V \ a \ (x - y) = a \ x - a \ y" + by (simp add: diff_eq1 negate_eq1 add_mult_distrib1 mult_assoc2) + +lemma (in vectorspace) diff_mult_distrib2: + "x \ V \ (a - b) \ x = a \ x - (b \ x)" +proof - + assume x: "x \ V" + have " (a - b) \ x = (a + - b) \ x" + by (simp add: real_diff_def) + also from x have "\ = a \ x + (- b) \ x" + by (rule add_mult_distrib2) + also from x have "\ = a \ x + - (b \ x)" + by (simp add: negate_eq1 mult_assoc2) + also from x have "\ = a \ x - (b \ x)" + by (simp add: diff_eq1) + finally show ?thesis . 
+qed + +lemmas (in vectorspace) distrib = + add_mult_distrib1 add_mult_distrib2 + diff_mult_distrib1 diff_mult_distrib2 + + +text {* \medskip Further derived laws: *} + +lemma (in vectorspace) mult_zero_left [simp]: + "x \ V \ 0 \ x = 0" +proof - + assume x: "x \ V" + have "0 \ x = (1 - 1) \ x" by simp + also have "\ = (1 + - 1) \ x" by simp + also from x have "\ = 1 \ x + (- 1) \ x" + by (rule add_mult_distrib2) + also from x have "\ = x + (- 1) \ x" by simp + also from x have "\ = x + - x" by (simp add: negate_eq2a) + also from x have "\ = x - x" by (simp add: diff_eq2) + also from x have "\ = 0" by simp + finally show ?thesis . +qed + +lemma (in vectorspace) mult_zero_right [simp]: + "a \ 0 = (0::'a)" +proof - + have "a \ 0 = a \ (0 - (0::'a))" by simp + also have "\ = a \ 0 - a \ 0" + by (rule diff_mult_distrib1) simp_all + also have "\ = 0" by simp + finally show ?thesis . +qed + +lemma (in vectorspace) minus_mult_cancel [simp]: + "x \ V \ (- a) \ - x = a \ x" + by (simp add: negate_eq1 mult_assoc2) + +lemma (in vectorspace) add_minus_left_eq_diff: + "x \ V \ y \ V \ - x + y = y - x" +proof - + assume xy: "x \ V" "y \ V" + then have "- x + y = y + - x" by (simp add: add_commute) + also from xy have "\ = y - x" by (simp add: diff_eq1) + finally show ?thesis . +qed + +lemma (in vectorspace) add_minus [simp]: + "x \ V \ x + - x = 0" + by (simp add: diff_eq2) + +lemma (in vectorspace) add_minus_left [simp]: + "x \ V \ - x + x = 0" + by (simp add: diff_eq2 add_commute) + +lemma (in vectorspace) minus_minus [simp]: + "x \ V \ - (- x) = x" + by (simp add: negate_eq1 mult_assoc2) + +lemma (in vectorspace) minus_zero [simp]: + "- (0::'a) = 0" + by (simp add: negate_eq1) + +lemma (in vectorspace) minus_zero_iff [simp]: + "x \ V \ (- x = 0) = (x = 0)" +proof + assume x: "x \ V" + { + from x have "x = - (- x)" by (simp add: minus_minus) + also assume "- x = 0" + also have "- \ = 0" by (rule minus_zero) + finally show "x = 0" . + next + assume "x = 0" + then show "- x = 0" by simp + } +qed + +lemma (in vectorspace) add_minus_cancel [simp]: + "x \ V \ y \ V \ x + (- x + y) = y" + by (simp add: add_assoc [symmetric] del: add_commute) + +lemma (in vectorspace) minus_add_cancel [simp]: + "x \ V \ y \ V \ - x + (x + y) = y" + by (simp add: add_assoc [symmetric] del: add_commute) + +lemma (in vectorspace) minus_add_distrib [simp]: + "x \ V \ y \ V \ - (x + y) = - x + - y" + by (simp add: negate_eq1 add_mult_distrib1) + +lemma (in vectorspace) diff_zero [simp]: + "x \ V \ x - 0 = x" + by (simp add: diff_eq1) + +lemma (in vectorspace) diff_zero_right [simp]: + "x \ V \ 0 - x = - x" + by (simp add: diff_eq1) + +lemma (in vectorspace) add_left_cancel: + "x \ V \ y \ V \ z \ V \ (x + y = x + z) = (y = z)" +proof + assume x: "x \ V" and y: "y \ V" and z: "z \ V" + { + from y have "y = 0 + y" by simp + also from x y have "\ = (- x + x) + y" by simp + also from x y have "\ = - x + (x + y)" + by (simp add: add_assoc neg_closed) + also assume "x + y = x + z" + also from x z have "- x + (x + z) = - x + x + z" + by (simp add: add_assoc [symmetric] neg_closed) + also from x z have "\ = z" by simp + finally show "y = z" . 
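+    (* informally: y = 0 + y = (- x + x) + y = - x + (x + y)
+       = - x + (x + z) = (- x + x) + z = z,
+       using the assumption x + y = x + z in the middle;
+       the converse direction below is immediate *)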
+ next + assume "y = z" + then show "x + y = x + z" by (simp only:) + } +qed + +lemma (in vectorspace) add_right_cancel: + "x \ V \ y \ V \ z \ V \ (y + x = z + x) = (y = z)" + by (simp only: add_commute add_left_cancel) + +lemma (in vectorspace) add_assoc_cong: + "x \ V \ y \ V \ x' \ V \ y' \ V \ z \ V + \ x + y = x' + y' \ x + (y + z) = x' + (y' + z)" + by (simp only: add_assoc [symmetric]) + +lemma (in vectorspace) mult_left_commute: + "x \ V \ a \ b \ x = b \ a \ x" + by (simp add: real_mult_commute mult_assoc2) + +lemma (in vectorspace) mult_zero_uniq: + "x \ V \ x \ 0 \ a \ x = 0 \ a = 0" +proof (rule classical) + assume a: "a \ 0" + assume x: "x \ V" "x \ 0" and ax: "a \ x = 0" + from x a have "x = (inverse a * a) \ x" by simp + also from `x \ V` have "\ = inverse a \ (a \ x)" by (rule mult_assoc) + also from ax have "\ = inverse a \ 0" by simp + also have "\ = 0" by simp + finally have "x = 0" . + with `x \ 0` show "a = 0" by contradiction +qed + +lemma (in vectorspace) mult_left_cancel: + "x \ V \ y \ V \ a \ 0 \ (a \ x = a \ y) = (x = y)" +proof + assume x: "x \ V" and y: "y \ V" and a: "a \ 0" + from x have "x = 1 \ x" by simp + also from a have "\ = (inverse a * a) \ x" by simp + also from x have "\ = inverse a \ (a \ x)" + by (simp only: mult_assoc) + also assume "a \ x = a \ y" + also from a y have "inverse a \ \ = y" + by (simp add: mult_assoc2) + finally show "x = y" . +next + assume "x = y" + then show "a \ x = a \ y" by (simp only:) +qed + +lemma (in vectorspace) mult_right_cancel: + "x \ V \ x \ 0 \ (a \ x = b \ x) = (a = b)" +proof + assume x: "x \ V" and neq: "x \ 0" + { + from x have "(a - b) \ x = a \ x - b \ x" + by (simp add: diff_mult_distrib2) + also assume "a \ x = b \ x" + with x have "a \ x - b \ x = 0" by simp + finally have "(a - b) \ x = 0" . + with x neq have "a - b = 0" by (rule mult_zero_uniq) + then show "a = b" by simp + next + assume "a = b" + then show "a \ x = b \ x" by (simp only:) + } +qed + +lemma (in vectorspace) eq_diff_eq: + "x \ V \ y \ V \ z \ V \ (x = z - y) = (x + y = z)" +proof + assume x: "x \ V" and y: "y \ V" and z: "z \ V" + { + assume "x = z - y" + then have "x + y = z - y + y" by simp + also from y z have "\ = z + - y + y" + by (simp add: diff_eq1) + also have "\ = z + (- y + y)" + by (rule add_assoc) (simp_all add: y z) + also from y z have "\ = z + 0" + by (simp only: add_minus_left) + also from z have "\ = z" + by (simp only: add_zero_right) + finally show "x + y = z" . + next + assume "x + y = z" + then have "z - y = (x + y) - y" by simp + also from x y have "\ = x + y + - y" + by (simp add: diff_eq1) + also have "\ = x + (y + - y)" + by (rule add_assoc) (simp_all add: x y) + also from x y have "\ = x" by simp + finally show "x = z - y" .. + } +qed + +lemma (in vectorspace) add_minus_eq_minus: + "x \ V \ y \ V \ x + y = 0 \ x = - y" +proof - + assume x: "x \ V" and y: "y \ V" + from x y have "x = (- y + y) + x" by simp + also from x y have "\ = - y + (x + y)" by (simp add: add_ac) + also assume "x + y = 0" + also from y have "- y + 0 = - y" by simp + finally show "x = - y" . 
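+  (* note that the assumption x + y = 0 is consumed in the middle of the
+     calculation: it turns - y + (x + y) into - y + 0, which collapses
+     to - y *)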
+qed + +lemma (in vectorspace) add_minus_eq: + "x \ V \ y \ V \ x - y = 0 \ x = y" +proof - + assume x: "x \ V" and y: "y \ V" + assume "x - y = 0" + with x y have eq: "x + - y = 0" by (simp add: diff_eq1) + with _ _ have "x = - (- y)" + by (rule add_minus_eq_minus) (simp_all add: x y) + with x y show "x = y" by simp +qed + +lemma (in vectorspace) add_diff_swap: + "a \ V \ b \ V \ c \ V \ d \ V \ a + b = c + d + \ a - c = d - b" +proof - + assume vs: "a \ V" "b \ V" "c \ V" "d \ V" + and eq: "a + b = c + d" + then have "- c + (a + b) = - c + (c + d)" + by (simp add: add_left_cancel) + also have "\ = d" using `c \ V` `d \ V` by (rule minus_add_cancel) + finally have eq: "- c + (a + b) = d" . + from vs have "a - c = (- c + (a + b)) + - b" + by (simp add: add_ac diff_eq1) + also from vs eq have "\ = d + - b" + by (simp add: add_right_cancel) + also from vs have "\ = d - b" by (simp add: diff_eq2) + finally show "a - c = d - b" . +qed + +lemma (in vectorspace) vs_add_cancel_21: + "x \ V \ y \ V \ z \ V \ u \ V + \ (x + (y + z) = y + u) = (x + z = u)" +proof + assume vs: "x \ V" "y \ V" "z \ V" "u \ V" + { + from vs have "x + z = - y + y + (x + z)" by simp + also have "\ = - y + (y + (x + z))" + by (rule add_assoc) (simp_all add: vs) + also from vs have "y + (x + z) = x + (y + z)" + by (simp add: add_ac) + also assume "x + (y + z) = y + u" + also from vs have "- y + (y + u) = u" by simp + finally show "x + z = u" . + next + assume "x + z = u" + with vs show "x + (y + z) = y + u" + by (simp only: add_left_commute [of x]) + } +qed + +lemma (in vectorspace) add_cancel_end: + "x \ V \ y \ V \ z \ V \ (x + (y + z) = y) = (x = - z)" +proof + assume vs: "x \ V" "y \ V" "z \ V" + { + assume "x + (y + z) = y" + with vs have "(x + z) + y = 0 + y" + by (simp add: add_ac) + with vs have "x + z = 0" + by (simp only: add_right_cancel add_closed zero) + with vs show "x = - z" by (simp add: add_minus_eq_minus) + next + assume eq: "x = - z" + then have "x + (y + z) = - z + (y + z)" by simp + also have "\ = y + (- z + z)" + by (rule add_left_commute) (simp_all add: vs) + also from vs have "\ = y" by simp + finally show "x + (y + z) = y" . + } +qed + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/HahnBanach/ZornLemma.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/HahnBanach/ZornLemma.thy Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,57 @@ +(* Title: HOL/Real/HahnBanach/ZornLemma.thy + Author: Gertrud Bauer, TU Munich +*) + +header {* Zorn's Lemma *} + +theory ZornLemma +imports Zorn +begin + +text {* + Zorn's Lemmas states: if every linear ordered subset of an ordered + set @{text S} has an upper bound in @{text S}, then there exists a + maximal element in @{text S}. In our application, @{text S} is a + set of sets ordered by set inclusion. Since the union of a chain of + sets is an upper bound for all elements of the chain, the conditions + of Zorn's lemma can be modified: if @{text S} is non-empty, it + suffices to show that for every non-empty chain @{text c} in @{text + S} the union of @{text c} also lies in @{text S}. +*} + +theorem Zorn's_Lemma: + assumes r: "\c. c \ chain S \ \x. x \ c \ \c \ S" + and aS: "a \ S" + shows "\y \ S. \z \ S. y \ z \ y = z" +proof (rule Zorn_Lemma2) + show "\c \ chain S. \y \ S. \z \ c. z \ y" + proof + fix c assume "c \ chain S" + show "\y \ S. \z \ c. z \ y" + proof cases + + txt {* If @{text c} is an empty chain, then every element in + @{text S} is an upper bound of @{text c}. 
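+        In particular, the element @{text a}, which lies in @{text S}
+        by assumption, serves as such an upper bound.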
*} + + assume "c = {}" + with aS show ?thesis by fast + + txt {* If @{text c} is non-empty, then @{text "\c"} is an upper + bound of @{text c}, lying in @{text S}. *} + + next + assume "c \ {}" + show ?thesis + proof + show "\z \ c. z \ \c" by fast + show "\c \ S" + proof (rule r) + from `c \ {}` show "\x. x \ c" by fast + show "c \ chain S" by fact + qed + qed + qed + qed +qed + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/HahnBanach/document/root.bib --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/HahnBanach/document/root.bib Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,27 @@ + +@Book{Heuser:1986, + author = {H. Heuser}, + title = {Funktionalanalysis: Theorie und Anwendung}, + publisher = {Teubner}, + year = 1986 +} + +@InCollection{Narici:1996, + author = {L. Narici and E. Beckenstein}, + title = {The {Hahn-Banach Theorem}: The Life and Times}, + booktitle = {Topology Atlas}, + publisher = {York University, Toronto, Ontario, Canada}, + year = 1996, + note = {\url{http://at.yorku.ca/topology/preprint.htm} and + \url{http://at.yorku.ca/p/a/a/a/16.htm}} +} + +@Article{Nowak:1993, + author = {B. Nowak and A. Trybulec}, + title = {{Hahn-Banach} Theorem}, + journal = {Journal of Formalized Mathematics}, + year = {1993}, + volume = {5}, + institution = {University of Bialystok}, + note = {\url{http://mizar.uwb.edu.pl/JFM/Vol5/hahnban.html}} +} diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/HahnBanach/document/root.tex --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/HahnBanach/document/root.tex Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,83 @@ +\documentclass[10pt,a4paper,twoside]{article} +\usepackage{graphicx} +\usepackage{latexsym,theorem} +\usepackage{isabelle,isabellesym} +\usepackage{pdfsetup} %last one! + +\isabellestyle{it} +\urlstyle{rm} + +\newcommand{\isasymsup}{\isamath{\sup\,}} +\newcommand{\skp}{\smallskip} + + +\begin{document} + +\pagestyle{headings} +\pagenumbering{arabic} + +\title{The Hahn-Banach Theorem \\ for Real Vector Spaces} +\author{Gertrud Bauer \\ \url{http://www.in.tum.de/~bauerg/}} +\maketitle + +\begin{abstract} + The Hahn-Banach Theorem is one of the most fundamental results in functional + analysis. We present a fully formal proof of two versions of the theorem, + one for general linear spaces and another for normed spaces. This + development is based on simply-typed classical set-theory, as provided by + Isabelle/HOL. +\end{abstract} + + +\tableofcontents +\parindent 0pt \parskip 0.5ex + +\clearpage +\section{Preface} + +This is a fully formal proof of the Hahn-Banach Theorem. It closely follows +the informal presentation given in Heuser's textbook \cite[{\S} 36]{Heuser:1986}. +Another formal proof of the same theorem has been done in Mizar +\cite{Nowak:1993}. A general overview of the relevance and history of the +Hahn-Banach Theorem is given by Narici and Beckenstein \cite{Narici:1996}. + +\medskip The document is structured as follows. The first part contains +definitions of basic notions of linear algebra: vector spaces, subspaces, +normed spaces, continuous linear-forms, norm of functions and an order on +functions by domain extension. The second part contains some lemmas about the +supremum (w.r.t.\ the function order) and extension of non-maximal functions. +With these preliminaries, the main proof of the theorem (in its two versions) +is conducted in the third part. The dependencies of individual theories are +as follows. 
+ +\begin{center} + \includegraphics[scale=0.5]{session_graph} +\end{center} + +\clearpage +\part {Basic Notions} + +\input{Bounds} +\input{VectorSpace} +\input{Subspace} +\input{NormedSpace} +\input{Linearform} +\input{FunctionOrder} +\input{FunctionNorm} +\input{ZornLemma} + +\clearpage +\part {Lemmas for the Proof} + +\input{HahnBanachSupLemmas} +\input{HahnBanachExtLemmas} +\input{HahnBanachLemmas} + +\clearpage +\part {The Main Proof} + +\input{HahnBanach} +\bibliographystyle{abbrv} +\bibliography{root} + +\end{document} diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Hyperreal/SEQ.thy --- a/src/HOL/Hyperreal/SEQ.thy Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1136 +0,0 @@ -(* Title : SEQ.thy - Author : Jacques D. Fleuriot - Copyright : 1998 University of Cambridge - Description : Convergence of sequences and series - Conversion to Isar and new proofs by Lawrence C Paulson, 2004 - Additional contributions by Jeremy Avigad and Brian Huffman -*) - -header {* Sequences and Convergence *} - -theory SEQ -imports "../Real/RealVector" "../RComplete" -begin - -definition - Zseq :: "[nat \ 'a::real_normed_vector] \ bool" where - --{*Standard definition of sequence converging to zero*} - [code del]: "Zseq X = (\r>0. \no. \n\no. norm (X n) < r)" - -definition - LIMSEQ :: "[nat => 'a::real_normed_vector, 'a] => bool" - ("((_)/ ----> (_))" [60, 60] 60) where - --{*Standard definition of convergence of sequence*} - [code del]: "X ----> L = (\r. 0 < r --> (\no. \n. no \ n --> norm (X n - L) < r))" - -definition - lim :: "(nat => 'a::real_normed_vector) => 'a" where - --{*Standard definition of limit using choice operator*} - "lim X = (THE L. X ----> L)" - -definition - convergent :: "(nat => 'a::real_normed_vector) => bool" where - --{*Standard definition of convergence*} - "convergent X = (\L. X ----> L)" - -definition - Bseq :: "(nat => 'a::real_normed_vector) => bool" where - --{*Standard definition for bounded sequence*} - [code del]: "Bseq X = (\K>0.\n. norm (X n) \ K)" - -definition - monoseq :: "(nat=>real)=>bool" where - --{*Definition for monotonicity*} - [code del]: "monoseq X = ((\m. \n\m. X m \ X n) | (\m. \n\m. X n \ X m))" - -definition - subseq :: "(nat => nat) => bool" where - --{*Definition of subsequence*} - [code del]: "subseq f = (\m. \n>m. (f m) < (f n))" - -definition - Cauchy :: "(nat => 'a::real_normed_vector) => bool" where - --{*Standard definition of the Cauchy condition*} - [code del]: "Cauchy X = (\e>0. \M. \m \ M. \n \ M. norm (X m - X n) < e)" - - -subsection {* Bounded Sequences *} - -lemma BseqI': assumes K: "\n. norm (X n) \ K" shows "Bseq X" -unfolding Bseq_def -proof (intro exI conjI allI) - show "0 < max K 1" by simp -next - fix n::nat - have "norm (X n) \ K" by (rule K) - thus "norm (X n) \ max K 1" by simp -qed - -lemma BseqE: "\Bseq X; \K. \0 < K; \n. norm (X n) \ K\ \ Q\ \ Q" -unfolding Bseq_def by auto - -lemma BseqI2': assumes K: "\n\N. norm (X n) \ K" shows "Bseq X" -proof (rule BseqI') - let ?A = "norm ` X ` {..N}" - have 1: "finite ?A" by simp - fix n::nat - show "norm (X n) \ max K (Max ?A)" - proof (cases rule: linorder_le_cases) - assume "n \ N" - hence "norm (X n) \ K" using K by simp - thus "norm (X n) \ max K (Max ?A)" by simp - next - assume "n \ N" - hence "norm (X n) \ ?A" by simp - with 1 have "norm (X n) \ Max ?A" by (rule Max_ge) - thus "norm (X n) \ max K (Max ?A)" by simp - qed -qed - -lemma Bseq_ignore_initial_segment: "Bseq X \ Bseq (\n. 
X (n + k))" -unfolding Bseq_def by auto - -lemma Bseq_offset: "Bseq (\n. X (n + k)) \ Bseq X" -apply (erule BseqE) -apply (rule_tac N="k" and K="K" in BseqI2') -apply clarify -apply (drule_tac x="n - k" in spec, simp) -done - - -subsection {* Sequences That Converge to Zero *} - -lemma ZseqI: - "(\r. 0 < r \ \no. \n\no. norm (X n) < r) \ Zseq X" -unfolding Zseq_def by simp - -lemma ZseqD: - "\Zseq X; 0 < r\ \ \no. \n\no. norm (X n) < r" -unfolding Zseq_def by simp - -lemma Zseq_zero: "Zseq (\n. 0)" -unfolding Zseq_def by simp - -lemma Zseq_const_iff: "Zseq (\n. k) = (k = 0)" -unfolding Zseq_def by force - -lemma Zseq_norm_iff: "Zseq (\n. norm (X n)) = Zseq (\n. X n)" -unfolding Zseq_def by simp - -lemma Zseq_imp_Zseq: - assumes X: "Zseq X" - assumes Y: "\n. norm (Y n) \ norm (X n) * K" - shows "Zseq (\n. Y n)" -proof (cases) - assume K: "0 < K" - show ?thesis - proof (rule ZseqI) - fix r::real assume "0 < r" - hence "0 < r / K" - using K by (rule divide_pos_pos) - then obtain N where "\n\N. norm (X n) < r / K" - using ZseqD [OF X] by fast - hence "\n\N. norm (X n) * K < r" - by (simp add: pos_less_divide_eq K) - hence "\n\N. norm (Y n) < r" - by (simp add: order_le_less_trans [OF Y]) - thus "\N. \n\N. norm (Y n) < r" .. - qed -next - assume "\ 0 < K" - hence K: "K \ 0" by (simp only: linorder_not_less) - { - fix n::nat - have "norm (Y n) \ norm (X n) * K" by (rule Y) - also have "\ \ norm (X n) * 0" - using K norm_ge_zero by (rule mult_left_mono) - finally have "norm (Y n) = 0" by simp - } - thus ?thesis by (simp add: Zseq_zero) -qed - -lemma Zseq_le: "\Zseq Y; \n. norm (X n) \ norm (Y n)\ \ Zseq X" -by (erule_tac K="1" in Zseq_imp_Zseq, simp) - -lemma Zseq_add: - assumes X: "Zseq X" - assumes Y: "Zseq Y" - shows "Zseq (\n. X n + Y n)" -proof (rule ZseqI) - fix r::real assume "0 < r" - hence r: "0 < r / 2" by simp - obtain M where M: "\n\M. norm (X n) < r/2" - using ZseqD [OF X r] by fast - obtain N where N: "\n\N. norm (Y n) < r/2" - using ZseqD [OF Y r] by fast - show "\N. \n\N. norm (X n + Y n) < r" - proof (intro exI allI impI) - fix n assume n: "max M N \ n" - have "norm (X n + Y n) \ norm (X n) + norm (Y n)" - by (rule norm_triangle_ineq) - also have "\ < r/2 + r/2" - proof (rule add_strict_mono) - from M n show "norm (X n) < r/2" by simp - from N n show "norm (Y n) < r/2" by simp - qed - finally show "norm (X n + Y n) < r" by simp - qed -qed - -lemma Zseq_minus: "Zseq X \ Zseq (\n. - X n)" -unfolding Zseq_def by simp - -lemma Zseq_diff: "\Zseq X; Zseq Y\ \ Zseq (\n. X n - Y n)" -by (simp only: diff_minus Zseq_add Zseq_minus) - -lemma (in bounded_linear) Zseq: - assumes X: "Zseq X" - shows "Zseq (\n. f (X n))" -proof - - obtain K where "\x. norm (f x) \ norm x * K" - using bounded by fast - with X show ?thesis - by (rule Zseq_imp_Zseq) -qed - -lemma (in bounded_bilinear) Zseq: - assumes X: "Zseq X" - assumes Y: "Zseq Y" - shows "Zseq (\n. X n ** Y n)" -proof (rule ZseqI) - fix r::real assume r: "0 < r" - obtain K where K: "0 < K" - and norm_le: "\x y. norm (x ** y) \ norm x * norm y * K" - using pos_bounded by fast - from K have K': "0 < inverse K" - by (rule positive_imp_inverse_positive) - obtain M where M: "\n\M. norm (X n) < r" - using ZseqD [OF X r] by fast - obtain N where N: "\n\N. norm (Y n) < inverse K" - using ZseqD [OF Y K'] by fast - show "\N. \n\N. 
norm (X n ** Y n) < r" - proof (intro exI allI impI) - fix n assume n: "max M N \ n" - have "norm (X n ** Y n) \ norm (X n) * norm (Y n) * K" - by (rule norm_le) - also have "norm (X n) * norm (Y n) * K < r * inverse K * K" - proof (intro mult_strict_right_mono mult_strict_mono' norm_ge_zero K) - from M n show Xn: "norm (X n) < r" by simp - from N n show Yn: "norm (Y n) < inverse K" by simp - qed - also from K have "r * inverse K * K = r" by simp - finally show "norm (X n ** Y n) < r" . - qed -qed - -lemma (in bounded_bilinear) Zseq_prod_Bseq: - assumes X: "Zseq X" - assumes Y: "Bseq Y" - shows "Zseq (\n. X n ** Y n)" -proof - - obtain K where K: "0 \ K" - and norm_le: "\x y. norm (x ** y) \ norm x * norm y * K" - using nonneg_bounded by fast - obtain B where B: "0 < B" - and norm_Y: "\n. norm (Y n) \ B" - using Y [unfolded Bseq_def] by fast - from X show ?thesis - proof (rule Zseq_imp_Zseq) - fix n::nat - have "norm (X n ** Y n) \ norm (X n) * norm (Y n) * K" - by (rule norm_le) - also have "\ \ norm (X n) * B * K" - by (intro mult_mono' order_refl norm_Y norm_ge_zero - mult_nonneg_nonneg K) - also have "\ = norm (X n) * (B * K)" - by (rule mult_assoc) - finally show "norm (X n ** Y n) \ norm (X n) * (B * K)" . - qed -qed - -lemma (in bounded_bilinear) Bseq_prod_Zseq: - assumes X: "Bseq X" - assumes Y: "Zseq Y" - shows "Zseq (\n. X n ** Y n)" -proof - - obtain K where K: "0 \ K" - and norm_le: "\x y. norm (x ** y) \ norm x * norm y * K" - using nonneg_bounded by fast - obtain B where B: "0 < B" - and norm_X: "\n. norm (X n) \ B" - using X [unfolded Bseq_def] by fast - from Y show ?thesis - proof (rule Zseq_imp_Zseq) - fix n::nat - have "norm (X n ** Y n) \ norm (X n) * norm (Y n) * K" - by (rule norm_le) - also have "\ \ B * norm (Y n) * K" - by (intro mult_mono' order_refl norm_X norm_ge_zero - mult_nonneg_nonneg K) - also have "\ = norm (Y n) * (B * K)" - by (simp only: mult_ac) - finally show "norm (X n ** Y n) \ norm (Y n) * (B * K)" . - qed -qed - -lemma (in bounded_bilinear) Zseq_left: - "Zseq X \ Zseq (\n. X n ** a)" -by (rule bounded_linear_left [THEN bounded_linear.Zseq]) - -lemma (in bounded_bilinear) Zseq_right: - "Zseq X \ Zseq (\n. a ** X n)" -by (rule bounded_linear_right [THEN bounded_linear.Zseq]) - -lemmas Zseq_mult = mult.Zseq -lemmas Zseq_mult_right = mult.Zseq_right -lemmas Zseq_mult_left = mult.Zseq_left - - -subsection {* Limits of Sequences *} - -lemma LIMSEQ_iff: - "(X ----> L) = (\r>0. \no. \n \ no. norm (X n - L) < r)" -by (rule LIMSEQ_def) - -lemma LIMSEQ_Zseq_iff: "((\n. X n) ----> L) = Zseq (\n. X n - L)" -by (simp only: LIMSEQ_def Zseq_def) - -lemma LIMSEQ_I: - "(\r. 0 < r \ \no. \n\no. norm (X n - L) < r) \ X ----> L" -by (simp add: LIMSEQ_def) - -lemma LIMSEQ_D: - "\X ----> L; 0 < r\ \ \no. \n\no. norm (X n - L) < r" -by (simp add: LIMSEQ_def) - -lemma LIMSEQ_const: "(\n. k) ----> k" -by (simp add: LIMSEQ_def) - -lemma LIMSEQ_const_iff: "(\n. k) ----> l = (k = l)" -by (simp add: LIMSEQ_Zseq_iff Zseq_const_iff) - -lemma LIMSEQ_norm: "X ----> a \ (\n. norm (X n)) ----> norm a" -apply (simp add: LIMSEQ_def, safe) -apply (drule_tac x="r" in spec, safe) -apply (rule_tac x="no" in exI, safe) -apply (drule_tac x="n" in spec, safe) -apply (erule order_le_less_trans [OF norm_triangle_ineq3]) -done - -lemma LIMSEQ_ignore_initial_segment: - "f ----> a \ (\n. f (n + k)) ----> a" -apply (rule LIMSEQ_I) -apply (drule (1) LIMSEQ_D) -apply (erule exE, rename_tac N) -apply (rule_tac x=N in exI) -apply simp -done - -lemma LIMSEQ_offset: - "(\n. 
f (n + k)) ----> a \ f ----> a" -apply (rule LIMSEQ_I) -apply (drule (1) LIMSEQ_D) -apply (erule exE, rename_tac N) -apply (rule_tac x="N + k" in exI) -apply clarify -apply (drule_tac x="n - k" in spec) -apply (simp add: le_diff_conv2) -done - -lemma LIMSEQ_Suc: "f ----> l \ (\n. f (Suc n)) ----> l" -by (drule_tac k="1" in LIMSEQ_ignore_initial_segment, simp) - -lemma LIMSEQ_imp_Suc: "(\n. f (Suc n)) ----> l \ f ----> l" -by (rule_tac k="1" in LIMSEQ_offset, simp) - -lemma LIMSEQ_Suc_iff: "(\n. f (Suc n)) ----> l = f ----> l" -by (blast intro: LIMSEQ_imp_Suc LIMSEQ_Suc) - -lemma add_diff_add: - fixes a b c d :: "'a::ab_group_add" - shows "(a + c) - (b + d) = (a - b) + (c - d)" -by simp - -lemma minus_diff_minus: - fixes a b :: "'a::ab_group_add" - shows "(- a) - (- b) = - (a - b)" -by simp - -lemma LIMSEQ_add: "\X ----> a; Y ----> b\ \ (\n. X n + Y n) ----> a + b" -by (simp only: LIMSEQ_Zseq_iff add_diff_add Zseq_add) - -lemma LIMSEQ_minus: "X ----> a \ (\n. - X n) ----> - a" -by (simp only: LIMSEQ_Zseq_iff minus_diff_minus Zseq_minus) - -lemma LIMSEQ_minus_cancel: "(\n. - X n) ----> - a \ X ----> a" -by (drule LIMSEQ_minus, simp) - -lemma LIMSEQ_diff: "\X ----> a; Y ----> b\ \ (\n. X n - Y n) ----> a - b" -by (simp add: diff_minus LIMSEQ_add LIMSEQ_minus) - -lemma LIMSEQ_unique: "\X ----> a; X ----> b\ \ a = b" -by (drule (1) LIMSEQ_diff, simp add: LIMSEQ_const_iff) - -lemma (in bounded_linear) LIMSEQ: - "X ----> a \ (\n. f (X n)) ----> f a" -by (simp only: LIMSEQ_Zseq_iff diff [symmetric] Zseq) - -lemma (in bounded_bilinear) LIMSEQ: - "\X ----> a; Y ----> b\ \ (\n. X n ** Y n) ----> a ** b" -by (simp only: LIMSEQ_Zseq_iff prod_diff_prod - Zseq_add Zseq Zseq_left Zseq_right) - -lemma LIMSEQ_mult: - fixes a b :: "'a::real_normed_algebra" - shows "[| X ----> a; Y ----> b |] ==> (%n. X n * Y n) ----> a * b" -by (rule mult.LIMSEQ) - -lemma inverse_diff_inverse: - "\(a::'a::division_ring) \ 0; b \ 0\ - \ inverse a - inverse b = - (inverse a * (a - b) * inverse b)" -by (simp add: ring_simps) - -lemma Bseq_inverse_lemma: - fixes x :: "'a::real_normed_div_algebra" - shows "\r \ norm x; 0 < r\ \ norm (inverse x) \ inverse r" -apply (subst nonzero_norm_inverse, clarsimp) -apply (erule (1) le_imp_inverse_le) -done - -lemma Bseq_inverse: - fixes a :: "'a::real_normed_div_algebra" - assumes X: "X ----> a" - assumes a: "a \ 0" - shows "Bseq (\n. inverse (X n))" -proof - - from a have "0 < norm a" by simp - hence "\r>0. r < norm a" by (rule dense) - then obtain r where r1: "0 < r" and r2: "r < norm a" by fast - obtain N where N: "\n. N \ n \ norm (X n - a) < r" - using LIMSEQ_D [OF X r1] by fast - show ?thesis - proof (rule BseqI2' [rule_format]) - fix n assume n: "N \ n" - hence 1: "norm (X n - a) < r" by (rule N) - hence 2: "X n \ 0" using r2 by auto - hence "norm (inverse (X n)) = inverse (norm (X n))" - by (rule nonzero_norm_inverse) - also have "\ \ inverse (norm a - r)" - proof (rule le_imp_inverse_le) - show "0 < norm a - r" using r2 by simp - next - have "norm a - norm (X n) \ norm (a - X n)" - by (rule norm_triangle_ineq2) - also have "\ = norm (X n - a)" - by (rule norm_minus_commute) - also have "\ < r" using 1 . - finally show "norm a - r \ norm (X n)" by simp - qed - finally show "norm (inverse (X n)) \ inverse (norm a - r)" . - qed -qed - -lemma LIMSEQ_inverse_lemma: - fixes a :: "'a::real_normed_div_algebra" - shows "\X ----> a; a \ 0; \n. X n \ 0\ - \ (\n. 
inverse (X n)) ----> inverse a" -apply (subst LIMSEQ_Zseq_iff) -apply (simp add: inverse_diff_inverse nonzero_imp_inverse_nonzero) -apply (rule Zseq_minus) -apply (rule Zseq_mult_left) -apply (rule mult.Bseq_prod_Zseq) -apply (erule (1) Bseq_inverse) -apply (simp add: LIMSEQ_Zseq_iff) -done - -lemma LIMSEQ_inverse: - fixes a :: "'a::real_normed_div_algebra" - assumes X: "X ----> a" - assumes a: "a \ 0" - shows "(\n. inverse (X n)) ----> inverse a" -proof - - from a have "0 < norm a" by simp - then obtain k where "\n\k. norm (X n - a) < norm a" - using LIMSEQ_D [OF X] by fast - hence "\n\k. X n \ 0" by auto - hence k: "\n. X (n + k) \ 0" by simp - - from X have "(\n. X (n + k)) ----> a" - by (rule LIMSEQ_ignore_initial_segment) - hence "(\n. inverse (X (n + k))) ----> inverse a" - using a k by (rule LIMSEQ_inverse_lemma) - thus "(\n. inverse (X n)) ----> inverse a" - by (rule LIMSEQ_offset) -qed - -lemma LIMSEQ_divide: - fixes a b :: "'a::real_normed_field" - shows "\X ----> a; Y ----> b; b \ 0\ \ (\n. X n / Y n) ----> a / b" -by (simp add: LIMSEQ_mult LIMSEQ_inverse divide_inverse) - -lemma LIMSEQ_pow: - fixes a :: "'a::{real_normed_algebra,recpower}" - shows "X ----> a \ (\n. (X n) ^ m) ----> a ^ m" -by (induct m) (simp_all add: power_Suc LIMSEQ_const LIMSEQ_mult) - -lemma LIMSEQ_setsum: - assumes n: "\n. n \ S \ X n ----> L n" - shows "(\m. \n\S. X n m) ----> (\n\S. L n)" -proof (cases "finite S") - case True - thus ?thesis using n - proof (induct) - case empty - show ?case - by (simp add: LIMSEQ_const) - next - case insert - thus ?case - by (simp add: LIMSEQ_add) - qed -next - case False - thus ?thesis - by (simp add: LIMSEQ_const) -qed - -lemma LIMSEQ_setprod: - fixes L :: "'a \ 'b::{real_normed_algebra,comm_ring_1}" - assumes n: "\n. n \ S \ X n ----> L n" - shows "(\m. \n\S. X n m) ----> (\n\S. L n)" -proof (cases "finite S") - case True - thus ?thesis using n - proof (induct) - case empty - show ?case - by (simp add: LIMSEQ_const) - next - case insert - thus ?case - by (simp add: LIMSEQ_mult) - qed -next - case False - thus ?thesis - by (simp add: setprod_def LIMSEQ_const) -qed - -lemma LIMSEQ_add_const: "f ----> a ==> (%n.(f n + b)) ----> a + b" -by (simp add: LIMSEQ_add LIMSEQ_const) - -(* FIXME: delete *) -lemma LIMSEQ_add_minus: - "[| X ----> a; Y ----> b |] ==> (%n. X n + -Y n) ----> a + -b" -by (simp only: LIMSEQ_add LIMSEQ_minus) - -lemma LIMSEQ_diff_const: "f ----> a ==> (%n.(f n - b)) ----> a - b" -by (simp add: LIMSEQ_diff LIMSEQ_const) - -lemma LIMSEQ_diff_approach_zero: - "g ----> L ==> (%x. f x - g x) ----> 0 ==> - f ----> L" - apply (drule LIMSEQ_add) - apply assumption - apply simp -done - -lemma LIMSEQ_diff_approach_zero2: - "f ----> L ==> (%x. f x - g x) ----> 0 ==> - g ----> L"; - apply (drule LIMSEQ_diff) - apply assumption - apply simp -done - -text{*A sequence tends to zero iff its abs does*} -lemma LIMSEQ_norm_zero: "((\n. norm (X n)) ----> 0) = (X ----> 0)" -by (simp add: LIMSEQ_def) - -lemma LIMSEQ_rabs_zero: "((%n. \f n\) ----> 0) = (f ----> (0::real))" -by (simp add: LIMSEQ_def) - -lemma LIMSEQ_imp_rabs: "f ----> (l::real) ==> (%n. \f n\) ----> \l\" -by (drule LIMSEQ_norm, simp) - -text{*An unbounded sequence's inverse tends to 0*} - -lemma LIMSEQ_inverse_zero: - "\r::real. \N. \n\N. r < X n \ (\n. 
inverse (X n)) ----> 0" -apply (rule LIMSEQ_I) -apply (drule_tac x="inverse r" in spec, safe) -apply (rule_tac x="N" in exI, safe) -apply (drule_tac x="n" in spec, safe) -apply (frule positive_imp_inverse_positive) -apply (frule (1) less_imp_inverse_less) -apply (subgoal_tac "0 < X n", simp) -apply (erule (1) order_less_trans) -done - -text{*The sequence @{term "1/n"} tends to 0 as @{term n} tends to infinity*} - -lemma LIMSEQ_inverse_real_of_nat: "(%n. inverse(real(Suc n))) ----> 0" -apply (rule LIMSEQ_inverse_zero, safe) -apply (cut_tac x = r in reals_Archimedean2) -apply (safe, rule_tac x = n in exI) -apply (auto simp add: real_of_nat_Suc) -done - -text{*The sequence @{term "r + 1/n"} tends to @{term r} as @{term n} tends to -infinity is now easily proved*} - -lemma LIMSEQ_inverse_real_of_nat_add: - "(%n. r + inverse(real(Suc n))) ----> r" -by (cut_tac LIMSEQ_add [OF LIMSEQ_const LIMSEQ_inverse_real_of_nat], auto) - -lemma LIMSEQ_inverse_real_of_nat_add_minus: - "(%n. r + -inverse(real(Suc n))) ----> r" -by (cut_tac LIMSEQ_add_minus [OF LIMSEQ_const LIMSEQ_inverse_real_of_nat], auto) - -lemma LIMSEQ_inverse_real_of_nat_add_minus_mult: - "(%n. r*( 1 + -inverse(real(Suc n)))) ----> r" -by (cut_tac b=1 in - LIMSEQ_mult [OF LIMSEQ_const LIMSEQ_inverse_real_of_nat_add_minus], auto) - -lemma LIMSEQ_le_const: - "\X ----> (x::real); \N. \n\N. a \ X n\ \ a \ x" -apply (rule ccontr, simp only: linorder_not_le) -apply (drule_tac r="a - x" in LIMSEQ_D, simp) -apply clarsimp -apply (drule_tac x="max N no" in spec, drule mp, rule le_maxI1) -apply (drule_tac x="max N no" in spec, drule mp, rule le_maxI2) -apply simp -done - -lemma LIMSEQ_le_const2: - "\X ----> (x::real); \N. \n\N. X n \ a\ \ x \ a" -apply (subgoal_tac "- a \ - x", simp) -apply (rule LIMSEQ_le_const) -apply (erule LIMSEQ_minus) -apply simp -done - -lemma LIMSEQ_le: - "\X ----> x; Y ----> y; \N. \n\N. X n \ Y n\ \ x \ (y::real)" -apply (subgoal_tac "0 \ y - x", simp) -apply (rule LIMSEQ_le_const) -apply (erule (1) LIMSEQ_diff) -apply (simp add: le_diff_eq) -done - - -subsection {* Convergence *} - -lemma limI: "X ----> L ==> lim X = L" -apply (simp add: lim_def) -apply (blast intro: LIMSEQ_unique) -done - -lemma convergentD: "convergent X ==> \L. (X ----> L)" -by (simp add: convergent_def) - -lemma convergentI: "(X ----> L) ==> convergent X" -by (auto simp add: convergent_def) - -lemma convergent_LIMSEQ_iff: "convergent X = (X ----> lim X)" -by (auto intro: theI LIMSEQ_unique simp add: convergent_def lim_def) - -lemma convergent_minus_iff: "(convergent X) = (convergent (%n. -(X n)))" -apply (simp add: convergent_def) -apply (auto dest: LIMSEQ_minus) -apply (drule LIMSEQ_minus, auto) -done - - -subsection {* Bounded Monotonic Sequences *} - -text{*Subsequence (alternative definition, (e.g. Hoskins)*} - -lemma subseq_Suc_iff: "subseq f = (\n. (f n) < (f (Suc n)))" -apply (simp add: subseq_def) -apply (auto dest!: less_imp_Suc_add) -apply (induct_tac k) -apply (auto intro: less_trans) -done - -lemma monoseq_Suc: - "monoseq X = ((\n. X n \ X (Suc n)) - | (\n. X (Suc n) \ X n))" -apply (simp add: monoseq_def) -apply (auto dest!: le_imp_less_or_eq) -apply (auto intro!: lessI [THEN less_imp_le] dest!: less_imp_Suc_add) -apply (induct_tac "ka") -apply (auto intro: order_trans) -apply (erule contrapos_np) -apply (induct_tac "k") -apply (auto intro: order_trans) -done - -lemma monoI1: "\m. \ n \ m. X m \ X n ==> monoseq X" -by (simp add: monoseq_def) - -lemma monoI2: "\m. \ n \ m. 
X n \ X m ==> monoseq X" -by (simp add: monoseq_def) - -lemma mono_SucI1: "\n. X n \ X (Suc n) ==> monoseq X" -by (simp add: monoseq_Suc) - -lemma mono_SucI2: "\n. X (Suc n) \ X n ==> monoseq X" -by (simp add: monoseq_Suc) - -text{*Bounded Sequence*} - -lemma BseqD: "Bseq X ==> \K. 0 < K & (\n. norm (X n) \ K)" -by (simp add: Bseq_def) - -lemma BseqI: "[| 0 < K; \n. norm (X n) \ K |] ==> Bseq X" -by (auto simp add: Bseq_def) - -lemma lemma_NBseq_def: - "(\K > 0. \n. norm (X n) \ K) = - (\N. \n. norm (X n) \ real(Suc N))" -apply auto - prefer 2 apply force -apply (cut_tac x = K in reals_Archimedean2, clarify) -apply (rule_tac x = n in exI, clarify) -apply (drule_tac x = na in spec) -apply (auto simp add: real_of_nat_Suc) -done - -text{* alternative definition for Bseq *} -lemma Bseq_iff: "Bseq X = (\N. \n. norm (X n) \ real(Suc N))" -apply (simp add: Bseq_def) -apply (simp (no_asm) add: lemma_NBseq_def) -done - -lemma lemma_NBseq_def2: - "(\K > 0. \n. norm (X n) \ K) = (\N. \n. norm (X n) < real(Suc N))" -apply (subst lemma_NBseq_def, auto) -apply (rule_tac x = "Suc N" in exI) -apply (rule_tac [2] x = N in exI) -apply (auto simp add: real_of_nat_Suc) - prefer 2 apply (blast intro: order_less_imp_le) -apply (drule_tac x = n in spec, simp) -done - -(* yet another definition for Bseq *) -lemma Bseq_iff1a: "Bseq X = (\N. \n. norm (X n) < real(Suc N))" -by (simp add: Bseq_def lemma_NBseq_def2) - -subsubsection{*Upper Bounds and Lubs of Bounded Sequences*} - -lemma Bseq_isUb: - "!!(X::nat=>real). Bseq X ==> \U. isUb (UNIV::real set) {x. \n. X n = x} U" -by (auto intro: isUbI setleI simp add: Bseq_def abs_le_iff) - - -text{* Use completeness of reals (supremum property) - to show that any bounded sequence has a least upper bound*} - -lemma Bseq_isLub: - "!!(X::nat=>real). Bseq X ==> - \U. isLub (UNIV::real set) {x. \n. X n = x} U" -by (blast intro: reals_complete Bseq_isUb) - -subsubsection{*A Bounded and Monotonic Sequence Converges*} - -lemma lemma_converg1: - "!!(X::nat=>real). [| \m. \ n \ m. X m \ X n; - isLub (UNIV::real set) {x. \n. X n = x} (X ma) - |] ==> \n \ ma. X n = X ma" -apply safe -apply (drule_tac y = "X n" in isLubD2) -apply (blast dest: order_antisym)+ -done - -text{* The best of both worlds: Easier to prove this result as a standard - theorem and then use equivalence to "transfer" it into the - equivalent nonstandard form if needed!*} - -lemma Bmonoseq_LIMSEQ: "\n. m \ n --> X n = X m ==> \L. (X ----> L)" -apply (simp add: LIMSEQ_def) -apply (rule_tac x = "X m" in exI, safe) -apply (rule_tac x = m in exI, safe) -apply (drule spec, erule impE, auto) -done - -lemma lemma_converg2: - "!!(X::nat=>real). - [| \m. X m ~= U; isLub UNIV {x. \n. X n = x} U |] ==> \m. X m < U" -apply safe -apply (drule_tac y = "X m" in isLubD2) -apply (auto dest!: order_le_imp_less_or_eq) -done - -lemma lemma_converg3: "!!(X ::nat=>real). \m. X m \ U ==> isUb UNIV {x. \n. X n = x} U" -by (rule setleI [THEN isUbI], auto) - -text{* FIXME: @{term "U - T < U"} is redundant *} -lemma lemma_converg4: "!!(X::nat=> real). - [| \m. X m ~= U; - isLub UNIV {x. \n. X n = x} U; - 0 < T; - U + - T < U - |] ==> \m. U + -T < X m & X m < U" -apply (drule lemma_converg2, assumption) -apply (rule ccontr, simp) -apply (simp add: linorder_not_less) -apply (drule lemma_converg3) -apply (drule isLub_le_isUb, assumption) -apply (auto dest: order_less_le_trans) -done - -text{*A standard proof of the theorem for monotone increasing sequence*} - -lemma Bseq_mono_convergent: - "[| Bseq X; \m. \n \ m. 
X m \ X n |] ==> convergent (X::nat=>real)" -apply (simp add: convergent_def) -apply (frule Bseq_isLub, safe) -apply (case_tac "\m. X m = U", auto) -apply (blast dest: lemma_converg1 Bmonoseq_LIMSEQ) -(* second case *) -apply (rule_tac x = U in exI) -apply (subst LIMSEQ_iff, safe) -apply (frule lemma_converg2, assumption) -apply (drule lemma_converg4, auto) -apply (rule_tac x = m in exI, safe) -apply (subgoal_tac "X m \ X n") - prefer 2 apply blast -apply (drule_tac x=n and P="%m. X m < U" in spec, arith) -done - -lemma Bseq_minus_iff: "Bseq (%n. -(X n)) = Bseq X" -by (simp add: Bseq_def) - -text{*Main monotonicity theorem*} -lemma Bseq_monoseq_convergent: "[| Bseq X; monoseq X |] ==> convergent X" -apply (simp add: monoseq_def, safe) -apply (rule_tac [2] convergent_minus_iff [THEN ssubst]) -apply (drule_tac [2] Bseq_minus_iff [THEN ssubst]) -apply (auto intro!: Bseq_mono_convergent) -done - -subsubsection{*A Few More Equivalence Theorems for Boundedness*} - -text{*alternative formulation for boundedness*} -lemma Bseq_iff2: "Bseq X = (\k > 0. \x. \n. norm (X(n) + -x) \ k)" -apply (unfold Bseq_def, safe) -apply (rule_tac [2] x = "k + norm x" in exI) -apply (rule_tac x = K in exI, simp) -apply (rule exI [where x = 0], auto) -apply (erule order_less_le_trans, simp) -apply (drule_tac x=n in spec, fold diff_def) -apply (drule order_trans [OF norm_triangle_ineq2]) -apply simp -done - -text{*alternative formulation for boundedness*} -lemma Bseq_iff3: "Bseq X = (\k > 0. \N. \n. norm(X(n) + -X(N)) \ k)" -apply safe -apply (simp add: Bseq_def, safe) -apply (rule_tac x = "K + norm (X N)" in exI) -apply auto -apply (erule order_less_le_trans, simp) -apply (rule_tac x = N in exI, safe) -apply (drule_tac x = n in spec) -apply (rule order_trans [OF norm_triangle_ineq], simp) -apply (auto simp add: Bseq_iff2) -done - -lemma BseqI2: "(\n. k \ f n & f n \ (K::real)) ==> Bseq f" -apply (simp add: Bseq_def) -apply (rule_tac x = " (\k\ + \K\) + 1" in exI, auto) -apply (drule_tac x = n in spec, arith) -done - - -subsection {* Cauchy Sequences *} - -lemma CauchyI: - "(\e. 0 < e \ \M. \m\M. \n\M. norm (X m - X n) < e) \ Cauchy X" -by (simp add: Cauchy_def) - -lemma CauchyD: - "\Cauchy X; 0 < e\ \ \M. \m\M. \n\M. norm (X m - X n) < e" -by (simp add: Cauchy_def) - -subsubsection {* Cauchy Sequences are Bounded *} - -text{*A Cauchy sequence is bounded -- this is the standard - proof mechanization rather than the nonstandard proof*} - -lemma lemmaCauchy: "\n \ M. norm (X M - X n) < (1::real) - ==> \n \ M. norm (X n :: 'a::real_normed_vector) < 1 + norm (X M)" -apply (clarify, drule spec, drule (1) mp) -apply (simp only: norm_minus_commute) -apply (drule order_le_less_trans [OF norm_triangle_ineq2]) -apply simp -done - -lemma Cauchy_Bseq: "Cauchy X ==> Bseq X" -apply (simp add: Cauchy_def) -apply (drule spec, drule mp, rule zero_less_one, safe) -apply (drule_tac x="M" in spec, simp) -apply (drule lemmaCauchy) -apply (rule_tac k="M" in Bseq_offset) -apply (simp add: Bseq_def) -apply (rule_tac x="1 + norm (X M)" in exI) -apply (rule conjI, rule order_less_le_trans [OF zero_less_one], simp) -apply (simp add: order_less_imp_le) -done - -subsubsection {* Cauchy Sequences are Convergent *} - -axclass banach \ real_normed_vector - Cauchy_convergent: "Cauchy X \ convergent X" - -theorem LIMSEQ_imp_Cauchy: - assumes X: "X ----> a" shows "Cauchy X" -proof (rule CauchyI) - fix e::real assume "0 < e" - hence "0 < e/2" by simp - with X have "\N. \n\N. norm (X n - a) < e/2" by (rule LIMSEQ_D) - then obtain N where N: "\n\N. 
norm (X n - a) < e/2" .. - show "\N. \m\N. \n\N. norm (X m - X n) < e" - proof (intro exI allI impI) - fix m assume "N \ m" - hence m: "norm (X m - a) < e/2" using N by fast - fix n assume "N \ n" - hence n: "norm (X n - a) < e/2" using N by fast - have "norm (X m - X n) = norm ((X m - a) - (X n - a))" by simp - also have "\ \ norm (X m - a) + norm (X n - a)" - by (rule norm_triangle_ineq4) - also from m n have "\ < e" by(simp add:field_simps) - finally show "norm (X m - X n) < e" . - qed -qed - -lemma convergent_Cauchy: "convergent X \ Cauchy X" -unfolding convergent_def -by (erule exE, erule LIMSEQ_imp_Cauchy) - -text {* -Proof that Cauchy sequences converge based on the one from -http://pirate.shu.edu/~wachsmut/ira/numseq/proofs/cauconv.html -*} - -text {* - If sequence @{term "X"} is Cauchy, then its limit is the lub of - @{term "{r::real. \N. \n\N. r < X n}"} -*} - -lemma isUb_UNIV_I: "(\y. y \ S \ y \ u) \ isUb UNIV S u" -by (simp add: isUbI setleI) - -lemma real_abs_diff_less_iff: - "(\x - a\ < (r::real)) = (a - r < x \ x < a + r)" -by auto - -locale real_Cauchy = - fixes X :: "nat \ real" - assumes X: "Cauchy X" - fixes S :: "real set" - defines S_def: "S \ {x::real. \N. \n\N. x < X n}" - -lemma real_CauchyI: - assumes "Cauchy X" - shows "real_Cauchy X" - proof qed (fact assms) - -lemma (in real_Cauchy) mem_S: "\n\N. x < X n \ x \ S" -by (unfold S_def, auto) - -lemma (in real_Cauchy) bound_isUb: - assumes N: "\n\N. X n < x" - shows "isUb UNIV S x" -proof (rule isUb_UNIV_I) - fix y::real assume "y \ S" - hence "\M. \n\M. y < X n" - by (simp add: S_def) - then obtain M where "\n\M. y < X n" .. - hence "y < X (max M N)" by simp - also have "\ < x" using N by simp - finally show "y \ x" - by (rule order_less_imp_le) -qed - -lemma (in real_Cauchy) isLub_ex: "\u. isLub UNIV S u" -proof (rule reals_complete) - obtain N where "\m\N. \n\N. norm (X m - X n) < 1" - using CauchyD [OF X zero_less_one] by fast - hence N: "\n\N. norm (X n - X N) < 1" by simp - show "\x. x \ S" - proof - from N have "\n\N. X N - 1 < X n" - by (simp add: real_abs_diff_less_iff) - thus "X N - 1 \ S" by (rule mem_S) - qed - show "\u. isUb UNIV S u" - proof - from N have "\n\N. X n < X N + 1" - by (simp add: real_abs_diff_less_iff) - thus "isUb UNIV S (X N + 1)" - by (rule bound_isUb) - qed -qed - -lemma (in real_Cauchy) isLub_imp_LIMSEQ: - assumes x: "isLub UNIV S x" - shows "X ----> x" -proof (rule LIMSEQ_I) - fix r::real assume "0 < r" - hence r: "0 < r/2" by simp - obtain N where "\n\N. \m\N. norm (X n - X m) < r/2" - using CauchyD [OF X r] by fast - hence "\n\N. norm (X n - X N) < r/2" by simp - hence N: "\n\N. X N - r/2 < X n \ X n < X N + r/2" - by (simp only: real_norm_def real_abs_diff_less_iff) - - from N have "\n\N. X N - r/2 < X n" by fast - hence "X N - r/2 \ S" by (rule mem_S) - hence 1: "X N - r/2 \ x" using x isLub_isUb isUbD by fast - - from N have "\n\N. X n < X N + r/2" by fast - hence "isUb UNIV S (X N + r/2)" by (rule bound_isUb) - hence 2: "x \ X N + r/2" using x isLub_le_isUb by fast - - show "\N. \n\N. norm (X n - x) < r" - proof (intro exI allI impI) - fix n assume n: "N \ n" - from N n have "X n < X N + r/2" and "X N - r/2 < X n" by simp+ - thus "norm (X n - x) < r" using 1 2 - by (simp add: real_abs_diff_less_iff) - qed -qed - -lemma (in real_Cauchy) LIMSEQ_ex: "\x. X ----> x" -proof - - obtain x where "isLub UNIV S x" - using isLub_ex by fast - hence "X ----> x" - by (rule isLub_imp_LIMSEQ) - thus ?thesis .. 
-qed - -lemma real_Cauchy_convergent: - fixes X :: "nat \ real" - shows "Cauchy X \ convergent X" -unfolding convergent_def -by (rule real_Cauchy.LIMSEQ_ex) - (rule real_CauchyI) - -instance real :: banach -by intro_classes (rule real_Cauchy_convergent) - -lemma Cauchy_convergent_iff: - fixes X :: "nat \ 'a::banach" - shows "Cauchy X = convergent X" -by (fast intro: Cauchy_convergent convergent_Cauchy) - - -subsection {* Power Sequences *} - -text{*The sequence @{term "x^n"} tends to 0 if @{term "0\x"} and @{term -"x<1"}. Proof will use (NS) Cauchy equivalence for convergence and - also fact that bounded and monotonic sequence converges.*} - -lemma Bseq_realpow: "[| 0 \ (x::real); x \ 1 |] ==> Bseq (%n. x ^ n)" -apply (simp add: Bseq_def) -apply (rule_tac x = 1 in exI) -apply (simp add: power_abs) -apply (auto dest: power_mono) -done - -lemma monoseq_realpow: "[| 0 \ x; x \ 1 |] ==> monoseq (%n. x ^ n)" -apply (clarify intro!: mono_SucI2) -apply (cut_tac n = n and N = "Suc n" and a = x in power_decreasing, auto) -done - -lemma convergent_realpow: - "[| 0 \ (x::real); x \ 1 |] ==> convergent (%n. x ^ n)" -by (blast intro!: Bseq_monoseq_convergent Bseq_realpow monoseq_realpow) - -lemma LIMSEQ_inverse_realpow_zero_lemma: - fixes x :: real - assumes x: "0 \ x" - shows "real n * x + 1 \ (x + 1) ^ n" -apply (induct n) -apply simp -apply simp -apply (rule order_trans) -prefer 2 -apply (erule mult_left_mono) -apply (rule add_increasing [OF x], simp) -apply (simp add: real_of_nat_Suc) -apply (simp add: ring_distribs) -apply (simp add: mult_nonneg_nonneg x) -done - -lemma LIMSEQ_inverse_realpow_zero: - "1 < (x::real) \ (\n. inverse (x ^ n)) ----> 0" -proof (rule LIMSEQ_inverse_zero [rule_format]) - fix y :: real - assume x: "1 < x" - hence "0 < x - 1" by simp - hence "\y. \N::nat. y < real N * (x - 1)" - by (rule reals_Archimedean3) - hence "\N::nat. y < real N * (x - 1)" .. - then obtain N::nat where "y < real N * (x - 1)" .. - also have "\ \ real N * (x - 1) + 1" by simp - also have "\ \ (x - 1 + 1) ^ N" - by (rule LIMSEQ_inverse_realpow_zero_lemma, cut_tac x, simp) - also have "\ = x ^ N" by simp - finally have "y < x ^ N" . - hence "\n\N. y < x ^ n" - apply clarify - apply (erule order_less_le_trans) - apply (erule power_increasing) - apply (rule order_less_imp_le [OF x]) - done - thus "\N. \n\N. y < x ^ n" .. -qed - -lemma LIMSEQ_realpow_zero: - "\0 \ (x::real); x < 1\ \ (\n. x ^ n) ----> 0" -proof (cases) - assume "x = 0" - hence "(\n. x ^ Suc n) ----> 0" by (simp add: LIMSEQ_const) - thus ?thesis by (rule LIMSEQ_imp_Suc) -next - assume "0 \ x" and "x \ 0" - hence x0: "0 < x" by simp - assume x1: "x < 1" - from x0 x1 have "1 < inverse x" - by (rule real_inverse_gt_one) - hence "(\n. inverse (inverse x ^ n)) ----> 0" - by (rule LIMSEQ_inverse_realpow_zero) - thus ?thesis by (simp add: power_inverse) -qed - -lemma LIMSEQ_power_zero: - fixes x :: "'a::{real_normed_algebra_1,recpower}" - shows "norm x < 1 \ (\n. x ^ n) ----> 0" -apply (drule LIMSEQ_realpow_zero [OF norm_ge_zero]) -apply (simp only: LIMSEQ_Zseq_iff, erule Zseq_le) -apply (simp add: power_abs norm_power_ineq) -done - -lemma LIMSEQ_divide_realpow_zero: - "1 < (x::real) ==> (%n. a / (x ^ n)) ----> 0" -apply (cut_tac a = a and x1 = "inverse x" in - LIMSEQ_mult [OF LIMSEQ_const LIMSEQ_realpow_zero]) -apply (auto simp add: divide_inverse power_inverse) -apply (simp add: inverse_eq_divide pos_divide_less_eq) -done - -text{*Limit of @{term "c^n"} for @{term"\c\ < 1"}*} - -lemma LIMSEQ_rabs_realpow_zero: "\c\ < (1::real) ==> (%n. 
\c\ ^ n) ----> 0" -by (rule LIMSEQ_realpow_zero [OF abs_ge_zero]) - -lemma LIMSEQ_rabs_realpow_zero2: "\c\ < (1::real) ==> (%n. c ^ n) ----> 0" -apply (rule LIMSEQ_rabs_zero [THEN iffD1]) -apply (auto intro: LIMSEQ_rabs_realpow_zero simp add: power_abs) -done - -end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/IsaMakefile --- a/src/HOL/IsaMakefile Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/IsaMakefile Tue Dec 30 11:10:01 2008 +0100 @@ -112,6 +112,8 @@ Tools/dseq.ML \ Tools/function_package/auto_term.ML \ Tools/function_package/context_tree.ML \ + Tools/function_package/decompose.ML \ + Tools/function_package/descent.ML \ Tools/function_package/fundef_common.ML \ Tools/function_package/fundef_core.ML \ Tools/function_package/fundef_datatype.ML \ @@ -123,8 +125,11 @@ Tools/function_package/measure_functions.ML \ Tools/function_package/mutual.ML \ Tools/function_package/pattern_split.ML \ + Tools/function_package/scnp_reconstruct.ML \ + Tools/function_package/scnp_solve.ML \ Tools/function_package/size.ML \ Tools/function_package/sum_tree.ML \ + Tools/function_package/termination.ML \ Tools/hologic.ML \ Tools/inductive_codegen.ML \ Tools/inductive_package.ML \ @@ -179,6 +184,7 @@ $(SRC)/Tools/code/code_thingol.ML \ $(SRC)/Tools/induct.ML \ $(SRC)/Tools/induct_tacs.ML \ + $(SRC)/Tools/value.ML \ $(SRC)/Tools/nbe.ML \ $(SRC)/Tools/random_word.ML \ $(SRC)/Tools/rat.ML @@ -255,7 +261,7 @@ $(OUT)/HOL: ROOT.ML $(MAIN_DEPENDENCIES) \ Complex_Main.thy \ Complex.thy \ - Complex/Fundamental_Theorem_Algebra.thy \ + Fundamental_Theorem_Algebra.thy \ Deriv.thy \ Fact.thy \ FrechetDeriv.thy \ @@ -265,11 +271,11 @@ Log.thy \ MacLaurin.thy \ NthRoot.thy \ - Hyperreal/SEQ.thy \ + SEQ.thy \ Series.thy \ Taylor.thy \ Transcendental.thy \ - Library/Dense_Linear_Order.thy \ + Dense_Linear_Order.thy \ GCD.thy \ Order_Relation.thy \ Parity.thy \ @@ -281,7 +287,7 @@ RealDef.thy \ RealPow.thy \ Real.thy \ - Real/RealVector.thy \ + RealVector.thy \ Tools/float_syntax.ML \ Tools/rat_arith.ML \ Tools/real_arith.ML \ @@ -331,16 +337,16 @@ HOL-HahnBanach: HOL $(LOG)/HOL-HahnBanach.gz $(LOG)/HOL-HahnBanach.gz: $(OUT)/HOL \ - Real/HahnBanach/Bounds.thy Real/HahnBanach/FunctionNorm.thy \ - Real/HahnBanach/FunctionOrder.thy Real/HahnBanach/HahnBanach.thy \ - Real/HahnBanach/HahnBanachExtLemmas.thy \ - Real/HahnBanach/HahnBanachSupLemmas.thy \ - Real/HahnBanach/Linearform.thy Real/HahnBanach/NormedSpace.thy \ - Real/HahnBanach/README.html Real/HahnBanach/ROOT.ML \ - Real/HahnBanach/Subspace.thy Real/HahnBanach/VectorSpace.thy \ - Real/HahnBanach/ZornLemma.thy Real/HahnBanach/document/root.bib \ - Real/HahnBanach/document/root.tex - @cd Real; $(ISABELLE_TOOL) usedir -g true $(OUT)/HOL HahnBanach + HahnBanach/Bounds.thy HahnBanach/FunctionNorm.thy \ + HahnBanach/FunctionOrder.thy HahnBanach/HahnBanach.thy \ + HahnBanach/HahnBanachExtLemmas.thy \ + HahnBanach/HahnBanachSupLemmas.thy \ + HahnBanach/Linearform.thy HahnBanach/NormedSpace.thy \ + HahnBanach/README.html HahnBanach/ROOT.ML \ + HahnBanach/Subspace.thy HahnBanach/VectorSpace.thy \ + HahnBanach/ZornLemma.thy HahnBanach/document/root.bib \ + HahnBanach/document/root.tex + @$(ISABELLE_TOOL) usedir -g true $(OUT)/HOL HahnBanach ## HOL-Subst @@ -776,20 +782,21 @@ ex/Coherent.thy ex/Dense_Linear_Order_Ex.thy ex/Eval_Examples.thy \ ex/Groebner_Examples.thy ex/Random.thy ex/Quickcheck.thy \ ex/Codegenerator.thy ex/Codegenerator_Pretty.thy \ + ex/CodegenSML_Test.thy \ ex/Commutative_RingEx.thy ex/Efficient_Nat_examples.thy \ ex/Hex_Bin_Examples.thy 
ex/Commutative_Ring_Complete.thy \ ex/ExecutableContent.thy ex/Fundefs.thy ex/Guess.thy ex/Hebrew.thy \ ex/Binary.thy ex/Higher_Order_Logic.thy ex/Hilbert_Classical.thy \ ex/Induction_Scheme.thy ex/InductiveInvariant.thy \ ex/InductiveInvariant_examples.thy ex/Intuitionistic.thy \ - ex/Lagrange.thy ex/LexOrds.thy ex/LocaleTest2.thy ex/MT.thy \ + ex/Lagrange.thy ex/LocaleTest2.thy ex/MT.thy \ ex/MergeSort.thy ex/MonoidGroup.thy ex/Multiquote.thy ex/NatSum.thy \ ex/Numeral.thy ex/PER.thy ex/PresburgerEx.thy ex/Primrec.thy \ ex/Quickcheck_Examples.thy ex/Reflection.thy ex/reflection_data.ML \ ex/ReflectionEx.thy ex/ROOT.ML ex/Recdefs.thy ex/Records.thy \ ex/Reflected_Presburger.thy ex/coopertac.ML \ ex/Refute_Examples.thy ex/SAT_Examples.thy ex/SVC_Oracle.thy \ - ex/Sudoku.thy ex/Tarski.thy ex/Term_Of_Syntax.thy \ + ex/Sudoku.thy ex/Tarski.thy ex/Termination.thy ex/Term_Of_Syntax.thy \ ex/Unification.thy ex/document/root.bib \ ex/document/root.tex ex/Meson_Test.thy ex/reflection.ML ex/set.thy \ ex/svc_funcs.ML ex/svc_test.thy \ diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Library/Dense_Linear_Order.thy --- a/src/HOL/Library/Dense_Linear_Order.thy Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,879 +0,0 @@ -(* - Author: Amine Chaieb, TU Muenchen -*) - -header {* Dense linear order without endpoints - and a quantifier elimination procedure in Ferrante and Rackoff style *} - -theory Dense_Linear_Order -imports Plain "~~/src/HOL/Groebner_Basis" -uses - "~~/src/HOL/Tools/Qelim/langford_data.ML" - "~~/src/HOL/Tools/Qelim/ferrante_rackoff_data.ML" - ("~~/src/HOL/Tools/Qelim/langford.ML") - ("~~/src/HOL/Tools/Qelim/ferrante_rackoff.ML") -begin - -setup {* Langford_Data.setup #> Ferrante_Rackoff_Data.setup *} - -context linorder -begin - -lemma less_not_permute: "\ (x < y \ y < x)" by (simp add: not_less linear) - -lemma gather_simps: - shows - "(\x. (\y \ L. y < x) \ (\y \ U. x < y) \ x < u \ P x) \ (\x. (\y \ L. y < x) \ (\y \ (insert u U). x < y) \ P x)" - and "(\x. (\y \ L. y < x) \ (\y \ U. x < y) \ l < x \ P x) \ (\x. (\y \ (insert l L). y < x) \ (\y \ U. x < y) \ P x)" - "(\x. (\y \ L. y < x) \ (\y \ U. x < y) \ x < u) \ (\x. (\y \ L. y < x) \ (\y \ (insert u U). x < y))" - and "(\x. (\y \ L. y < x) \ (\y \ U. x < y) \ l < x) \ (\x. (\y \ (insert l L). y < x) \ (\y \ U. x < y))" by auto - -lemma - gather_start: "(\x. P x) \ (\x. (\y \ {}. y < x) \ (\y\ {}. x < y) \ P x)" - by simp - -text{* Theorems for @{text "\z. \x. x < z \ (P x \ P\<^bsub>-\\<^esub>)"}*} -lemma minf_lt: "\z . \x. x < z \ (x < t \ True)" by auto -lemma minf_gt: "\z . \x. x < z \ (t < x \ False)" - by (simp add: not_less) (rule exI[where x="t"], auto simp add: less_le) - -lemma minf_le: "\z. \x. x < z \ (x \ t \ True)" by (auto simp add: less_le) -lemma minf_ge: "\z. \x. x < z \ (t \ x \ False)" - by (auto simp add: less_le not_less not_le) -lemma minf_eq: "\z. \x. x < z \ (x = t \ False)" by auto -lemma minf_neq: "\z. \x. x < z \ (x \ t \ True)" by auto -lemma minf_P: "\z. \x. x < z \ (P \ P)" by blast - -text{* Theorems for @{text "\z. \x. x < z \ (P x \ P\<^bsub>+\\<^esub>)"}*} -lemma pinf_gt: "\z . \x. z < x \ (t < x \ True)" by auto -lemma pinf_lt: "\z . \x. z < x \ (x < t \ False)" - by (simp add: not_less) (rule exI[where x="t"], auto simp add: less_le) - -lemma pinf_ge: "\z. \x. z < x \ (t \ x \ True)" by (auto simp add: less_le) -lemma pinf_le: "\z. \x. z < x \ (x \ t \ False)" - by (auto simp add: less_le not_less not_le) -lemma pinf_eq: "\z. \x. 
z < x \ (x = t \ False)" by auto -lemma pinf_neq: "\z. \x. z < x \ (x \ t \ True)" by auto -lemma pinf_P: "\z. \x. z < x \ (P \ P)" by blast - -lemma nmi_lt: "t \ U \ \x. \True \ x < t \ (\ u\ U. u \ x)" by auto -lemma nmi_gt: "t \ U \ \x. \False \ t < x \ (\ u\ U. u \ x)" - by (auto simp add: le_less) -lemma nmi_le: "t \ U \ \x. \True \ x\ t \ (\ u\ U. u \ x)" by auto -lemma nmi_ge: "t \ U \ \x. \False \ t\ x \ (\ u\ U. u \ x)" by auto -lemma nmi_eq: "t \ U \ \x. \False \ x = t \ (\ u\ U. u \ x)" by auto -lemma nmi_neq: "t \ U \\x. \True \ x \ t \ (\ u\ U. u \ x)" by auto -lemma nmi_P: "\ x. ~P \ P \ (\ u\ U. u \ x)" by auto -lemma nmi_conj: "\\x. \P1' \ P1 x \ (\ u\ U. u \ x) ; - \x. \P2' \ P2 x \ (\ u\ U. u \ x)\ \ - \x. \(P1' \ P2') \ (P1 x \ P2 x) \ (\ u\ U. u \ x)" by auto -lemma nmi_disj: "\\x. \P1' \ P1 x \ (\ u\ U. u \ x) ; - \x. \P2' \ P2 x \ (\ u\ U. u \ x)\ \ - \x. \(P1' \ P2') \ (P1 x \ P2 x) \ (\ u\ U. u \ x)" by auto - -lemma npi_lt: "t \ U \ \x. \False \ x < t \ (\ u\ U. x \ u)" by (auto simp add: le_less) -lemma npi_gt: "t \ U \ \x. \True \ t < x \ (\ u\ U. x \ u)" by auto -lemma npi_le: "t \ U \ \x. \False \ x \ t \ (\ u\ U. x \ u)" by auto -lemma npi_ge: "t \ U \ \x. \True \ t \ x \ (\ u\ U. x \ u)" by auto -lemma npi_eq: "t \ U \ \x. \False \ x = t \ (\ u\ U. x \ u)" by auto -lemma npi_neq: "t \ U \ \x. \True \ x \ t \ (\ u\ U. x \ u )" by auto -lemma npi_P: "\ x. ~P \ P \ (\ u\ U. x \ u)" by auto -lemma npi_conj: "\\x. \P1' \ P1 x \ (\ u\ U. x \ u) ; \x. \P2' \ P2 x \ (\ u\ U. x \ u)\ - \ \x. \(P1' \ P2') \ (P1 x \ P2 x) \ (\ u\ U. x \ u)" by auto -lemma npi_disj: "\\x. \P1' \ P1 x \ (\ u\ U. x \ u) ; \x. \P2' \ P2 x \ (\ u\ U. x \ u)\ - \ \x. \(P1' \ P2') \ (P1 x \ P2 x) \ (\ u\ U. x \ u)" by auto - -lemma lin_dense_lt: "t \ U \ \x l u. (\ t. l < t \ t < u \ t \ U) \ l< x \ x < u \ x < t \ (\ y. l < y \ y < u \ y < t)" -proof(clarsimp) - fix x l u y assume tU: "t \ U" and noU: "\t. l < t \ t < u \ t \ U" and lx: "l < x" - and xu: "xy" by auto - {assume H: "t < y" - from less_trans[OF lx px] less_trans[OF H yu] - have "l < t \ t < u" by simp - with tU noU have "False" by auto} - hence "\ t < y" by auto hence "y \ t" by (simp add: not_less) - thus "y < t" using tny by (simp add: less_le) -qed - -lemma lin_dense_gt: "t \ U \ \x l u. (\ t. l < t \ t< u \ t \ U) \ l < x \ x < u \ t < x \ (\ y. l < y \ y < u \ t < y)" -proof(clarsimp) - fix x l u y - assume tU: "t \ U" and noU: "\t. l < t \ t < u \ t \ U" and lx: "l < x" and xu: "xy" by auto - {assume H: "y< t" - from less_trans[OF ly H] less_trans[OF px xu] have "l < t \ t < u" by simp - with tU noU have "False" by auto} - hence "\ y y" by (auto simp add: not_less) - thus "t < y" using tny by (simp add:less_le) -qed - -lemma lin_dense_le: "t \ U \ \x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ x \ t \ (\ y. l < y \ y < u \ y\ t)" -proof(clarsimp) - fix x l u y - assume tU: "t \ U" and noU: "\t. l < t \ t < u \ t \ U" and lx: "l < x" and xu: "x t" and ly: "ly" by auto - {assume H: "t < y" - from less_le_trans[OF lx px] less_trans[OF H yu] - have "l < t \ t < u" by simp - with tU noU have "False" by auto} - hence "\ t < y" by auto thus "y \ t" by (simp add: not_less) -qed - -lemma lin_dense_ge: "t \ U \ \x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ t \ x \ (\ y. l < y \ y < u \ t \ y)" -proof(clarsimp) - fix x l u y - assume tU: "t \ U" and noU: "\t. 
l < t \ t < u \ t \ U" and lx: "l < x" and xu: "x x" and ly: "ly" by auto - {assume H: "y< t" - from less_trans[OF ly H] le_less_trans[OF px xu] - have "l < t \ t < u" by simp - with tU noU have "False" by auto} - hence "\ y y" by (simp add: not_less) -qed -lemma lin_dense_eq: "t \ U \ \x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ x = t \ (\ y. l < y \ y < u \ y= t)" by auto -lemma lin_dense_neq: "t \ U \ \x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ x \ t \ (\ y. l < y \ y < u \ y\ t)" by auto -lemma lin_dense_P: "\x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ P \ (\ y. l < y \ y < u \ P)" by auto - -lemma lin_dense_conj: - "\\x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ P1 x - \ (\ y. l < y \ y < u \ P1 y) ; - \x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ P2 x - \ (\ y. l < y \ y < u \ P2 y)\ \ - \x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ (P1 x \ P2 x) - \ (\ y. l < y \ y < u \ (P1 y \ P2 y))" - by blast -lemma lin_dense_disj: - "\\x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ P1 x - \ (\ y. l < y \ y < u \ P1 y) ; - \x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ P2 x - \ (\ y. l < y \ y < u \ P2 y)\ \ - \x l u. (\ t. l < t \ t< u \ t \ U) \ l< x \ x < u \ (P1 x \ P2 x) - \ (\ y. l < y \ y < u \ (P1 y \ P2 y))" - by blast - -lemma npmibnd: "\\x. \ MP \ P x \ (\ u\ U. u \ x); \x. \PP \ P x \ (\ u\ U. x \ u)\ - \ \x. \ MP \ \PP \ P x \ (\ u\ U. \ u' \ U. u \ x \ x \ u')" -by auto - -lemma finite_set_intervals: - assumes px: "P x" and lx: "l \ x" and xu: "x \ u" and linS: "l\ S" - and uinS: "u \ S" and fS:"finite S" and lS: "\ x\ S. l \ x" and Su: "\ x\ S. x \ u" - shows "\ a \ S. \ b \ S. (\ y. a < y \ y < b \ y \ S) \ a \ x \ x \ b \ P x" -proof- - let ?Mx = "{y. y\ S \ y \ x}" - let ?xM = "{y. y\ S \ x \ y}" - let ?a = "Max ?Mx" - let ?b = "Min ?xM" - have MxS: "?Mx \ S" by blast - hence fMx: "finite ?Mx" using fS finite_subset by auto - from lx linS have linMx: "l \ ?Mx" by blast - hence Mxne: "?Mx \ {}" by blast - have xMS: "?xM \ S" by blast - hence fxM: "finite ?xM" using fS finite_subset by auto - from xu uinS have linxM: "u \ ?xM" by blast - hence xMne: "?xM \ {}" by blast - have ax:"?a \ x" using Mxne fMx by auto - have xb:"x \ ?b" using xMne fxM by auto - have "?a \ ?Mx" using Max_in[OF fMx Mxne] by simp hence ainS: "?a \ S" using MxS by blast - have "?b \ ?xM" using Min_in[OF fxM xMne] by simp hence binS: "?b \ S" using xMS by blast - have noy:"\ y. ?a < y \ y < ?b \ y \ S" - proof(clarsimp) - fix y assume ay: "?a < y" and yb: "y < ?b" and yS: "y \ S" - from yS have "y\ ?Mx \ y\ ?xM" by (auto simp add: linear) - moreover {assume "y \ ?Mx" hence "y \ ?a" using Mxne fMx by auto with ay have "False" by (simp add: not_le[symmetric])} - moreover {assume "y \ ?xM" hence "?b \ y" using xMne fxM by auto with yb have "False" by (simp add: not_le[symmetric])} - ultimately show "False" by blast - qed - from ainS binS noy ax xb px show ?thesis by blast -qed - -lemma finite_set_intervals2: - assumes px: "P x" and lx: "l \ x" and xu: "x \ u" and linS: "l\ S" - and uinS: "u \ S" and fS:"finite S" and lS: "\ x\ S. l \ x" and Su: "\ x\ S. x \ u" - shows "(\ s\ S. P s) \ (\ a \ S. \ b \ S. (\ y. a < y \ y < b \ y \ S) \ a < x \ x < b \ P x)" -proof- - from finite_set_intervals[where P="P", OF px lx xu linS uinS fS lS Su] - obtain a and b where - as: "a\ S" and bs: "b\ S" and noS:"\y. 
a < y \ y < b \ y \ S" - and axb: "a \ x \ x \ b \ P x" by auto - from axb have "x= a \ x= b \ (a < x \ x < b)" by (auto simp add: le_less) - thus ?thesis using px as bs noS by blast -qed - -end - -section {* The classical QE after Langford for dense linear orders *} - -context dense_linear_order -begin - -lemma interval_empty_iff: - "{y. x < y \ y < z} = {} \ \ x < z" - by (auto dest: dense) - -lemma dlo_qe_bnds: - assumes ne: "L \ {}" and neU: "U \ {}" and fL: "finite L" and fU: "finite U" - shows "(\x. (\y \ L. y < x) \ (\y \ U. x < y)) \ (\ l \ L. \u \ U. l < u)" -proof (simp only: atomize_eq, rule iffI) - assume H: "\x. (\y\L. y < x) \ (\y\U. x < y)" - then obtain x where xL: "\y\L. y < x" and xU: "\y\U. x < y" by blast - {fix l u assume l: "l \ L" and u: "u \ U" - have "l < x" using xL l by blast - also have "x < u" using xU u by blast - finally (less_trans) have "l < u" .} - thus "\l\L. \u\U. l < u" by blast -next - assume H: "\l\L. \u\U. l < u" - let ?ML = "Max L" - let ?MU = "Min U" - from fL ne have th1: "?ML \ L" and th1': "\l\L. l \ ?ML" by auto - from fU neU have th2: "?MU \ U" and th2': "\u\U. ?MU \ u" by auto - from th1 th2 H have "?ML < ?MU" by auto - with dense obtain w where th3: "?ML < w" and th4: "w < ?MU" by blast - from th3 th1' have "\l \ L. l < w" by auto - moreover from th4 th2' have "\u \ U. w < u" by auto - ultimately show "\x. (\y\L. y < x) \ (\y\U. x < y)" by auto -qed - -lemma dlo_qe_noub: - assumes ne: "L \ {}" and fL: "finite L" - shows "(\x. (\y \ L. y < x) \ (\y \ {}. x < y)) \ True" -proof(simp add: atomize_eq) - from gt_ex[of "Max L"] obtain M where M: "Max L < M" by blast - from ne fL have "\x \ L. x \ Max L" by simp - with M have "\x\L. x < M" by (auto intro: le_less_trans) - thus "\x. \y\L. y < x" by blast -qed - -lemma dlo_qe_nolb: - assumes ne: "U \ {}" and fU: "finite U" - shows "(\x. (\y \ {}. y < x) \ (\y \ U. x < y)) \ True" -proof(simp add: atomize_eq) - from lt_ex[of "Min U"] obtain M where M: "M < Min U" by blast - from ne fU have "\x \ U. Min U \ x" by simp - with M have "\x\U. M < x" by (auto intro: less_le_trans) - thus "\x. \y\U. x < y" by blast -qed - -lemma exists_neq: "\(x::'a). x \ t" "\(x::'a). t \ x" - using gt_ex[of t] by auto - -lemmas dlo_simps = order_refl less_irrefl not_less not_le exists_neq - le_less neq_iff linear less_not_permute - -lemma axiom: "dense_linear_order (op \) (op <)" by (rule dense_linear_order_axioms) -lemma atoms: - shows "TERM (less :: 'a \ _)" - and "TERM (less_eq :: 'a \ _)" - and "TERM (op = :: 'a \ _)" . - -declare axiom[langford qe: dlo_qe_bnds dlo_qe_nolb dlo_qe_noub gather: gather_start gather_simps atoms: atoms] -declare dlo_simps[langfordsimp] - -end - -(* FIXME: Move to HOL -- together with the conj_aci_rule in langford.ML *) -lemma dnf: - "(P & (Q | R)) = ((P&Q) | (P&R))" - "((Q | R) & P) = ((Q&P) | (R&P))" - by blast+ - -lemmas weak_dnf_simps = simp_thms dnf - -lemma nnf_simps: - "(\(P \ Q)) = (\P \ \Q)" "(\(P \ Q)) = (\P \ \Q)" "(P \ Q) = (\P \ Q)" - "(P = Q) = ((P \ Q) \ (\P \ \ Q))" "(\ \(P)) = P" - by blast+ - -lemma ex_distrib: "(\x. P x \ Q x) \ ((\x. P x) \ (\x. 
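In conventional notation, the central Langford elimination step dlo_qe_bnds above reads, for finite non-empty sets L of strict lower bounds and U of strict upper bounds,

  \[ \bigl(\exists x.\ (\forall y\in L.\ y < x) \wedge (\forall y\in U.\ x < y)\bigr)
     \;\longleftrightarrow\;
     \bigl(\forall l\in L.\ \forall u\in U.\ l < u\bigr) \]

The left-to-right direction is transitivity; for the converse the proof picks, by density, a witness strictly between Max L and Min U. The degenerate cases with empty L or U are handled by dlo_qe_nolb and dlo_qe_noub via lt_ex and gt_ex.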
Q x))" by blast - -lemmas dnf_simps = weak_dnf_simps nnf_simps ex_distrib - -use "~~/src/HOL/Tools/Qelim/langford.ML" -method_setup dlo = {* - Method.ctxt_args (Method.SIMPLE_METHOD' o LangfordQE.dlo_tac) -*} "Langford's algorithm for quantifier elimination in dense linear orders" - - -section {* Contructive dense linear orders yield QE for linear arithmetic over ordered Fields -- see @{text "Arith_Tools.thy"} *} - -text {* Linear order without upper bounds *} - -class_locale linorder_stupid_syntax = linorder -begin -notation - less_eq ("op \") and - less_eq ("(_/ \ _)" [51, 51] 50) and - less ("op \") and - less ("(_/ \ _)" [51, 51] 50) - -end - -class_locale linorder_no_ub = linorder_stupid_syntax + - assumes gt_ex: "\y. less x y" -begin -lemma ge_ex: "\y. x \ y" using gt_ex by auto - -text {* Theorems for @{text "\z. \x. z \ x \ (P x \ P\<^bsub>+\\<^esub>)"} *} -lemma pinf_conj: - assumes ex1: "\z1. \x. z1 \ x \ (P1 x \ P1')" - and ex2: "\z2. \x. z2 \ x \ (P2 x \ P2')" - shows "\z. \x. z \ x \ ((P1 x \ P2 x) \ (P1' \ P2'))" -proof- - from ex1 ex2 obtain z1 and z2 where z1: "\x. z1 \ x \ (P1 x \ P1')" - and z2: "\x. z2 \ x \ (P2 x \ P2')" by blast - from gt_ex obtain z where z:"ord.max less_eq z1 z2 \ z" by blast - from z have zz1: "z1 \ z" and zz2: "z2 \ z" by simp_all - {fix x assume H: "z \ x" - from less_trans[OF zz1 H] less_trans[OF zz2 H] - have "(P1 x \ P2 x) \ (P1' \ P2')" using z1 zz1 z2 zz2 by auto - } - thus ?thesis by blast -qed - -lemma pinf_disj: - assumes ex1: "\z1. \x. z1 \ x \ (P1 x \ P1')" - and ex2: "\z2. \x. z2 \ x \ (P2 x \ P2')" - shows "\z. \x. z \ x \ ((P1 x \ P2 x) \ (P1' \ P2'))" -proof- - from ex1 ex2 obtain z1 and z2 where z1: "\x. z1 \ x \ (P1 x \ P1')" - and z2: "\x. z2 \ x \ (P2 x \ P2')" by blast - from gt_ex obtain z where z:"ord.max less_eq z1 z2 \ z" by blast - from z have zz1: "z1 \ z" and zz2: "z2 \ z" by simp_all - {fix x assume H: "z \ x" - from less_trans[OF zz1 H] less_trans[OF zz2 H] - have "(P1 x \ P2 x) \ (P1' \ P2')" using z1 zz1 z2 zz2 by auto - } - thus ?thesis by blast -qed - -lemma pinf_ex: assumes ex:"\z. \x. z \ x \ (P x \ P1)" and p1: P1 shows "\ x. P x" -proof- - from ex obtain z where z: "\x. z \ x \ (P x \ P1)" by blast - from gt_ex obtain x where x: "z \ x" by blast - from z x p1 show ?thesis by blast -qed - -end - -text {* Linear order without upper bounds *} - -class_locale linorder_no_lb = linorder_stupid_syntax + - assumes lt_ex: "\y. less y x" -begin -lemma le_ex: "\y. y \ x" using lt_ex by auto - - -text {* Theorems for @{text "\z. \x. x \ z \ (P x \ P\<^bsub>-\\<^esub>)"} *} -lemma minf_conj: - assumes ex1: "\z1. \x. x \ z1 \ (P1 x \ P1')" - and ex2: "\z2. \x. x \ z2 \ (P2 x \ P2')" - shows "\z. \x. x \ z \ ((P1 x \ P2 x) \ (P1' \ P2'))" -proof- - from ex1 ex2 obtain z1 and z2 where z1: "\x. x \ z1 \ (P1 x \ P1')"and z2: "\x. x \ z2 \ (P2 x \ P2')" by blast - from lt_ex obtain z where z:"z \ ord.min less_eq z1 z2" by blast - from z have zz1: "z \ z1" and zz2: "z \ z2" by simp_all - {fix x assume H: "x \ z" - from less_trans[OF H zz1] less_trans[OF H zz2] - have "(P1 x \ P2 x) \ (P1' \ P2')" using z1 zz1 z2 zz2 by auto - } - thus ?thesis by blast -qed - -lemma minf_disj: - assumes ex1: "\z1. \x. x \ z1 \ (P1 x \ P1')" - and ex2: "\z2. \x. x \ z2 \ (P2 x \ P2')" - shows "\z. \x. x \ z \ ((P1 x \ P2 x) \ (P1' \ P2'))" -proof- - from ex1 ex2 obtain z1 and z2 where z1: "\x. x \ z1 \ (P1 x \ P1')"and z2: "\x. 
x \ z2 \ (P2 x \ P2')" by blast - from lt_ex obtain z where z:"z \ ord.min less_eq z1 z2" by blast - from z have zz1: "z \ z1" and zz2: "z \ z2" by simp_all - {fix x assume H: "x \ z" - from less_trans[OF H zz1] less_trans[OF H zz2] - have "(P1 x \ P2 x) \ (P1' \ P2')" using z1 zz1 z2 zz2 by auto - } - thus ?thesis by blast -qed - -lemma minf_ex: assumes ex:"\z. \x. x \ z \ (P x \ P1)" and p1: P1 shows "\ x. P x" -proof- - from ex obtain z where z: "\x. x \ z \ (P x \ P1)" by blast - from lt_ex obtain x where x: "x \ z" by blast - from z x p1 show ?thesis by blast -qed - -end - - -class_locale constr_dense_linear_order = linorder_no_lb + linorder_no_ub + - fixes between - assumes between_less: "less x y \ less x (between x y) \ less (between x y) y" - and between_same: "between x x = x" - -class_interpretation constr_dense_linear_order < dense_linear_order - apply unfold_locales - using gt_ex lt_ex between_less - by (auto, rule_tac x="between x y" in exI, simp) - -context constr_dense_linear_order -begin - -lemma rinf_U: - assumes fU: "finite U" - and lin_dense: "\x l u. (\ t. l \ t \ t\ u \ t \ U) \ l\ x \ x \ u \ P x - \ (\ y. l \ y \ y \ u \ P y )" - and nmpiU: "\x. \ MP \ \PP \ P x \ (\ u\ U. \ u' \ U. u \ x \ x \ u')" - and nmi: "\ MP" and npi: "\ PP" and ex: "\ x. P x" - shows "\ u\ U. \ u' \ U. P (between u u')" -proof- - from ex obtain x where px: "P x" by blast - from px nmi npi nmpiU have "\ u\ U. \ u' \ U. u \ x \ x \ u'" by auto - then obtain u and u' where uU:"u\ U" and uU': "u' \ U" and ux:"u \ x" and xu':"x \ u'" by auto - from uU have Une: "U \ {}" by auto - term "linorder.Min less_eq" - let ?l = "linorder.Min less_eq U" - let ?u = "linorder.Max less_eq U" - have linM: "?l \ U" using fU Une by simp - have uinM: "?u \ U" using fU Une by simp - have lM: "\ t\ U. ?l \ t" using Une fU by auto - have Mu: "\ t\ U. t \ ?u" using Une fU by auto - have th:"?l \ u" using uU Une lM by auto - from order_trans[OF th ux] have lx: "?l \ x" . - have th: "u' \ ?u" using uU' Une Mu by simp - from order_trans[OF xu' th] have xu: "x \ ?u" . - from finite_set_intervals2[where P="P",OF px lx xu linM uinM fU lM Mu] - have "(\ s\ U. P s) \ - (\ t1\ U. \ t2 \ U. (\ y. t1 \ y \ y \ t2 \ y \ U) \ t1 \ x \ x \ t2 \ P x)" . - moreover { fix u assume um: "u\U" and pu: "P u" - have "between u u = u" by (simp add: between_same) - with um pu have "P (between u u)" by simp - with um have ?thesis by blast} - moreover{ - assume "\ t1\ U. \ t2 \ U. (\ y. t1 \ y \ y \ t2 \ y \ U) \ t1 \ x \ x \ t2 \ P x" - then obtain t1 and t2 where t1M: "t1 \ U" and t2M: "t2\ U" - and noM: "\ y. t1 \ y \ y \ t2 \ y \ U" and t1x: "t1 \ x" and xt2: "x \ t2" and px: "P x" - by blast - from less_trans[OF t1x xt2] have t1t2: "t1 \ t2" . - let ?u = "between t1 t2" - from between_less t1t2 have t1lu: "t1 \ ?u" and ut2: "?u \ t2" by auto - from lin_dense noM t1x xt2 px t1lu ut2 have "P ?u" by blast - with t1M t2M have ?thesis by blast} - ultimately show ?thesis by blast - qed - -theorem fr_eq: - assumes fU: "finite U" - and lin_dense: "\x l u. (\ t. l \ t \ t\ u \ t \ U) \ l\ x \ x \ u \ P x - \ (\ y. l \ y \ y \ u \ P y )" - and nmibnd: "\x. \ MP \ P x \ (\ u\ U. u \ x)" - and npibnd: "\x. \PP \ P x \ (\ u\ U. x \ u)" - and mi: "\z. \x. x \ z \ (P x = MP)" and pi: "\z. \x. z \ x \ (P x = PP)" - shows "(\ x. P x) \ (MP \ PP \ (\ u \ U. \ u'\ U. P (between u u')))" - (is "_ \ (_ \ _ \ ?F)" is "?E \ ?D") -proof- - { - assume px: "\ x. 
P x" - have "MP \ PP \ (\ MP \ \ PP)" by blast - moreover {assume "MP \ PP" hence "?D" by blast} - moreover {assume nmi: "\ MP" and npi: "\ PP" - from npmibnd[OF nmibnd npibnd] - have nmpiU: "\x. \ MP \ \PP \ P x \ (\ u\ U. \ u' \ U. u \ x \ x \ u')" . - from rinf_U[OF fU lin_dense nmpiU nmi npi px] have "?D" by blast} - ultimately have "?D" by blast} - moreover - { assume "?D" - moreover {assume m:"MP" from minf_ex[OF mi m] have "?E" .} - moreover {assume p: "PP" from pinf_ex[OF pi p] have "?E" . } - moreover {assume f:"?F" hence "?E" by blast} - ultimately have "?E" by blast} - ultimately have "?E = ?D" by blast thus "?E \ ?D" by simp -qed - -lemmas minf_thms = minf_conj minf_disj minf_eq minf_neq minf_lt minf_le minf_gt minf_ge minf_P -lemmas pinf_thms = pinf_conj pinf_disj pinf_eq pinf_neq pinf_lt pinf_le pinf_gt pinf_ge pinf_P - -lemmas nmi_thms = nmi_conj nmi_disj nmi_eq nmi_neq nmi_lt nmi_le nmi_gt nmi_ge nmi_P -lemmas npi_thms = npi_conj npi_disj npi_eq npi_neq npi_lt npi_le npi_gt npi_ge npi_P -lemmas lin_dense_thms = lin_dense_conj lin_dense_disj lin_dense_eq lin_dense_neq lin_dense_lt lin_dense_le lin_dense_gt lin_dense_ge lin_dense_P - -lemma ferrack_axiom: "constr_dense_linear_order less_eq less between" - by (rule constr_dense_linear_order_axioms) -lemma atoms: - shows "TERM (less :: 'a \ _)" - and "TERM (less_eq :: 'a \ _)" - and "TERM (op = :: 'a \ _)" . - -declare ferrack_axiom [ferrack minf: minf_thms pinf: pinf_thms - nmi: nmi_thms npi: npi_thms lindense: - lin_dense_thms qe: fr_eq atoms: atoms] - -declaration {* -let -fun simps phi = map (Morphism.thm phi) [@{thm "not_less"}, @{thm "not_le"}] -fun generic_whatis phi = - let - val [lt, le] = map (Morphism.term phi) [@{term "op \"}, @{term "op \"}] - fun h x t = - case term_of t of - Const("op =", _)$y$z => if term_of x aconv y then Ferrante_Rackoff_Data.Eq - else Ferrante_Rackoff_Data.Nox - | @{term "Not"}$(Const("op =", _)$y$z) => if term_of x aconv y then Ferrante_Rackoff_Data.NEq - else Ferrante_Rackoff_Data.Nox - | b$y$z => if Term.could_unify (b, lt) then - if term_of x aconv y then Ferrante_Rackoff_Data.Lt - else if term_of x aconv z then Ferrante_Rackoff_Data.Gt - else Ferrante_Rackoff_Data.Nox - else if Term.could_unify (b, le) then - if term_of x aconv y then Ferrante_Rackoff_Data.Le - else if term_of x aconv z then Ferrante_Rackoff_Data.Ge - else Ferrante_Rackoff_Data.Nox - else Ferrante_Rackoff_Data.Nox - | _ => Ferrante_Rackoff_Data.Nox - in h end - fun ss phi = HOL_ss addsimps (simps phi) -in - Ferrante_Rackoff_Data.funs @{thm "ferrack_axiom"} - {isolate_conv = K (K (K Thm.reflexive)), whatis = generic_whatis, simpset = ss} -end -*} - -end - -use "~~/src/HOL/Tools/Qelim/ferrante_rackoff.ML" - -method_setup ferrack = {* - Method.ctxt_args (Method.SIMPLE_METHOD' o FerranteRackoff.dlo_tac) -*} "Ferrante and Rackoff's algorithm for quantifier elimination in dense linear orders" - -subsection {* Ferrante and Rackoff algorithm over ordered fields *} - -lemma neg_prod_lt:"(c\'a\ordered_field) < 0 \ ((c*x < 0) == (x > 0))" -proof- - assume H: "c < 0" - have "c*x < 0 = (0/c < x)" by (simp only: neg_divide_less_eq[OF H] ring_simps) - also have "\ = (0 < x)" by simp - finally show "(c*x < 0) == (x > 0)" by simp -qed - -lemma pos_prod_lt:"(c\'a\ordered_field) > 0 \ ((c*x < 0) == (x < 0))" -proof- - assume H: "c > 0" - hence "c*x < 0 = (0/c > x)" by (simp only: pos_less_divide_eq[OF H] ring_simps) - also have "\ = (0 > x)" by simp - finally show "(c*x < 0) == (x < 0)" by simp -qed - -lemma neg_prod_sum_lt: 
"(c\'a\ordered_field) < 0 \ ((c*x + t< 0) == (x > (- 1/c)*t))" -proof- - assume H: "c < 0" - have "c*x + t< 0 = (c*x < -t)" by (subst less_iff_diff_less_0 [of "c*x" "-t"], simp) - also have "\ = (-t/c < x)" by (simp only: neg_divide_less_eq[OF H] ring_simps) - also have "\ = ((- 1/c)*t < x)" by simp - finally show "(c*x + t < 0) == (x > (- 1/c)*t)" by simp -qed - -lemma pos_prod_sum_lt:"(c\'a\ordered_field) > 0 \ ((c*x + t < 0) == (x < (- 1/c)*t))" -proof- - assume H: "c > 0" - have "c*x + t< 0 = (c*x < -t)" by (subst less_iff_diff_less_0 [of "c*x" "-t"], simp) - also have "\ = (-t/c > x)" by (simp only: pos_less_divide_eq[OF H] ring_simps) - also have "\ = ((- 1/c)*t > x)" by simp - finally show "(c*x + t < 0) == (x < (- 1/c)*t)" by simp -qed - -lemma sum_lt:"((x::'a::pordered_ab_group_add) + t < 0) == (x < - t)" - using less_diff_eq[where a= x and b=t and c=0] by simp - -lemma neg_prod_le:"(c\'a\ordered_field) < 0 \ ((c*x <= 0) == (x >= 0))" -proof- - assume H: "c < 0" - have "c*x <= 0 = (0/c <= x)" by (simp only: neg_divide_le_eq[OF H] ring_simps) - also have "\ = (0 <= x)" by simp - finally show "(c*x <= 0) == (x >= 0)" by simp -qed - -lemma pos_prod_le:"(c\'a\ordered_field) > 0 \ ((c*x <= 0) == (x <= 0))" -proof- - assume H: "c > 0" - hence "c*x <= 0 = (0/c >= x)" by (simp only: pos_le_divide_eq[OF H] ring_simps) - also have "\ = (0 >= x)" by simp - finally show "(c*x <= 0) == (x <= 0)" by simp -qed - -lemma neg_prod_sum_le: "(c\'a\ordered_field) < 0 \ ((c*x + t <= 0) == (x >= (- 1/c)*t))" -proof- - assume H: "c < 0" - have "c*x + t <= 0 = (c*x <= -t)" by (subst le_iff_diff_le_0 [of "c*x" "-t"], simp) - also have "\ = (-t/c <= x)" by (simp only: neg_divide_le_eq[OF H] ring_simps) - also have "\ = ((- 1/c)*t <= x)" by simp - finally show "(c*x + t <= 0) == (x >= (- 1/c)*t)" by simp -qed - -lemma pos_prod_sum_le:"(c\'a\ordered_field) > 0 \ ((c*x + t <= 0) == (x <= (- 1/c)*t))" -proof- - assume H: "c > 0" - have "c*x + t <= 0 = (c*x <= -t)" by (subst le_iff_diff_le_0 [of "c*x" "-t"], simp) - also have "\ = (-t/c >= x)" by (simp only: pos_le_divide_eq[OF H] ring_simps) - also have "\ = ((- 1/c)*t >= x)" by simp - finally show "(c*x + t <= 0) == (x <= (- 1/c)*t)" by simp -qed - -lemma sum_le:"((x::'a::pordered_ab_group_add) + t <= 0) == (x <= - t)" - using le_diff_eq[where a= x and b=t and c=0] by simp - -lemma nz_prod_eq:"(c\'a\ordered_field) \ 0 \ ((c*x = 0) == (x = 0))" by simp -lemma nz_prod_sum_eq: "(c\'a\ordered_field) \ 0 \ ((c*x + t = 0) == (x = (- 1/c)*t))" -proof- - assume H: "c \ 0" - have "c*x + t = 0 = (c*x = -t)" by (subst eq_iff_diff_eq_0 [of "c*x" "-t"], simp) - also have "\ = (x = -t/c)" by (simp only: nonzero_eq_divide_eq[OF H] ring_simps) - finally show "(c*x + t = 0) == (x = (- 1/c)*t)" by simp -qed -lemma sum_eq:"((x::'a::pordered_ab_group_add) + t = 0) == (x = - t)" - using eq_diff_eq[where a= x and b=t and c=0] by simp - - -class_interpretation class_ordered_field_dense_linear_order: constr_dense_linear_order - ["op <=" "op <" - "\ x y. 
1/2 * ((x::'a::{ordered_field,recpower,number_ring}) + y)"] -proof (unfold_locales, dlo, dlo, auto) - fix x y::'a assume lt: "x < y" - from less_half_sum[OF lt] show "x < (x + y) /2" by simp -next - fix x y::'a assume lt: "x < y" - from gt_half_sum[OF lt] show "(x + y) /2 < y" by simp -qed - -declaration{* -let -fun earlier [] x y = false - | earlier (h::t) x y = - if h aconvc y then false else if h aconvc x then true else earlier t x y; - -fun dest_frac ct = case term_of ct of - Const (@{const_name "HOL.divide"},_) $ a $ b=> - Rat.rat_of_quotient (snd (HOLogic.dest_number a), snd (HOLogic.dest_number b)) - | t => Rat.rat_of_int (snd (HOLogic.dest_number t)) - -fun mk_frac phi cT x = - let val (a, b) = Rat.quotient_of_rat x - in if b = 1 then Numeral.mk_cnumber cT a - else Thm.capply - (Thm.capply (Drule.cterm_rule (instantiate' [SOME cT] []) @{cpat "op /"}) - (Numeral.mk_cnumber cT a)) - (Numeral.mk_cnumber cT b) - end - -fun whatis x ct = case term_of ct of - Const(@{const_name "HOL.plus"}, _)$(Const(@{const_name "HOL.times"},_)$_$y)$_ => - if y aconv term_of x then ("c*x+t",[(funpow 2 Thm.dest_arg1) ct, Thm.dest_arg ct]) - else ("Nox",[]) -| Const(@{const_name "HOL.plus"}, _)$y$_ => - if y aconv term_of x then ("x+t",[Thm.dest_arg ct]) - else ("Nox",[]) -| Const(@{const_name "HOL.times"}, _)$_$y => - if y aconv term_of x then ("c*x",[Thm.dest_arg1 ct]) - else ("Nox",[]) -| t => if t aconv term_of x then ("x",[]) else ("Nox",[]); - -fun xnormalize_conv ctxt [] ct = reflexive ct -| xnormalize_conv ctxt (vs as (x::_)) ct = - case term_of ct of - Const(@{const_name HOL.less},_)$_$Const(@{const_name "HOL.zero"},_) => - (case whatis x (Thm.dest_arg1 ct) of - ("c*x+t",[c,t]) => - let - val cr = dest_frac c - val clt = Thm.dest_fun2 ct - val cz = Thm.dest_arg ct - val neg = cr - let - val T = ctyp_of_term x - val th = instantiate' [SOME T] [SOME x, SOME t] @{thm "sum_lt"} - val rth = Conv.fconv_rule (Conv.arg_conv (Conv.binop_conv - (Normalizer.semiring_normalize_ord_conv ctxt (earlier vs)))) th - in rth end - | ("c*x",[c]) => - let - val cr = dest_frac c - val clt = Thm.dest_fun2 ct - val cz = Thm.dest_arg ct - val neg = cr reflexive ct) - - -| Const(@{const_name HOL.less_eq},_)$_$Const(@{const_name "HOL.zero"},_) => - (case whatis x (Thm.dest_arg1 ct) of - ("c*x+t",[c,t]) => - let - val T = ctyp_of_term x - val cr = dest_frac c - val clt = Drule.cterm_rule (instantiate' [SOME T] []) @{cpat "op <"} - val cz = Thm.dest_arg ct - val neg = cr - let - val T = ctyp_of_term x - val th = instantiate' [SOME T] [SOME x, SOME t] @{thm "sum_le"} - val rth = Conv.fconv_rule (Conv.arg_conv (Conv.binop_conv - (Normalizer.semiring_normalize_ord_conv ctxt (earlier vs)))) th - in rth end - | ("c*x",[c]) => - let - val T = ctyp_of_term x - val cr = dest_frac c - val clt = Drule.cterm_rule (instantiate' [SOME T] []) @{cpat "op <"} - val cz = Thm.dest_arg ct - val neg = cr reflexive ct) - -| Const("op =",_)$_$Const(@{const_name "HOL.zero"},_) => - (case whatis x (Thm.dest_arg1 ct) of - ("c*x+t",[c,t]) => - let - val T = ctyp_of_term x - val cr = dest_frac c - val ceq = Thm.dest_fun2 ct - val cz = Thm.dest_arg ct - val cthp = Simplifier.rewrite (local_simpset_of ctxt) - (Thm.capply @{cterm "Trueprop"} - (Thm.capply @{cterm "Not"} (Thm.capply (Thm.capply ceq c) cz))) - val cth = equal_elim (symmetric cthp) TrueI - val th = implies_elim - (instantiate' [SOME T] (map SOME [c,x,t]) @{thm nz_prod_sum_eq}) cth - val rth = Conv.fconv_rule (Conv.arg_conv (Conv.binop_conv - (Normalizer.semiring_normalize_ord_conv ctxt 
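The isolation lemmas above, e.g. neg_prod_sum_lt, pos_prod_sum_lt and nz_prod_sum_eq, justify solving each atom for the eliminated variable; for a field coefficient c they amount to

  \[ c < 0 \;\Longrightarrow\; \bigl(c\,x + t < 0 \;\longleftrightarrow\; x > -t/c\bigr), \qquad
     c > 0 \;\Longrightarrow\; \bigl(c\,x + t < 0 \;\longleftrightarrow\; x < -t/c\bigr), \qquad
     c \neq 0 \;\Longrightarrow\; \bigl(c\,x + t = 0 \;\longleftrightarrow\; x = -t/c\bigr) \]

The class_interpretation above instantiates the between operation with the midpoint (x + y)/2, and the surrounding xnormalize_conv/field_isolate_conv code applies these lemmas (after moving everything to one side with less_iff_diff_less_0 and friends) to bring every atom into one of the forms x < r, x > r or x = r before the elimination step.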
(earlier vs)))) th - in rth end - | ("x+t",[t]) => - let - val T = ctyp_of_term x - val th = instantiate' [SOME T] [SOME x, SOME t] @{thm "sum_eq"} - val rth = Conv.fconv_rule (Conv.arg_conv (Conv.binop_conv - (Normalizer.semiring_normalize_ord_conv ctxt (earlier vs)))) th - in rth end - | ("c*x",[c]) => - let - val T = ctyp_of_term x - val cr = dest_frac c - val ceq = Thm.dest_fun2 ct - val cz = Thm.dest_arg ct - val cthp = Simplifier.rewrite (local_simpset_of ctxt) - (Thm.capply @{cterm "Trueprop"} - (Thm.capply @{cterm "Not"} (Thm.capply (Thm.capply ceq c) cz))) - val cth = equal_elim (symmetric cthp) TrueI - val rth = implies_elim - (instantiate' [SOME T] (map SOME [c,x]) @{thm nz_prod_eq}) cth - in rth end - | _ => reflexive ct); - -local - val less_iff_diff_less_0 = mk_meta_eq @{thm "less_iff_diff_less_0"} - val le_iff_diff_le_0 = mk_meta_eq @{thm "le_iff_diff_le_0"} - val eq_iff_diff_eq_0 = mk_meta_eq @{thm "eq_iff_diff_eq_0"} -in -fun field_isolate_conv phi ctxt vs ct = case term_of ct of - Const(@{const_name HOL.less},_)$a$b => - let val (ca,cb) = Thm.dest_binop ct - val T = ctyp_of_term ca - val th = instantiate' [SOME T] [SOME ca, SOME cb] less_iff_diff_less_0 - val nth = Conv.fconv_rule - (Conv.arg_conv (Conv.arg1_conv - (Normalizer.semiring_normalize_ord_conv @{context} (earlier vs)))) th - val rth = transitive nth (xnormalize_conv ctxt vs (Thm.rhs_of nth)) - in rth end -| Const(@{const_name HOL.less_eq},_)$a$b => - let val (ca,cb) = Thm.dest_binop ct - val T = ctyp_of_term ca - val th = instantiate' [SOME T] [SOME ca, SOME cb] le_iff_diff_le_0 - val nth = Conv.fconv_rule - (Conv.arg_conv (Conv.arg1_conv - (Normalizer.semiring_normalize_ord_conv @{context} (earlier vs)))) th - val rth = transitive nth (xnormalize_conv ctxt vs (Thm.rhs_of nth)) - in rth end - -| Const("op =",_)$a$b => - let val (ca,cb) = Thm.dest_binop ct - val T = ctyp_of_term ca - val th = instantiate' [SOME T] [SOME ca, SOME cb] eq_iff_diff_eq_0 - val nth = Conv.fconv_rule - (Conv.arg_conv (Conv.arg1_conv - (Normalizer.semiring_normalize_ord_conv @{context} (earlier vs)))) th - val rth = transitive nth (xnormalize_conv ctxt vs (Thm.rhs_of nth)) - in rth end -| @{term "Not"} $(Const("op =",_)$a$b) => Conv.arg_conv (field_isolate_conv phi ctxt vs) ct -| _ => reflexive ct -end; - -fun classfield_whatis phi = - let - fun h x t = - case term_of t of - Const("op =", _)$y$z => if term_of x aconv y then Ferrante_Rackoff_Data.Eq - else Ferrante_Rackoff_Data.Nox - | @{term "Not"}$(Const("op =", _)$y$z) => if term_of x aconv y then Ferrante_Rackoff_Data.NEq - else Ferrante_Rackoff_Data.Nox - | Const(@{const_name HOL.less},_)$y$z => - if term_of x aconv y then Ferrante_Rackoff_Data.Lt - else if term_of x aconv z then Ferrante_Rackoff_Data.Gt - else Ferrante_Rackoff_Data.Nox - | Const (@{const_name HOL.less_eq},_)$y$z => - if term_of x aconv y then Ferrante_Rackoff_Data.Le - else if term_of x aconv z then Ferrante_Rackoff_Data.Ge - else Ferrante_Rackoff_Data.Nox - | _ => Ferrante_Rackoff_Data.Nox - in h end; -fun class_field_ss phi = - HOL_basic_ss addsimps ([@{thm "linorder_not_less"}, @{thm "linorder_not_le"}]) - addsplits [@{thm "abs_split"},@{thm "split_max"}, @{thm "split_min"}] - -in -Ferrante_Rackoff_Data.funs @{thm "class_ordered_field_dense_linear_order.ferrack_axiom"} - {isolate_conv = field_isolate_conv, whatis = classfield_whatis, simpset = class_field_ss} -end -*} - - -end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Library/Executable_Set.thy --- a/src/HOL/Library/Executable_Set.thy Tue Dec 30 08:18:54 2008 
+0100 +++ b/src/HOL/Library/Executable_Set.thy Tue Dec 30 11:10:01 2008 +0100 @@ -28,6 +28,10 @@ lemma [code]: "eq_set A B \ A \ B \ B \ A" unfolding eq_set_def by auto +(* FIXME allow for Stefan's code generator: +declare set_eq_subset[code unfold] +*) + lemma [code]: "a \ A \ (\x\A. x = a)" unfolding bex_triv_one_point1 .. @@ -35,6 +39,8 @@ definition filter_set :: "('a \ bool) \ 'a set \ 'a set" where "filter_set P xs = {x\xs. P x}" +declare filter_set_def[symmetric, code unfold] + subsection {* Operations on lists *} @@ -269,5 +275,6 @@ Ball ("{*Blall*}") Bex ("{*Blex*}") filter_set ("{*filter*}") + fold ("{* foldl o flip *}") end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Library/Library.thy --- a/src/HOL/Library/Library.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Library/Library.thy Tue Dec 30 11:10:01 2008 +0100 @@ -16,7 +16,6 @@ Continuity ContNotDenum Countable - Dense_Linear_Order Efficient_Nat Enum Eval_Witness diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Library/Multiset.thy --- a/src/HOL/Library/Multiset.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Library/Multiset.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1481,4 +1481,155 @@ @{term "{#x+x|x:#M. x# XS \ x \# {# y #} + XS" + and multi_member_this: "x \# {# x #} + XS" + and multi_member_last: "x \# {# x #}" + by auto + +definition "ms_strict = mult pair_less" +definition "ms_weak = ms_strict \ Id" + +lemma ms_reduction_pair: "reduction_pair (ms_strict, ms_weak)" +unfolding reduction_pair_def ms_strict_def ms_weak_def pair_less_def +by (auto intro: wf_mult1 wf_trancl simp: mult_def) + +lemma smsI: + "(set_of A, set_of B) \ max_strict \ (Z + A, Z + B) \ ms_strict" + unfolding ms_strict_def +by (rule one_step_implies_mult) (auto simp add: max_strict_def pair_less_def elim!:max_ext.cases) + +lemma wmsI: + "(set_of A, set_of B) \ max_strict \ A = {#} \ B = {#} + \ (Z + A, Z + B) \ ms_weak" +unfolding ms_weak_def ms_strict_def +by (auto simp add: pair_less_def max_strict_def elim!:max_ext.cases intro: one_step_implies_mult) + +inductive pw_leq +where + pw_leq_empty: "pw_leq {#} {#}" +| pw_leq_step: "\(x,y) \ pair_leq; pw_leq X Y \ \ pw_leq ({#x#} + X) ({#y#} + Y)" + +lemma pw_leq_lstep: + "(x, y) \ pair_leq \ pw_leq {#x#} {#y#}" +by (drule pw_leq_step) (rule pw_leq_empty, simp) + +lemma pw_leq_split: + assumes "pw_leq X Y" + shows "\A B Z. 
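The additions to Multiset.thy above and below set up the multiset extension of the termination order for ScnpReconstruct. With \(\prec\) standing for pair_less and \(\prec_{\mathrm{mul}}\) for ms_strict = mult pair_less (notation used only in this note), the one-step principle behind smsI and ms_strictI is, roughly,

  \[ B \neq \{\#\} \;\wedge\; (\forall a \in A.\ \exists b \in B.\ a \prec b)
     \;\Longrightarrow\; Z + A \prec_{\mathrm{mul}} Z + B \]

and ms_reduction_pair records that this order, together with its reflexive variant ms_weak, forms a well-founded reduction pair, essentially because pair_less itself is well-founded.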
X = A + Z \ Y = B + Z \ ((set_of A, set_of B) \ max_strict \ (B = {#} \ A = {#}))" + using assms +proof (induct) + case pw_leq_empty thus ?case by auto +next + case (pw_leq_step x y X Y) + then obtain A B Z where + [simp]: "X = A + Z" "Y = B + Z" + and 1[simp]: "(set_of A, set_of B) \ max_strict \ (B = {#} \ A = {#})" + by auto + from pw_leq_step have "x = y \ (x, y) \ pair_less" + unfolding pair_leq_def by auto + thus ?case + proof + assume [simp]: "x = y" + have + "{#x#} + X = A + ({#y#}+Z) + \ {#y#} + Y = B + ({#y#}+Z) + \ ((set_of A, set_of B) \ max_strict \ (B = {#} \ A = {#}))" + by (auto simp: add_ac) + thus ?case by (intro exI) + next + assume A: "(x, y) \ pair_less" + let ?A' = "{#x#} + A" and ?B' = "{#y#} + B" + have "{#x#} + X = ?A' + Z" + "{#y#} + Y = ?B' + Z" + by (auto simp add: add_ac) + moreover have + "(set_of ?A', set_of ?B') \ max_strict" + using 1 A unfolding max_strict_def + by (auto elim!: max_ext.cases) + ultimately show ?thesis by blast + qed +qed + +lemma + assumes pwleq: "pw_leq Z Z'" + shows ms_strictI: "(set_of A, set_of B) \ max_strict \ (Z + A, Z' + B) \ ms_strict" + and ms_weakI1: "(set_of A, set_of B) \ max_strict \ (Z + A, Z' + B) \ ms_weak" + and ms_weakI2: "(Z + {#}, Z' + {#}) \ ms_weak" +proof - + from pw_leq_split[OF pwleq] + obtain A' B' Z'' + where [simp]: "Z = A' + Z''" "Z' = B' + Z''" + and mx_or_empty: "(set_of A', set_of B') \ max_strict \ (A' = {#} \ B' = {#})" + by blast + { + assume max: "(set_of A, set_of B) \ max_strict" + from mx_or_empty + have "(Z'' + (A + A'), Z'' + (B + B')) \ ms_strict" + proof + assume max': "(set_of A', set_of B') \ max_strict" + with max have "(set_of (A + A'), set_of (B + B')) \ max_strict" + by (auto simp: max_strict_def intro: max_ext_additive) + thus ?thesis by (rule smsI) + next + assume [simp]: "A' = {#} \ B' = {#}" + show ?thesis by (rule smsI) (auto intro: max) + qed + thus "(Z + A, Z' + B) \ ms_strict" by (simp add:add_ac) + thus "(Z + A, Z' + B) \ ms_weak" by (simp add: ms_weak_def) + } + from mx_or_empty + have "(Z'' + A', Z'' + B') \ ms_weak" by (rule wmsI) + thus "(Z + {#}, Z' + {#}) \ ms_weak" by (simp add:add_ac) +qed + +lemma empty_idemp: "{#} + x = x" "x + {#} = x" +and nonempty_plus: "{# x #} + rs \ {#}" +and nonempty_single: "{# x #} \ {#}" +by auto + +setup {* +let + fun msetT T = Type ("Multiset.multiset", [T]); + + fun mk_mset T [] = Const (@{const_name Mempty}, msetT T) + | mk_mset T [x] = Const (@{const_name single}, T --> msetT T) $ x + | mk_mset T (x :: xs) = + Const (@{const_name plus}, msetT T --> msetT T --> msetT T) $ + mk_mset T [x] $ mk_mset T xs + + fun mset_member_tac m i = + (if m <= 0 then + rtac @{thm multi_member_this} i ORELSE rtac @{thm multi_member_last} i + else + rtac @{thm multi_member_skip} i THEN mset_member_tac (m - 1) i) + + val mset_nonempty_tac = + rtac @{thm nonempty_plus} ORELSE' rtac @{thm nonempty_single} + + val regroup_munion_conv = + FundefLib.regroup_conv @{const_name Multiset.Mempty} @{const_name plus} + (map (fn t => t RS eq_reflection) (@{thms union_ac} @ @{thms empty_idemp})) + + fun unfold_pwleq_tac i = + (rtac @{thm pw_leq_step} i THEN (fn st => unfold_pwleq_tac (i + 1) st)) + ORELSE (rtac @{thm pw_leq_lstep} i) + ORELSE (rtac @{thm pw_leq_empty} i) + + val set_of_simps = [@{thm set_of_empty}, @{thm set_of_single}, @{thm set_of_union}, + @{thm Un_insert_left}, @{thm Un_empty_left}] +in + ScnpReconstruct.multiset_setup (ScnpReconstruct.Multiset + { + msetT=msetT, mk_mset=mk_mset, mset_regroup_conv=regroup_munion_conv, + mset_member_tac=mset_member_tac, 
mset_nonempty_tac=mset_nonempty_tac, + mset_pwleq_tac=unfold_pwleq_tac, set_of_simps=set_of_simps, + smsI'=@{thm ms_strictI}, wmsI2''=@{thm ms_weakI2}, wmsI1=@{thm ms_weakI1}, + reduction_pair=@{thm ms_reduction_pair} + }) end +*} + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Lim.thy --- a/src/HOL/Lim.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Lim.thy Tue Dec 30 11:10:01 2008 +0100 @@ -7,7 +7,7 @@ header{* Limits and Continuity *} theory Lim -imports "~~/src/HOL/Hyperreal/SEQ" +imports SEQ begin text{*Standard Definitions*} diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/MacLaurin.thy --- a/src/HOL/MacLaurin.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/MacLaurin.thy Tue Dec 30 11:10:01 2008 +0100 @@ -58,129 +58,157 @@ *} lemma Maclaurin_lemma2: - "[| \m t. m < n \ 0\t \ t\h \ DERIV (diff m) t :> diff (Suc m) t; - n = Suc k; - difg = + assumes diff: "\m t. m < n \ 0\t \ t\h \ DERIV (diff m) t :> diff (Suc m) t" + assumes n: "n = Suc k" + assumes difg: "difg = (\m t. diff m t - ((\p = 0.. - \m t. m < n & 0 \ t & t \ h --> - DERIV (difg m) t :> difg (Suc m) t" -apply clarify -apply (rule DERIV_diff) -apply (simp (no_asm_simp)) -apply (tactic {* DERIV_tac @{context} *}) -apply (tactic {* DERIV_tac @{context} *}) -apply (rule_tac [2] lemma_DERIV_subst) -apply (rule_tac [2] DERIV_quotient) -apply (rule_tac [3] DERIV_const) -apply (rule_tac [2] DERIV_pow) - prefer 3 apply (simp add: fact_diff_Suc) - prefer 2 apply simp -apply (frule_tac m = m in less_add_one, clarify) -apply (simp del: setsum_op_ivl_Suc) -apply (insert sumr_offset4 [of 1]) -apply (simp del: setsum_op_ivl_Suc fact_Suc realpow_Suc) -apply (rule lemma_DERIV_subst) -apply (rule DERIV_add) -apply (rule_tac [2] DERIV_const) -apply (rule DERIV_sumr, clarify) - prefer 2 apply simp -apply (simp (no_asm) add: divide_inverse mult_assoc del: fact_Suc realpow_Suc) -apply (rule DERIV_cmult) -apply (rule lemma_DERIV_subst) -apply (best intro: DERIV_chain2 intro!: DERIV_intros) -apply (subst fact_Suc) -apply (subst real_of_nat_mult) -apply (simp add: mult_ac) + B * (t ^ (n - m) / real (fact (n - m)))))" + shows + "\m t. m < n & 0 \ t & t \ h --> DERIV (difg m) t :> difg (Suc m) t" +unfolding difg + apply clarify + apply (rule DERIV_diff) + apply (simp add: diff) + apply (simp only: n) + apply (rule DERIV_add) + apply (rule_tac [2] DERIV_cmult) + apply (rule_tac [2] lemma_DERIV_subst) + apply (rule_tac [2] DERIV_quotient) + apply (rule_tac [3] DERIV_const) + apply (rule_tac [2] DERIV_pow) + prefer 3 apply (simp add: fact_diff_Suc) + prefer 2 apply simp + apply (frule less_iff_Suc_add [THEN iffD1], clarify) + apply (simp del: setsum_op_ivl_Suc) + apply (insert sumr_offset4 [of 1]) + apply (simp del: setsum_op_ivl_Suc fact_Suc realpow_Suc) + apply (rule lemma_DERIV_subst) + apply (rule DERIV_add) + apply (rule_tac [2] DERIV_const) + apply (rule DERIV_sumr, clarify) + prefer 2 apply simp + apply (simp (no_asm) add: divide_inverse mult_assoc del: fact_Suc realpow_Suc) + apply (rule DERIV_cmult) + apply (rule lemma_DERIV_subst) + apply (best intro: DERIV_chain2 intro!: DERIV_intros) + apply (subst fact_Suc) + apply (subst real_of_nat_mult) + apply (simp add: mult_ac) done -lemma Maclaurin_lemma3: - fixes difg :: "nat => real => real" shows - "[|\k t. k < Suc m \ 0\t & t\h \ DERIV (difg k) t :> difg (Suc k) t; - \k 0; n < m; 0 < t; - t < h|] - ==> \ta. 0 < ta & ta < t & DERIV (difg (Suc n)) ta :> 0" -apply (rule Rolle, assumption, simp) -apply (drule_tac x = n and P="%k. 
k difg k 0 = 0" in spec) -apply (rule DERIV_unique) -prefer 2 apply assumption -apply force -apply (metis DERIV_isCont dlo_simps(4) dlo_simps(9) less_trans_Suc nat_less_le not_less_eq real_le_trans) -apply (metis Suc_less_eq differentiableI dlo_simps(7) dlo_simps(8) dlo_simps(9) real_le_trans xt1(8)) -done - lemma Maclaurin: - "[| 0 < h; n > 0; diff 0 = f; - \m t. m < n & 0 \ t & t \ h --> DERIV (diff m) t :> diff (Suc m) t |] - ==> \t. 0 < t & - t < h & + assumes h: "0 < h" + assumes n: "0 < n" + assumes diff_0: "diff 0 = f" + assumes diff_Suc: + "\m t. m < n & 0 \ t & t \ h --> DERIV (diff m) t :> diff (Suc m) t" + shows + "\t. 0 < t & t < h & f h = setsum (%m. (diff m 0 / real (fact m)) * h ^ m) {0..g. - g = (%t. f t - (setsum (%m. (diff m 0 / real(fact m)) * t^m) {0..difg. difg = (%m t. diff m t - (setsum (%p. (diff (m + p) 0 / real (fact p)) * (t ^ p)) {0..ma. ma < n --> (\t. 0 < t & t < h & difg (Suc ma) t = 0) ") - apply (drule_tac x = m and P="%m. m (\t. ?QQ m t)" in spec) - apply (erule impE) - apply (simp (no_asm_simp)) - apply (erule exE) - apply (rule_tac x = t in exI) - apply (simp del: realpow_Suc fact_Suc) -apply (subgoal_tac "\m. m < n --> difg m 0 = 0") - prefer 2 - apply clarify - apply simp - apply (frule_tac m = ma in less_add_one, clarify) - apply (simp del: setsum_op_ivl_Suc) -apply (insert sumr_offset4 [of 1]) -apply (simp del: setsum_op_ivl_Suc fact_Suc realpow_Suc) -apply (subgoal_tac "\m. m < n --> (\t. 0 < t & t < h & DERIV (difg m) t :> 0) ") -apply (rule allI, rule impI) -apply (drule_tac x = ma and P="%m. m (\t. ?QQ m t)" in spec) -apply (erule impE, assumption) -apply (erule exE) -apply (rule_tac x = t in exI) -(* do some tidying up *) -apply (erule_tac [!] V= "difg = (%m t. diff m t - (setsum (%p. diff (m + p) 0 / real (fact p) * t ^ p) {0..m = 0..real) / real (fact m) * h ^ m) + + B * (h ^ n / real (fact n))" + using Maclaurin_lemma [OF h] .. + + obtain g where g_def: "g = (%t. f t - + (setsum (%m. (diff m 0 / real(fact m)) * t^m) {0..(m\nat) t\real. + m < n \ (0\real) \ t \ t \ h \ DERIV (difg m) t :> difg (Suc m) t" + using diff_Suc m difg_def by (rule Maclaurin_lemma2) + + have difg_eq_0: "\m. m < n --> difg m 0 = 0" + apply clarify + apply (simp add: m difg_def) + apply (frule less_iff_Suc_add [THEN iffD1], clarify) + apply (simp del: setsum_op_ivl_Suc) + apply (insert sumr_offset4 [of 1]) + apply (simp del: setsum_op_ivl_Suc fact_Suc realpow_Suc) + done + + have isCont_difg: "\m x. \m < n; 0 \ x; x \ h\ \ isCont (difg m) x" + by (rule DERIV_isCont [OF difg_Suc [rule_format]]) simp + + have differentiable_difg: + "\m x. \m < n; 0 \ x; x \ h\ \ difg m differentiable x" + by (rule differentiableI [OF difg_Suc [rule_format]]) simp + + have difg_Suc_eq_0: "\m t. \m < n; 0 \ t; t \ h; DERIV (difg m) t :> 0\ + \ difg (Suc m) t = 0" + by (rule DERIV_unique [OF difg_Suc [rule_format]]) simp + + have "m < n" using m by simp + + have "\t. 0 < t \ t < h \ DERIV (difg m) t :> 0" + using `m < n` + proof (induct m) + case 0 + show ?case + proof (rule Rolle) + show "0 < h" by fact + show "difg 0 0 = difg 0 h" by (simp add: difg_0 g2) + show "\x. 0 \ x \ x \ h \ isCont (difg (0\nat)) x" + by (simp add: isCont_difg n) + show "\x. 0 < x \ x < h \ difg (0\nat) differentiable x" + by (simp add: differentiable_difg n) + qed + next + case (Suc m') + hence "\t. 0 < t \ t < h \ DERIV (difg m') t :> 0" by simp + then obtain t where t: "0 < t" "t < h" "DERIV (difg m') t :> 0" by fast + have "\t'. 
0 < t' \ t' < t \ DERIV (difg (Suc m')) t' :> 0" + proof (rule Rolle) + show "0 < t" by fact + show "difg (Suc m') 0 = difg (Suc m') t" + using t `Suc m' < n` by (simp add: difg_Suc_eq_0 difg_eq_0) + show "\x. 0 \ x \ x \ t \ isCont (difg (Suc m')) x" + using `t < h` `Suc m' < n` by (simp add: isCont_difg) + show "\x. 0 < x \ x < t \ difg (Suc m') differentiable x" + using `t < h` `Suc m' < n` by (simp add: differentiable_difg) + qed + thus ?case + using `t < h` by auto + qed + + then obtain t where "0 < t" "t < h" "DERIV (difg m) t :> 0" by fast + + hence "difg (Suc m) t = 0" + using `m < n` by (simp add: difg_Suc_eq_0) + + show ?thesis + proof (intro exI conjI) + show "0 < t" by fact + show "t < h" by fact + show "f h = + (\m = 0..0 & diff 0 = f & diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Nominal/Examples/CK_Machine.thy --- a/src/HOL/Nominal/Examples/CK_Machine.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Nominal/Examples/CK_Machine.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,3 @@ -(* $Id$ *) - theory CK_Machine imports "../Nominal" begin @@ -41,21 +39,21 @@ section {* Capture-Avoiding Substitution *} -consts subst :: "lam \ name \ lam \ lam" ("_[_::=_]" [100,100,100] 100) - nominal_primrec + subst :: "lam \ name \ lam \ lam" ("_[_::=_]" [100,100,100] 100) +where "(VAR x)[y::=s] = (if x=y then s else (VAR x))" - "(APP t\<^isub>1 t\<^isub>2)[y::=s] = APP (t\<^isub>1[y::=s]) (t\<^isub>2[y::=s])" - "x\(y,s) \ (LAM [x].t)[y::=s] = LAM [x].(t[y::=s])" - "(NUM n)[y::=s] = NUM n" - "(t\<^isub>1 -- t\<^isub>2)[y::=s] = (t\<^isub>1[y::=s]) -- (t\<^isub>2[y::=s])" - "(t\<^isub>1 ++ t\<^isub>2)[y::=s] = (t\<^isub>1[y::=s]) ++ (t\<^isub>2[y::=s])" - "x\(y,s) \ (FIX [x].t)[y::=s] = FIX [x].(t[y::=s])" - "TRUE[y::=s] = TRUE" - "FALSE[y::=s] = FALSE" - "(IF t1 t2 t3)[y::=s] = IF (t1[y::=s]) (t2[y::=s]) (t3[y::=s])" - "(ZET t)[y::=s] = ZET (t[y::=s])" - "(EQI t1 t2)[y::=s] = EQI (t1[y::=s]) (t2[y::=s])" +| "(APP t\<^isub>1 t\<^isub>2)[y::=s] = APP (t\<^isub>1[y::=s]) (t\<^isub>2[y::=s])" +| "x\(y,s) \ (LAM [x].t)[y::=s] = LAM [x].(t[y::=s])" +| "(NUM n)[y::=s] = NUM n" +| "(t\<^isub>1 -- t\<^isub>2)[y::=s] = (t\<^isub>1[y::=s]) -- (t\<^isub>2[y::=s])" +| "(t\<^isub>1 ++ t\<^isub>2)[y::=s] = (t\<^isub>1[y::=s]) ++ (t\<^isub>2[y::=s])" +| "x\(y,s) \ (FIX [x].t)[y::=s] = FIX [x].(t[y::=s])" +| "TRUE[y::=s] = TRUE" +| "FALSE[y::=s] = FALSE" +| "(IF t1 t2 t3)[y::=s] = IF (t1[y::=s]) (t2[y::=s]) (t3[y::=s])" +| "(ZET t)[y::=s] = ZET (t[y::=s])" +| "(EQI t1 t2)[y::=s] = EQI (t1[y::=s]) (t2[y::=s])" apply(finite_guess)+ apply(rule TrueI)+ apply(simp add: abs_fresh)+ diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Nominal/Examples/CR_Takahashi.thy --- a/src/HOL/Nominal/Examples/CR_Takahashi.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Nominal/Examples/CR_Takahashi.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,3 @@ -(* $Id$ *) - (* Authors: Christian Urban and Mathilde Arnaud *) (* *) (* A formalisation of the Church-Rosser proof by Masako Takahashi.*) @@ -20,12 +18,12 @@ | App "lam" "lam" | Lam "\name\lam" ("Lam [_]._" [100,100] 100) -consts subst :: "lam \ name \ lam \ lam" ("_[_::=_]" [100,100,100] 100) - nominal_primrec + subst :: "lam \ name \ lam \ lam" ("_[_::=_]" [100,100,100] 100) +where "(Var x)[y::=s] = (if x=y then s else (Var x))" - "(App t\<^isub>1 t\<^isub>2)[y::=s] = App (t\<^isub>1[y::=s]) (t\<^isub>2[y::=s])" - "x\(y,s) \ (Lam [x].t)[y::=s] = Lam [x].(t[y::=s])" +| "(App t\<^isub>1 t\<^isub>2)[y::=s] = App (t\<^isub>1[y::=s]) (t\<^isub>2[y::=s])" +| "x\(y,s) \ (Lam [x].t)[y::=s] = Lam 
[x].(t[y::=s])" apply(finite_guess)+ apply(rule TrueI)+ apply(simp add: abs_fresh) @@ -54,14 +52,16 @@ lemma substitution_lemma: assumes a: "x\y" "x\u" shows "t[x::=s][y::=u] = t[y::=u][x::=s[y::=u]]" -using a by (nominal_induct t avoiding: x y s u rule: lam.strong_induct) - (auto simp add: fresh_fact forget) +using a +by (nominal_induct t avoiding: x y s u rule: lam.strong_induct) + (auto simp add: fresh_fact forget) lemma subst_rename: assumes a: "y\t" shows "t[x::=s] = ([(y,x)]\t)[y::=s]" -using a by (nominal_induct t avoiding: x y s rule: lam.strong_induct) - (auto simp add: calc_atm fresh_atm abs_fresh) +using a +by (nominal_induct t avoiding: x y s rule: lam.strong_induct) + (auto simp add: swap_simps fresh_atm abs_fresh) section {* Beta-Reduction *} @@ -103,8 +103,9 @@ lemma One_subst: assumes a: "t1 \\<^isub>1 t2" "s1 \\<^isub>1 s2" shows "t1[x::=s1] \\<^isub>1 t2[x::=s2]" -using a by (nominal_induct t1 t2 avoiding: s1 s2 x rule: One.strong_induct) - (auto simp add: substitution_lemma fresh_atm fresh_fact) +using a +by (nominal_induct t1 t2 avoiding: s1 s2 x rule: One.strong_induct) + (auto simp add: substitution_lemma fresh_atm fresh_fact) lemma better_o4_intro: assumes a: "t1 \\<^isub>1 t2" "s1 \\<^isub>1 s2" @@ -202,35 +203,30 @@ by (nominal_induct M rule: lam.strong_induct) (auto dest!: Dev_Lam intro: better_d4_intro) -(* needs fixing *) lemma Triangle: assumes a: "t \\<^isub>d t1" "t \\<^isub>1 t2" shows "t2 \\<^isub>1 t1" using a proof(nominal_induct avoiding: t2 rule: Dev.strong_induct) case (d4 x s1 s2 t1 t1' t2) - have ih1: "\t. t1 \\<^isub>1 t \ t \\<^isub>1 t1'" - and ih2: "\s. s1 \\<^isub>1 s \ s \\<^isub>1 s2" - and fc: "x\t2" "x\s1" "x\s2" by fact+ + have fc: "x\t2" "x\s1" by fact+ have "App (Lam [x].t1) s1 \\<^isub>1 t2" by fact - then obtain t' s' where "(t2 = App (Lam [x].t') s' \ t1 \\<^isub>1 t' \ s1 \\<^isub>1 s') \ - (t2 = t'[x::=s'] \ t1 \\<^isub>1 t' \ s1 \\<^isub>1 s')" + then obtain t' s' where reds: + "(t2 = App (Lam [x].t') s' \ t1 \\<^isub>1 t' \ s1 \\<^isub>1 s') \ + (t2 = t'[x::=s'] \ t1 \\<^isub>1 t' \ s1 \\<^isub>1 s')" using fc by (auto dest!: One_Redex) - then show "t2 \\<^isub>1 t1'[x::=s2]" - apply - - apply(erule disjE) - apply(erule conjE)+ - apply(simp) - apply(rule o4) - using fc apply(simp) - using ih1 apply(simp) - using ih2 apply(simp) - apply(erule conjE)+ - apply(simp) - apply(rule One_subst) - using ih1 apply(simp) - using ih2 apply(simp) - done + have ih1: "t1 \\<^isub>1 t' \ t' \\<^isub>1 t1'" by fact + have ih2: "s1 \\<^isub>1 s' \ s' \\<^isub>1 s2" by fact + { assume "t1 \\<^isub>1 t'" "s1 \\<^isub>1 s'" + then have "App (Lam [x].t') s' \\<^isub>1 t1'[x::=s2]" + using ih1 ih2 by (auto intro: better_o4_intro) + } + moreover + { assume "t1 \\<^isub>1 t'" "s1 \\<^isub>1 s'" + then have "t'[x::=s'] \\<^isub>1 t1'[x::=s2]" + using ih1 ih2 by (auto intro: One_subst) + } + ultimately show "t2 \\<^isub>1 t1'[x::=s2]" using reds by auto qed (auto dest!: One_Lam One_Var One_App) lemma Diamond_for_One: @@ -310,4 +306,6 @@ then show "\t3. 
t1 \\<^isub>\\<^sup>* t3 \ t2 \\<^isub>\\<^sup>* t3" by (simp add: Beta_star_equals_One_star) qed + + end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Nominal/Examples/Class.thy --- a/src/HOL/Nominal/Examples/Class.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Nominal/Examples/Class.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,3 @@ -(* $Id$ *) - theory Class imports "../Nominal" begin @@ -17,16 +15,22 @@ | OR "ty" "ty" ("_ OR _" [100,100] 100) | IMP "ty" "ty" ("_ IMP _" [100,100] 100) -instance ty :: size .. - -nominal_primrec +instantiation ty :: size +begin + +nominal_primrec size_ty +where "size (PR s) = (1::nat)" - "size (NOT T) = 1 + size T" - "size (T1 AND T2) = 1 + size T1 + size T2" - "size (T1 OR T2) = 1 + size T1 + size T2" - "size (T1 IMP T2) = 1 + size T1 + size T2" +| "size (NOT T) = 1 + size T" +| "size (T1 AND T2) = 1 + size T1 + size T2" +| "size (T1 OR T2) = 1 + size T1 + size T2" +| "size (T1 IMP T2) = 1 + size T1 + size T2" by (rule TrueI)+ +instance .. + +end + lemma ty_cases: fixes T::ty shows "(\s. T=PR s) \ (\T'. T=NOT T') \ (\S U. T=S OR U) \ (\S U. T=S AND U) \ (\S U. T=S IMP U)" @@ -66,25 +70,23 @@ text {* renaming functions *} -consts - nrename :: "trm \ name \ name \ trm" ("_[_\n>_]" [100,100,100] 100) +nominal_primrec (freshness_context: "(d::coname,e::coname)") crename :: "trm \ coname \ coname \ trm" ("_[_\c>_]" [100,100,100] 100) - -nominal_primrec (freshness_context: "(d::coname,e::coname)") +where "(Ax x a)[d\c>e] = (if a=d then Ax x e else Ax x a)" - "\a\(d,e,N);x\M\ \ (Cut .M (x).N)[d\c>e] = Cut .(M[d\c>e]) (x).(N[d\c>e])" - "(NotR (x).M a)[d\c>e] = (if a=d then NotR (x).(M[d\c>e]) e else NotR (x).(M[d\c>e]) a)" - "a\(d,e) \ (NotL .M x)[d\c>e] = (NotL .(M[d\c>e]) x)" - "\a\(d,e,N,c);b\(d,e,M,c);b\a\ \ (AndR .M .N c)[d\c>e] = +| "\a\(d,e,N);x\M\ \ (Cut .M (x).N)[d\c>e] = Cut .(M[d\c>e]) (x).(N[d\c>e])" +| "(NotR (x).M a)[d\c>e] = (if a=d then NotR (x).(M[d\c>e]) e else NotR (x).(M[d\c>e]) a)" +| "a\(d,e) \ (NotL .M x)[d\c>e] = (NotL .(M[d\c>e]) x)" +| "\a\(d,e,N,c);b\(d,e,M,c);b\a\ \ (AndR .M .N c)[d\c>e] = (if c=d then AndR .(M[d\c>e]) .(N[d \c>e]) e else AndR .(M[d\c>e]) .(N[d\c>e]) c)" - "x\y \ (AndL1 (x).M y)[d\c>e] = AndL1 (x).(M[d\c>e]) y" - "x\y \ (AndL2 (x).M y)[d\c>e] = AndL2 (x).(M[d\c>e]) y" - "a\(d,e,b) \ (OrR1 .M b)[d\c>e] = (if b=d then OrR1 .(M[d\c>e]) e else OrR1 .(M[d\c>e]) b)" - "a\(d,e,b) \ (OrR2 .M b)[d\c>e] = (if b=d then OrR2 .(M[d\c>e]) e else OrR2 .(M[d\c>e]) b)" - "\x\(N,z);y\(M,z);y\x\ \ (OrL (x).M (y).N z)[d\c>e] = OrL (x).(M[d\c>e]) (y).(N[d\c>e]) z" - "a\(d,e,b) \ (ImpR (x)..M b)[d\c>e] = +| "x\y \ (AndL1 (x).M y)[d\c>e] = AndL1 (x).(M[d\c>e]) y" +| "x\y \ (AndL2 (x).M y)[d\c>e] = AndL2 (x).(M[d\c>e]) y" +| "a\(d,e,b) \ (OrR1 .M b)[d\c>e] = (if b=d then OrR1 .(M[d\c>e]) e else OrR1 .(M[d\c>e]) b)" +| "a\(d,e,b) \ (OrR2 .M b)[d\c>e] = (if b=d then OrR2 .(M[d\c>e]) e else OrR2 .(M[d\c>e]) b)" +| "\x\(N,z);y\(M,z);y\x\ \ (OrL (x).M (y).N z)[d\c>e] = OrL (x).(M[d\c>e]) (y).(N[d\c>e]) z" +| "a\(d,e,b) \ (ImpR (x)..M b)[d\c>e] = (if b=d then ImpR (x)..(M[d\c>e]) e else ImpR (x)..(M[d\c>e]) b)" - "\a\(d,e,N);x\(M,y)\ \ (ImpL .M (x).N y)[d\c>e] = ImpL .(M[d\c>e]) (x).(N[d\c>e]) y" +| "\a\(d,e,N);x\(M,y)\ \ (ImpL .M (x).N y)[d\c>e] = ImpL .(M[d\c>e]) (x).(N[d\c>e]) y" apply(finite_guess)+ apply(rule TrueI)+ apply(simp add: abs_fresh abs_supp fin_supp)+ @@ -92,19 +94,21 @@ done nominal_primrec (freshness_context: "(u::name,v::name)") + nrename :: "trm \ name \ name \ trm" ("_[_\n>_]" [100,100,100] 100) +where "(Ax x a)[u\n>v] = (if 
x=u then Ax v a else Ax x a)" - "\a\N;x\(u,v,M)\ \ (Cut .M (x).N)[u\n>v] = Cut .(M[u\n>v]) (x).(N[u\n>v])" - "x\(u,v) \ (NotR (x).M a)[u\n>v] = NotR (x).(M[u\n>v]) a" - "(NotL .M x)[u\n>v] = (if x=u then NotL .(M[u\n>v]) v else NotL .(M[u\n>v]) x)" - "\a\(N,c);b\(M,c);b\a\ \ (AndR .M .N c)[u\n>v] = AndR .(M[u\n>v]) .(N[u\n>v]) c" - "x\(u,v,y) \ (AndL1 (x).M y)[u\n>v] = (if y=u then AndL1 (x).(M[u\n>v]) v else AndL1 (x).(M[u\n>v]) y)" - "x\(u,v,y) \ (AndL2 (x).M y)[u\n>v] = (if y=u then AndL2 (x).(M[u\n>v]) v else AndL2 (x).(M[u\n>v]) y)" - "a\b \ (OrR1 .M b)[u\n>v] = OrR1 .(M[u\n>v]) b" - "a\b \ (OrR2 .M b)[u\n>v] = OrR2 .(M[u\n>v]) b" - "\x\(u,v,N,z);y\(u,v,M,z);y\x\ \ (OrL (x).M (y).N z)[u\n>v] = +| "\a\N;x\(u,v,M)\ \ (Cut .M (x).N)[u\n>v] = Cut .(M[u\n>v]) (x).(N[u\n>v])" +| "x\(u,v) \ (NotR (x).M a)[u\n>v] = NotR (x).(M[u\n>v]) a" +| "(NotL .M x)[u\n>v] = (if x=u then NotL .(M[u\n>v]) v else NotL .(M[u\n>v]) x)" +| "\a\(N,c);b\(M,c);b\a\ \ (AndR .M .N c)[u\n>v] = AndR .(M[u\n>v]) .(N[u\n>v]) c" +| "x\(u,v,y) \ (AndL1 (x).M y)[u\n>v] = (if y=u then AndL1 (x).(M[u\n>v]) v else AndL1 (x).(M[u\n>v]) y)" +| "x\(u,v,y) \ (AndL2 (x).M y)[u\n>v] = (if y=u then AndL2 (x).(M[u\n>v]) v else AndL2 (x).(M[u\n>v]) y)" +| "a\b \ (OrR1 .M b)[u\n>v] = OrR1 .(M[u\n>v]) b" +| "a\b \ (OrR2 .M b)[u\n>v] = OrR2 .(M[u\n>v]) b" +| "\x\(u,v,N,z);y\(u,v,M,z);y\x\ \ (OrL (x).M (y).N z)[u\n>v] = (if z=u then OrL (x).(M[u\n>v]) (y).(N[u\n>v]) v else OrL (x).(M[u\n>v]) (y).(N[u\n>v]) z)" - "\a\b; x\(u,v)\ \ (ImpR (x)..M b)[u\n>v] = ImpR (x)..(M[u\n>v]) b" - "\a\N;x\(u,v,M,y)\ \ (ImpL .M (x).N y)[u\n>v] = +| "\a\b; x\(u,v)\ \ (ImpR (x)..M b)[u\n>v] = ImpR (x)..(M[u\n>v]) b" +| "\a\N;x\(u,v,M,y)\ \ (ImpL .M (x).N y)[u\n>v] = (if y=u then ImpL .(M[u\n>v]) (x).(N[u\n>v]) v else ImpL .(M[u\n>v]) (x).(N[u\n>v]) y)" apply(finite_guess)+ apply(rule TrueI)+ @@ -766,32 +770,30 @@ apply(simp add: fin_supp) done -consts +nominal_primrec (freshness_context: "(y::name,c::coname,P::trm)") substn :: "trm \ name \ coname \ trm \ trm" ("_{_:=<_>._}" [100,100,100,100] 100) - substc :: "trm \ coname \ name \ trm \ trm" ("_{_:=(_)._}" [100,100,100,100] 100) - -nominal_primrec (freshness_context: "(y::name,c::coname,P::trm)") +where "(Ax x a){y:=.P} = (if x=y then Cut .P (y).Ax y a else Ax x a)" - "\a\(c,P,N);x\(y,P,M)\ \ (Cut .M (x).N){y:=.P} = +| "\a\(c,P,N);x\(y,P,M)\ \ (Cut .M (x).N){y:=.P} = (if M=Ax y a then Cut .P (x).(N{y:=.P}) else Cut .(M{y:=.P}) (x).(N{y:=.P}))" - "x\(y,P) \ (NotR (x).M a){y:=.P} = NotR (x).(M{y:=.P}) a" - "a\(c,P) \ (NotL .M x){y:=.P} = +| "x\(y,P) \ (NotR (x).M a){y:=.P} = NotR (x).(M{y:=.P}) a" +| "a\(c,P) \ (NotL .M x){y:=.P} = (if x=y then fresh_fun (\x'. Cut .P (x').NotL .(M{y:=.P}) x') else NotL .(M{y:=.P}) x)" - "\a\(c,P,N,d);b\(c,P,M,d);b\a\ \ +| "\a\(c,P,N,d);b\(c,P,M,d);b\a\ \ (AndR .M .N d){y:=.P} = AndR .(M{y:=.P}) .(N{y:=.P}) d" - "x\(y,P,z) \ (AndL1 (x).M z){y:=.P} = +| "x\(y,P,z) \ (AndL1 (x).M z){y:=.P} = (if z=y then fresh_fun (\z'. Cut .P (z').AndL1 (x).(M{y:=.P}) z') else AndL1 (x).(M{y:=.P}) z)" - "x\(y,P,z) \ (AndL2 (x).M z){y:=.P} = +| "x\(y,P,z) \ (AndL2 (x).M z){y:=.P} = (if z=y then fresh_fun (\z'. 
Cut .P (z').AndL2 (x).(M{y:=.P}) z') else AndL2 (x).(M{y:=.P}) z)" - "a\(c,P,b) \ (OrR1 .M b){y:=.P} = OrR1 .(M{y:=.P}) b" - "a\(c,P,b) \ (OrR2 .M b){y:=.P} = OrR2 .(M{y:=.P}) b" - "\x\(y,N,P,z);u\(y,M,P,z);x\u\ \ (OrL (x).M (u).N z){y:=.P} = +| "a\(c,P,b) \ (OrR1 .M b){y:=.P} = OrR1 .(M{y:=.P}) b" +| "a\(c,P,b) \ (OrR2 .M b){y:=.P} = OrR2 .(M{y:=.P}) b" +| "\x\(y,N,P,z);u\(y,M,P,z);x\u\ \ (OrL (x).M (u).N z){y:=.P} = (if z=y then fresh_fun (\z'. Cut .P (z').OrL (x).(M{y:=.P}) (u).(N{y:=.P}) z') else OrL (x).(M{y:=.P}) (u).(N{y:=.P}) z)" - "\a\(b,c,P); x\(y,P)\ \ (ImpR (x)..M b){y:=.P} = ImpR (x)..(M{y:=.P}) b" - "\a\(N,c,P);x\(y,P,M,z)\ \ (ImpL .M (x).N z){y:=.P} = +| "\a\(b,c,P); x\(y,P)\ \ (ImpR (x)..M b){y:=.P} = ImpR (x)..(M{y:=.P}) b" +| "\a\(N,c,P);x\(y,P,M,z)\ \ (ImpL .M (x).N z){y:=.P} = (if y=z then fresh_fun (\z'. Cut .P (z').ImpL .(M{y:=.P}) (x).(N{y:=.P}) z') else ImpL .(M{y:=.P}) (x).(N{y:=.P}) z)" apply(finite_guess)+ @@ -842,27 +844,29 @@ done nominal_primrec (freshness_context: "(d::name,z::coname,P::trm)") + substc :: "trm \ coname \ name \ trm \ trm" ("_{_:=(_)._}" [100,100,100,100] 100) +where "(Ax x a){d:=(z).P} = (if d=a then Cut .(Ax x a) (z).P else Ax x a)" - "\a\(d,P,N);x\(z,P,M)\ \ (Cut .M (x).N){d:=(z).P} = +| "\a\(d,P,N);x\(z,P,M)\ \ (Cut .M (x).N){d:=(z).P} = (if N=Ax x d then Cut .(M{d:=(z).P}) (z).P else Cut .(M{d:=(z).P}) (x).(N{d:=(z).P}))" - "x\(z,P) \ (NotR (x).M a){d:=(z).P} = +| "x\(z,P) \ (NotR (x).M a){d:=(z).P} = (if d=a then fresh_fun (\a'. Cut .NotR (x).(M{d:=(z).P}) a' (z).P) else NotR (x).(M{d:=(z).P}) a)" - "a\(d,P) \ (NotL .M x){d:=(z).P} = NotL .(M{d:=(z).P}) x" - "\a\(P,c,N,d);b\(P,c,M,d);b\a\ \ (AndR .M .N c){d:=(z).P} = +| "a\(d,P) \ (NotL .M x){d:=(z).P} = NotL .(M{d:=(z).P}) x" +| "\a\(P,c,N,d);b\(P,c,M,d);b\a\ \ (AndR .M .N c){d:=(z).P} = (if d=c then fresh_fun (\a'. Cut .(AndR .(M{d:=(z).P}) .(N{d:=(z).P}) a') (z).P) else AndR .(M{d:=(z).P}) .(N{d:=(z).P}) c)" - "x\(y,z,P) \ (AndL1 (x).M y){d:=(z).P} = AndL1 (x).(M{d:=(z).P}) y" - "x\(y,P,z) \ (AndL2 (x).M y){d:=(z).P} = AndL2 (x).(M{d:=(z).P}) y" - "a\(d,P,b) \ (OrR1 .M b){d:=(z).P} = +| "x\(y,z,P) \ (AndL1 (x).M y){d:=(z).P} = AndL1 (x).(M{d:=(z).P}) y" +| "x\(y,P,z) \ (AndL2 (x).M y){d:=(z).P} = AndL2 (x).(M{d:=(z).P}) y" +| "a\(d,P,b) \ (OrR1 .M b){d:=(z).P} = (if d=b then fresh_fun (\a'. Cut .OrR1 .(M{d:=(z).P}) a' (z).P) else OrR1 .(M{d:=(z).P}) b)" - "a\(d,P,b) \ (OrR2 .M b){d:=(z).P} = +| "a\(d,P,b) \ (OrR2 .M b){d:=(z).P} = (if d=b then fresh_fun (\a'. Cut .OrR2 .(M{d:=(z).P}) a' (z).P) else OrR2 .(M{d:=(z).P}) b)" - "\x\(N,z,P,u);y\(M,z,P,u);x\y\ \ (OrL (x).M (y).N u){d:=(z).P} = +| "\x\(N,z,P,u);y\(M,z,P,u);x\y\ \ (OrL (x).M (y).N u){d:=(z).P} = OrL (x).(M{d:=(z).P}) (y).(N{d:=(z).P}) u" - "\a\(b,d,P); x\(z,P)\ \ (ImpR (x)..M b){d:=(z).P} = +| "\a\(b,d,P); x\(z,P)\ \ (ImpR (x)..M b){d:=(z).P} = (if d=b then fresh_fun (\a'. Cut .ImpR (x)..(M{d:=(z).P}) a' (z).P) else ImpR (x)..(M{d:=(z).P}) b)" - "\a\(N,d,P);x\(y,z,P,M)\ \ (ImpL .M (x).N y){d:=(z).P} = +| "\a\(N,d,P);x\(y,z,P,M)\ \ (ImpL .M (x).N y){d:=(z).P} = ImpL .(M{d:=(z).P}) (x).(N{d:=(z).P}) y" apply(finite_guess)+ apply(rule TrueI)+ @@ -10305,11 +10309,10 @@ lemma BINDINGc_decreasing: shows "X\Y \ BINDINGc B Y \ BINDINGc B X" by (simp add: BINDINGc_def) (blast) - -consts - NOTRIGHT::"ty \ ntrm set \ ctrm set" nominal_primrec + NOTRIGHT :: "ty \ ntrm set \ ctrm set" +where "NOTRIGHT (NOT B) X = { :NotR (x).M a | a x M. 
fic (NotR (x).M a) a \ (x):M \ X }" apply(rule TrueI)+ done @@ -10365,11 +10368,10 @@ apply(drule pt_bij1[OF pt_coname_inst, OF at_coname_inst]) apply(simp add: swap_simps) done - -consts - NOTLEFT::"ty \ ctrm set \ ntrm set" nominal_primrec + NOTLEFT :: "ty \ ctrm set \ ntrm set" +where "NOTLEFT (NOT B) X = { (x):NotL .M x | a x M. fin (NotL .M x) x \ :M \ X }" apply(rule TrueI)+ done @@ -10425,11 +10427,10 @@ apply(drule pt_bij1[OF pt_coname_inst, OF at_coname_inst]) apply(simp add: swap_simps) done - -consts - ANDRIGHT::"ty \ ctrm set \ ctrm set \ ctrm set" nominal_primrec + ANDRIGHT :: "ty \ ctrm set \ ctrm set \ ctrm set" +where "ANDRIGHT (B AND C) X Y = { :AndR .M .N c | c a b M N. fic (AndR .M .N c) c \ :M \ X \ :N \ Y }" apply(rule TrueI)+ @@ -10505,10 +10506,9 @@ apply(simp) done -consts - ANDLEFT1::"ty \ ntrm set \ ntrm set" - nominal_primrec + ANDLEFT1 :: "ty \ ntrm set \ ntrm set" +where "ANDLEFT1 (B AND C) X = { (y):AndL1 (x).M y | x y M. fin (AndL1 (x).M y) y \ (x):M \ X }" apply(rule TrueI)+ done @@ -10565,10 +10565,9 @@ apply(simp add: swap_simps) done -consts - ANDLEFT2::"ty \ ntrm set \ ntrm set" - nominal_primrec + ANDLEFT2 :: "ty \ ntrm set \ ntrm set" +where "ANDLEFT2 (B AND C) X = { (y):AndL2 (x).M y | x y M. fin (AndL2 (x).M y) y \ (x):M \ X }" apply(rule TrueI)+ done @@ -10625,10 +10624,9 @@ apply(simp add: swap_simps) done -consts - ORLEFT::"ty \ ntrm set \ ntrm set \ ntrm set" - nominal_primrec + ORLEFT :: "ty \ ntrm set \ ntrm set \ ntrm set" +where "ORLEFT (B OR C) X Y = { (z):OrL (x).M (y).N z | x y z M N. fin (OrL (x).M (y).N z) z \ (x):M \ X \ (y):N \ Y }" apply(rule TrueI)+ @@ -10704,10 +10702,9 @@ apply(simp add: swap_simps) done -consts - ORRIGHT1::"ty \ ctrm set \ ctrm set" - nominal_primrec + ORRIGHT1 :: "ty \ ctrm set \ ctrm set" +where "ORRIGHT1 (B OR C) X = { :OrR1 .M b | a b M. fic (OrR1 .M b) b \ :M \ X }" apply(rule TrueI)+ done @@ -10764,10 +10761,9 @@ apply(simp) done -consts - ORRIGHT2::"ty \ ctrm set \ ctrm set" - nominal_primrec + ORRIGHT2 :: "ty \ ctrm set \ ctrm set" +where "ORRIGHT2 (B OR C) X = { :OrR2 .M b | a b M. fic (OrR2 .M b) b \ :M \ X }" apply(rule TrueI)+ done @@ -10824,10 +10820,9 @@ apply(simp) done -consts - IMPRIGHT::"ty \ ntrm set \ ctrm set \ ntrm set \ ctrm set \ ctrm set" - nominal_primrec + IMPRIGHT :: "ty \ ntrm set \ ctrm set \ ntrm set \ ctrm set \ ctrm set" +where "IMPRIGHT (B IMP C) X Y Z U= { :ImpR (x)..M b | x a b M. fic (ImpR (x)..M b) b \ (\z P. x\(z,P) \ (z):P \ Z \ (x):(M{a:=(z).P}) \ X) @@ -10954,10 +10949,9 @@ apply(perm_simp add: nsubst_eqvt fresh_right) done -consts - IMPLEFT::"ty \ ctrm set \ ntrm set \ ntrm set" - nominal_primrec + IMPLEFT :: "ty \ ctrm set \ ntrm set \ ntrm set" +where "IMPLEFT (B IMP C) X Y = { (y):ImpL .M (x).N y | x a y M N. 
fin (ImpL .M (x).N y) y \ :M \ X \ (x):N \ Y }" apply(rule TrueI)+ @@ -17800,23 +17794,21 @@ apply(auto) done -consts +nominal_primrec (freshness_context: "\n::(name\coname\trm)") stn :: "trm\(name\coname\trm) list\trm" - stc :: "trm\(coname\name\trm) list\trm" - -nominal_primrec (freshness_context: "\n::(name\coname\trm)") +where "stn (Ax x a) \n = lookupc x a \n" - "\a\(N,\n);x\(M,\n)\ \ stn (Cut .M (x).N) \n = (Cut .M (x).N)" - "x\\n \ stn (NotR (x).M a) \n = (NotR (x).M a)" - "a\\n \stn (NotL .M x) \n = (NotL .M x)" - "\a\(N,d,b,\n);b\(M,d,a,\n)\ \ stn (AndR .M .N d) \n = (AndR .M .N d)" - "x\(z,\n) \ stn (AndL1 (x).M z) \n = (AndL1 (x).M z)" - "x\(z,\n) \ stn (AndL2 (x).M z) \n = (AndL2 (x).M z)" - "a\(b,\n) \ stn (OrR1 .M b) \n = (OrR1 .M b)" - "a\(b,\n) \ stn (OrR2 .M b) \n = (OrR2 .M b)" - "\x\(N,z,u,\n);u\(M,z,x,\n)\ \ stn (OrL (x).M (u).N z) \n = (OrL (x).M (u).N z)" - "\a\(b,\n);x\\n\ \ stn (ImpR (x)..M b) \n = (ImpR (x)..M b)" - "\a\(N,\n);x\(M,z,\n)\ \ stn (ImpL .M (x).N z) \n = (ImpL .M (x).N z)" +| "\a\(N,\n);x\(M,\n)\ \ stn (Cut .M (x).N) \n = (Cut .M (x).N)" +| "x\\n \ stn (NotR (x).M a) \n = (NotR (x).M a)" +| "a\\n \stn (NotL .M x) \n = (NotL .M x)" +| "\a\(N,d,b,\n);b\(M,d,a,\n)\ \ stn (AndR .M .N d) \n = (AndR .M .N d)" +| "x\(z,\n) \ stn (AndL1 (x).M z) \n = (AndL1 (x).M z)" +| "x\(z,\n) \ stn (AndL2 (x).M z) \n = (AndL2 (x).M z)" +| "a\(b,\n) \ stn (OrR1 .M b) \n = (OrR1 .M b)" +| "a\(b,\n) \ stn (OrR2 .M b) \n = (OrR2 .M b)" +| "\x\(N,z,u,\n);u\(M,z,x,\n)\ \ stn (OrL (x).M (u).N z) \n = (OrL (x).M (u).N z)" +| "\a\(b,\n);x\\n\ \ stn (ImpR (x)..M b) \n = (ImpR (x)..M b)" +| "\a\(N,\n);x\(M,z,\n)\ \ stn (ImpL .M (x).N z) \n = (ImpL .M (x).N z)" apply(finite_guess)+ apply(rule TrueI)+ apply(simp add: abs_fresh abs_supp fin_supp)+ @@ -17824,18 +17816,20 @@ done nominal_primrec (freshness_context: "\c::(coname\name\trm)") + stc :: "trm\(coname\name\trm) list\trm" +where "stc (Ax x a) \c = lookupd x a \c" - "\a\(N,\c);x\(M,\c)\ \ stc (Cut .M (x).N) \c = (Cut .M (x).N)" - "x\\c \ stc (NotR (x).M a) \c = (NotR (x).M a)" - "a\\c \ stc (NotL .M x) \c = (NotL .M x)" - "\a\(N,d,b,\c);b\(M,d,a,\c)\ \ stc (AndR .M .N d) \c = (AndR .M .N d)" - "x\(z,\c) \ stc (AndL1 (x).M z) \c = (AndL1 (x).M z)" - "x\(z,\c) \ stc (AndL2 (x).M z) \c = (AndL2 (x).M z)" - "a\(b,\c) \ stc (OrR1 .M b) \c = (OrR1 .M b)" - "a\(b,\c) \ stc (OrR2 .M b) \c = (OrR2 .M b)" - "\x\(N,z,u,\c);u\(M,z,x,\c)\ \ stc (OrL (x).M (u).N z) \c = (OrL (x).M (u).N z)" - "\a\(b,\c);x\\c\ \ stc (ImpR (x)..M b) \c = (ImpR (x)..M b)" - "\a\(N,\c);x\(M,z,\c)\ \ stc (ImpL .M (x).N z) \c = (ImpL .M (x).N z)" +| "\a\(N,\c);x\(M,\c)\ \ stc (Cut .M (x).N) \c = (Cut .M (x).N)" +| "x\\c \ stc (NotR (x).M a) \c = (NotR (x).M a)" +| "a\\c \ stc (NotL .M x) \c = (NotL .M x)" +| "\a\(N,d,b,\c);b\(M,d,a,\c)\ \ stc (AndR .M .N d) \c = (AndR .M .N d)" +| "x\(z,\c) \ stc (AndL1 (x).M z) \c = (AndL1 (x).M z)" +| "x\(z,\c) \ stc (AndL2 (x).M z) \c = (AndL2 (x).M z)" +| "a\(b,\c) \ stc (OrR1 .M b) \c = (OrR1 .M b)" +| "a\(b,\c) \ stc (OrR2 .M b) \c = (OrR2 .M b)" +| "\x\(N,z,u,\c);u\(M,z,x,\c)\ \ stc (OrL (x).M (u).N z) \c = (OrL (x).M (u).N z)" +| "\a\(b,\c);x\\c\ \ stc (ImpR (x)..M b) \c = (ImpR (x)..M b)" +| "\a\(N,\c);x\(M,z,\c)\ \ stc (ImpL .M (x).N z) \c = (ImpL .M (x).N z)" apply(finite_guess)+ apply(rule TrueI)+ apply(simp add: abs_fresh abs_supp fin_supp)+ @@ -17926,51 +17920,50 @@ apply(perm_simp) done -consts +nominal_primrec (freshness_context: "(\n::(name\coname\trm) list,\c::(coname\name\trm) list)") psubst :: "(name\coname\trm) 
list\(coname\name\trm) list\trm\trm" ("_,_<_>" [100,100,100] 100) - -nominal_primrec (freshness_context: "(\n::(name\coname\trm) list,\c::(coname\name\trm) list)") +where "\n,\c = lookup x a \n \c" - "\a\(N,\n,\c);x\(M,\n,\c)\ \ \n,\c.M (x).N> = +| "\a\(N,\n,\c);x\(M,\n,\c)\ \ \n,\c.M (x).N> = Cut .(if \x. M=Ax x a then stn M \n else \n,\c) (x).(if \a. N=Ax x a then stc N \c else \n,\c)" - "x\(\n,\c) \ \n,\c = +| "x\(\n,\c) \ \n,\c = (case (findc \c a) of Some (u,P) \ fresh_fun (\a'. Cut .NotR (x).(\n,\c) a' (u).P) | None \ NotR (x).(\n,\c) a)" - "a\(\n,\c) \ \n,\c.M x> = +| "a\(\n,\c) \ \n,\c.M x> = (case (findn \n x) of Some (c,P) \ fresh_fun (\x'. Cut .P (x').(NotL .(\n,\c) x')) | None \ NotL .(\n,\c) x)" - "\a\(N,c,\n,\c);b\(M,c,\n,\c);b\a\ \ (\n,\c.M .N c>) = +| "\a\(N,c,\n,\c);b\(M,c,\n,\c);b\a\ \ (\n,\c.M .N c>) = (case (findc \c c) of Some (x,P) \ fresh_fun (\a'. Cut .(AndR .(\n,\c) .(\n,\c) a') (x).P) | None \ AndR .(\n,\c) .(\n,\c) c)" - "x\(z,\n,\c) \ (\n,\c) = +| "x\(z,\n,\c) \ (\n,\c) = (case (findn \n z) of Some (c,P) \ fresh_fun (\z'. Cut .P (z').AndL1 (x).(\n,\c) z') | None \ AndL1 (x).(\n,\c) z)" - "x\(z,\n,\c) \ (\n,\c) = +| "x\(z,\n,\c) \ (\n,\c) = (case (findn \n z) of Some (c,P) \ fresh_fun (\z'. Cut .P (z').AndL2 (x).(\n,\c) z') | None \ AndL2 (x).(\n,\c) z)" - "\x\(N,z,\n,\c);u\(M,z,\n,\c);x\u\ \ (\n,\c) = +| "\x\(N,z,\n,\c);u\(M,z,\n,\c);x\u\ \ (\n,\c) = (case (findn \n z) of Some (c,P) \ fresh_fun (\z'. Cut .P (z').OrL (x).(\n,\c) (u).(\n,\c) z') | None \ OrL (x).(\n,\c) (u).(\n,\c) z)" - "a\(b,\n,\c) \ (\n,\c.M b>) = +| "a\(b,\n,\c) \ (\n,\c.M b>) = (case (findc \c b) of Some (x,P) \ fresh_fun (\a'. Cut .OrR1 .(\n,\c) a' (x).P) | None \ OrR1 .(\n,\c) b)" - "a\(b,\n,\c) \ (\n,\c.M b>) = +| "a\(b,\n,\c) \ (\n,\c.M b>) = (case (findc \c b) of Some (x,P) \ fresh_fun (\a'. Cut .OrR2 .(\n,\c) a' (x).P) | None \ OrR2 .(\n,\c) b)" - "\a\(b,\n,\c); x\(\n,\c)\ \ (\n,\c.M b>) = +| "\a\(b,\n,\c); x\(\n,\c)\ \ (\n,\c.M b>) = (case (findc \c b) of Some (z,P) \ fresh_fun (\a'. Cut .ImpR (x)..(\n,\c) a' (z).P) | None \ ImpR (x)..(\n,\c) b)" - "\a\(N,\n,\c); x\(z,M,\n,\c)\ \ (\n,\c.M (x).N z>) = +| "\a\(N,\n,\c); x\(z,M,\n,\c)\ \ (\n,\c.M (x).N z>) = (case (findn \n z) of Some (c,P) \ fresh_fun (\z'. 
Cut .P (z').ImpL .(\n,\c) (x).(\n,\c) z') | None \ ImpL .(\n,\c) (x).(\n,\c) z)" diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Nominal/Examples/Compile.thy --- a/src/HOL/Nominal/Examples/Compile.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Nominal/Examples/Compile.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,3 @@ -(* $Id$ *) - (* The definitions for a challenge suggested by Adam Chlipala *) theory Compile @@ -92,20 +90,24 @@ text {* capture-avoiding substitution *} -consts - subst :: "'a \ name \ 'a \ 'a" ("_[_::=_]" [100,100,100] 100) +class subst = + fixes subst :: "'a \ name \ 'a \ 'a" ("_[_::=_]" [100,100,100] 100) -nominal_primrec +instantiation trm :: subst +begin + +nominal_primrec subst_trm +where "(Var x)[y::=t'] = (if x=y then t' else (Var x))" - "(App t1 t2)[y::=t'] = App (t1[y::=t']) (t2[y::=t'])" - "\x\y; x\t'\ \ (Lam [x].t)[y::=t'] = Lam [x].(t[y::=t'])" - "(Const n)[y::=t'] = Const n" - "(Pr e1 e2)[y::=t'] = Pr (e1[y::=t']) (e2[y::=t'])" - "(Fst e)[y::=t'] = Fst (e[y::=t'])" - "(Snd e)[y::=t'] = Snd (e[y::=t'])" - "(InL e)[y::=t'] = InL (e[y::=t'])" - "(InR e)[y::=t'] = InR (e[y::=t'])" - "\z\x; x\y; x\e; x\e2; z\y; z\e; z\e1; x\t'; z\t'\ \ +| "(App t1 t2)[y::=t'] = App (t1[y::=t']) (t2[y::=t'])" +| "\x\y; x\t'\ \ (Lam [x].t)[y::=t'] = Lam [x].(t[y::=t'])" +| "(Const n)[y::=t'] = Const n" +| "(Pr e1 e2)[y::=t'] = Pr (e1[y::=t']) (e2[y::=t'])" +| "(Fst e)[y::=t'] = Fst (e[y::=t'])" +| "(Snd e)[y::=t'] = Snd (e[y::=t'])" +| "(InL e)[y::=t'] = InL (e[y::=t'])" +| "(InR e)[y::=t'] = InR (e[y::=t'])" +| "\z\x; x\y; x\e; x\e2; z\y; z\e; z\e1; x\t'; z\t'\ \ (Case e of inl x \ e1 | inr z \ e2)[y::=t'] = (Case (e[y::=t']) of inl x \ (e1[y::=t']) | inr z \ (e2[y::=t']))" apply(finite_guess)+ @@ -114,23 +116,35 @@ apply(fresh_guess)+ done -nominal_primrec (Isubst) +instance .. + +end + +instantiation trmI :: subst +begin + +nominal_primrec subst_trmI +where "(IVar x)[y::=t'] = (if x=y then t' else (IVar x))" - "(IApp t1 t2)[y::=t'] = IApp (t1[y::=t']) (t2[y::=t'])" - "\x\y; x\t'\ \ (ILam [x].t)[y::=t'] = ILam [x].(t[y::=t'])" - "(INat n)[y::=t'] = INat n" - "(IUnit)[y::=t'] = IUnit" - "(ISucc e)[y::=t'] = ISucc (e[y::=t'])" - "(IAss e1 e2)[y::=t'] = IAss (e1[y::=t']) (e2[y::=t'])" - "(IRef e)[y::=t'] = IRef (e[y::=t'])" - "(ISeq e1 e2)[y::=t'] = ISeq (e1[y::=t']) (e2[y::=t'])" - "(Iif e e1 e2)[y::=t'] = Iif (e[y::=t']) (e1[y::=t']) (e2[y::=t'])" +| "(IApp t1 t2)[y::=t'] = IApp (t1[y::=t']) (t2[y::=t'])" +| "\x\y; x\t'\ \ (ILam [x].t)[y::=t'] = ILam [x].(t[y::=t'])" +| "(INat n)[y::=t'] = INat n" +| "(IUnit)[y::=t'] = IUnit" +| "(ISucc e)[y::=t'] = ISucc (e[y::=t'])" +| "(IAss e1 e2)[y::=t'] = IAss (e1[y::=t']) (e2[y::=t'])" +| "(IRef e)[y::=t'] = IRef (e[y::=t'])" +| "(ISeq e1 e2)[y::=t'] = ISeq (e1[y::=t']) (e2[y::=t'])" +| "(Iif e e1 e2)[y::=t'] = Iif (e[y::=t']) (e1[y::=t']) (e2[y::=t'])" apply(finite_guess)+ apply(rule TrueI)+ apply(simp add: abs_fresh)+ apply(fresh_guess)+ done +instance .. 
+ +end + lemma Isubst_eqvt[eqvt]: fixes pi::"name prm" and t1::"trmI" @@ -138,7 +152,7 @@ and x::"name" shows "pi\(t1[x::=t2]) = ((pi\t1)[(pi\x)::=(pi\t2)])" apply (nominal_induct t1 avoiding: x t2 rule: trmI.strong_induct) - apply (simp_all add: Isubst.simps eqvts fresh_bij) + apply (simp_all add: subst_trmI.simps eqvts fresh_bij) done lemma Isubst_supp: @@ -147,7 +161,7 @@ and x::"name" shows "((supp (t1[x::=t2]))::name set) \ (supp t2)\((supp t1)-{x})" apply (nominal_induct t1 avoiding: x t2 rule: trmI.strong_induct) - apply (auto simp add: Isubst.simps trmI.supp supp_atm abs_supp supp_nat) + apply (auto simp add: subst_trmI.simps trmI.supp supp_atm abs_supp supp_nat) apply blast+ done @@ -198,29 +212,29 @@ text {* Translation functions *} -consts trans :: "trm \ trmI" - nominal_primrec + trans :: "trm \ trmI" +where "trans (Var x) = (IVar x)" - "trans (App e1 e2) = IApp (trans e1) (trans e2)" - "trans (Lam [x].e) = ILam [x].(trans e)" - "trans (Const n) = INat n" - "trans (Pr e1 e2) = +| "trans (App e1 e2) = IApp (trans e1) (trans e2)" +| "trans (Lam [x].e) = ILam [x].(trans e)" +| "trans (Const n) = INat n" +| "trans (Pr e1 e2) = (let limit = IRef(INat 0) in let v1 = (trans e1) in let v2 = (trans e2) in (((ISucc limit)\v1);;(ISucc(ISucc limit)\v2));;(INat 0 \ ISucc(ISucc(limit))))" - "trans (Fst e) = IRef (ISucc (trans e))" - "trans (Snd e) = IRef (ISucc (ISucc (trans e)))" - "trans (InL e) = +| "trans (Fst e) = IRef (ISucc (trans e))" +| "trans (Snd e) = IRef (ISucc (ISucc (trans e)))" +| "trans (InL e) = (let limit = IRef(INat 0) in let v = (trans e) in (((ISucc limit)\INat(0));;(ISucc(ISucc limit)\v));;(INat 0 \ ISucc(ISucc(limit))))" - "trans (InR e) = +| "trans (InR e) = (let limit = IRef(INat 0) in let v = (trans e) in (((ISucc limit)\INat(1));;(ISucc(ISucc limit)\v));;(INat 0 \ ISucc(ISucc(limit))))" - "\x2\x1; x1\e; x1\e2; x2\e; x2\e1\ \ +| "\x2\x1; x1\e; x1\e2; x2\e; x2\e1\ \ trans (Case e of inl x1 \ e1 | inr x2 \ e2) = (let v = (trans e) in let v1 = (trans e1) in @@ -232,11 +246,11 @@ apply(fresh_guess add: Let_def)+ done -consts trans_type :: "ty \ tyI" - nominal_primrec + trans_type :: "ty \ tyI" +where "trans_type (Data \) = DataI(NatI)" - "trans_type (\1\\2) = (trans_type \1)\(trans_type \2)" +| "trans_type (\1\\2) = (trans_type \1)\(trans_type \2)" by (rule TrueI)+ end \ No newline at end of file diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Nominal/Examples/Contexts.thy --- a/src/HOL/Nominal/Examples/Contexts.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Nominal/Examples/Contexts.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,3 @@ -(* $Id$ *) - theory Contexts imports "../Nominal" begin @@ -42,12 +40,12 @@ text {* Capture-Avoiding Substitution *} -consts subst :: "lam \ name \ lam \ lam" ("_[_::=_]" [100,100,100] 100) - nominal_primrec + subst :: "lam \ name \ lam \ lam" ("_[_::=_]" [100,100,100] 100) +where "(Var x)[y::=s] = (if x=y then s else (Var x))" - "(App t\<^isub>1 t\<^isub>2)[y::=s] = App (t\<^isub>1[y::=s]) (t\<^isub>2[y::=s])" - "x\(y,s) \ (Lam [x].t)[y::=s] = Lam [x].(t[y::=s])" +| "(App t\<^isub>1 t\<^isub>2)[y::=s] = App (t\<^isub>1[y::=s]) (t\<^isub>2[y::=s])" +| "x\(y,s) \ (Lam [x].t)[y::=s] = Lam [x].(t[y::=s])" apply(finite_guess)+ apply(rule TrueI)+ apply(simp add: abs_fresh) @@ -59,14 +57,13 @@ This operation is possibly capturing. 
*} -consts +nominal_primrec filling :: "ctx \ lam \ lam" ("_\_\" [100,100] 100) - -nominal_primrec +where "\\t\ = t" - "(CAppL E t')\t\ = App (E\t\) t'" - "(CAppR t' E)\t\ = App t' (E\t\)" - "(CLam [x].E)\t\ = Lam [x].(E\t\)" +| "(CAppL E t')\t\ = App (E\t\) t'" +| "(CAppR t' E)\t\ = App t' (E\t\)" +| "(CLam [x].E)\t\ = Lam [x].(E\t\)" by (rule TrueI)+ text {* @@ -81,14 +78,13 @@ text {* The composition of two contexts. *} -consts +nominal_primrec ctx_compose :: "ctx \ ctx \ ctx" ("_ \ _" [100,100] 100) - -nominal_primrec +where "\ \ E' = E'" - "(CAppL E t') \ E' = CAppL (E \ E') t'" - "(CAppR t' E) \ E' = CAppR t' (E \ E')" - "(CLam [x].E) \ E' = CLam [x].(E \ E')" +| "(CAppL E t') \ E' = CAppL (E \ E') t'" +| "(CAppR t' E) \ E' = CAppR t' (E \ E')" +| "(CLam [x].E) \ E' = CLam [x].(E \ E')" by (rule TrueI)+ lemma ctx_compose: diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Nominal/Examples/Crary.thy --- a/src/HOL/Nominal/Examples/Crary.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Nominal/Examples/Crary.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,4 +1,3 @@ -(* "$Id$" *) (* *) (* Formalisation of the chapter on Logical Relations *) (* and a Case Study in Equivalence Checking *) @@ -47,14 +46,20 @@ shows "(\ T\<^isub>1 T\<^isub>2. T=T\<^isub>1\T\<^isub>2) \ T=TUnit \ T=TBase" by (induct T rule:ty.induct) (auto) -instance ty :: size .. +instantiation ty :: size +begin -nominal_primrec +nominal_primrec size_ty +where "size (TBase) = 1" - "size (TUnit) = 1" - "size (T\<^isub>1\T\<^isub>2) = size T\<^isub>1 + size T\<^isub>2" +| "size (TUnit) = 1" +| "size (T\<^isub>1\T\<^isub>2) = size T\<^isub>1 + size T\<^isub>2" by (rule TrueI)+ +instance .. + +end + lemma ty_size_greater_zero[simp]: fixes T::"ty" shows "size T > 0" @@ -87,16 +92,15 @@ using a by (induct rule: lookup.induct) (auto simp add: fresh_list_cons fresh_prod fresh_atm) - -consts - psubst :: "Subst \ trm \ trm" ("_<_>" [100,100] 130) nominal_primrec + psubst :: "Subst \ trm \ trm" ("_<_>" [100,100] 130) +where "\<(Var x)> = (lookup \ x)" - "\<(App t\<^isub>1 t\<^isub>2)> = App \1> \2>" - "x\\ \ \<(Lam [x].t)> = Lam [x].(\)" - "\<(Const n)> = Const n" - "\<(Unit)> = Unit" +| "\<(App t\<^isub>1 t\<^isub>2)> = App \1> \2>" +| "x\\ \ \<(Lam [x].t)> = Lam [x].(\)" +| "\<(Const n)> = Const n" +| "\<(Unit)> = Unit" apply(finite_guess)+ apply(rule TrueI)+ apply(simp add: abs_fresh)+ diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Nominal/Examples/Fsub.thy --- a/src/HOL/Nominal/Examples/Fsub.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Nominal/Examples/Fsub.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,3 @@ -(* $Id$ *) - (*<*) theory Fsub imports "../Nominal" @@ -229,32 +227,26 @@ section {* Size and Capture-Avoiding Substitution for Types *} -consts size_ty :: "ty \ nat" - nominal_primrec + size_ty :: "ty \ nat" +where "size_ty (Tvar X) = 1" - "size_ty (Top) = 1" - "size_ty (T1 \ T2) = (size_ty T1) + (size_ty T2) + 1" - "X\T1 \ size_ty (\[X<:T1].T2) = (size_ty T1) + (size_ty T2) + 1" +| "size_ty (Top) = 1" +| "size_ty (T1 \ T2) = (size_ty T1) + (size_ty T2) + 1" +| "X\T1 \ size_ty (\[X<:T1].T2) = (size_ty T1) + (size_ty T2) + 1" apply (finite_guess)+ apply (rule TrueI)+ apply (simp add: fresh_nat) apply (fresh_guess)+ done -consts subst_ty :: "tyvrs \ ty \ ty \ ty" - -syntax - subst_ty_syn :: "ty \ tyvrs \ ty \ ty" ("_[_:=_]\<^isub>t\<^isub>y" [100,100,100] 100) - -translations - "T1[Y:=T2]\<^isub>t\<^isub>y" \ "subst_ty Y T2 T1" - nominal_primrec + subst_ty :: "ty \ tyvrs \ ty \ ty" ("_[_:=_]\<^isub>t\<^isub>y" [100,100,100] 100) +where 
"(Tvar X)[Y:=T]\<^isub>t\<^isub>y= (if X=Y then T else (Tvar X))" - "(Top)[Y:=T]\<^isub>t\<^isub>y = Top" - "(T\<^isub>1 \ T\<^isub>2)[Y:=T]\<^isub>t\<^isub>y = (T\<^isub>1[Y:=T]\<^isub>t\<^isub>y) \ (T\<^isub>2[Y:=T]\<^isub>t\<^isub>y)" - "\X\(Y,T); X\T\<^isub>1\ \ (\[X<:T\<^isub>1].T\<^isub>2)[Y:=T]\<^isub>t\<^isub>y = (\[X<:(T\<^isub>1[Y:=T]\<^isub>t\<^isub>y)].(T\<^isub>2[Y:=T]\<^isub>t\<^isub>y))" +| "(Top)[Y:=T]\<^isub>t\<^isub>y = Top" +| "(T\<^isub>1 \ T\<^isub>2)[Y:=T]\<^isub>t\<^isub>y = (T\<^isub>1[Y:=T]\<^isub>t\<^isub>y) \ (T\<^isub>2[Y:=T]\<^isub>t\<^isub>y)" +| "\X\(Y,T); X\T\<^isub>1\ \ (\[X<:T\<^isub>1].T\<^isub>2)[Y:=T]\<^isub>t\<^isub>y = (\[X<:(T\<^isub>1[Y:=T]\<^isub>t\<^isub>y)].(T\<^isub>2[Y:=T]\<^isub>t\<^isub>y))" apply (finite_guess)+ apply (rule TrueI)+ apply (simp add: abs_fresh) diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Nominal/Examples/Height.thy --- a/src/HOL/Nominal/Examples/Height.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Nominal/Examples/Height.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,3 @@ -(* $Id$ *) - theory Height imports "../Nominal" begin @@ -17,13 +15,13 @@ | Lam "\name\lam" ("Lam [_]._" [100,100] 100) text {* Definition of the height-function on lambda-terms. *} -consts - height :: "lam \ int" nominal_primrec + height :: "lam \ int" +where "height (Var x) = 1" - "height (App t1 t2) = (max (height t1) (height t2)) + 1" - "height (Lam [a].t) = (height t) + 1" +| "height (App t1 t2) = (max (height t1) (height t2)) + 1" +| "height (Lam [a].t) = (height t) + 1" apply(finite_guess add: perm_int_def)+ apply(rule TrueI)+ apply(simp add: fresh_int) @@ -32,13 +30,12 @@ text {* Definition of capture-avoiding substitution. *} -consts +nominal_primrec subst :: "lam \ name \ lam \ lam" ("_[_::=_]" [100,100,100] 100) - -nominal_primrec +where "(Var x)[y::=t'] = (if x=y then t' else (Var x))" - "(App t1 t2)[y::=t'] = App (t1[y::=t']) (t2[y::=t'])" - "\x\y; x\t'\ \ (Lam [x].t)[y::=t'] = Lam [x].(t[y::=t'])" +| "(App t1 t2)[y::=t'] = App (t1[y::=t']) (t2[y::=t'])" +| "\x\y; x\t'\ \ (Lam [x].t)[y::=t'] = Lam [x].(t[y::=t'])" apply(finite_guess)+ apply(rule TrueI)+ apply(simp add: abs_fresh) diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Nominal/Examples/Lam_Funs.thy --- a/src/HOL/Nominal/Examples/Lam_Funs.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Nominal/Examples/Lam_Funs.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,3 @@ -(* $Id$ *) - theory Lam_Funs imports "../Nominal" begin @@ -18,13 +16,12 @@ text {* The depth of a lambda-term. *} -consts +nominal_primrec depth :: "lam \ nat" - -nominal_primrec +where "depth (Var x) = 1" - "depth (App t1 t2) = (max (depth t1) (depth t2)) + 1" - "depth (Lam [a].t) = (depth t) + 1" +| "depth (App t1 t2) = (max (depth t1) (depth t2)) + 1" +| "depth (Lam [a].t) = (depth t) + 1" apply(finite_guess)+ apply(rule TrueI)+ apply(simp add: fresh_nat) @@ -38,13 +35,12 @@ the invariant that frees always returns a finite set of names. *} -consts +nominal_primrec (invariant: "\s::name set. finite s") frees :: "lam \ name set" - -nominal_primrec (invariant: "\s::name set. 
finite s") +where "frees (Var a) = {a}" - "frees (App t1 t2) = (frees t1) \ (frees t2)" - "frees (Lam [a].t) = (frees t) - {a}" +| "frees (App t1 t2) = (frees t1) \ (frees t2)" +| "frees (Lam [a].t) = (frees t) - {a}" apply(finite_guess)+ apply(simp)+ apply(simp add: fresh_def) @@ -78,14 +74,13 @@ and X::"name" shows "pi\(lookup \ X) = lookup (pi\\) (pi\X)" by (induct \) (auto simp add: eqvts) - -consts - psubst :: "(name\lam) list \ lam \ lam" ("_<_>" [95,95] 105) nominal_primrec + psubst :: "(name\lam) list \ lam \ lam" ("_<_>" [95,95] 105) +where "\<(Var x)> = (lookup \ x)" - "\<(App e\<^isub>1 e\<^isub>2)> = App (\1>) (\2>)" - "x\\ \ \<(Lam [x].e)> = Lam [x].(\)" +| "\<(App e\<^isub>1 e\<^isub>2)> = App (\1>) (\2>)" +| "x\\ \ \<(Lam [x].e)> = Lam [x].(\)" apply(finite_guess)+ apply(rule TrueI)+ apply(simp add: abs_fresh)+ @@ -130,26 +125,24 @@ text {* Filling a lambda-term into a context. *} -consts +nominal_primrec filling :: "clam \ lam \ lam" ("_\_\" [100,100] 100) - -nominal_primrec +where "\\t\ = t" - "(CAppL E t')\t\ = App (E\t\) t'" - "(CAppR t' E)\t\ = App t' (E\t\)" - "(CLam [x].E)\t\ = Lam [x].(E\t\)" +| "(CAppL E t')\t\ = App (E\t\) t'" +| "(CAppR t' E)\t\ = App t' (E\t\)" +| "(CLam [x].E)\t\ = Lam [x].(E\t\)" by (rule TrueI)+ text {* Composition od two contexts *} -consts +nominal_primrec clam_compose :: "clam \ clam \ clam" ("_ \ _" [100,100] 100) - -nominal_primrec +where "\ \ E' = E'" - "(CAppL E t') \ E' = CAppL (E \ E') t'" - "(CAppR t' E) \ E' = CAppR t' (E \ E')" - "(CLam [x].E) \ E' = CLam [x].(E \ E')" +| "(CAppL E t') \ E' = CAppL (E \ E') t'" +| "(CAppR t' E) \ E' = CAppR t' (E \ E')" +| "(CLam [x].E) \ E' = CLam [x].(E \ E')" by (rule TrueI)+ lemma clam_compose: diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Nominal/Examples/LocalWeakening.thy --- a/src/HOL/Nominal/Examples/LocalWeakening.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Nominal/Examples/LocalWeakening.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,3 @@ -(* $Id$ *) - (* Formalisation of weakening using locally nameless *) (* terms; the nominal infrastructure can also derive *) (* strong induction principles for such representations *) @@ -29,13 +27,13 @@ by (induct t rule: llam.induct) (auto simp add: llam.inject) -consts llam_size :: "llam \ nat" - nominal_primrec - "llam_size (lPar a) = 1" - "llam_size (lVar n) = 1" - "llam_size (lApp t1 t2) = 1 + (llam_size t1) + (llam_size t2)" - "llam_size (lLam t) = 1 + (llam_size t)" + llam_size :: "llam \ nat" +where + "llam_size (lPar a) = 1" +| "llam_size (lVar n) = 1" +| "llam_size (lApp t1 t2) = 1 + (llam_size t1) + (llam_size t2)" +| "llam_size (lLam t) = 1 + (llam_size t)" by (rule TrueI)+ function diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Nominal/Examples/SN.thy --- a/src/HOL/Nominal/Examples/SN.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Nominal/Examples/SN.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,3 @@ -(* $Id$ *) - theory SN imports Lam_Funs begin @@ -228,12 +226,11 @@ section {* Candidates *} -consts +nominal_primrec RED :: "ty \ lam set" - -nominal_primrec +where "RED (TVar X) = {t. SN(t)}" - "RED (\\\) = {t. \u. (u\RED \ \ (App t u)\RED \)}" +| "RED (\\\) = {t. \u. 
(u\RED \ \ (App t u)\RED \)}" by (rule TrueI)+ text {* neutral terms *} @@ -248,13 +245,12 @@ where fst[intro!]: "(App t s) \ t" -consts +nominal_primrec fst_app_aux::"lam\lam option" - -nominal_primrec +where "fst_app_aux (Var a) = None" - "fst_app_aux (App t1 t2) = Some t1" - "fst_app_aux (Lam [x].t) = None" +| "fst_app_aux (App t1 t2) = Some t1" +| "fst_app_aux (Lam [x].t) = None" apply(finite_guess)+ apply(rule TrueI)+ apply(simp add: fresh_none) diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Nominal/Examples/SOS.thy --- a/src/HOL/Nominal/Examples/SOS.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Nominal/Examples/SOS.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,4 +1,3 @@ -(* "$Id$" *) (* *) (* Formalisation of some typical SOS-proofs. *) (* *) @@ -62,13 +61,12 @@ (* parallel substitution *) -consts +nominal_primrec psubst :: "(name\trm) list \ trm \ trm" ("_<_>" [95,95] 105) - -nominal_primrec +where "\<(Var x)> = (lookup \ x)" - "\<(App e\<^isub>1 e\<^isub>2)> = App (\1>) (\2>)" - "x\\ \ \<(Lam [x].e)> = Lam [x].(\)" +| "\<(App e\<^isub>1 e\<^isub>2)> = App (\1>) (\2>)" +| "x\\ \ \<(Lam [x].e)> = Lam [x].(\)" apply(finite_guess)+ apply(rule TrueI)+ apply(simp add: abs_fresh)+ @@ -349,12 +347,12 @@ using h by (induct) (auto) (* Valuation *) -consts - V :: "ty \ trm set" nominal_primrec + V :: "ty \ trm set" +where "V (TVar x) = {e. val e}" - "V (T\<^isub>1 \ T\<^isub>2) = {Lam [x].e | x e. \ v \ (V T\<^isub>1). \ v'. e[x::=v] \ v' \ v' \ V T\<^isub>2}" +| "V (T\<^isub>1 \ T\<^isub>2) = {Lam [x].e | x e. \ v \ (V T\<^isub>1). \ v'. e[x::=v] \ v' \ v' \ V T\<^isub>2}" by (rule TrueI)+ lemma V_eqvt: diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Nominal/Examples/Standardization.thy --- a/src/HOL/Nominal/Examples/Standardization.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Nominal/Examples/Standardization.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ (* Title: HOL/Nominal/Examples/Standardization.thy - ID: $Id$ Author: Stefan Berghofer and Tobias Nipkow Copyright 2005, 2008 TU Muenchen *) @@ -24,24 +23,30 @@ | App "lam" "lam" (infixl "\" 200) | Lam "\name\lam" ("Lam [_]._" [0, 10] 10) -instance lam :: size .. +instantiation lam :: size +begin -nominal_primrec +nominal_primrec size_lam +where "size (Var n) = 0" - "size (t \ u) = size t + size u + 1" - "size (Lam [x].t) = size t + 1" +| "size (t \ u) = size t + size u + 1" +| "size (Lam [x].t) = size t + 1" apply finite_guess+ apply (rule TrueI)+ apply (simp add: fresh_nat) apply fresh_guess+ done -consts subst :: "lam \ name \ lam \ lam" ("_[_::=_]" [300, 0, 0] 300) +instance .. 
+ +end nominal_primrec + subst :: "lam \ name \ lam \ lam" ("_[_::=_]" [300, 0, 0] 300) +where subst_Var: "(Var x)[y::=s] = (if x=y then s else (Var x))" - subst_App: "(t\<^isub>1 \ t\<^isub>2)[y::=s] = t\<^isub>1[y::=s] \ t\<^isub>2[y::=s]" - subst_Lam: "x \ (y, s) \ (Lam [x].t)[y::=s] = (Lam [x].(t[y::=s]))" +| subst_App: "(t\<^isub>1 \ t\<^isub>2)[y::=s] = t\<^isub>1[y::=s] \ t\<^isub>2[y::=s]" +| subst_Lam: "x \ (y, s) \ (Lam [x].t)[y::=s] = (Lam [x].(t[y::=s]))" apply(finite_guess)+ apply(rule TrueI)+ apply(simp add: abs_fresh) diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Nominal/Examples/Type_Preservation.thy --- a/src/HOL/Nominal/Examples/Type_Preservation.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Nominal/Examples/Type_Preservation.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,3 @@ -(* $Id$ *) - theory Type_Preservation imports Nominal begin @@ -21,13 +19,12 @@ text {* Capture-Avoiding Substitution *} -consts +nominal_primrec subst :: "lam \ name \ lam \ lam" ("_[_::=_]") - -nominal_primrec +where "(Var x)[y::=s] = (if x=y then s else (Var x))" - "(App t\<^isub>1 t\<^isub>2)[y::=s] = App (t\<^isub>1[y::=s]) (t\<^isub>2[y::=s])" - "x\(y,s) \ (Lam [x].t)[y::=s] = Lam [x].(t[y::=s])" +| "(App t\<^isub>1 t\<^isub>2)[y::=s] = App (t\<^isub>1[y::=s]) (t\<^isub>2[y::=s])" +| "x\(y,s) \ (Lam [x].t)[y::=s] = Lam [x].(t[y::=s])" apply(finite_guess)+ apply(rule TrueI)+ apply(simp add: abs_fresh) diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Nominal/Examples/W.thy --- a/src/HOL/Nominal/Examples/W.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Nominal/Examples/W.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,3 @@ -(* "$Id$" *) - theory W imports Nominal begin @@ -50,26 +48,68 @@ Ctxt = "(var\tyS) list" text {* free type variables *} -consts - ftv :: "'a \ tvar list" -primrec (ftv_of_prod) - "ftv (x,y) = (ftv x)@(ftv y)" +class ftv = type + + fixes ftv :: "'a \ tvar list" -defs (overloaded) +instantiation * :: (ftv, ftv) ftv +begin + +primrec ftv_prod +where + "ftv (x::'a::ftv, y::'b::ftv) = (ftv x)@(ftv y)" + +instance .. + +end + +instantiation tvar :: ftv +begin + +definition ftv_of_tvar[simp]: "ftv X \ [(X::tvar)]" + +instance .. + +end + +instantiation var :: ftv +begin + +definition ftv_of_var[simp]: "ftv (x::var) \ []" -primrec (ftv_of_list) +instance .. + +end + +instantiation list :: (ftv) ftv +begin + +primrec ftv_list +where "ftv [] = []" - "ftv (x#xs) = (ftv x)@(ftv xs)" +| "ftv (x#xs) = (ftv x)@(ftv xs)" + +instance .. + +end (* free type-variables of types *) -nominal_primrec (ftv_ty) + +instantiation ty :: ftv +begin + +nominal_primrec ftv_ty +where "ftv (TVar X) = [X]" - "ftv (T\<^isub>1\T\<^isub>2) = (ftv T\<^isub>1)@(ftv T\<^isub>2)" +| "ftv (T\<^isub>1\T\<^isub>2) = (ftv T\<^isub>1)@(ftv T\<^isub>2)" by (rule TrueI)+ +instance .. + +end + lemma ftv_ty_eqvt[eqvt]: fixes pi::"tvar prm" and T::"ty" @@ -77,9 +117,13 @@ by (nominal_induct T rule: ty.strong_induct) (perm_simp add: append_eqvt)+ -nominal_primrec (ftv_tyS) +instantiation tyS :: ftv +begin + +nominal_primrec ftv_tyS +where "ftv (Ty T) = ftv T" - "ftv (\[X].S) = (ftv S) - [X]" +| "ftv (\[X].S) = (ftv S) - [X]" apply(finite_guess add: ftv_ty_eqvt fs_tvar1)+ apply(rule TrueI)+ apply(rule difference_fresh) @@ -87,6 +131,10 @@ apply(fresh_guess add: ftv_ty_eqvt fs_tvar1)+ done +instance .. 
+ +end + lemma ftv_tyS_eqvt[eqvt]: fixes pi::"tvar prm" and S::"tyS" @@ -140,11 +188,11 @@ types Subst = "(tvar\ty) list" -consts - psubst :: "Subst \ 'a \ 'a" ("_<_>" [100,60] 120) +class psubst = + fixes psubst :: "Subst \ 'a \ 'a" ("_<_>" [100,60] 120) abbreviation - subst :: "'a \ tvar \ ty \ 'a" ("_[_::=_]" [100,100,100] 100) + subst :: "'a::psubst \ tvar \ ty \ 'a" ("_[_::=_]" [100,100,100] 100) where "smth[X::=T] \ ([(X,T)])" @@ -159,11 +207,19 @@ shows "pi\(lookup \ X) = lookup (pi\\) (pi\X)" by (induct \) (auto simp add: eqvts) -nominal_primrec (psubst_ty) +instantiation ty :: psubst +begin + +nominal_primrec psubst_ty +where "\ = lookup \ X" - "\1 \ T\<^isub>2> = (\1>) \ (\2>)" +| "\1 \ T\<^isub>2> = (\1>) \ (\2>)" by (rule TrueI)+ +instance .. + +end + lemma psubst_ty_eqvt[eqvt]: fixes pi1::"tvar prm" and \::"Subst" diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Nominal/Nominal.thy --- a/src/HOL/Nominal/Nominal.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Nominal/Nominal.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1262,19 +1262,21 @@ apply (simp add: pt_rev_pi [OF pt at]) done -lemma insert_eqvt: +lemma pt_insert_eqvt: + fixes pi::"'x prm" + and x::"'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" - shows "(pi::'x prm)\(insert (x::'a) X) = insert (pi\x) (pi\X)" + shows "(pi\(insert x X)) = insert (pi\x) (pi\X)" by (auto simp add: perm_set_eq [OF pt at]) -lemma set_eqvt: +lemma pt_set_eqvt: fixes pi :: "'x prm" and xs :: "'a list" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "pi\(set xs) = set (pi\xs)" -by (induct xs) (auto simp add: empty_eqvt insert_eqvt [OF pt at]) +by (induct xs) (auto simp add: empty_eqvt pt_insert_eqvt [OF pt at]) lemma supp_singleton: assumes pt: "pt TYPE('a) TYPE('x)" @@ -1568,10 +1570,10 @@ apply(simp add: pt_fresh_left_ineq[OF pta, OF ptb, OF at, OF cp]) apply(drule_tac x="pi\xa" in bspec) apply(simp add: pt_set_bij1[OF ptb, OF at]) -apply(simp add: set_eqvt [OF ptb at] pt_rev_pi[OF pt_list_inst[OF ptb], OF at]) +apply(simp add: pt_set_eqvt [OF ptb at] pt_rev_pi[OF pt_list_inst[OF ptb], OF at]) apply(simp add: pt_fresh_bij_ineq[OF pta, OF ptb, OF at, OF cp]) apply(drule_tac x="(rev pi)\xa" in bspec) -apply(simp add: pt_set_bij1[OF ptb, OF at] set_eqvt [OF ptb at]) +apply(simp add: pt_set_bij1[OF ptb, OF at] pt_set_eqvt [OF ptb at]) apply(simp add: pt_fresh_left_ineq[OF pta, OF ptb, OF at, OF cp]) done diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Nominal/nominal_atoms.ML --- a/src/HOL/Nominal/nominal_atoms.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Nominal/nominal_atoms.ML Tue Dec 30 11:10:01 2008 +0100 @@ -798,8 +798,8 @@ val pt_perm_supp_ineq = @{thm "Nominal.pt_perm_supp_ineq"}; val pt_perm_supp = @{thm "Nominal.pt_perm_supp"}; val subseteq_eqvt = @{thm "Nominal.pt_subseteq_eqvt"}; - val insert_eqvt = @{thm "Nominal.insert_eqvt"}; - val set_eqvt = @{thm "Nominal.set_eqvt"}; + val insert_eqvt = @{thm "Nominal.pt_insert_eqvt"}; + val set_eqvt = @{thm "Nominal.pt_set_eqvt"}; val perm_set_eq = @{thm "Nominal.perm_set_eq"}; (* Now we collect and instantiate some lemmas w.r.t. all atom *) diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Nominal/nominal_primrec.ML --- a/src/HOL/Nominal/nominal_primrec.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Nominal/nominal_primrec.ML Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ (* Title: HOL/Nominal/nominal_primrec.ML - ID: $Id$ Author: Stefan Berghofer, TU Muenchen and Norbert Voelker, FernUni Hagen Package for defining functions on nominal datatypes by primitive recursion. 
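The nominal_primrec.ML changes that follow rework the command from a theory-level one, which required a preceding consts declaration and a flat list of quoted equations, into a local-theory command that takes the function's type signature and a pipe-separated where clause directly, matching the Examples hunks above. As a minimal sketch of the new input format, borrowing the depth function from the Lam_Funs.thy hunk and writing the function arrow in Isabelle's ASCII notation \<Rightarrow>:

  nominal_primrec
    depth :: "lam \<Rightarrow> nat"
  where
    "depth (Var x) = 1"
  | "depth (App t1 t2) = (max (depth t1) (depth t2)) + 1"
  | "depth (Lam [a].t) = (depth t) + 1"
  apply(finite_guess)+
  apply(rule TrueI)+
  apply(simp add: fresh_nat)
  apply(fresh_guess)+
  done

The freshness and finiteness proof obligations stay the same; only the declaration of the constant and its equations moves into the command itself.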
@@ -8,14 +7,10 @@ signature NOMINAL_PRIMREC = sig - val add_primrec: string -> string list option -> string option -> - ((Binding.T * string) * Attrib.src list) list -> theory -> Proof.state - val add_primrec_unchecked: string -> string list option -> string option -> - ((Binding.T * string) * Attrib.src list) list -> theory -> Proof.state - val add_primrec_i: string -> term list option -> term option -> - ((Binding.T * term) * attribute list) list -> theory -> Proof.state - val add_primrec_unchecked_i: string -> term list option -> term option -> - ((Binding.T * term) * attribute list) list -> theory -> Proof.state + val add_primrec: term list option -> term option -> + (Binding.T * typ option * mixfix) list -> + (Binding.T * typ option * mixfix) list -> + (Attrib.binding * term) list -> local_theory -> Proof.state end; structure NominalPrimrec : NOMINAL_PRIMREC = @@ -26,23 +21,31 @@ exception RecError of string; fun primrec_err s = error ("Nominal primrec definition error:\n" ^ s); -fun primrec_eq_err thy s eq = - primrec_err (s ^ "\nin\n" ^ quote (Syntax.string_of_term_global thy eq)); +fun primrec_eq_err lthy s eq = + primrec_err (s ^ "\nin\n" ^ quote (Syntax.string_of_term lthy eq)); (* preprocessing of equations *) -fun process_eqn thy eq rec_fns = +fun unquantify t = let + val (vs, Ts) = split_list (strip_qnt_vars "all" t); + val body = strip_qnt_body "all" t; + val (vs', _) = Name.variants vs (Name.make_context (fold_aterms + (fn Free (v, _) => insert (op =) v | _ => I) body [])) + in (curry subst_bounds (map2 (curry Free) vs' Ts |> rev) body) end; + +fun process_eqn lthy is_fixed spec rec_fns = + let + val eq = unquantify spec; val (lhs, rhs) = - if null (term_vars eq) then - HOLogic.dest_eq (HOLogic.dest_Trueprop (Logic.strip_imp_concl eq)) - handle TERM _ => raise RecError "not a proper equation" - else raise RecError "illegal schematic variable(s)"; + HOLogic.dest_eq (HOLogic.dest_Trueprop (Logic.strip_imp_concl eq)) + handle TERM _ => raise RecError "not a proper equation"; val (recfun, args) = strip_comb lhs; - val fnameT = dest_Const recfun handle TERM _ => - raise RecError "function is not declared as constant in theory"; + val fname = case recfun of Free (v, _) => if is_fixed v then v + else raise RecError "illegal head of function equation" + | _ => raise RecError "illegal head of function equation"; val (ls', rest) = take_prefix is_Free args; val (middle, rs') = take_suffix is_Free rest; @@ -68,26 +71,28 @@ else (check_vars "repeated variable names in pattern: " (duplicates (op =) lfrees); check_vars "extra variables on rhs: " - (map dest_Free (term_frees rhs) \\ lfrees); - case AList.lookup (op =) rec_fns fnameT of + (map dest_Free (term_frees rhs) |> subtract (op =) lfrees + |> filter_out (is_fixed o fst)); + case AList.lookup (op =) rec_fns fname of NONE => - (fnameT, (tname, rpos, [(cname, (ls, cargs, rs, rhs, eq))]))::rec_fns + (fname, (tname, rpos, [(cname, (ls, cargs, rs, rhs, eq))]))::rec_fns | SOME (_, rpos', eqns) => if AList.defined (op =) eqns cname then raise RecError "constructor already occurred as pattern" else if rpos <> rpos' then raise RecError "position of recursive argument inconsistent" else - AList.update (op =) (fnameT, (tname, rpos, (cname, (ls, cargs, rs, rhs, eq))::eqns)) + AList.update (op =) + (fname, (tname, rpos, (cname, (ls, cargs, rs, rhs, eq))::eqns)) rec_fns) end - handle RecError s => primrec_eq_err thy s eq; + handle RecError s => primrec_eq_err lthy s spec; val param_err = "Parameters must be the same for all recursive functions"; 
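With this rework, the head of each recursion equation is no longer looked up as a declared constant; it must be one of the variables fixed by the command itself. The corresponding check from the new process_eqn, lightly reformatted here for readability, is roughly:

  (* the equation head must be a variable fixed by the command, not a constant *)
  val fname =
    (case recfun of
      Free (v, _) =>
        if is_fixed v then v
        else raise RecError "illegal head of function equation"
    | _ => raise RecError "illegal head of function equation");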
-fun process_fun thy descr rec_eqns (i, fnameT as (fname, _)) (fnameTs, fnss) = +fun process_fun lthy descr eqns (i, fname) (fnames, fnss) = let - val (_, (tname, _, constrs)) = List.nth (descr, i); + val (_, (tname, _, constrs)) = nth descr i; (* substitute "fname ls x rs" by "y" for (x, (_, y)) in subs *) @@ -100,16 +105,17 @@ let val (f, ts) = strip_comb t; in - if is_Const f andalso dest_Const f mem map fst rec_eqns then + if is_Free f + andalso member (fn ((v, _), (w, _)) => v = w) eqns (dest_Free f) then let - val fnameT' as (fname', _) = dest_Const f; - val (_, rpos, eqns) = the (AList.lookup (op =) rec_eqns fnameT'); - val ls = Library.take (rpos, ts); - val rest = Library.drop (rpos, ts); - val (x', rs) = (hd rest, tl rest) - handle Empty => raise RecError ("not enough arguments\ - \ in recursive application\nof function " ^ quote fname' ^ " on rhs"); - val rs' = (case eqns of + val (fname', _) = dest_Free f; + val (_, rpos, eqns') = the (AList.lookup (op =) eqns fname'); + val (ls, rs'') = chop rpos ts + val (x', rs) = case rs'' of + x' :: rs => (x', rs) + | [] => raise RecError ("not enough arguments in recursive application\n" + ^ "of function " ^ quote fname' ^ " on rhs"); + val rs' = (case eqns' of (_, (ls', _, rs', _, _)) :: _ => let val (rs1, rs2) = chop (length rs') rs in @@ -126,7 +132,7 @@ | SOME (i', y) => fs |> fold_map (subst subs) (xs @ rs') - ||> process_fun thy descr rec_eqns (i', fnameT') + ||> process_fun lthy descr eqns (i', fname') |-> (fn ts' => pair (list_comb (y, ts'))) end else @@ -138,41 +144,39 @@ (* translate rec equations into function arguments suitable for rec comb *) - fun trans eqns (cname, cargs) (fnameTs', fnss', fns) = + fun trans eqns (cname, cargs) (fnames', fnss', fns) = (case AList.lookup (op =) eqns cname of NONE => (warning ("No equation for constructor " ^ quote cname ^ "\nin definition of function " ^ quote fname); - (fnameTs', fnss', (Const (@{const_name undefined}, dummyT))::fns)) + (fnames', fnss', (Const (@{const_name undefined}, dummyT))::fns)) | SOME (ls, cargs', rs, rhs, eq) => let val recs = filter (is_rec_type o snd) (cargs' ~~ cargs); val rargs = map fst recs; - val subs = map (rpair dummyT o fst) + val subs = map (rpair dummyT o fst) (rev (rename_wrt_term rhs rargs)); - val (rhs', (fnameTs'', fnss'')) = - (subst (map (fn ((x, y), z) => - (Free x, (body_index y, Free z))) - (recs ~~ subs)) rhs (fnameTs', fnss')) - handle RecError s => primrec_eq_err thy s eq - in (fnameTs'', fnss'', + val (rhs', (fnames'', fnss'')) = subst (map2 (fn (x, y) => fn z => + (Free x, (body_index y, Free z))) recs subs) rhs (fnames', fnss') + handle RecError s => primrec_eq_err lthy s eq + in (fnames'', fnss'', (list_abs_free (cargs' @ subs, rhs'))::fns) end) - in (case AList.lookup (op =) fnameTs i of + in (case AList.lookup (op =) fnames i of NONE => - if exists (equal fnameT o snd) fnameTs then + if exists (fn (_, v) => fname = v) fnames then raise RecError ("inconsistent functions for datatype " ^ quote tname) else let - val SOME (_, _, eqns as (_, (ls, _, rs, _, _)) :: _) = - AList.lookup (op =) rec_eqns fnameT; - val (fnameTs', fnss', fns) = fold_rev (trans eqns) constrs - ((i, fnameT)::fnameTs, fnss, []) + val SOME (_, _, eqns' as (_, (ls, _, rs, _, _)) :: _) = + AList.lookup (op =) eqns fname; + val (fnames', fnss', fns) = fold_rev (trans eqns') constrs + ((i, fname)::fnames, fnss, []) in - (fnameTs', (i, (fname, ls, rs, fns))::fnss') + (fnames', (i, (fname, ls, rs, fns))::fnss') end - | SOME fnameT' => - if fnameT = fnameT' then (fnameTs, 
fnss) + | SOME fname' => + if fname = fname' then (fnames, fnss) else raise RecError ("inconsistent functions for datatype " ^ quote tname)) end; @@ -195,18 +199,21 @@ (* make definition *) -fun make_def thy fs (fname, ls, rs, rec_name, tname) = +fun make_def ctxt fixes fs (fname, ls, rs, rec_name, tname) = let val used = map fst (fold Term.add_frees fs []); val x = (Name.variant used "x", dummyT); val frees = ls @ x :: rs; - val rhs = list_abs_free (frees, + val raw_rhs = list_abs_free (frees, list_comb (Const (rec_name, dummyT), fs @ [Free x])) - val def_name = Sign.base_name fname ^ "_" ^ Sign.base_name tname ^ "_def"; - val def_prop as _ $ _ $ t = - singleton (Syntax.check_terms (ProofContext.init thy)) - (Logic.mk_equals (Const (fname, dummyT), rhs)); - in ((def_name, def_prop), subst_bounds (rev (map Free frees), strip_abs_body t)) end; + val def_name = Thm.def_name (Sign.base_name fname); + val rhs = singleton (Syntax.check_terms ctxt) raw_rhs; + val SOME var = get_first (fn ((b, _), mx) => + if Binding.base_name b = fname then SOME (b, mx) else NONE) fixes; + in + ((var, ((Binding.name def_name, []), rhs)), + subst_bounds (rev (map Free frees), strip_abs_body rhs)) + end; (* find datatypes which contain all datatypes in tnames' *) @@ -227,27 +234,36 @@ local -fun gen_primrec_i note def alt_name invs fctxt eqns_atts thy = +fun prepare_spec prep_spec ctxt raw_fixes raw_spec = let - val (raw_eqns, atts) = split_list eqns_atts; - val eqns = map (apfst Binding.base_name) raw_eqns; - val dt_info = NominalPackage.get_nominal_datatypes thy; - val rec_eqns = fold_rev (process_eqn thy o snd) eqns []; + val ((fixes, spec), _) = prep_spec + raw_fixes (map (single o apsnd single) raw_spec) ctxt + in (fixes, map (apsnd the_single) spec) end; + +fun gen_primrec set_group prep_spec prep_term invs fctxt raw_fixes raw_params raw_spec lthy = + let + val (fixes', spec) = prepare_spec prep_spec lthy (raw_fixes @ raw_params) raw_spec; + val fixes = List.take (fixes', length raw_fixes); + val (names_atts, spec') = split_list spec; + val eqns' = map unquantify spec' + val eqns = fold_rev (process_eqn lthy (fn v => Variable.is_fixed lthy v + orelse exists (fn ((w, _), _) => v = Binding.base_name w) fixes)) spec' []; + val dt_info = NominalPackage.get_nominal_datatypes (ProofContext.theory_of lthy); val lsrs :: lsrss = maps (fn (_, (_, _, eqns)) => - map (fn (_, (ls, _, rs, _, _)) => ls @ rs) eqns) rec_eqns + map (fn (_, (ls, _, rs, _, _)) => ls @ rs) eqns) eqns val _ = (if forall (curry eq_set lsrs) lsrss andalso forall (fn (_, (_, _, (_, (ls, _, rs, _, _)) :: eqns)) => forall (fn (_, (ls', _, rs', _, _)) => ls = ls' andalso rs = rs') eqns - | _ => true) rec_eqns + | _ => true) eqns then () else primrec_err param_err); - val tnames = distinct (op =) (map (#1 o snd) rec_eqns); + val tnames = distinct (op =) (map (#1 o snd) eqns); val dts = find_dts dt_info tnames tnames; val main_fns = map (fn (tname, {index, ...}) => (index, - (fst o the o find_first (fn f => (#1 o snd) f = tname)) rec_eqns)) + (fst o the o find_first (fn (_, x) => #1 x = tname)) eqns)) dts; val {descr, rec_names, rec_rewrites, ...} = if null dts then @@ -256,32 +272,32 @@ val descr = map (fn (i, (tname, args, constrs)) => (i, (tname, args, map (fn (cname, cargs) => (cname, fold (fn (dTs, dT) => fn dTs' => dTs' @ dTs @ [dT]) cargs [])) constrs))) descr; - val (fnameTs, fnss) = - fold_rev (process_fun thy descr rec_eqns) main_fns ([], []); + val (fnames, fnss) = fold_rev (process_fun lthy descr eqns) main_fns ([], []); val (fs, defs) = 
fold_rev (get_fns fnss) (descr ~~ rec_names) ([], []); - val defs' = map (make_def thy fs) defs; - val nameTs1 = map snd fnameTs; - val nameTs2 = map fst rec_eqns; - val _ = if gen_eq_set (op =) (nameTs1, nameTs2) then () - else primrec_err ("functions " ^ commas_quote (map fst nameTs2) ^ - "\nare not mutually recursive"); - val primrec_name = - if alt_name = "" then (space_implode "_" (map (Sign.base_name o #1) defs)) else alt_name; - val (defs_thms', thy') = - thy - |> Sign.add_path primrec_name - |> fold_map def (map (fn ((name, t), _) => ((name, []), t)) defs'); - val cert = cterm_of thy'; + val defs' = map (make_def lthy fixes fs) defs; + val names1 = map snd fnames; + val names2 = map fst eqns; + val _ = if gen_eq_set (op =) (names1, names2) then () + else primrec_err ("functions " ^ commas_quote names2 ^ + "\nare not mutually recursive"); + val (defs_thms, lthy') = lthy |> + set_group ? LocalTheory.set_group (serial_string ()) |> + fold_map (apfst (snd o snd) oo + LocalTheory.define Thm.definitionK o fst) defs'; + val qualify = Binding.qualify + (space_implode "_" (map (Sign.base_name o #1) defs)); + val names_atts' = map (apfst qualify) names_atts; + val cert = cterm_of (ProofContext.theory_of lthy'); fun mk_idx eq = let - val Const c = head_of (fst (HOLogic.dest_eq (HOLogic.dest_Trueprop + val Free (name, _) = head_of (fst (HOLogic.dest_eq (HOLogic.dest_Trueprop (Logic.strip_imp_concl eq)))); - val SOME i = AList.lookup op = (map swap fnameTs) c; + val SOME i = AList.lookup op = (map swap fnames) name; val SOME (_, _, constrs) = AList.lookup op = descr i; - val SOME (_, _, eqns) = AList.lookup op = rec_eqns c; + val SOME (_, _, eqns'') = AList.lookup op = eqns name; val SOME (cname, (_, cargs, _, _, _)) = find_first - (fn (_, (_, _, _, _, eq')) => eq = eq') eqns + (fn (_, (_, _, _, _, eq')) => eq = eq') eqns'' in (i, find_index (fn (cname', _) => cname = cname') constrs, cargs) end; val rec_rewritess = @@ -296,19 +312,15 @@ curry (List.take o swap) (length fvars) |> map cert; val invs' = (case invs of NONE => map (fn (i, _) => - let - val SOME (_, T) = AList.lookup op = fnameTs i - val (Ts, U) = strip_type T - in - Abs ("x", List.drop (Ts, length lsrs + 1) ---> U, HOLogic.true_const) - end) descr - | SOME invs' => invs'); + Abs ("x", fastype_of (snd (nth defs' i)), HOLogic.true_const)) descr + | SOME invs' => map (prep_term lthy') invs'); val inst = (map cert fvars ~~ cfs) @ (map (cert o Var) pvars ~~ map cert invs') @ (case ctxtvars of - [ctxtvar] => [(cert (Var ctxtvar), cert (the_default HOLogic.unit fctxt))] + [ctxtvar] => [(cert (Var ctxtvar), + cert (the_default HOLogic.unit (Option.map (prep_term lthy') fctxt)))] | _ => []); - val rec_rewrites' = map (fn (_, eq) => + val rec_rewrites' = map (fn eq => let val (i, j, cargs) = mk_idx eq val th = nth (nth rec_rewritess i) j; @@ -317,8 +329,8 @@ strip_comb |> snd in (cargs, Logic.strip_imp_prems eq, Drule.cterm_instantiate (inst @ - (map (cterm_of thy') cargs' ~~ map (cterm_of thy' o Free) cargs)) th) - end) eqns; + (map cert cargs' ~~ map (cert o Free) cargs)) th) + end) eqns'; val prems = foldr1 (common_prefix op aconv) (map (prems_of o #3) rec_rewrites'); val cprems = map cert prems; @@ -346,64 +358,37 @@ val rule = implies_intr_list rule_prems (Conjunction.intr_balanced (map mk_eqn (rec_rewrites' ~~ asmss))); - val goals = map (fn ((cargs, _, _), (_, eqn)) => - (list_all_free (cargs, eqn), [])) (rec_rewrites' ~~ eqns); + val goals = map (fn ((cargs, _, _), eqn) => + (list_all_free (cargs, eqn), [])) (rec_rewrites' ~~ eqns'); 
in - thy' |> - ProofContext.init |> + lthy' |> + Variable.add_fixes (map fst lsrs) |> snd |> Proof.theorem_i NONE - (fn thss => ProofContext.theory (fn thy => + (fn thss => fn goal_ctxt => let - val simps = map standard (flat thss); - val (simps', thy') = - fold_map note ((map fst eqns ~~ atts) ~~ map single simps) thy; - val simps'' = maps snd simps' + val simps = ProofContext.export goal_ctxt lthy' (flat thss); + val (simps', lthy'') = fold_map (LocalTheory.note Thm.theoremK) + (names_atts' ~~ map single simps) lthy' in - thy' - |> note (("simps", [Simplifier.simp_add]), simps'') + lthy'' + |> LocalTheory.note Thm.theoremK ((qualify (Binding.name "simps"), + [Attrib.internal (K Simplifier.simp_add)]), maps snd simps') |> snd - |> Sign.parent_path - end)) + end) [goals] |> Proof.apply (Method.Basic (fn _ => Method.RAW_METHOD (fn _ => - rewrite_goals_tac (map snd defs_thms') THEN + rewrite_goals_tac defs_thms THEN compose_tac (false, rule, length rule_prems) 1), Position.none)) |> Seq.hd end; -fun gen_primrec note def alt_name invs fctxt eqns thy = - let - val ((names, strings), srcss) = apfst split_list (split_list eqns); - val atts = map (map (Attrib.attribute thy)) srcss; - val eqn_ts = map (fn s => Syntax.read_prop_global thy s - handle ERROR msg => cat_error msg ("The error(s) above occurred for " ^ s)) strings; - val rec_ts = map (fn eq => head_of (fst (HOLogic.dest_eq - (HOLogic.dest_Trueprop (Logic.strip_imp_concl eq)))) - handle TERM _ => primrec_eq_err thy "not a proper equation" eq) eqn_ts; - val (_, eqn_ts') = OldPrimrecPackage.unify_consts thy rec_ts eqn_ts - in - gen_primrec_i note def alt_name - (Option.map (map (Syntax.read_term_global thy)) invs) - (Option.map (Syntax.read_term_global thy) fctxt) - (names ~~ eqn_ts' ~~ atts) thy - end; - -fun thy_note ((name, atts), thms) = - PureThy.add_thmss [((name, thms), atts)] #-> (fn [thms] => pair (name, thms)); -fun thy_def false ((name, atts), t) = - PureThy.add_defs false [((name, t), atts)] #-> (fn [thm] => pair (name, thm)) - | thy_def true ((name, atts), t) = - PureThy.add_defs_unchecked false [((name, t), atts)] #-> (fn [thm] => pair (name, thm)); - in -val add_primrec = gen_primrec thy_note (thy_def false); -val add_primrec_unchecked = gen_primrec thy_note (thy_def true); -val add_primrec_i = gen_primrec_i thy_note (thy_def false); -val add_primrec_unchecked_i = gen_primrec_i thy_note (thy_def true); +val add_primrec = gen_primrec false Specification.check_specification (K I); +val add_primrec_cmd = gen_primrec true Specification.read_specification Syntax.read_term; -end; (*local*) +end; (* outer syntax *) @@ -419,25 +404,26 @@ val parser2 = (invariant -- P.$$$ ":") |-- (Scan.repeat1 (unless_flag P.term) >> SOME) -- Scan.optional parser1 NONE || (parser1 >> pair NONE); -val parser3 = - unless_flag P.name -- Scan.optional parser2 (NONE, NONE) || - (parser2 >> pair ""); -val parser4 = - (P.$$$ "unchecked" >> K true) -- Scan.optional parser3 ("", (NONE, NONE)) || - (parser3 >> pair false); val options = Scan.optional (P.$$$ "(" |-- P.!!! - (parser4 --| P.$$$ ")")) (false, ("", (NONE, NONE))); + (parser2 --| P.$$$ ")")) (NONE, NONE); -val primrec_decl = - options -- Scan.repeat1 (SpecParse.opt_thm_name ":" -- P.prop); +fun pipe_error t = P.!!! 
(Scan.fail_with (K + (cat_lines ["Equations must be separated by " ^ quote "|", quote t]))); + +val statement = SpecParse.opt_thm_name ":" -- P.prop --| Scan.ahead + ((P.term :-- pipe_error) || Scan.succeed ("","")); + +val statements = P.enum1 "|" statement; + +val primrec_decl = P.opt_target -- options -- + P.fixes -- P.for_fixes --| P.$$$ "where" -- statements; val _ = OuterSyntax.command "nominal_primrec" "define primitive recursive functions on nominal datatypes" K.thy_goal - (primrec_decl >> (fn ((unchecked, (alt_name, (invs, fctxt))), eqns) => - Toplevel.print o Toplevel.theory_to_proof - ((if unchecked then add_primrec_unchecked else add_primrec) alt_name invs fctxt - (map P.triple_swap eqns)))); + (primrec_decl >> (fn ((((opt_target, (invs, fctxt)), raw_fixes), raw_params), raw_spec) => + Toplevel.print o Toplevel.local_theory_to_proof opt_target + (add_primrec_cmd invs fctxt raw_fixes raw_params raw_spec))); end; diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/PReal.thy --- a/src/HOL/PReal.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/PReal.thy Tue Dec 30 11:10:01 2008 +0100 @@ -9,7 +9,7 @@ header {* Positive real numbers *} theory PReal -imports Rational "~~/src/HOL/Library/Dense_Linear_Order" +imports Rational Dense_Linear_Order begin text{*Could be generalized and moved to @{text Ring_and_Field}*} diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Real.thy --- a/src/HOL/Real.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Real.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,5 @@ theory Real -imports "~~/src/HOL/Real/RealVector" +imports RComplete RealVector begin end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Real/HahnBanach/Bounds.thy --- a/src/HOL/Real/HahnBanach/Bounds.thy Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,82 +0,0 @@ -(* Title: HOL/Real/HahnBanach/Bounds.thy - Author: Gertrud Bauer, TU Munich -*) - -header {* Bounds *} - -theory Bounds -imports Main ContNotDenum -begin - -locale lub = - fixes A and x - assumes least [intro?]: "(\a. a \ A \ a \ b) \ x \ b" - and upper [intro?]: "a \ A \ a \ x" - -lemmas [elim?] = lub.least lub.upper - -definition - the_lub :: "'a::order set \ 'a" where - "the_lub A = The (lub A)" - -notation (xsymbols) - the_lub ("\_" [90] 90) - -lemma the_lub_equality [elim?]: - assumes "lub A x" - shows "\A = (x::'a::order)" -proof - - interpret lub A x by fact - show ?thesis - proof (unfold the_lub_def) - from `lub A x` show "The (lub A) = x" - proof - fix x' assume lub': "lub A x'" - show "x' = x" - proof (rule order_antisym) - from lub' show "x' \ x" - proof - fix a assume "a \ A" - then show "a \ x" .. - qed - show "x \ x'" - proof - fix a assume "a \ A" - with lub' show "a \ x'" .. - qed - qed - qed - qed -qed - -lemma the_lubI_ex: - assumes ex: "\x. lub A x" - shows "lub A (\A)" -proof - - from ex obtain x where x: "lub A x" .. - also from x have [symmetric]: "\A = x" .. - finally show ?thesis . -qed - -lemma lub_compat: "lub A x = isLub UNIV A x" -proof - - have "isUb UNIV A = (\x. A *<= x \ x \ UNIV)" - by (rule ext) (simp only: isUb_def) - then show ?thesis - by (simp only: lub_def isLub_def leastP_def setge_def setle_def) blast -qed - -lemma real_complete: - fixes A :: "real set" - assumes nonempty: "\a. a \ A" - and ex_upper: "\y. \a \ A. a \ y" - shows "\x. lub A x" -proof - - from ex_upper have "\y. isUb UNIV A y" - unfolding isUb_def setle_def by blast - with nonempty have "\x. 
isLub UNIV A x" - by (rule reals_complete) - then show ?thesis by (simp only: lub_compat) -qed - -end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Real/HahnBanach/FunctionNorm.thy --- a/src/HOL/Real/HahnBanach/FunctionNorm.thy Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,278 +0,0 @@ -(* Title: HOL/Real/HahnBanach/FunctionNorm.thy - Author: Gertrud Bauer, TU Munich -*) - -header {* The norm of a function *} - -theory FunctionNorm -imports NormedSpace FunctionOrder -begin - -subsection {* Continuous linear forms*} - -text {* - A linear form @{text f} on a normed vector space @{text "(V, \\\)"} - is \emph{continuous}, iff it is bounded, i.e. - \begin{center} - @{text "\c \ R. \x \ V. \f x\ \ c \ \x\"} - \end{center} - In our application no other functions than linear forms are - considered, so we can define continuous linear forms as bounded - linear forms: -*} - -locale continuous = var_V + norm_syntax + linearform + - assumes bounded: "\c. \x \ V. \f x\ \ c * \x\" - -declare continuous.intro [intro?] continuous_axioms.intro [intro?] - -lemma continuousI [intro]: - fixes norm :: "_ \ real" ("\_\") - assumes "linearform V f" - assumes r: "\x. x \ V \ \f x\ \ c * \x\" - shows "continuous V norm f" -proof - show "linearform V f" by fact - from r have "\c. \x\V. \f x\ \ c * \x\" by blast - then show "continuous_axioms V norm f" .. -qed - - -subsection {* The norm of a linear form *} - -text {* - The least real number @{text c} for which holds - \begin{center} - @{text "\x \ V. \f x\ \ c \ \x\"} - \end{center} - is called the \emph{norm} of @{text f}. - - For non-trivial vector spaces @{text "V \ {0}"} the norm can be - defined as - \begin{center} - @{text "\f\ = \x \ 0. \f x\ / \x\"} - \end{center} - - For the case @{text "V = {0}"} the supremum would be taken from an - empty set. Since @{text \} is unbounded, there would be no supremum. - To avoid this situation it must be guaranteed that there is an - element in this set. This element must be @{text "{} \ 0"} so that - @{text fn_norm} has the norm properties. Furthermore it does not - have to change the norm in all other cases, so it must be @{text 0}, - as all other elements are @{text "{} \ 0"}. - - Thus we define the set @{text B} where the supremum is taken from as - follows: - \begin{center} - @{text "{0} \ {\f x\ / \x\. x \ 0 \ x \ F}"} - \end{center} - - @{text fn_norm} is equal to the supremum of @{text B}, if the - supremum exists (otherwise it is undefined). -*} - -locale fn_norm = norm_syntax + - fixes B defines "B V f \ {0} \ {\f x\ / \x\ | x. x \ 0 \ x \ V}" - fixes fn_norm ("\_\\_" [0, 1000] 999) - defines "\f\\V \ \(B V f)" - -locale normed_vectorspace_with_fn_norm = normed_vectorspace + fn_norm - -lemma (in fn_norm) B_not_empty [intro]: "0 \ B V f" - by (simp add: B_def) - -text {* - The following lemma states that every continuous linear form on a - normed space @{text "(V, \\\)"} has a function norm. -*} - -lemma (in normed_vectorspace_with_fn_norm) fn_norm_works: - assumes "continuous V norm f" - shows "lub (B V f) (\f\\V)" -proof - - interpret continuous V norm f by fact - txt {* The existence of the supremum is shown using the - completeness of the reals. Completeness means, that every - non-empty bounded set of reals has a supremum. *} - have "\a. lub (B V f) a" - proof (rule real_complete) - txt {* First we have to show that @{text B} is non-empty: *} - have "0 \ B V f" .. - then show "\x. x \ B V f" .. - - txt {* Then we have to show that @{text B} is bounded: *} - show "\c. 
\y \ B V f. y \ c" - proof - - txt {* We know that @{text f} is bounded by some value @{text c}. *} - from bounded obtain c where c: "\x \ V. \f x\ \ c * \x\" .. - - txt {* To prove the thesis, we have to show that there is some - @{text b}, such that @{text "y \ b"} for all @{text "y \ - B"}. Due to the definition of @{text B} there are two cases. *} - - def b \ "max c 0" - have "\y \ B V f. y \ b" - proof - fix y assume y: "y \ B V f" - show "y \ b" - proof cases - assume "y = 0" - then show ?thesis unfolding b_def by arith - next - txt {* The second case is @{text "y = \f x\ / \x\"} for some - @{text "x \ V"} with @{text "x \ 0"}. *} - assume "y \ 0" - with y obtain x where y_rep: "y = \f x\ * inverse \x\" - and x: "x \ V" and neq: "x \ 0" - by (auto simp add: B_def real_divide_def) - from x neq have gt: "0 < \x\" .. - - txt {* The thesis follows by a short calculation using the - fact that @{text f} is bounded. *} - - note y_rep - also have "\f x\ * inverse \x\ \ (c * \x\) * inverse \x\" - proof (rule mult_right_mono) - from c x show "\f x\ \ c * \x\" .. - from gt have "0 < inverse \x\" - by (rule positive_imp_inverse_positive) - then show "0 \ inverse \x\" by (rule order_less_imp_le) - qed - also have "\ = c * (\x\ * inverse \x\)" - by (rule real_mult_assoc) - also - from gt have "\x\ \ 0" by simp - then have "\x\ * inverse \x\ = 1" by simp - also have "c * 1 \ b" by (simp add: b_def le_maxI1) - finally show "y \ b" . - qed - qed - then show ?thesis .. - qed - qed - then show ?thesis unfolding fn_norm_def by (rule the_lubI_ex) -qed - -lemma (in normed_vectorspace_with_fn_norm) fn_norm_ub [iff?]: - assumes "continuous V norm f" - assumes b: "b \ B V f" - shows "b \ \f\\V" -proof - - interpret continuous V norm f by fact - have "lub (B V f) (\f\\V)" - using `continuous V norm f` by (rule fn_norm_works) - from this and b show ?thesis .. -qed - -lemma (in normed_vectorspace_with_fn_norm) fn_norm_leastB: - assumes "continuous V norm f" - assumes b: "\b. b \ B V f \ b \ y" - shows "\f\\V \ y" -proof - - interpret continuous V norm f by fact - have "lub (B V f) (\f\\V)" - using `continuous V norm f` by (rule fn_norm_works) - from this and b show ?thesis .. -qed - -text {* The norm of a continuous function is always @{text "\ 0"}. *} - -lemma (in normed_vectorspace_with_fn_norm) fn_norm_ge_zero [iff]: - assumes "continuous V norm f" - shows "0 \ \f\\V" -proof - - interpret continuous V norm f by fact - txt {* The function norm is defined as the supremum of @{text B}. - So it is @{text "\ 0"} if all elements in @{text B} are @{text "\ - 0"}, provided the supremum exists and @{text B} is not empty. *} - have "lub (B V f) (\f\\V)" - using `continuous V norm f` by (rule fn_norm_works) - moreover have "0 \ B V f" .. - ultimately show ?thesis .. -qed - -text {* - \medskip The fundamental property of function norms is: - \begin{center} - @{text "\f x\ \ \f\ \ \x\"} - \end{center} -*} - -lemma (in normed_vectorspace_with_fn_norm) fn_norm_le_cong: - assumes "continuous V norm f" "linearform V f" - assumes x: "x \ V" - shows "\f x\ \ \f\\V * \x\" -proof - - interpret continuous V norm f by fact - interpret linearform V f . - show ?thesis - proof cases - assume "x = 0" - then have "\f x\ = \f 0\" by simp - also have "f 0 = 0" by rule unfold_locales - also have "\\\ = 0" by simp - also have a: "0 \ \f\\V" - using `continuous V norm f` by (rule fn_norm_ge_zero) - from x have "0 \ norm x" .. - with a have "0 \ \f\\V * \x\" by (simp add: zero_le_mult_iff) - finally show "\f x\ \ \f\\V * \x\" . 
- next - assume "x \ 0" - with x have neq: "\x\ \ 0" by simp - then have "\f x\ = (\f x\ * inverse \x\) * \x\" by simp - also have "\ \ \f\\V * \x\" - proof (rule mult_right_mono) - from x show "0 \ \x\" .. - from x and neq have "\f x\ * inverse \x\ \ B V f" - by (auto simp add: B_def real_divide_def) - with `continuous V norm f` show "\f x\ * inverse \x\ \ \f\\V" - by (rule fn_norm_ub) - qed - finally show ?thesis . - qed -qed - -text {* - \medskip The function norm is the least positive real number for - which the following inequation holds: - \begin{center} - @{text "\f x\ \ c \ \x\"} - \end{center} -*} - -lemma (in normed_vectorspace_with_fn_norm) fn_norm_least [intro?]: - assumes "continuous V norm f" - assumes ineq: "\x \ V. \f x\ \ c * \x\" and ge: "0 \ c" - shows "\f\\V \ c" -proof - - interpret continuous V norm f by fact - show ?thesis - proof (rule fn_norm_leastB [folded B_def fn_norm_def]) - fix b assume b: "b \ B V f" - show "b \ c" - proof cases - assume "b = 0" - with ge show ?thesis by simp - next - assume "b \ 0" - with b obtain x where b_rep: "b = \f x\ * inverse \x\" - and x_neq: "x \ 0" and x: "x \ V" - by (auto simp add: B_def real_divide_def) - note b_rep - also have "\f x\ * inverse \x\ \ (c * \x\) * inverse \x\" - proof (rule mult_right_mono) - have "0 < \x\" using x x_neq .. - then show "0 \ inverse \x\" by simp - from ineq and x show "\f x\ \ c * \x\" .. - qed - also have "\ = c" - proof - - from x_neq and x have "\x\ \ 0" by simp - then show ?thesis by simp - qed - finally show ?thesis . - qed - qed (insert `continuous V norm f`, simp_all add: continuous_def) -qed - -end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Real/HahnBanach/FunctionOrder.thy --- a/src/HOL/Real/HahnBanach/FunctionOrder.thy Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,142 +0,0 @@ -(* Title: HOL/Real/HahnBanach/FunctionOrder.thy - ID: $Id$ - Author: Gertrud Bauer, TU Munich -*) - -header {* An order on functions *} - -theory FunctionOrder -imports Subspace Linearform -begin - -subsection {* The graph of a function *} - -text {* - We define the \emph{graph} of a (real) function @{text f} with - domain @{text F} as the set - \begin{center} - @{text "{(x, f x). x \ F}"} - \end{center} - So we are modeling partial functions by specifying the domain and - the mapping function. We use the term ``function'' also for its - graph. -*} - -types 'a graph = "('a \ real) set" - -definition - graph :: "'a set \ ('a \ real) \ 'a graph" where - "graph F f = {(x, f x) | x. x \ F}" - -lemma graphI [intro]: "x \ F \ (x, f x) \ graph F f" - unfolding graph_def by blast - -lemma graphI2 [intro?]: "x \ F \ \t \ graph F f. t = (x, f x)" - unfolding graph_def by blast - -lemma graphE [elim?]: - "(x, y) \ graph F f \ (x \ F \ y = f x \ C) \ C" - unfolding graph_def by blast - - -subsection {* Functions ordered by domain extension *} - -text {* - A function @{text h'} is an extension of @{text h}, iff the graph of - @{text h} is a subset of the graph of @{text h'}. -*} - -lemma graph_extI: - "(\x. x \ H \ h x = h' x) \ H \ H' - \ graph H h \ graph H' h'" - unfolding graph_def by blast - -lemma graph_extD1 [dest?]: - "graph H h \ graph H' h' \ x \ H \ h x = h' x" - unfolding graph_def by blast - -lemma graph_extD2 [dest?]: - "graph H h \ graph H' h' \ H \ H'" - unfolding graph_def by blast - - -subsection {* Domain and function of a graph *} - -text {* - The inverse functions to @{text graph} are @{text domain} and @{text - funct}. 
-*} - -definition - "domain" :: "'a graph \ 'a set" where - "domain g = {x. \y. (x, y) \ g}" - -definition - funct :: "'a graph \ ('a \ real)" where - "funct g = (\x. (SOME y. (x, y) \ g))" - -text {* - The following lemma states that @{text g} is the graph of a function - if the relation induced by @{text g} is unique. -*} - -lemma graph_domain_funct: - assumes uniq: "\x y z. (x, y) \ g \ (x, z) \ g \ z = y" - shows "graph (domain g) (funct g) = g" - unfolding domain_def funct_def graph_def -proof auto (* FIXME !? *) - fix a b assume g: "(a, b) \ g" - from g show "(a, SOME y. (a, y) \ g) \ g" by (rule someI2) - from g show "\y. (a, y) \ g" .. - from g show "b = (SOME y. (a, y) \ g)" - proof (rule some_equality [symmetric]) - fix y assume "(a, y) \ g" - with g show "y = b" by (rule uniq) - qed -qed - - -subsection {* Norm-preserving extensions of a function *} - -text {* - Given a linear form @{text f} on the space @{text F} and a seminorm - @{text p} on @{text E}. The set of all linear extensions of @{text - f}, to superspaces @{text H} of @{text F}, which are bounded by - @{text p}, is defined as follows. -*} - -definition - norm_pres_extensions :: - "'a::{plus, minus, uminus, zero} set \ ('a \ real) \ 'a set \ ('a \ real) - \ 'a graph set" where - "norm_pres_extensions E p F f - = {g. \H h. g = graph H h - \ linearform H h - \ H \ E - \ F \ H - \ graph F f \ graph H h - \ (\x \ H. h x \ p x)}" - -lemma norm_pres_extensionE [elim]: - "g \ norm_pres_extensions E p F f - \ (\H h. g = graph H h \ linearform H h - \ H \ E \ F \ H \ graph F f \ graph H h - \ \x \ H. h x \ p x \ C) \ C" - unfolding norm_pres_extensions_def by blast - -lemma norm_pres_extensionI2 [intro]: - "linearform H h \ H \ E \ F \ H - \ graph F f \ graph H h \ \x \ H. h x \ p x - \ graph H h \ norm_pres_extensions E p F f" - unfolding norm_pres_extensions_def by blast - -lemma norm_pres_extensionI: (* FIXME ? *) - "\H h. g = graph H h - \ linearform H h - \ H \ E - \ F \ H - \ graph F f \ graph H h - \ (\x \ H. h x \ p x) \ g \ norm_pres_extensions E p F f" - unfolding norm_pres_extensions_def by blast - -end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Real/HahnBanach/HahnBanach.thy --- a/src/HOL/Real/HahnBanach/HahnBanach.thy Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,509 +0,0 @@ -(* Title: HOL/Real/HahnBanach/HahnBanach.thy - Author: Gertrud Bauer, TU Munich -*) - -header {* The Hahn-Banach Theorem *} - -theory HahnBanach -imports HahnBanachLemmas -begin - -text {* - We present the proof of two different versions of the Hahn-Banach - Theorem, closely following \cite[\S36]{Heuser:1986}. -*} - -subsection {* The Hahn-Banach Theorem for vector spaces *} - -text {* - \textbf{Hahn-Banach Theorem.} Let @{text F} be a subspace of a real - vector space @{text E}, let @{text p} be a semi-norm on @{text E}, - and @{text f} be a linear form defined on @{text F} such that @{text - f} is bounded by @{text p}, i.e. @{text "\x \ F. f x \ p x"}. Then - @{text f} can be extended to a linear form @{text h} on @{text E} - such that @{text h} is norm-preserving, i.e. @{text h} is also - bounded by @{text p}. - - \bigskip - \textbf{Proof Sketch.} - \begin{enumerate} - - \item Define @{text M} as the set of norm-preserving extensions of - @{text f} to subspaces of @{text E}. The linear forms in @{text M} - are ordered by domain extension. - - \item We show that every non-empty chain in @{text M} has an upper - bound in @{text M}. 
- - \item With Zorn's Lemma we conclude that there is a maximal function - @{text g} in @{text M}. - - \item The domain @{text H} of @{text g} is the whole space @{text - E}, as shown by classical contradiction: - - \begin{itemize} - - \item Assuming @{text g} is not defined on whole @{text E}, it can - still be extended in a norm-preserving way to a super-space @{text - H'} of @{text H}. - - \item Thus @{text g} can not be maximal. Contradiction! - - \end{itemize} - \end{enumerate} -*} - -theorem HahnBanach: - assumes E: "vectorspace E" and "subspace F E" - and "seminorm E p" and "linearform F f" - assumes fp: "\x \ F. f x \ p x" - shows "\h. linearform E h \ (\x \ F. h x = f x) \ (\x \ E. h x \ p x)" - -- {* Let @{text E} be a vector space, @{text F} a subspace of @{text E}, @{text p} a seminorm on @{text E}, *} - -- {* and @{text f} a linear form on @{text F} such that @{text f} is bounded by @{text p}, *} - -- {* then @{text f} can be extended to a linear form @{text h} on @{text E} in a norm-preserving way. \skp *} -proof - - interpret vectorspace E by fact - interpret subspace F E by fact - interpret seminorm E p by fact - interpret linearform F f by fact - def M \ "norm_pres_extensions E p F f" - then have M: "M = \" by (simp only:) - from E have F: "vectorspace F" .. - note FE = `F \ E` - { - fix c assume cM: "c \ chain M" and ex: "\x. x \ c" - have "\c \ M" - -- {* Show that every non-empty chain @{text c} of @{text M} has an upper bound in @{text M}: *} - -- {* @{text "\c"} is greater than any element of the chain @{text c}, so it suffices to show @{text "\c \ M"}. *} - unfolding M_def - proof (rule norm_pres_extensionI) - let ?H = "domain (\c)" - let ?h = "funct (\c)" - - have a: "graph ?H ?h = \c" - proof (rule graph_domain_funct) - fix x y z assume "(x, y) \ \c" and "(x, z) \ \c" - with M_def cM show "z = y" by (rule sup_definite) - qed - moreover from M cM a have "linearform ?H ?h" - by (rule sup_lf) - moreover from a M cM ex FE E have "?H \ E" - by (rule sup_subE) - moreover from a M cM ex FE have "F \ ?H" - by (rule sup_supF) - moreover from a M cM ex have "graph F f \ graph ?H ?h" - by (rule sup_ext) - moreover from a M cM have "\x \ ?H. ?h x \ p x" - by (rule sup_norm_pres) - ultimately show "\H h. \c = graph H h - \ linearform H h - \ H \ E - \ F \ H - \ graph F f \ graph H h - \ (\x \ H. h x \ p x)" by blast - qed - } - then have "\g \ M. \x \ M. g \ x \ g = x" - -- {* With Zorn's Lemma we can conclude that there is a maximal element in @{text M}. \skp *} - proof (rule Zorn's_Lemma) - -- {* We show that @{text M} is non-empty: *} - show "graph F f \ M" - unfolding M_def - proof (rule norm_pres_extensionI2) - show "linearform F f" by fact - show "F \ E" by fact - from F show "F \ F" by (rule vectorspace.subspace_refl) - show "graph F f \ graph F f" .. - show "\x\F. f x \ p x" by fact - qed - qed - then obtain g where gM: "g \ M" and gx: "\x \ M. g \ x \ g = x" - by blast - from gM obtain H h where - g_rep: "g = graph H h" - and linearform: "linearform H h" - and HE: "H \ E" and FH: "F \ H" - and graphs: "graph F f \ graph H h" - and hp: "\x \ H. h x \ p x" unfolding M_def .. - -- {* @{text g} is a norm-preserving extension of @{text f}, in other words: *} - -- {* @{text g} is the graph of some linear form @{text h} defined on a subspace @{text H} of @{text E}, *} - -- {* and @{text h} is an extension of @{text f} that is again bounded by @{text p}. 
\skp *} - from HE E have H: "vectorspace H" - by (rule subspace.vectorspace) - - have HE_eq: "H = E" - -- {* We show that @{text h} is defined on whole @{text E} by classical contradiction. \skp *} - proof (rule classical) - assume neq: "H \ E" - -- {* Assume @{text h} is not defined on whole @{text E}. Then show that @{text h} can be extended *} - -- {* in a norm-preserving way to a function @{text h'} with the graph @{text g'}. \skp *} - have "\g' \ M. g \ g' \ g \ g'" - proof - - from HE have "H \ E" .. - with neq obtain x' where x'E: "x' \ E" and "x' \ H" by blast - obtain x': "x' \ 0" - proof - show "x' \ 0" - proof - assume "x' = 0" - with H have "x' \ H" by (simp only: vectorspace.zero) - with `x' \ H` show False by contradiction - qed - qed - - def H' \ "H + lin x'" - -- {* Define @{text H'} as the direct sum of @{text H} and the linear closure of @{text x'}. \skp *} - have HH': "H \ H'" - proof (unfold H'_def) - from x'E have "vectorspace (lin x')" .. - with H show "H \ H + lin x'" .. - qed - - obtain xi where - xi: "\y \ H. - p (y + x') - h y \ xi - \ xi \ p (y + x') - h y" - -- {* Pick a real number @{text \} that fulfills certain inequations; this will *} - -- {* be used to establish that @{text h'} is a norm-preserving extension of @{text h}. - \label{ex-xi-use}\skp *} - proof - - from H have "\xi. \y \ H. - p (y + x') - h y \ xi - \ xi \ p (y + x') - h y" - proof (rule ex_xi) - fix u v assume u: "u \ H" and v: "v \ H" - with HE have uE: "u \ E" and vE: "v \ E" by auto - from H u v linearform have "h v - h u = h (v - u)" - by (simp add: linearform.diff) - also from hp and H u v have "\ \ p (v - u)" - by (simp only: vectorspace.diff_closed) - also from x'E uE vE have "v - u = x' + - x' + v + - u" - by (simp add: diff_eq1) - also from x'E uE vE have "\ = v + x' + - (u + x')" - by (simp add: add_ac) - also from x'E uE vE have "\ = (v + x') - (u + x')" - by (simp add: diff_eq1) - also from x'E uE vE E have "p \ \ p (v + x') + p (u + x')" - by (simp add: diff_subadditive) - finally have "h v - h u \ p (v + x') + p (u + x')" . - then show "- p (u + x') - h u \ p (v + x') - h v" by simp - qed - then show thesis by (blast intro: that) - qed - - def h' \ "\x. let (y, a) = - SOME (y, a). x = y + a \ x' \ y \ H in h y + a * xi" - -- {* Define the extension @{text h'} of @{text h} to @{text H'} using @{text \}. \skp *} - - have "g \ graph H' h' \ g \ graph H' h'" - -- {* @{text h'} is an extension of @{text h} \dots \skp *} - proof - show "g \ graph H' h'" - proof - - have "graph H h \ graph H' h'" - proof (rule graph_extI) - fix t assume t: "t \ H" - from E HE t have "(SOME (y, a). t = y + a \ x' \ y \ H) = (t, 0)" - using `x' \ H` `x' \ E` `x' \ 0` by (rule decomp_H'_H) - with h'_def show "h t = h' t" by (simp add: Let_def) - next - from HH' show "H \ H'" .. - qed - with g_rep show ?thesis by (simp only:) - qed - - show "g \ graph H' h'" - proof - - have "graph H h \ graph H' h'" - proof - assume eq: "graph H h = graph H' h'" - have "x' \ H'" - unfolding H'_def - proof - from H show "0 \ H" by (rule vectorspace.zero) - from x'E show "x' \ lin x'" by (rule x_lin_x) - from x'E show "x' = 0 + x'" by simp - qed - then have "(x', h' x') \ graph H' h'" .. - with eq have "(x', h' x') \ graph H h" by (simp only:) - then have "x' \ H" .. - with `x' \ H` show False by contradiction - qed - with g_rep show ?thesis by simp - qed - qed - moreover have "graph H' h' \ M" - -- {* and @{text h'} is norm-preserving. 
\skp *} - proof (unfold M_def) - show "graph H' h' \ norm_pres_extensions E p F f" - proof (rule norm_pres_extensionI2) - show "linearform H' h'" - using h'_def H'_def HE linearform `x' \ H` `x' \ E` `x' \ 0` E - by (rule h'_lf) - show "H' \ E" - unfolding H'_def - proof - show "H \ E" by fact - show "vectorspace E" by fact - from x'E show "lin x' \ E" .. - qed - from H `F \ H` HH' show FH': "F \ H'" - by (rule vectorspace.subspace_trans) - show "graph F f \ graph H' h'" - proof (rule graph_extI) - fix x assume x: "x \ F" - with graphs have "f x = h x" .. - also have "\ = h x + 0 * xi" by simp - also have "\ = (let (y, a) = (x, 0) in h y + a * xi)" - by (simp add: Let_def) - also have "(x, 0) = - (SOME (y, a). x = y + a \ x' \ y \ H)" - using E HE - proof (rule decomp_H'_H [symmetric]) - from FH x show "x \ H" .. - from x' show "x' \ 0" . - show "x' \ H" by fact - show "x' \ E" by fact - qed - also have - "(let (y, a) = (SOME (y, a). x = y + a \ x' \ y \ H) - in h y + a * xi) = h' x" by (simp only: h'_def) - finally show "f x = h' x" . - next - from FH' show "F \ H'" .. - qed - show "\x \ H'. h' x \ p x" - using h'_def H'_def `x' \ H` `x' \ E` `x' \ 0` E HE - `seminorm E p` linearform and hp xi - by (rule h'_norm_pres) - qed - qed - ultimately show ?thesis .. - qed - then have "\ (\x \ M. g \ x \ g = x)" by simp - -- {* So the graph @{text g} of @{text h} cannot be maximal. Contradiction! \skp *} - with gx show "H = E" by contradiction - qed - - from HE_eq and linearform have "linearform E h" - by (simp only:) - moreover have "\x \ F. h x = f x" - proof - fix x assume "x \ F" - with graphs have "f x = h x" .. - then show "h x = f x" .. - qed - moreover from HE_eq and hp have "\x \ E. h x \ p x" - by (simp only:) - ultimately show ?thesis by blast -qed - - -subsection {* Alternative formulation *} - -text {* - The following alternative formulation of the Hahn-Banach - Theorem\label{abs-HahnBanach} uses the fact that for a real linear - form @{text f} and a seminorm @{text p} the following inequations - are equivalent:\footnote{This was shown in lemma @{thm [source] - abs_ineq_iff} (see page \pageref{abs-ineq-iff}).} - \begin{center} - \begin{tabular}{lll} - @{text "\x \ H. \h x\ \ p x"} & and & - @{text "\x \ H. h x \ p x"} \\ - \end{tabular} - \end{center} -*} - -theorem abs_HahnBanach: - assumes E: "vectorspace E" and FE: "subspace F E" - and lf: "linearform F f" and sn: "seminorm E p" - assumes fp: "\x \ F. \f x\ \ p x" - shows "\g. linearform E g - \ (\x \ F. g x = f x) - \ (\x \ E. \g x\ \ p x)" -proof - - interpret vectorspace E by fact - interpret subspace F E by fact - interpret linearform F f by fact - interpret seminorm E p by fact - have "\g. linearform E g \ (\x \ F. g x = f x) \ (\x \ E. g x \ p x)" - using E FE sn lf - proof (rule HahnBanach) - show "\x \ F. f x \ p x" - using FE E sn lf and fp by (rule abs_ineq_iff [THEN iffD1]) - qed - then obtain g where lg: "linearform E g" and *: "\x \ F. g x = f x" - and **: "\x \ E. g x \ p x" by blast - have "\x \ E. \g x\ \ p x" - using _ E sn lg ** - proof (rule abs_ineq_iff [THEN iffD2]) - show "E \ E" .. - qed - with lg * show ?thesis by blast -qed - - -subsection {* The Hahn-Banach Theorem for normed spaces *} - -text {* - Every continuous linear form @{text f} on a subspace @{text F} of a - norm space @{text E}, can be extended to a continuous linear form - @{text g} on @{text E} such that @{text "\f\ = \g\"}. -*} - -theorem norm_HahnBanach: - fixes V and norm ("\_\") - fixes B defines "\V f. B V f \ {0} \ {\f x\ / \x\ | x. 
x \ 0 \ x \ V}" - fixes fn_norm ("\_\\_" [0, 1000] 999) - defines "\V f. \f\\V \ \(B V f)" - assumes E_norm: "normed_vectorspace E norm" and FE: "subspace F E" - and linearform: "linearform F f" and "continuous F norm f" - shows "\g. linearform E g - \ continuous E norm g - \ (\x \ F. g x = f x) - \ \g\\E = \f\\F" -proof - - interpret normed_vectorspace E norm by fact - interpret normed_vectorspace_with_fn_norm E norm B fn_norm - by (auto simp: B_def fn_norm_def) intro_locales - interpret subspace F E by fact - interpret linearform F f by fact - interpret continuous F norm f by fact - have E: "vectorspace E" by intro_locales - have F: "vectorspace F" by rule intro_locales - have F_norm: "normed_vectorspace F norm" - using FE E_norm by (rule subspace_normed_vs) - have ge_zero: "0 \ \f\\F" - by (rule normed_vectorspace_with_fn_norm.fn_norm_ge_zero - [OF normed_vectorspace_with_fn_norm.intro, - OF F_norm `continuous F norm f` , folded B_def fn_norm_def]) - txt {* We define a function @{text p} on @{text E} as follows: - @{text "p x = \f\ \ \x\"} *} - def p \ "\x. \f\\F * \x\" - - txt {* @{text p} is a seminorm on @{text E}: *} - have q: "seminorm E p" - proof - fix x y a assume x: "x \ E" and y: "y \ E" - - txt {* @{text p} is positive definite: *} - have "0 \ \f\\F" by (rule ge_zero) - moreover from x have "0 \ \x\" .. - ultimately show "0 \ p x" - by (simp add: p_def zero_le_mult_iff) - - txt {* @{text p} is absolutely homogenous: *} - - show "p (a \ x) = \a\ * p x" - proof - - have "p (a \ x) = \f\\F * \a \ x\" by (simp only: p_def) - also from x have "\a \ x\ = \a\ * \x\" by (rule abs_homogenous) - also have "\f\\F * (\a\ * \x\) = \a\ * (\f\\F * \x\)" by simp - also have "\ = \a\ * p x" by (simp only: p_def) - finally show ?thesis . - qed - - txt {* Furthermore, @{text p} is subadditive: *} - - show "p (x + y) \ p x + p y" - proof - - have "p (x + y) = \f\\F * \x + y\" by (simp only: p_def) - also have a: "0 \ \f\\F" by (rule ge_zero) - from x y have "\x + y\ \ \x\ + \y\" .. - with a have " \f\\F * \x + y\ \ \f\\F * (\x\ + \y\)" - by (simp add: mult_left_mono) - also have "\ = \f\\F * \x\ + \f\\F * \y\" by (simp only: right_distrib) - also have "\ = p x + p y" by (simp only: p_def) - finally show ?thesis . - qed - qed - - txt {* @{text f} is bounded by @{text p}. *} - - have "\x \ F. \f x\ \ p x" - proof - fix x assume "x \ F" - with `continuous F norm f` and linearform - show "\f x\ \ p x" - unfolding p_def by (rule normed_vectorspace_with_fn_norm.fn_norm_le_cong - [OF normed_vectorspace_with_fn_norm.intro, - OF F_norm, folded B_def fn_norm_def]) - qed - - txt {* Using the fact that @{text p} is a seminorm and @{text f} is bounded - by @{text p} we can apply the Hahn-Banach Theorem for real vector - spaces. So @{text f} can be extended in a norm-preserving way to - some function @{text g} on the whole vector space @{text E}. *} - - with E FE linearform q obtain g where - linearformE: "linearform E g" - and a: "\x \ F. g x = f x" - and b: "\x \ E. \g x\ \ p x" - by (rule abs_HahnBanach [elim_format]) iprover - - txt {* We furthermore have to show that @{text g} is also continuous: *} - - have g_cont: "continuous E norm g" using linearformE - proof - fix x assume "x \ E" - with b show "\g x\ \ \f\\F * \x\" - by (simp only: p_def) - qed - - txt {* To complete the proof, we show that @{text "\g\ = \f\"}. *} - - have "\g\\E = \f\\F" - proof (rule order_antisym) - txt {* - First we show @{text "\g\ \ \f\"}. 
The function norm @{text - "\g\"} is defined as the smallest @{text "c \ \"} such that - \begin{center} - \begin{tabular}{l} - @{text "\x \ E. \g x\ \ c \ \x\"} - \end{tabular} - \end{center} - \noindent Furthermore holds - \begin{center} - \begin{tabular}{l} - @{text "\x \ E. \g x\ \ \f\ \ \x\"} - \end{tabular} - \end{center} - *} - - have "\x \ E. \g x\ \ \f\\F * \x\" - proof - fix x assume "x \ E" - with b show "\g x\ \ \f\\F * \x\" - by (simp only: p_def) - qed - from g_cont this ge_zero - show "\g\\E \ \f\\F" - by (rule fn_norm_least [of g, folded B_def fn_norm_def]) - - txt {* The other direction is achieved by a similar argument. *} - - show "\f\\F \ \g\\E" - proof (rule normed_vectorspace_with_fn_norm.fn_norm_least - [OF normed_vectorspace_with_fn_norm.intro, - OF F_norm, folded B_def fn_norm_def]) - show "\x \ F. \f x\ \ \g\\E * \x\" - proof - fix x assume x: "x \ F" - from a x have "g x = f x" .. - then have "\f x\ = \g x\" by (simp only:) - also from g_cont - have "\ \ \g\\E * \x\" - proof (rule fn_norm_le_cong [of g, folded B_def fn_norm_def]) - from FE x show "x \ E" .. - qed - finally show "\f x\ \ \g\\E * \x\" . - qed - show "0 \ \g\\E" - using g_cont - by (rule fn_norm_ge_zero [of g, folded B_def fn_norm_def]) - show "continuous F norm f" by fact - qed - qed - with linearformE a g_cont show ?thesis by blast -qed - -end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Real/HahnBanach/HahnBanachExtLemmas.thy --- a/src/HOL/Real/HahnBanach/HahnBanachExtLemmas.thy Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,280 +0,0 @@ -(* Title: HOL/Real/HahnBanach/HahnBanachExtLemmas.thy - Author: Gertrud Bauer, TU Munich -*) - -header {* Extending non-maximal functions *} - -theory HahnBanachExtLemmas -imports FunctionNorm -begin - -text {* - In this section the following context is presumed. Let @{text E} be - a real vector space with a seminorm @{text q} on @{text E}. @{text - F} is a subspace of @{text E} and @{text f} a linear function on - @{text F}. We consider a subspace @{text H} of @{text E} that is a - superspace of @{text F} and a linear form @{text h} on @{text - H}. @{text H} is a not equal to @{text E} and @{text "x\<^sub>0"} is - an element in @{text "E - H"}. @{text H} is extended to the direct - sum @{text "H' = H + lin x\<^sub>0"}, so for any @{text "x \ H'"} - the decomposition of @{text "x = y + a \ x"} with @{text "y \ H"} is - unique. @{text h'} is defined on @{text H'} by @{text "h' x = h y + - a \ \"} for a certain @{text \}. - - Subsequently we show some properties of this extension @{text h'} of - @{text h}. - - \medskip This lemma will be used to show the existence of a linear - extension of @{text f} (see page \pageref{ex-xi-use}). It is a - consequence of the completeness of @{text \}. To show - \begin{center} - \begin{tabular}{l} - @{text "\\. \y \ F. a y \ \ \ \ \ b y"} - \end{tabular} - \end{center} - \noindent it suffices to show that - \begin{center} - \begin{tabular}{l} - @{text "\u \ F. \v \ F. a u \ b v"} - \end{tabular} - \end{center} -*} - -lemma ex_xi: - assumes "vectorspace F" - assumes r: "\u v. u \ F \ v \ F \ a u \ b v" - shows "\xi::real. \y \ F. a y \ xi \ xi \ b y" -proof - - interpret vectorspace F by fact - txt {* From the completeness of the reals follows: - The set @{text "S = {a u. u \ F}"} has a supremum, if it is - non-empty and has an upper bound. *} - - let ?S = "{a u | u. u \ F}" - have "\xi. lub ?S xi" - proof (rule real_complete) - have "a 0 \ ?S" by blast - then show "\X. X \ ?S" .. - have "\y \ ?S. 
y \ b 0" - proof - fix y assume y: "y \ ?S" - then obtain u where u: "u \ F" and y: "y = a u" by blast - from u and zero have "a u \ b 0" by (rule r) - with y show "y \ b 0" by (simp only:) - qed - then show "\u. \y \ ?S. y \ u" .. - qed - then obtain xi where xi: "lub ?S xi" .. - { - fix y assume "y \ F" - then have "a y \ ?S" by blast - with xi have "a y \ xi" by (rule lub.upper) - } moreover { - fix y assume y: "y \ F" - from xi have "xi \ b y" - proof (rule lub.least) - fix au assume "au \ ?S" - then obtain u where u: "u \ F" and au: "au = a u" by blast - from u y have "a u \ b y" by (rule r) - with au show "au \ b y" by (simp only:) - qed - } ultimately show "\xi. \y \ F. a y \ xi \ xi \ b y" by blast -qed - -text {* - \medskip The function @{text h'} is defined as a @{text "h' x = h y - + a \ \"} where @{text "x = y + a \ \"} is a linear extension of - @{text h} to @{text H'}. -*} - -lemma h'_lf: - assumes h'_def: "h' \ \x. let (y, a) = - SOME (y, a). x = y + a \ x0 \ y \ H in h y + a * xi" - and H'_def: "H' \ H + lin x0" - and HE: "H \ E" - assumes "linearform H h" - assumes x0: "x0 \ H" "x0 \ E" "x0 \ 0" - assumes E: "vectorspace E" - shows "linearform H' h'" -proof - - interpret linearform H h by fact - interpret vectorspace E by fact - show ?thesis - proof - note E = `vectorspace E` - have H': "vectorspace H'" - proof (unfold H'_def) - from `x0 \ E` - have "lin x0 \ E" .. - with HE show "vectorspace (H + lin x0)" using E .. - qed - { - fix x1 x2 assume x1: "x1 \ H'" and x2: "x2 \ H'" - show "h' (x1 + x2) = h' x1 + h' x2" - proof - - from H' x1 x2 have "x1 + x2 \ H'" - by (rule vectorspace.add_closed) - with x1 x2 obtain y y1 y2 a a1 a2 where - x1x2: "x1 + x2 = y + a \ x0" and y: "y \ H" - and x1_rep: "x1 = y1 + a1 \ x0" and y1: "y1 \ H" - and x2_rep: "x2 = y2 + a2 \ x0" and y2: "y2 \ H" - unfolding H'_def sum_def lin_def by blast - - have ya: "y1 + y2 = y \ a1 + a2 = a" using E HE _ y x0 - proof (rule decomp_H') txt_raw {* \label{decomp-H-use} *} - from HE y1 y2 show "y1 + y2 \ H" - by (rule subspace.add_closed) - from x0 and HE y y1 y2 - have "x0 \ E" "y \ E" "y1 \ E" "y2 \ E" by auto - with x1_rep x2_rep have "(y1 + y2) + (a1 + a2) \ x0 = x1 + x2" - by (simp add: add_ac add_mult_distrib2) - also note x1x2 - finally show "(y1 + y2) + (a1 + a2) \ x0 = y + a \ x0" . - qed - - from h'_def x1x2 E HE y x0 - have "h' (x1 + x2) = h y + a * xi" - by (rule h'_definite) - also have "\ = h (y1 + y2) + (a1 + a2) * xi" - by (simp only: ya) - also from y1 y2 have "h (y1 + y2) = h y1 + h y2" - by simp - also have "\ + (a1 + a2) * xi = (h y1 + a1 * xi) + (h y2 + a2 * xi)" - by (simp add: left_distrib) - also from h'_def x1_rep E HE y1 x0 - have "h y1 + a1 * xi = h' x1" - by (rule h'_definite [symmetric]) - also from h'_def x2_rep E HE y2 x0 - have "h y2 + a2 * xi = h' x2" - by (rule h'_definite [symmetric]) - finally show ?thesis . 
- qed - next - fix x1 c assume x1: "x1 \ H'" - show "h' (c \ x1) = c * (h' x1)" - proof - - from H' x1 have ax1: "c \ x1 \ H'" - by (rule vectorspace.mult_closed) - with x1 obtain y a y1 a1 where - cx1_rep: "c \ x1 = y + a \ x0" and y: "y \ H" - and x1_rep: "x1 = y1 + a1 \ x0" and y1: "y1 \ H" - unfolding H'_def sum_def lin_def by blast - - have ya: "c \ y1 = y \ c * a1 = a" using E HE _ y x0 - proof (rule decomp_H') - from HE y1 show "c \ y1 \ H" - by (rule subspace.mult_closed) - from x0 and HE y y1 - have "x0 \ E" "y \ E" "y1 \ E" by auto - with x1_rep have "c \ y1 + (c * a1) \ x0 = c \ x1" - by (simp add: mult_assoc add_mult_distrib1) - also note cx1_rep - finally show "c \ y1 + (c * a1) \ x0 = y + a \ x0" . - qed - - from h'_def cx1_rep E HE y x0 have "h' (c \ x1) = h y + a * xi" - by (rule h'_definite) - also have "\ = h (c \ y1) + (c * a1) * xi" - by (simp only: ya) - also from y1 have "h (c \ y1) = c * h y1" - by simp - also have "\ + (c * a1) * xi = c * (h y1 + a1 * xi)" - by (simp only: right_distrib) - also from h'_def x1_rep E HE y1 x0 have "h y1 + a1 * xi = h' x1" - by (rule h'_definite [symmetric]) - finally show ?thesis . - qed - } - qed -qed - -text {* \medskip The linear extension @{text h'} of @{text h} - is bounded by the seminorm @{text p}. *} - -lemma h'_norm_pres: - assumes h'_def: "h' \ \x. let (y, a) = - SOME (y, a). x = y + a \ x0 \ y \ H in h y + a * xi" - and H'_def: "H' \ H + lin x0" - and x0: "x0 \ H" "x0 \ E" "x0 \ 0" - assumes E: "vectorspace E" and HE: "subspace H E" - and "seminorm E p" and "linearform H h" - assumes a: "\y \ H. h y \ p y" - and a': "\y \ H. - p (y + x0) - h y \ xi \ xi \ p (y + x0) - h y" - shows "\x \ H'. h' x \ p x" -proof - - interpret vectorspace E by fact - interpret subspace H E by fact - interpret seminorm E p by fact - interpret linearform H h by fact - show ?thesis - proof - fix x assume x': "x \ H'" - show "h' x \ p x" - proof - - from a' have a1: "\ya \ H. - p (ya + x0) - h ya \ xi" - and a2: "\ya \ H. xi \ p (ya + x0) - h ya" by auto - from x' obtain y a where - x_rep: "x = y + a \ x0" and y: "y \ H" - unfolding H'_def sum_def lin_def by blast - from y have y': "y \ E" .. - from y have ay: "inverse a \ y \ H" by simp - - from h'_def x_rep E HE y x0 have "h' x = h y + a * xi" - by (rule h'_definite) - also have "\ \ p (y + a \ x0)" - proof (rule linorder_cases) - assume z: "a = 0" - then have "h y + a * xi = h y" by simp - also from a y have "\ \ p y" .. - also from x0 y' z have "p y = p (y + a \ x0)" by simp - finally show ?thesis . - next - txt {* In the case @{text "a < 0"}, we use @{text "a\<^sub>1"} - with @{text ya} taken as @{text "y / a"}: *} - assume lz: "a < 0" then have nz: "a \ 0" by simp - from a1 ay - have "- p (inverse a \ y + x0) - h (inverse a \ y) \ xi" .. - with lz have "a * xi \ - a * (- p (inverse a \ y + x0) - h (inverse a \ y))" - by (simp add: mult_left_mono_neg order_less_imp_le) - - also have "\ = - - a * (p (inverse a \ y + x0)) - a * (h (inverse a \ y))" - by (simp add: right_diff_distrib) - also from lz x0 y' have "- a * (p (inverse a \ y + x0)) = - p (a \ (inverse a \ y + x0))" - by (simp add: abs_homogenous) - also from nz x0 y' have "\ = p (y + a \ x0)" - by (simp add: add_mult_distrib1 mult_assoc [symmetric]) - also from nz y have "a * (h (inverse a \ y)) = h y" - by simp - finally have "a * xi \ p (y + a \ x0) - h y" . 
- then show ?thesis by simp - next - txt {* In the case @{text "a > 0"}, we use @{text "a\<^sub>2"} - with @{text ya} taken as @{text "y / a"}: *} - assume gz: "0 < a" then have nz: "a \ 0" by simp - from a2 ay - have "xi \ p (inverse a \ y + x0) - h (inverse a \ y)" .. - with gz have "a * xi \ - a * (p (inverse a \ y + x0) - h (inverse a \ y))" - by simp - also have "\ = a * p (inverse a \ y + x0) - a * h (inverse a \ y)" - by (simp add: right_diff_distrib) - also from gz x0 y' - have "a * p (inverse a \ y + x0) = p (a \ (inverse a \ y + x0))" - by (simp add: abs_homogenous) - also from nz x0 y' have "\ = p (y + a \ x0)" - by (simp add: add_mult_distrib1 mult_assoc [symmetric]) - also from nz y have "a * h (inverse a \ y) = h y" - by simp - finally have "a * xi \ p (y + a \ x0) - h y" . - then show ?thesis by simp - qed - also from x_rep have "\ = p x" by (simp only:) - finally show ?thesis . - qed - qed -qed - -end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Real/HahnBanach/HahnBanachLemmas.thy --- a/src/HOL/Real/HahnBanach/HahnBanachLemmas.thy Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,4 +0,0 @@ -(*<*) -theory HahnBanachLemmas imports HahnBanachSupLemmas HahnBanachExtLemmas begin -end -(*>*) \ No newline at end of file diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Real/HahnBanach/HahnBanachSupLemmas.thy --- a/src/HOL/Real/HahnBanach/HahnBanachSupLemmas.thy Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,446 +0,0 @@ -(* Title: HOL/Real/HahnBanach/HahnBanachSupLemmas.thy - ID: $Id$ - Author: Gertrud Bauer, TU Munich -*) - -header {* The supremum w.r.t.~the function order *} - -theory HahnBanachSupLemmas -imports FunctionNorm ZornLemma -begin - -text {* - This section contains some lemmas that will be used in the proof of - the Hahn-Banach Theorem. In this section the following context is - presumed. Let @{text E} be a real vector space with a seminorm - @{text p} on @{text E}. @{text F} is a subspace of @{text E} and - @{text f} a linear form on @{text F}. We consider a chain @{text c} - of norm-preserving extensions of @{text f}, such that @{text "\c = - graph H h"}. We will show some properties about the limit function - @{text h}, i.e.\ the supremum of the chain @{text c}. - - \medskip Let @{text c} be a chain of norm-preserving extensions of - the function @{text f} and let @{text "graph H h"} be the supremum - of @{text c}. Every element in @{text H} is member of one of the - elements of the chain. -*} -lemmas [dest?] = chainD -lemmas chainE2 [elim?] = chainD2 [elim_format, standard] - -lemma some_H'h't: - assumes M: "M = norm_pres_extensions E p F f" - and cM: "c \ chain M" - and u: "graph H h = \c" - and x: "x \ H" - shows "\H' h'. graph H' h' \ c - \ (x, h x) \ graph H' h' - \ linearform H' h' \ H' \ E - \ F \ H' \ graph F f \ graph H' h' - \ (\x \ H'. h' x \ p x)" -proof - - from x have "(x, h x) \ graph H h" .. - also from u have "\ = \c" . - finally obtain g where gc: "g \ c" and gh: "(x, h x) \ g" by blast - - from cM have "c \ M" .. - with gc have "g \ M" .. - also from M have "\ = norm_pres_extensions E p F f" . - finally obtain H' and h' where g: "g = graph H' h'" - and * : "linearform H' h'" "H' \ E" "F \ H'" - "graph F f \ graph H' h'" "\x \ H'. h' x \ p x" .. 
- - from gc and g have "graph H' h' \ c" by (simp only:) - moreover from gh and g have "(x, h x) \ graph H' h'" by (simp only:) - ultimately show ?thesis using * by blast -qed - -text {* - \medskip Let @{text c} be a chain of norm-preserving extensions of - the function @{text f} and let @{text "graph H h"} be the supremum - of @{text c}. Every element in the domain @{text H} of the supremum - function is member of the domain @{text H'} of some function @{text - h'}, such that @{text h} extends @{text h'}. -*} - -lemma some_H'h': - assumes M: "M = norm_pres_extensions E p F f" - and cM: "c \ chain M" - and u: "graph H h = \c" - and x: "x \ H" - shows "\H' h'. x \ H' \ graph H' h' \ graph H h - \ linearform H' h' \ H' \ E \ F \ H' - \ graph F f \ graph H' h' \ (\x \ H'. h' x \ p x)" -proof - - from M cM u x obtain H' h' where - x_hx: "(x, h x) \ graph H' h'" - and c: "graph H' h' \ c" - and * : "linearform H' h'" "H' \ E" "F \ H'" - "graph F f \ graph H' h'" "\x \ H'. h' x \ p x" - by (rule some_H'h't [elim_format]) blast - from x_hx have "x \ H'" .. - moreover from cM u c have "graph H' h' \ graph H h" - by (simp only: chain_ball_Union_upper) - ultimately show ?thesis using * by blast -qed - -text {* - \medskip Any two elements @{text x} and @{text y} in the domain - @{text H} of the supremum function @{text h} are both in the domain - @{text H'} of some function @{text h'}, such that @{text h} extends - @{text h'}. -*} - -lemma some_H'h'2: - assumes M: "M = norm_pres_extensions E p F f" - and cM: "c \ chain M" - and u: "graph H h = \c" - and x: "x \ H" - and y: "y \ H" - shows "\H' h'. x \ H' \ y \ H' - \ graph H' h' \ graph H h - \ linearform H' h' \ H' \ E \ F \ H' - \ graph F f \ graph H' h' \ (\x \ H'. h' x \ p x)" -proof - - txt {* @{text y} is in the domain @{text H''} of some function @{text h''}, - such that @{text h} extends @{text h''}. *} - - from M cM u and y obtain H' h' where - y_hy: "(y, h y) \ graph H' h'" - and c': "graph H' h' \ c" - and * : - "linearform H' h'" "H' \ E" "F \ H'" - "graph F f \ graph H' h'" "\x \ H'. h' x \ p x" - by (rule some_H'h't [elim_format]) blast - - txt {* @{text x} is in the domain @{text H'} of some function @{text h'}, - such that @{text h} extends @{text h'}. *} - - from M cM u and x obtain H'' h'' where - x_hx: "(x, h x) \ graph H'' h''" - and c'': "graph H'' h'' \ c" - and ** : - "linearform H'' h''" "H'' \ E" "F \ H''" - "graph F f \ graph H'' h''" "\x \ H''. h'' x \ p x" - by (rule some_H'h't [elim_format]) blast - - txt {* Since both @{text h'} and @{text h''} are elements of the chain, - @{text h''} is an extension of @{text h'} or vice versa. Thus both - @{text x} and @{text y} are contained in the greater - one. \label{cases1}*} - - from cM c'' c' have "graph H'' h'' \ graph H' h' \ graph H' h' \ graph H'' h''" - (is "?case1 \ ?case2") .. - then show ?thesis - proof - assume ?case1 - have "(x, h x) \ graph H'' h''" by fact - also have "\ \ graph H' h'" by fact - finally have xh:"(x, h x) \ graph H' h'" . - then have "x \ H'" .. - moreover from y_hy have "y \ H'" .. - moreover from cM u and c' have "graph H' h' \ graph H h" - by (simp only: chain_ball_Union_upper) - ultimately show ?thesis using * by blast - next - assume ?case2 - from x_hx have "x \ H''" .. - moreover { - have "(y, h y) \ graph H' h'" by (rule y_hy) - also have "\ \ graph H'' h''" by fact - finally have "(y, h y) \ graph H'' h''" . - } then have "y \ H''" .. 
- moreover from cM u and c'' have "graph H'' h'' \ graph H h" - by (simp only: chain_ball_Union_upper) - ultimately show ?thesis using ** by blast - qed -qed - -text {* - \medskip The relation induced by the graph of the supremum of a - chain @{text c} is definite, i.~e.~t is the graph of a function. -*} - -lemma sup_definite: - assumes M_def: "M \ norm_pres_extensions E p F f" - and cM: "c \ chain M" - and xy: "(x, y) \ \c" - and xz: "(x, z) \ \c" - shows "z = y" -proof - - from cM have c: "c \ M" .. - from xy obtain G1 where xy': "(x, y) \ G1" and G1: "G1 \ c" .. - from xz obtain G2 where xz': "(x, z) \ G2" and G2: "G2 \ c" .. - - from G1 c have "G1 \ M" .. - then obtain H1 h1 where G1_rep: "G1 = graph H1 h1" - unfolding M_def by blast - - from G2 c have "G2 \ M" .. - then obtain H2 h2 where G2_rep: "G2 = graph H2 h2" - unfolding M_def by blast - - txt {* @{text "G\<^sub>1"} is contained in @{text "G\<^sub>2"} - or vice versa, since both @{text "G\<^sub>1"} and @{text - "G\<^sub>2"} are members of @{text c}. \label{cases2}*} - - from cM G1 G2 have "G1 \ G2 \ G2 \ G1" (is "?case1 \ ?case2") .. - then show ?thesis - proof - assume ?case1 - with xy' G2_rep have "(x, y) \ graph H2 h2" by blast - then have "y = h2 x" .. - also - from xz' G2_rep have "(x, z) \ graph H2 h2" by (simp only:) - then have "z = h2 x" .. - finally show ?thesis . - next - assume ?case2 - with xz' G1_rep have "(x, z) \ graph H1 h1" by blast - then have "z = h1 x" .. - also - from xy' G1_rep have "(x, y) \ graph H1 h1" by (simp only:) - then have "y = h1 x" .. - finally show ?thesis .. - qed -qed - -text {* - \medskip The limit function @{text h} is linear. Every element - @{text x} in the domain of @{text h} is in the domain of a function - @{text h'} in the chain of norm preserving extensions. Furthermore, - @{text h} is an extension of @{text h'} so the function values of - @{text x} are identical for @{text h'} and @{text h}. Finally, the - function @{text h'} is linear by construction of @{text M}. -*} - -lemma sup_lf: - assumes M: "M = norm_pres_extensions E p F f" - and cM: "c \ chain M" - and u: "graph H h = \c" - shows "linearform H h" -proof - fix x y assume x: "x \ H" and y: "y \ H" - with M cM u obtain H' h' where - x': "x \ H'" and y': "y \ H'" - and b: "graph H' h' \ graph H h" - and linearform: "linearform H' h'" - and subspace: "H' \ E" - by (rule some_H'h'2 [elim_format]) blast - - show "h (x + y) = h x + h y" - proof - - from linearform x' y' have "h' (x + y) = h' x + h' y" - by (rule linearform.add) - also from b x' have "h' x = h x" .. - also from b y' have "h' y = h y" .. - also from subspace x' y' have "x + y \ H'" - by (rule subspace.add_closed) - with b have "h' (x + y) = h (x + y)" .. - finally show ?thesis . - qed -next - fix x a assume x: "x \ H" - with M cM u obtain H' h' where - x': "x \ H'" - and b: "graph H' h' \ graph H h" - and linearform: "linearform H' h'" - and subspace: "H' \ E" - by (rule some_H'h' [elim_format]) blast - - show "h (a \ x) = a * h x" - proof - - from linearform x' have "h' (a \ x) = a * h' x" - by (rule linearform.mult) - also from b x' have "h' x = h x" .. - also from subspace x' have "a \ x \ H'" - by (rule subspace.mult_closed) - with b have "h' (a \ x) = h (a \ x)" .. - finally show ?thesis . - qed -qed - -text {* - \medskip The limit of a non-empty chain of norm preserving - extensions of @{text f} is an extension of @{text f}, since every - element of the chain is an extension of @{text f} and the supremum - is an extension for every element of the chain. 
-*} - -lemma sup_ext: - assumes graph: "graph H h = \c" - and M: "M = norm_pres_extensions E p F f" - and cM: "c \ chain M" - and ex: "\x. x \ c" - shows "graph F f \ graph H h" -proof - - from ex obtain x where xc: "x \ c" .. - from cM have "c \ M" .. - with xc have "x \ M" .. - with M have "x \ norm_pres_extensions E p F f" - by (simp only:) - then obtain G g where "x = graph G g" and "graph F f \ graph G g" .. - then have "graph F f \ x" by (simp only:) - also from xc have "\ \ \c" by blast - also from graph have "\ = graph H h" .. - finally show ?thesis . -qed - -text {* - \medskip The domain @{text H} of the limit function is a superspace - of @{text F}, since @{text F} is a subset of @{text H}. The - existence of the @{text 0} element in @{text F} and the closure - properties follow from the fact that @{text F} is a vector space. -*} - -lemma sup_supF: - assumes graph: "graph H h = \c" - and M: "M = norm_pres_extensions E p F f" - and cM: "c \ chain M" - and ex: "\x. x \ c" - and FE: "F \ E" - shows "F \ H" -proof - from FE show "F \ {}" by (rule subspace.non_empty) - from graph M cM ex have "graph F f \ graph H h" by (rule sup_ext) - then show "F \ H" .. - fix x y assume "x \ F" and "y \ F" - with FE show "x + y \ F" by (rule subspace.add_closed) -next - fix x a assume "x \ F" - with FE show "a \ x \ F" by (rule subspace.mult_closed) -qed - -text {* - \medskip The domain @{text H} of the limit function is a subspace of - @{text E}. -*} - -lemma sup_subE: - assumes graph: "graph H h = \c" - and M: "M = norm_pres_extensions E p F f" - and cM: "c \ chain M" - and ex: "\x. x \ c" - and FE: "F \ E" - and E: "vectorspace E" - shows "H \ E" -proof - show "H \ {}" - proof - - from FE E have "0 \ F" by (rule subspace.zero) - also from graph M cM ex FE have "F \ H" by (rule sup_supF) - then have "F \ H" .. - finally show ?thesis by blast - qed - show "H \ E" - proof - fix x assume "x \ H" - with M cM graph - obtain H' h' where x: "x \ H'" and H'E: "H' \ E" - by (rule some_H'h' [elim_format]) blast - from H'E have "H' \ E" .. - with x show "x \ E" .. - qed - fix x y assume x: "x \ H" and y: "y \ H" - show "x + y \ H" - proof - - from M cM graph x y obtain H' h' where - x': "x \ H'" and y': "y \ H'" and H'E: "H' \ E" - and graphs: "graph H' h' \ graph H h" - by (rule some_H'h'2 [elim_format]) blast - from H'E x' y' have "x + y \ H'" - by (rule subspace.add_closed) - also from graphs have "H' \ H" .. - finally show ?thesis . - qed -next - fix x a assume x: "x \ H" - show "a \ x \ H" - proof - - from M cM graph x - obtain H' h' where x': "x \ H'" and H'E: "H' \ E" - and graphs: "graph H' h' \ graph H h" - by (rule some_H'h' [elim_format]) blast - from H'E x' have "a \ x \ H'" by (rule subspace.mult_closed) - also from graphs have "H' \ H" .. - finally show ?thesis . - qed -qed - -text {* - \medskip The limit function is bounded by the norm @{text p} as - well, since all elements in the chain are bounded by @{text p}. -*} - -lemma sup_norm_pres: - assumes graph: "graph H h = \c" - and M: "M = norm_pres_extensions E p F f" - and cM: "c \ chain M" - shows "\x \ H. h x \ p x" -proof - fix x assume "x \ H" - with M cM graph obtain H' h' where x': "x \ H'" - and graphs: "graph H' h' \ graph H h" - and a: "\x \ H'. h' x \ p x" - by (rule some_H'h' [elim_format]) blast - from graphs x' have [symmetric]: "h' x = h x" .. - also from a x' have "h' x \ p x " .. - finally show "h x \ p x" . -qed - -text {* - \medskip The following lemma is a property of linear forms on real - vector spaces. 
It will be used for the lemma @{text abs_HahnBanach} - (see page \pageref{abs-HahnBanach}). \label{abs-ineq-iff} For real - vector spaces the following inequations are equivalent: - \begin{center} - \begin{tabular}{lll} - @{text "\x \ H. \h x\ \ p x"} & and & - @{text "\x \ H. h x \ p x"} \\ - \end{tabular} - \end{center} -*} - -lemma abs_ineq_iff: - assumes "subspace H E" and "vectorspace E" and "seminorm E p" - and "linearform H h" - shows "(\x \ H. \h x\ \ p x) = (\x \ H. h x \ p x)" (is "?L = ?R") -proof - interpret subspace H E by fact - interpret vectorspace E by fact - interpret seminorm E p by fact - interpret linearform H h by fact - have H: "vectorspace H" using `vectorspace E` .. - { - assume l: ?L - show ?R - proof - fix x assume x: "x \ H" - have "h x \ \h x\" by arith - also from l x have "\ \ p x" .. - finally show "h x \ p x" . - qed - next - assume r: ?R - show ?L - proof - fix x assume x: "x \ H" - show "\a b :: real. - a \ b \ b \ a \ \b\ \ a" - by arith - from `linearform H h` and H x - have "- h x = h (- x)" by (rule linearform.neg [symmetric]) - also - from H x have "- x \ H" by (rule vectorspace.neg_closed) - with r have "h (- x) \ p (- x)" .. - also have "\ = p x" - using `seminorm E p` `vectorspace E` - proof (rule seminorm.minus) - from x show "x \ E" .. - qed - finally have "- h x \ p x" . - then show "- p x \ h x" by simp - from r x show "h x \ p x" .. - qed - } -qed - -end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Real/HahnBanach/Linearform.thy --- a/src/HOL/Real/HahnBanach/Linearform.thy Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,60 +0,0 @@ -(* Title: HOL/Real/HahnBanach/Linearform.thy - Author: Gertrud Bauer, TU Munich -*) - -header {* Linearforms *} - -theory Linearform -imports VectorSpace -begin - -text {* - A \emph{linear form} is a function on a vector space into the reals - that is additive and multiplicative. -*} - -locale linearform = - fixes V :: "'a\{minus, plus, zero, uminus} set" and f - assumes add [iff]: "x \ V \ y \ V \ f (x + y) = f x + f y" - and mult [iff]: "x \ V \ f (a \ x) = a * f x" - -declare linearform.intro [intro?] - -lemma (in linearform) neg [iff]: - assumes "vectorspace V" - shows "x \ V \ f (- x) = - f x" -proof - - interpret vectorspace V by fact - assume x: "x \ V" - then have "f (- x) = f ((- 1) \ x)" by (simp add: negate_eq1) - also from x have "\ = (- 1) * (f x)" by (rule mult) - also from x have "\ = - (f x)" by simp - finally show ?thesis . -qed - -lemma (in linearform) diff [iff]: - assumes "vectorspace V" - shows "x \ V \ y \ V \ f (x - y) = f x - f y" -proof - - interpret vectorspace V by fact - assume x: "x \ V" and y: "y \ V" - then have "x - y = x + - y" by (rule diff_eq1) - also have "f \ = f x + f (- y)" by (rule add) (simp_all add: x y) - also have "f (- y) = - f y" using `vectorspace V` y by (rule neg) - finally show ?thesis by simp -qed - -text {* Every linear form yields @{text 0} for the @{text 0} vector. *} - -lemma (in linearform) zero [iff]: - assumes "vectorspace V" - shows "f 0 = 0" -proof - - interpret vectorspace V by fact - have "f 0 = f (0 - 0)" by simp - also have "\ = f 0 - f 0" using `vectorspace V` by (rule diff) simp_all - also have "\ = 0" by simp - finally show ?thesis . 
-qed - -end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Real/HahnBanach/NormedSpace.thy --- a/src/HOL/Real/HahnBanach/NormedSpace.thy Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,117 +0,0 @@ -(* Title: HOL/Real/HahnBanach/NormedSpace.thy - Author: Gertrud Bauer, TU Munich -*) - -header {* Normed vector spaces *} - -theory NormedSpace -imports Subspace -begin - -subsection {* Quasinorms *} - -text {* - A \emph{seminorm} @{text "\\\"} is a function on a real vector space - into the reals that has the following properties: it is positive - definite, absolute homogenous and subadditive. -*} - -locale norm_syntax = - fixes norm :: "'a \ real" ("\_\") - -locale seminorm = var_V + norm_syntax + - constrains V :: "'a\{minus, plus, zero, uminus} set" - assumes ge_zero [iff?]: "x \ V \ 0 \ \x\" - and abs_homogenous [iff?]: "x \ V \ \a \ x\ = \a\ * \x\" - and subadditive [iff?]: "x \ V \ y \ V \ \x + y\ \ \x\ + \y\" - -declare seminorm.intro [intro?] - -lemma (in seminorm) diff_subadditive: - assumes "vectorspace V" - shows "x \ V \ y \ V \ \x - y\ \ \x\ + \y\" -proof - - interpret vectorspace V by fact - assume x: "x \ V" and y: "y \ V" - then have "x - y = x + - 1 \ y" - by (simp add: diff_eq2 negate_eq2a) - also from x y have "\\\ \ \x\ + \- 1 \ y\" - by (simp add: subadditive) - also from y have "\- 1 \ y\ = \- 1\ * \y\" - by (rule abs_homogenous) - also have "\ = \y\" by simp - finally show ?thesis . -qed - -lemma (in seminorm) minus: - assumes "vectorspace V" - shows "x \ V \ \- x\ = \x\" -proof - - interpret vectorspace V by fact - assume x: "x \ V" - then have "- x = - 1 \ x" by (simp only: negate_eq1) - also from x have "\\\ = \- 1\ * \x\" - by (rule abs_homogenous) - also have "\ = \x\" by simp - finally show ?thesis . -qed - - -subsection {* Norms *} - -text {* - A \emph{norm} @{text "\\\"} is a seminorm that maps only the - @{text 0} vector to @{text 0}. -*} - -locale norm = seminorm + - assumes zero_iff [iff]: "x \ V \ (\x\ = 0) = (x = 0)" - - -subsection {* Normed vector spaces *} - -text {* - A vector space together with a norm is called a \emph{normed - space}. -*} - -locale normed_vectorspace = vectorspace + norm - -declare normed_vectorspace.intro [intro?] - -lemma (in normed_vectorspace) gt_zero [intro?]: - "x \ V \ x \ 0 \ 0 < \x\" -proof - - assume x: "x \ V" and neq: "x \ 0" - from x have "0 \ \x\" .. - also have [symmetric]: "\ \ 0" - proof - assume "\x\ = 0" - with x have "x = 0" by simp - with neq show False by contradiction - qed - finally show ?thesis . -qed - -text {* - Any subspace of a normed vector space is again a normed vectorspace. -*} - -lemma subspace_normed_vs [intro?]: - fixes F E norm - assumes "subspace F E" "normed_vectorspace E norm" - shows "normed_vectorspace F norm" -proof - - interpret subspace F E by fact - interpret normed_vectorspace E norm by fact - show ?thesis - proof - show "vectorspace F" by (rule vectorspace) unfold_locales - next - have "NormedSpace.norm E norm" .. - with subset show "NormedSpace.norm F norm" - by (simp add: norm_def seminorm_def norm_axioms_def) - qed -qed - -end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Real/HahnBanach/README.html --- a/src/HOL/Real/HahnBanach/README.html Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,38 +0,0 @@ - - - - - - - - - HOL/Real/HahnBanach/README - - - - -

The Hahn-Banach Theorem for Real Vector Spaces (Isabelle/Isar)

- -Author: Gertrud Bauer, Technische Universität München

- -This directory contains the proof of the Hahn-Banach theorem for real vector spaces, -following H. Heuser, Funktionalanalysis, p. 228 -232. -The Hahn-Banach theorem is one of the fundamental theorems of functional analysis. -It is a consequence of Zorn's lemma.

- -Two different formulations of the theorem are presented, one for general real vector spaces -and its application to normed vector spaces.

- -The theorem says that every continuous linear form, defined on arbitrary subspaces -(not only one-dimensional subspaces), can be extended to a continuous linear form on -the whole vector space. - - -
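For reference, a rough LaTeX rendering of the normed-space statement just described (it mirrors theorem norm_HahnBanach in HahnBanach.thy above; the notation is informal, and the norm symbol stands for the function norm defined in FunctionNorm.thy):

  \[
    F \le E,\quad f \colon F \to \mathbb{R} \text{ linear and continuous}
    \;\Longrightarrow\;
    \exists g \colon E \to \mathbb{R}.\;
      g \text{ linear and continuous}
      \;\wedge\; (\forall x \in F.\ g\,x = f\,x)
      \;\wedge\; \lVert g \rVert = \lVert f \rVert
  \]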


-bauerg@in.tum.de -
- - - diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Real/HahnBanach/ROOT.ML --- a/src/HOL/Real/HahnBanach/ROOT.ML Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,8 +0,0 @@ -(* Title: HOL/Real/HahnBanach/ROOT.ML - ID: $Id$ - Author: Gertrud Bauer, TU Munich - -The Hahn-Banach theorem for real vector spaces (Isabelle/Isar). -*) - -time_use_thy "HahnBanach"; diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Real/HahnBanach/Subspace.thy --- a/src/HOL/Real/HahnBanach/Subspace.thy Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,513 +0,0 @@ -(* Title: HOL/Real/HahnBanach/Subspace.thy - Author: Gertrud Bauer, TU Munich -*) - -header {* Subspaces *} - -theory Subspace -imports VectorSpace -begin - -subsection {* Definition *} - -text {* - A non-empty subset @{text U} of a vector space @{text V} is a - \emph{subspace} of @{text V}, iff @{text U} is closed under addition - and scalar multiplication. -*} - -locale subspace = - fixes U :: "'a\{minus, plus, zero, uminus} set" and V - assumes non_empty [iff, intro]: "U \ {}" - and subset [iff]: "U \ V" - and add_closed [iff]: "x \ U \ y \ U \ x + y \ U" - and mult_closed [iff]: "x \ U \ a \ x \ U" - -notation (symbols) - subspace (infix "\" 50) - -declare vectorspace.intro [intro?] subspace.intro [intro?] - -lemma subspace_subset [elim]: "U \ V \ U \ V" - by (rule subspace.subset) - -lemma (in subspace) subsetD [iff]: "x \ U \ x \ V" - using subset by blast - -lemma subspaceD [elim]: "U \ V \ x \ U \ x \ V" - by (rule subspace.subsetD) - -lemma rev_subspaceD [elim?]: "x \ U \ U \ V \ x \ V" - by (rule subspace.subsetD) - -lemma (in subspace) diff_closed [iff]: - assumes "vectorspace V" - assumes x: "x \ U" and y: "y \ U" - shows "x - y \ U" -proof - - interpret vectorspace V by fact - from x y show ?thesis by (simp add: diff_eq1 negate_eq1) -qed - -text {* - \medskip Similar as for linear spaces, the existence of the zero - element in every subspace follows from the non-emptiness of the - carrier set and by vector space laws. -*} - -lemma (in subspace) zero [intro]: - assumes "vectorspace V" - shows "0 \ U" -proof - - interpret V!: vectorspace V by fact - have "U \ {}" by (rule non_empty) - then obtain x where x: "x \ U" by blast - then have "x \ V" .. then have "0 = x - x" by simp - also from `vectorspace V` x x have "\ \ U" by (rule diff_closed) - finally show ?thesis . -qed - -lemma (in subspace) neg_closed [iff]: - assumes "vectorspace V" - assumes x: "x \ U" - shows "- x \ U" -proof - - interpret vectorspace V by fact - from x show ?thesis by (simp add: negate_eq1) -qed - -text {* \medskip Further derived laws: every subspace is a vector space. *} - -lemma (in subspace) vectorspace [iff]: - assumes "vectorspace V" - shows "vectorspace U" -proof - - interpret vectorspace V by fact - show ?thesis - proof - show "U \ {}" .. 
- fix x y z assume x: "x \ U" and y: "y \ U" and z: "z \ U" - fix a b :: real - from x y show "x + y \ U" by simp - from x show "a \ x \ U" by simp - from x y z show "(x + y) + z = x + (y + z)" by (simp add: add_ac) - from x y show "x + y = y + x" by (simp add: add_ac) - from x show "x - x = 0" by simp - from x show "0 + x = x" by simp - from x y show "a \ (x + y) = a \ x + a \ y" by (simp add: distrib) - from x show "(a + b) \ x = a \ x + b \ x" by (simp add: distrib) - from x show "(a * b) \ x = a \ b \ x" by (simp add: mult_assoc) - from x show "1 \ x = x" by simp - from x show "- x = - 1 \ x" by (simp add: negate_eq1) - from x y show "x - y = x + - y" by (simp add: diff_eq1) - qed -qed - - -text {* The subspace relation is reflexive. *} - -lemma (in vectorspace) subspace_refl [intro]: "V \ V" -proof - show "V \ {}" .. - show "V \ V" .. - fix x y assume x: "x \ V" and y: "y \ V" - fix a :: real - from x y show "x + y \ V" by simp - from x show "a \ x \ V" by simp -qed - -text {* The subspace relation is transitive. *} - -lemma (in vectorspace) subspace_trans [trans]: - "U \ V \ V \ W \ U \ W" -proof - assume uv: "U \ V" and vw: "V \ W" - from uv show "U \ {}" by (rule subspace.non_empty) - show "U \ W" - proof - - from uv have "U \ V" by (rule subspace.subset) - also from vw have "V \ W" by (rule subspace.subset) - finally show ?thesis . - qed - fix x y assume x: "x \ U" and y: "y \ U" - from uv and x y show "x + y \ U" by (rule subspace.add_closed) - from uv and x show "\a. a \ x \ U" by (rule subspace.mult_closed) -qed - - -subsection {* Linear closure *} - -text {* - The \emph{linear closure} of a vector @{text x} is the set of all - scalar multiples of @{text x}. -*} - -definition - lin :: "('a::{minus, plus, zero}) \ 'a set" where - "lin x = {a \ x | a. True}" - -lemma linI [intro]: "y = a \ x \ y \ lin x" - unfolding lin_def by blast - -lemma linI' [iff]: "a \ x \ lin x" - unfolding lin_def by blast - -lemma linE [elim]: "x \ lin v \ (\a::real. x = a \ v \ C) \ C" - unfolding lin_def by blast - - -text {* Every vector is contained in its linear closure. *} - -lemma (in vectorspace) x_lin_x [iff]: "x \ V \ x \ lin x" -proof - - assume "x \ V" - then have "x = 1 \ x" by simp - also have "\ \ lin x" .. - finally show ?thesis . -qed - -lemma (in vectorspace) "0_lin_x" [iff]: "x \ V \ 0 \ lin x" -proof - assume "x \ V" - then show "0 = 0 \ x" by simp -qed - -text {* Any linear closure is a subspace. *} - -lemma (in vectorspace) lin_subspace [intro]: - "x \ V \ lin x \ V" -proof - assume x: "x \ V" - then show "lin x \ {}" by (auto simp add: x_lin_x) - show "lin x \ V" - proof - fix x' assume "x' \ lin x" - then obtain a where "x' = a \ x" .. - with x show "x' \ V" by simp - qed - fix x' x'' assume x': "x' \ lin x" and x'': "x'' \ lin x" - show "x' + x'' \ lin x" - proof - - from x' obtain a' where "x' = a' \ x" .. - moreover from x'' obtain a'' where "x'' = a'' \ x" .. - ultimately have "x' + x'' = (a' + a'') \ x" - using x by (simp add: distrib) - also have "\ \ lin x" .. - finally show ?thesis . - qed - fix a :: real - show "a \ x' \ lin x" - proof - - from x' obtain a' where "x' = a' \ x" .. - with x have "a \ x' = (a * a') \ x" by (simp add: mult_assoc) - also have "\ \ lin x" .. - finally show ?thesis . - qed -qed - - -text {* Any linear closure is a vector space. 
*} - -lemma (in vectorspace) lin_vectorspace [intro]: - assumes "x \ V" - shows "vectorspace (lin x)" -proof - - from `x \ V` have "subspace (lin x) V" - by (rule lin_subspace) - from this and vectorspace_axioms show ?thesis - by (rule subspace.vectorspace) -qed - - -subsection {* Sum of two vectorspaces *} - -text {* - The \emph{sum} of two vectorspaces @{text U} and @{text V} is the - set of all sums of elements from @{text U} and @{text V}. -*} - -instantiation "fun" :: (type, type) plus -begin - -definition - sum_def: "plus_fun U V = {u + v | u v. u \ U \ v \ V}" (* FIXME not fully general!? *) - -instance .. - -end - -lemma sumE [elim]: - "x \ U + V \ (\u v. x = u + v \ u \ U \ v \ V \ C) \ C" - unfolding sum_def by blast - -lemma sumI [intro]: - "u \ U \ v \ V \ x = u + v \ x \ U + V" - unfolding sum_def by blast - -lemma sumI' [intro]: - "u \ U \ v \ V \ u + v \ U + V" - unfolding sum_def by blast - -text {* @{text U} is a subspace of @{text "U + V"}. *} - -lemma subspace_sum1 [iff]: - assumes "vectorspace U" "vectorspace V" - shows "U \ U + V" -proof - - interpret vectorspace U by fact - interpret vectorspace V by fact - show ?thesis - proof - show "U \ {}" .. - show "U \ U + V" - proof - fix x assume x: "x \ U" - moreover have "0 \ V" .. - ultimately have "x + 0 \ U + V" .. - with x show "x \ U + V" by simp - qed - fix x y assume x: "x \ U" and "y \ U" - then show "x + y \ U" by simp - from x show "\a. a \ x \ U" by simp - qed -qed - -text {* The sum of two subspaces is again a subspace. *} - -lemma sum_subspace [intro?]: - assumes "subspace U E" "vectorspace E" "subspace V E" - shows "U + V \ E" -proof - - interpret subspace U E by fact - interpret vectorspace E by fact - interpret subspace V E by fact - show ?thesis - proof - have "0 \ U + V" - proof - show "0 \ U" using `vectorspace E` .. - show "0 \ V" using `vectorspace E` .. - show "(0::'a) = 0 + 0" by simp - qed - then show "U + V \ {}" by blast - show "U + V \ E" - proof - fix x assume "x \ U + V" - then obtain u v where "x = u + v" and - "u \ U" and "v \ V" .. - then show "x \ E" by simp - qed - fix x y assume x: "x \ U + V" and y: "y \ U + V" - show "x + y \ U + V" - proof - - from x obtain ux vx where "x = ux + vx" and "ux \ U" and "vx \ V" .. - moreover - from y obtain uy vy where "y = uy + vy" and "uy \ U" and "vy \ V" .. - ultimately - have "ux + uy \ U" - and "vx + vy \ V" - and "x + y = (ux + uy) + (vx + vy)" - using x y by (simp_all add: add_ac) - then show ?thesis .. - qed - fix a show "a \ x \ U + V" - proof - - from x obtain u v where "x = u + v" and "u \ U" and "v \ V" .. - then have "a \ u \ U" and "a \ v \ V" - and "a \ x = (a \ u) + (a \ v)" by (simp_all add: distrib) - then show ?thesis .. - qed - qed -qed - -text{* The sum of two subspaces is a vectorspace. *} - -lemma sum_vs [intro?]: - "U \ E \ V \ E \ vectorspace E \ vectorspace (U + V)" - by (rule subspace.vectorspace) (rule sum_subspace) - - -subsection {* Direct sums *} - -text {* - The sum of @{text U} and @{text V} is called \emph{direct}, iff the - zero element is the only common element of @{text U} and @{text - V}. For every element @{text x} of the direct sum of @{text U} and - @{text V} the decomposition in @{text "x = u + v"} with - @{text "u \ U"} and @{text "v \ V"} is unique. 
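The uniqueness claimed in this paragraph is exactly what the decomp lemma that follows establishes; written out, for subspaces U and V of E with U ∩ V = {0},
\[
  u_1 + v_1 = u_2 + v_2
  \;\Longrightarrow\;
  u_1 - u_2 = v_2 - v_1 \in U \cap V = \{0\}
  \;\Longrightarrow\;
  u_1 = u_2 \;\wedge\; v_1 = v_2 .
\]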
-*} - -lemma decomp: - assumes "vectorspace E" "subspace U E" "subspace V E" - assumes direct: "U \ V = {0}" - and u1: "u1 \ U" and u2: "u2 \ U" - and v1: "v1 \ V" and v2: "v2 \ V" - and sum: "u1 + v1 = u2 + v2" - shows "u1 = u2 \ v1 = v2" -proof - - interpret vectorspace E by fact - interpret subspace U E by fact - interpret subspace V E by fact - show ?thesis - proof - have U: "vectorspace U" (* FIXME: use interpret *) - using `subspace U E` `vectorspace E` by (rule subspace.vectorspace) - have V: "vectorspace V" - using `subspace V E` `vectorspace E` by (rule subspace.vectorspace) - from u1 u2 v1 v2 and sum have eq: "u1 - u2 = v2 - v1" - by (simp add: add_diff_swap) - from u1 u2 have u: "u1 - u2 \ U" - by (rule vectorspace.diff_closed [OF U]) - with eq have v': "v2 - v1 \ U" by (simp only:) - from v2 v1 have v: "v2 - v1 \ V" - by (rule vectorspace.diff_closed [OF V]) - with eq have u': " u1 - u2 \ V" by (simp only:) - - show "u1 = u2" - proof (rule add_minus_eq) - from u1 show "u1 \ E" .. - from u2 show "u2 \ E" .. - from u u' and direct show "u1 - u2 = 0" by blast - qed - show "v1 = v2" - proof (rule add_minus_eq [symmetric]) - from v1 show "v1 \ E" .. - from v2 show "v2 \ E" .. - from v v' and direct show "v2 - v1 = 0" by blast - qed - qed -qed - -text {* - An application of the previous lemma will be used in the proof of - the Hahn-Banach Theorem (see page \pageref{decomp-H-use}): for any - element @{text "y + a \ x\<^sub>0"} of the direct sum of a - vectorspace @{text H} and the linear closure of @{text "x\<^sub>0"} - the components @{text "y \ H"} and @{text a} are uniquely - determined. -*} - -lemma decomp_H': - assumes "vectorspace E" "subspace H E" - assumes y1: "y1 \ H" and y2: "y2 \ H" - and x': "x' \ H" "x' \ E" "x' \ 0" - and eq: "y1 + a1 \ x' = y2 + a2 \ x'" - shows "y1 = y2 \ a1 = a2" -proof - - interpret vectorspace E by fact - interpret subspace H E by fact - show ?thesis - proof - have c: "y1 = y2 \ a1 \ x' = a2 \ x'" - proof (rule decomp) - show "a1 \ x' \ lin x'" .. - show "a2 \ x' \ lin x'" .. - show "H \ lin x' = {0}" - proof - show "H \ lin x' \ {0}" - proof - fix x assume x: "x \ H \ lin x'" - then obtain a where xx': "x = a \ x'" - by blast - have "x = 0" - proof cases - assume "a = 0" - with xx' and x' show ?thesis by simp - next - assume a: "a \ 0" - from x have "x \ H" .. - with xx' have "inverse a \ a \ x' \ H" by simp - with a and x' have "x' \ H" by (simp add: mult_assoc2) - with `x' \ H` show ?thesis by contradiction - qed - then show "x \ {0}" .. - qed - show "{0} \ H \ lin x'" - proof - - have "0 \ H" using `vectorspace E` .. - moreover have "0 \ lin x'" using `x' \ E` .. - ultimately show ?thesis by blast - qed - qed - show "lin x' \ E" using `x' \ E` .. - qed (rule `vectorspace E`, rule `subspace H E`, rule y1, rule y2, rule eq) - then show "y1 = y2" .. - from c have "a1 \ x' = a2 \ x'" .. - with x' show "a1 = a2" by (simp add: mult_right_cancel) - qed -qed - -text {* - Since for any element @{text "y + a \ x'"} of the direct sum of a - vectorspace @{text H} and the linear closure of @{text x'} the - components @{text "y \ H"} and @{text a} are unique, it follows from - @{text "y \ H"} that @{text "a = 0"}. -*} - -lemma decomp_H'_H: - assumes "vectorspace E" "subspace H E" - assumes t: "t \ H" - and x': "x' \ H" "x' \ E" "x' \ 0" - shows "(SOME (y, a). 
t = y + a \ x' \ y \ H) = (t, 0)" -proof - - interpret vectorspace E by fact - interpret subspace H E by fact - show ?thesis - proof (rule, simp_all only: split_paired_all split_conv) - from t x' show "t = t + 0 \ x' \ t \ H" by simp - fix y and a assume ya: "t = y + a \ x' \ y \ H" - have "y = t \ a = 0" - proof (rule decomp_H') - from ya x' show "y + a \ x' = t + 0 \ x'" by simp - from ya show "y \ H" .. - qed (rule `vectorspace E`, rule `subspace H E`, rule t, (rule x')+) - with t x' show "(y, a) = (y + a \ x', 0)" by simp - qed -qed - -text {* - The components @{text "y \ H"} and @{text a} in @{text "y + a \ x'"} - are unique, so the function @{text h'} defined by - @{text "h' (y + a \ x') = h y + a \ \"} is definite. -*} - -lemma h'_definite: - fixes H - assumes h'_def: - "h' \ (\x. let (y, a) = SOME (y, a). (x = y + a \ x' \ y \ H) - in (h y) + a * xi)" - and x: "x = y + a \ x'" - assumes "vectorspace E" "subspace H E" - assumes y: "y \ H" - and x': "x' \ H" "x' \ E" "x' \ 0" - shows "h' x = h y + a * xi" -proof - - interpret vectorspace E by fact - interpret subspace H E by fact - from x y x' have "x \ H + lin x'" by auto - have "\!p. (\(y, a). x = y + a \ x' \ y \ H) p" (is "\!p. ?P p") - proof (rule ex_ex1I) - from x y show "\p. ?P p" by blast - fix p q assume p: "?P p" and q: "?P q" - show "p = q" - proof - - from p have xp: "x = fst p + snd p \ x' \ fst p \ H" - by (cases p) simp - from q have xq: "x = fst q + snd q \ x' \ fst q \ H" - by (cases q) simp - have "fst p = fst q \ snd p = snd q" - proof (rule decomp_H') - from xp show "fst p \ H" .. - from xq show "fst q \ H" .. - from xp and xq show "fst p + snd p \ x' = fst q + snd q \ x'" - by simp - qed (rule `vectorspace E`, rule `subspace H E`, (rule x')+) - then show ?thesis by (cases p, cases q) simp - qed - qed - then have eq: "(SOME (y, a). x = y + a \ x' \ y \ H) = (y, a)" - by (rule some1_equality) (simp add: x y) - with h'_def show "h' x = h y + a * xi" by (simp add: Let_def) -qed - -end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Real/HahnBanach/VectorSpace.thy --- a/src/HOL/Real/HahnBanach/VectorSpace.thy Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,419 +0,0 @@ -(* Title: HOL/Real/HahnBanach/VectorSpace.thy - ID: $Id$ - Author: Gertrud Bauer, TU Munich -*) - -header {* Vector spaces *} - -theory VectorSpace -imports Real Bounds Zorn -begin - -subsection {* Signature *} - -text {* - For the definition of real vector spaces a type @{typ 'a} of the - sort @{text "{plus, minus, zero}"} is considered, on which a real - scalar multiplication @{text \} is declared. -*} - -consts - prod :: "real \ 'a::{plus, minus, zero} \ 'a" (infixr "'(*')" 70) - -notation (xsymbols) - prod (infixr "\" 70) -notation (HTML output) - prod (infixr "\" 70) - - -subsection {* Vector space laws *} - -text {* - A \emph{vector space} is a non-empty set @{text V} of elements from - @{typ 'a} with the following vector space laws: The set @{text V} is - closed under addition and scalar multiplication, addition is - associative and commutative; @{text "- x"} is the inverse of @{text - x} w.~r.~t.~addition and @{text 0} is the neutral element of - addition. Addition and multiplication are distributive; scalar - multiplication is associative and the real number @{text "1"} is - the neutral element of scalar multiplication. 
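h'_definite above is the well-definedness statement needed for the extension step of the Hahn-Banach proof: since every element of H + lin x' has a unique representation y + a · x' with y ∈ H (by decomp_H'), the value
\[
  h'(y + a\cdot x') = h(y) + a\,\xi
  \qquad (y \in H,\; a \in \mathbb{R},\ \text{writing } \xi \text{ for } xi)
\]
does not depend on the chosen pair (y, a).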
-*} - -locale var_V = fixes V - -locale vectorspace = var_V + - assumes non_empty [iff, intro?]: "V \ {}" - and add_closed [iff]: "x \ V \ y \ V \ x + y \ V" - and mult_closed [iff]: "x \ V \ a \ x \ V" - and add_assoc: "x \ V \ y \ V \ z \ V \ (x + y) + z = x + (y + z)" - and add_commute: "x \ V \ y \ V \ x + y = y + x" - and diff_self [simp]: "x \ V \ x - x = 0" - and add_zero_left [simp]: "x \ V \ 0 + x = x" - and add_mult_distrib1: "x \ V \ y \ V \ a \ (x + y) = a \ x + a \ y" - and add_mult_distrib2: "x \ V \ (a + b) \ x = a \ x + b \ x" - and mult_assoc: "x \ V \ (a * b) \ x = a \ (b \ x)" - and mult_1 [simp]: "x \ V \ 1 \ x = x" - and negate_eq1: "x \ V \ - x = (- 1) \ x" - and diff_eq1: "x \ V \ y \ V \ x - y = x + - y" - -lemma (in vectorspace) negate_eq2: "x \ V \ (- 1) \ x = - x" - by (rule negate_eq1 [symmetric]) - -lemma (in vectorspace) negate_eq2a: "x \ V \ -1 \ x = - x" - by (simp add: negate_eq1) - -lemma (in vectorspace) diff_eq2: "x \ V \ y \ V \ x + - y = x - y" - by (rule diff_eq1 [symmetric]) - -lemma (in vectorspace) diff_closed [iff]: "x \ V \ y \ V \ x - y \ V" - by (simp add: diff_eq1 negate_eq1) - -lemma (in vectorspace) neg_closed [iff]: "x \ V \ - x \ V" - by (simp add: negate_eq1) - -lemma (in vectorspace) add_left_commute: - "x \ V \ y \ V \ z \ V \ x + (y + z) = y + (x + z)" -proof - - assume xyz: "x \ V" "y \ V" "z \ V" - then have "x + (y + z) = (x + y) + z" - by (simp only: add_assoc) - also from xyz have "\ = (y + x) + z" by (simp only: add_commute) - also from xyz have "\ = y + (x + z)" by (simp only: add_assoc) - finally show ?thesis . -qed - -theorems (in vectorspace) add_ac = - add_assoc add_commute add_left_commute - - -text {* The existence of the zero element of a vector space - follows from the non-emptiness of carrier set. *} - -lemma (in vectorspace) zero [iff]: "0 \ V" -proof - - from non_empty obtain x where x: "x \ V" by blast - then have "0 = x - x" by (rule diff_self [symmetric]) - also from x x have "\ \ V" by (rule diff_closed) - finally show ?thesis . -qed - -lemma (in vectorspace) add_zero_right [simp]: - "x \ V \ x + 0 = x" -proof - - assume x: "x \ V" - from this and zero have "x + 0 = 0 + x" by (rule add_commute) - also from x have "\ = x" by (rule add_zero_left) - finally show ?thesis . -qed - -lemma (in vectorspace) mult_assoc2: - "x \ V \ a \ b \ x = (a * b) \ x" - by (simp only: mult_assoc) - -lemma (in vectorspace) diff_mult_distrib1: - "x \ V \ y \ V \ a \ (x - y) = a \ x - a \ y" - by (simp add: diff_eq1 negate_eq1 add_mult_distrib1 mult_assoc2) - -lemma (in vectorspace) diff_mult_distrib2: - "x \ V \ (a - b) \ x = a \ x - (b \ x)" -proof - - assume x: "x \ V" - have " (a - b) \ x = (a + - b) \ x" - by (simp add: real_diff_def) - also from x have "\ = a \ x + (- b) \ x" - by (rule add_mult_distrib2) - also from x have "\ = a \ x + - (b \ x)" - by (simp add: negate_eq1 mult_assoc2) - also from x have "\ = a \ x - (b \ x)" - by (simp add: diff_eq1) - finally show ?thesis . 
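Spelled out, the chain proving diff_mult_distrib2 above only uses distributivity over scalar addition together with negate_eq1 and diff_eq1:
\[
  (a - b)\cdot x = (a + (-b))\cdot x = a\cdot x + (-b)\cdot x
  = a\cdot x + (-(b\cdot x)) = a\cdot x - b\cdot x .
\]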
-qed - -lemmas (in vectorspace) distrib = - add_mult_distrib1 add_mult_distrib2 - diff_mult_distrib1 diff_mult_distrib2 - - -text {* \medskip Further derived laws: *} - -lemma (in vectorspace) mult_zero_left [simp]: - "x \ V \ 0 \ x = 0" -proof - - assume x: "x \ V" - have "0 \ x = (1 - 1) \ x" by simp - also have "\ = (1 + - 1) \ x" by simp - also from x have "\ = 1 \ x + (- 1) \ x" - by (rule add_mult_distrib2) - also from x have "\ = x + (- 1) \ x" by simp - also from x have "\ = x + - x" by (simp add: negate_eq2a) - also from x have "\ = x - x" by (simp add: diff_eq2) - also from x have "\ = 0" by simp - finally show ?thesis . -qed - -lemma (in vectorspace) mult_zero_right [simp]: - "a \ 0 = (0::'a)" -proof - - have "a \ 0 = a \ (0 - (0::'a))" by simp - also have "\ = a \ 0 - a \ 0" - by (rule diff_mult_distrib1) simp_all - also have "\ = 0" by simp - finally show ?thesis . -qed - -lemma (in vectorspace) minus_mult_cancel [simp]: - "x \ V \ (- a) \ - x = a \ x" - by (simp add: negate_eq1 mult_assoc2) - -lemma (in vectorspace) add_minus_left_eq_diff: - "x \ V \ y \ V \ - x + y = y - x" -proof - - assume xy: "x \ V" "y \ V" - then have "- x + y = y + - x" by (simp add: add_commute) - also from xy have "\ = y - x" by (simp add: diff_eq1) - finally show ?thesis . -qed - -lemma (in vectorspace) add_minus [simp]: - "x \ V \ x + - x = 0" - by (simp add: diff_eq2) - -lemma (in vectorspace) add_minus_left [simp]: - "x \ V \ - x + x = 0" - by (simp add: diff_eq2 add_commute) - -lemma (in vectorspace) minus_minus [simp]: - "x \ V \ - (- x) = x" - by (simp add: negate_eq1 mult_assoc2) - -lemma (in vectorspace) minus_zero [simp]: - "- (0::'a) = 0" - by (simp add: negate_eq1) - -lemma (in vectorspace) minus_zero_iff [simp]: - "x \ V \ (- x = 0) = (x = 0)" -proof - assume x: "x \ V" - { - from x have "x = - (- x)" by (simp add: minus_minus) - also assume "- x = 0" - also have "- \ = 0" by (rule minus_zero) - finally show "x = 0" . - next - assume "x = 0" - then show "- x = 0" by simp - } -qed - -lemma (in vectorspace) add_minus_cancel [simp]: - "x \ V \ y \ V \ x + (- x + y) = y" - by (simp add: add_assoc [symmetric] del: add_commute) - -lemma (in vectorspace) minus_add_cancel [simp]: - "x \ V \ y \ V \ - x + (x + y) = y" - by (simp add: add_assoc [symmetric] del: add_commute) - -lemma (in vectorspace) minus_add_distrib [simp]: - "x \ V \ y \ V \ - (x + y) = - x + - y" - by (simp add: negate_eq1 add_mult_distrib1) - -lemma (in vectorspace) diff_zero [simp]: - "x \ V \ x - 0 = x" - by (simp add: diff_eq1) - -lemma (in vectorspace) diff_zero_right [simp]: - "x \ V \ 0 - x = - x" - by (simp add: diff_eq1) - -lemma (in vectorspace) add_left_cancel: - "x \ V \ y \ V \ z \ V \ (x + y = x + z) = (y = z)" -proof - assume x: "x \ V" and y: "y \ V" and z: "z \ V" - { - from y have "y = 0 + y" by simp - also from x y have "\ = (- x + x) + y" by simp - also from x y have "\ = - x + (x + y)" - by (simp add: add_assoc neg_closed) - also assume "x + y = x + z" - also from x z have "- x + (x + z) = - x + x + z" - by (simp add: add_assoc [symmetric] neg_closed) - also from x z have "\ = z" by simp - finally show "y = z" . 
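The computation behind mult_zero_left above is the standard one, here compressed into a single chain:
\[
  0\cdot x = (1 - 1)\cdot x = 1\cdot x + (-1)\cdot x = x + (-x) = x - x = 0 .
\]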
- next - assume "y = z" - then show "x + y = x + z" by (simp only:) - } -qed - -lemma (in vectorspace) add_right_cancel: - "x \ V \ y \ V \ z \ V \ (y + x = z + x) = (y = z)" - by (simp only: add_commute add_left_cancel) - -lemma (in vectorspace) add_assoc_cong: - "x \ V \ y \ V \ x' \ V \ y' \ V \ z \ V - \ x + y = x' + y' \ x + (y + z) = x' + (y' + z)" - by (simp only: add_assoc [symmetric]) - -lemma (in vectorspace) mult_left_commute: - "x \ V \ a \ b \ x = b \ a \ x" - by (simp add: real_mult_commute mult_assoc2) - -lemma (in vectorspace) mult_zero_uniq: - "x \ V \ x \ 0 \ a \ x = 0 \ a = 0" -proof (rule classical) - assume a: "a \ 0" - assume x: "x \ V" "x \ 0" and ax: "a \ x = 0" - from x a have "x = (inverse a * a) \ x" by simp - also from `x \ V` have "\ = inverse a \ (a \ x)" by (rule mult_assoc) - also from ax have "\ = inverse a \ 0" by simp - also have "\ = 0" by simp - finally have "x = 0" . - with `x \ 0` show "a = 0" by contradiction -qed - -lemma (in vectorspace) mult_left_cancel: - "x \ V \ y \ V \ a \ 0 \ (a \ x = a \ y) = (x = y)" -proof - assume x: "x \ V" and y: "y \ V" and a: "a \ 0" - from x have "x = 1 \ x" by simp - also from a have "\ = (inverse a * a) \ x" by simp - also from x have "\ = inverse a \ (a \ x)" - by (simp only: mult_assoc) - also assume "a \ x = a \ y" - also from a y have "inverse a \ \ = y" - by (simp add: mult_assoc2) - finally show "x = y" . -next - assume "x = y" - then show "a \ x = a \ y" by (simp only:) -qed - -lemma (in vectorspace) mult_right_cancel: - "x \ V \ x \ 0 \ (a \ x = b \ x) = (a = b)" -proof - assume x: "x \ V" and neq: "x \ 0" - { - from x have "(a - b) \ x = a \ x - b \ x" - by (simp add: diff_mult_distrib2) - also assume "a \ x = b \ x" - with x have "a \ x - b \ x = 0" by simp - finally have "(a - b) \ x = 0" . - with x neq have "a - b = 0" by (rule mult_zero_uniq) - then show "a = b" by simp - next - assume "a = b" - then show "a \ x = b \ x" by (simp only:) - } -qed - -lemma (in vectorspace) eq_diff_eq: - "x \ V \ y \ V \ z \ V \ (x = z - y) = (x + y = z)" -proof - assume x: "x \ V" and y: "y \ V" and z: "z \ V" - { - assume "x = z - y" - then have "x + y = z - y + y" by simp - also from y z have "\ = z + - y + y" - by (simp add: diff_eq1) - also have "\ = z + (- y + y)" - by (rule add_assoc) (simp_all add: y z) - also from y z have "\ = z + 0" - by (simp only: add_minus_left) - also from z have "\ = z" - by (simp only: add_zero_right) - finally show "x + y = z" . - next - assume "x + y = z" - then have "z - y = (x + y) - y" by simp - also from x y have "\ = x + y + - y" - by (simp add: diff_eq1) - also have "\ = x + (y + - y)" - by (rule add_assoc) (simp_all add: x y) - also from x y have "\ = x" by simp - finally show "x = z - y" .. - } -qed - -lemma (in vectorspace) add_minus_eq_minus: - "x \ V \ y \ V \ x + y = 0 \ x = - y" -proof - - assume x: "x \ V" and y: "y \ V" - from x y have "x = (- y + y) + x" by simp - also from x y have "\ = - y + (x + y)" by (simp add: add_ac) - also assume "x + y = 0" - also from y have "- y + 0 = - y" by simp - finally show "x = - y" . 
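mult_zero_uniq above is the usual cancellation argument: if a ≠ 0, x ≠ 0 and a · x = 0, then
\[
  x = (a^{-1} a)\cdot x = a^{-1}\cdot (a\cdot x) = a^{-1}\cdot 0 = 0 ,
\]
contradicting x ≠ 0; hence the scalar a must be 0.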
-qed - -lemma (in vectorspace) add_minus_eq: - "x \ V \ y \ V \ x - y = 0 \ x = y" -proof - - assume x: "x \ V" and y: "y \ V" - assume "x - y = 0" - with x y have eq: "x + - y = 0" by (simp add: diff_eq1) - with _ _ have "x = - (- y)" - by (rule add_minus_eq_minus) (simp_all add: x y) - with x y show "x = y" by simp -qed - -lemma (in vectorspace) add_diff_swap: - "a \ V \ b \ V \ c \ V \ d \ V \ a + b = c + d - \ a - c = d - b" -proof - - assume vs: "a \ V" "b \ V" "c \ V" "d \ V" - and eq: "a + b = c + d" - then have "- c + (a + b) = - c + (c + d)" - by (simp add: add_left_cancel) - also have "\ = d" using `c \ V` `d \ V` by (rule minus_add_cancel) - finally have eq: "- c + (a + b) = d" . - from vs have "a - c = (- c + (a + b)) + - b" - by (simp add: add_ac diff_eq1) - also from vs eq have "\ = d + - b" - by (simp add: add_right_cancel) - also from vs have "\ = d - b" by (simp add: diff_eq2) - finally show "a - c = d - b" . -qed - -lemma (in vectorspace) vs_add_cancel_21: - "x \ V \ y \ V \ z \ V \ u \ V - \ (x + (y + z) = y + u) = (x + z = u)" -proof - assume vs: "x \ V" "y \ V" "z \ V" "u \ V" - { - from vs have "x + z = - y + y + (x + z)" by simp - also have "\ = - y + (y + (x + z))" - by (rule add_assoc) (simp_all add: vs) - also from vs have "y + (x + z) = x + (y + z)" - by (simp add: add_ac) - also assume "x + (y + z) = y + u" - also from vs have "- y + (y + u) = u" by simp - finally show "x + z = u" . - next - assume "x + z = u" - with vs show "x + (y + z) = y + u" - by (simp only: add_left_commute [of x]) - } -qed - -lemma (in vectorspace) add_cancel_end: - "x \ V \ y \ V \ z \ V \ (x + (y + z) = y) = (x = - z)" -proof - assume vs: "x \ V" "y \ V" "z \ V" - { - assume "x + (y + z) = y" - with vs have "(x + z) + y = 0 + y" - by (simp add: add_ac) - with vs have "x + z = 0" - by (simp only: add_right_cancel add_closed zero) - with vs show "x = - z" by (simp add: add_minus_eq_minus) - next - assume eq: "x = - z" - then have "x + (y + z) = - z + (y + z)" by simp - also have "\ = y + (- z + z)" - by (rule add_left_commute) (simp_all add: vs) - also from vs have "\ = y" by simp - finally show "x + (y + z) = y" . - } -qed - -end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Real/HahnBanach/ZornLemma.thy --- a/src/HOL/Real/HahnBanach/ZornLemma.thy Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,57 +0,0 @@ -(* Title: HOL/Real/HahnBanach/ZornLemma.thy - Author: Gertrud Bauer, TU Munich -*) - -header {* Zorn's Lemma *} - -theory ZornLemma -imports Zorn -begin - -text {* - Zorn's Lemmas states: if every linear ordered subset of an ordered - set @{text S} has an upper bound in @{text S}, then there exists a - maximal element in @{text S}. In our application, @{text S} is a - set of sets ordered by set inclusion. Since the union of a chain of - sets is an upper bound for all elements of the chain, the conditions - of Zorn's lemma can be modified: if @{text S} is non-empty, it - suffices to show that for every non-empty chain @{text c} in @{text - S} the union of @{text c} also lies in @{text S}. -*} - -theorem Zorn's_Lemma: - assumes r: "\c. c \ chain S \ \x. x \ c \ \c \ S" - and aS: "a \ S" - shows "\y \ S. \z \ S. y \ z \ y = z" -proof (rule Zorn_Lemma2) - show "\c \ chain S. \y \ S. \z \ c. z \ y" - proof - fix c assume "c \ chain S" - show "\y \ S. \z \ c. z \ y" - proof cases - - txt {* If @{text c} is an empty chain, then every element in - @{text S} is an upper bound of @{text c}. 
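Restated without the Isar syntax, the variant of Zorn's Lemma proved here says: if a ∈ S and the union of every non-empty chain c in S again lies in S, then S has a maximal element with respect to set inclusion,
\[
  \exists y \in S.\; \forall z \in S.\; y \subseteq z \longrightarrow y = z .
\]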
*} - - assume "c = {}" - with aS show ?thesis by fast - - txt {* If @{text c} is non-empty, then @{text "\c"} is an upper - bound of @{text c}, lying in @{text S}. *} - - next - assume "c \ {}" - show ?thesis - proof - show "\z \ c. z \ \c" by fast - show "\c \ S" - proof (rule r) - from `c \ {}` show "\x. x \ c" by fast - show "c \ chain S" by fact - qed - qed - qed - qed -qed - -end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Real/HahnBanach/document/root.bib --- a/src/HOL/Real/HahnBanach/document/root.bib Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,27 +0,0 @@ - -@Book{Heuser:1986, - author = {H. Heuser}, - title = {Funktionalanalysis: Theorie und Anwendung}, - publisher = {Teubner}, - year = 1986 -} - -@InCollection{Narici:1996, - author = {L. Narici and E. Beckenstein}, - title = {The {Hahn-Banach Theorem}: The Life and Times}, - booktitle = {Topology Atlas}, - publisher = {York University, Toronto, Ontario, Canada}, - year = 1996, - note = {\url{http://at.yorku.ca/topology/preprint.htm} and - \url{http://at.yorku.ca/p/a/a/a/16.htm}} -} - -@Article{Nowak:1993, - author = {B. Nowak and A. Trybulec}, - title = {{Hahn-Banach} Theorem}, - journal = {Journal of Formalized Mathematics}, - year = {1993}, - volume = {5}, - institution = {University of Bialystok}, - note = {\url{http://mizar.uwb.edu.pl/JFM/Vol5/hahnban.html}} -} diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Real/HahnBanach/document/root.tex --- a/src/HOL/Real/HahnBanach/document/root.tex Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,83 +0,0 @@ -\documentclass[10pt,a4paper,twoside]{article} -\usepackage{graphicx} -\usepackage{latexsym,theorem} -\usepackage{isabelle,isabellesym} -\usepackage{pdfsetup} %last one! - -\isabellestyle{it} -\urlstyle{rm} - -\newcommand{\isasymsup}{\isamath{\sup\,}} -\newcommand{\skp}{\smallskip} - - -\begin{document} - -\pagestyle{headings} -\pagenumbering{arabic} - -\title{The Hahn-Banach Theorem \\ for Real Vector Spaces} -\author{Gertrud Bauer \\ \url{http://www.in.tum.de/~bauerg/}} -\maketitle - -\begin{abstract} - The Hahn-Banach Theorem is one of the most fundamental results in functional - analysis. We present a fully formal proof of two versions of the theorem, - one for general linear spaces and another for normed spaces. This - development is based on simply-typed classical set-theory, as provided by - Isabelle/HOL. -\end{abstract} - - -\tableofcontents -\parindent 0pt \parskip 0.5ex - -\clearpage -\section{Preface} - -This is a fully formal proof of the Hahn-Banach Theorem. It closely follows -the informal presentation given in Heuser's textbook \cite[{\S} 36]{Heuser:1986}. -Another formal proof of the same theorem has been done in Mizar -\cite{Nowak:1993}. A general overview of the relevance and history of the -Hahn-Banach Theorem is given by Narici and Beckenstein \cite{Narici:1996}. - -\medskip The document is structured as follows. The first part contains -definitions of basic notions of linear algebra: vector spaces, subspaces, -normed spaces, continuous linear-forms, norm of functions and an order on -functions by domain extension. The second part contains some lemmas about the -supremum (w.r.t.\ the function order) and extension of non-maximal functions. -With these preliminaries, the main proof of the theorem (in its two versions) -is conducted in the third part. The dependencies of individual theories are -as follows. 
- -\begin{center} - \includegraphics[scale=0.5]{session_graph} -\end{center} - -\clearpage -\part {Basic Notions} - -\input{Bounds} -\input{VectorSpace} -\input{Subspace} -\input{NormedSpace} -\input{Linearform} -\input{FunctionOrder} -\input{FunctionNorm} -\input{ZornLemma} - -\clearpage -\part {Lemmas for the Proof} - -\input{HahnBanachSupLemmas} -\input{HahnBanachExtLemmas} -\input{HahnBanachLemmas} - -\clearpage -\part {The Main Proof} - -\input{HahnBanach} -\bibliographystyle{abbrv} -\bibliography{root} - -\end{document} diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Real/RealVector.thy --- a/src/HOL/Real/RealVector.thy Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,841 +0,0 @@ -(* Title: HOL/Real/RealVector.thy - Author: Brian Huffman -*) - -header {* Vector Spaces and Algebras over the Reals *} - -theory RealVector -imports "~~/src/HOL/RealPow" -begin - -subsection {* Locale for additive functions *} - -locale additive = - fixes f :: "'a::ab_group_add \ 'b::ab_group_add" - assumes add: "f (x + y) = f x + f y" -begin - -lemma zero: "f 0 = 0" -proof - - have "f 0 = f (0 + 0)" by simp - also have "\ = f 0 + f 0" by (rule add) - finally show "f 0 = 0" by simp -qed - -lemma minus: "f (- x) = - f x" -proof - - have "f (- x) + f x = f (- x + x)" by (rule add [symmetric]) - also have "\ = - f x + f x" by (simp add: zero) - finally show "f (- x) = - f x" by (rule add_right_imp_eq) -qed - -lemma diff: "f (x - y) = f x - f y" -by (simp add: diff_def add minus) - -lemma setsum: "f (setsum g A) = (\x\A. f (g x))" -apply (cases "finite A") -apply (induct set: finite) -apply (simp add: zero) -apply (simp add: add) -apply (simp add: zero) -done - -end - -subsection {* Vector spaces *} - -locale vector_space = - fixes scale :: "'a::field \ 'b::ab_group_add \ 'b" - assumes scale_right_distrib: "scale a (x + y) = scale a x + scale a y" - and scale_left_distrib: "scale (a + b) x = scale a x + scale b x" - and scale_scale [simp]: "scale a (scale b x) = scale (a * b) x" - and scale_one [simp]: "scale 1 x = x" -begin - -lemma scale_left_commute: - "scale a (scale b x) = scale b (scale a x)" -by (simp add: mult_commute) - -lemma scale_zero_left [simp]: "scale 0 x = 0" - and scale_minus_left [simp]: "scale (- a) x = - (scale a x)" - and scale_left_diff_distrib: "scale (a - b) x = scale a x - scale b x" -proof - - interpret s: additive "\a. scale a x" - proof qed (rule scale_left_distrib) - show "scale 0 x = 0" by (rule s.zero) - show "scale (- a) x = - (scale a x)" by (rule s.minus) - show "scale (a - b) x = scale a x - scale b x" by (rule s.diff) -qed - -lemma scale_zero_right [simp]: "scale a 0 = 0" - and scale_minus_right [simp]: "scale a (- x) = - (scale a x)" - and scale_right_diff_distrib: "scale a (x - y) = scale a x - scale a y" -proof - - interpret s: additive "\x. 
scale a x" - proof qed (rule scale_right_distrib) - show "scale a 0 = 0" by (rule s.zero) - show "scale a (- x) = - (scale a x)" by (rule s.minus) - show "scale a (x - y) = scale a x - scale a y" by (rule s.diff) -qed - -lemma scale_eq_0_iff [simp]: - "scale a x = 0 \ a = 0 \ x = 0" -proof cases - assume "a = 0" thus ?thesis by simp -next - assume anz [simp]: "a \ 0" - { assume "scale a x = 0" - hence "scale (inverse a) (scale a x) = 0" by simp - hence "x = 0" by simp } - thus ?thesis by force -qed - -lemma scale_left_imp_eq: - "\a \ 0; scale a x = scale a y\ \ x = y" -proof - - assume nonzero: "a \ 0" - assume "scale a x = scale a y" - hence "scale a (x - y) = 0" - by (simp add: scale_right_diff_distrib) - hence "x - y = 0" by (simp add: nonzero) - thus "x = y" by (simp only: right_minus_eq) -qed - -lemma scale_right_imp_eq: - "\x \ 0; scale a x = scale b x\ \ a = b" -proof - - assume nonzero: "x \ 0" - assume "scale a x = scale b x" - hence "scale (a - b) x = 0" - by (simp add: scale_left_diff_distrib) - hence "a - b = 0" by (simp add: nonzero) - thus "a = b" by (simp only: right_minus_eq) -qed - -lemma scale_cancel_left: - "scale a x = scale a y \ x = y \ a = 0" -by (auto intro: scale_left_imp_eq) - -lemma scale_cancel_right: - "scale a x = scale b x \ a = b \ x = 0" -by (auto intro: scale_right_imp_eq) - -end - -subsection {* Real vector spaces *} - -class scaleR = type + - fixes scaleR :: "real \ 'a \ 'a" (infixr "*\<^sub>R" 75) -begin - -abbreviation - divideR :: "'a \ real \ 'a" (infixl "'/\<^sub>R" 70) -where - "x /\<^sub>R r == scaleR (inverse r) x" - -end - -instantiation real :: scaleR -begin - -definition - real_scaleR_def [simp]: "scaleR a x = a * x" - -instance .. - -end - -class real_vector = scaleR + ab_group_add + - assumes scaleR_right_distrib: "scaleR a (x + y) = scaleR a x + scaleR a y" - and scaleR_left_distrib: "scaleR (a + b) x = scaleR a x + scaleR b x" - and scaleR_scaleR [simp]: "scaleR a (scaleR b x) = scaleR (a * b) x" - and scaleR_one [simp]: "scaleR 1 x = x" - -interpretation real_vector!: - vector_space "scaleR :: real \ 'a \ 'a::real_vector" -apply unfold_locales -apply (rule scaleR_right_distrib) -apply (rule scaleR_left_distrib) -apply (rule scaleR_scaleR) -apply (rule scaleR_one) -done - -text {* Recover original theorem names *} - -lemmas scaleR_left_commute = real_vector.scale_left_commute -lemmas scaleR_zero_left = real_vector.scale_zero_left -lemmas scaleR_minus_left = real_vector.scale_minus_left -lemmas scaleR_left_diff_distrib = real_vector.scale_left_diff_distrib -lemmas scaleR_zero_right = real_vector.scale_zero_right -lemmas scaleR_minus_right = real_vector.scale_minus_right -lemmas scaleR_right_diff_distrib = real_vector.scale_right_diff_distrib -lemmas scaleR_eq_0_iff = real_vector.scale_eq_0_iff -lemmas scaleR_left_imp_eq = real_vector.scale_left_imp_eq -lemmas scaleR_right_imp_eq = real_vector.scale_right_imp_eq -lemmas scaleR_cancel_left = real_vector.scale_cancel_left -lemmas scaleR_cancel_right = real_vector.scale_cancel_right - -class real_algebra = real_vector + ring + - assumes mult_scaleR_left [simp]: "scaleR a x * y = scaleR a (x * y)" - and mult_scaleR_right [simp]: "x * scaleR a y = scaleR a (x * y)" - -class real_algebra_1 = real_algebra + ring_1 - -class real_div_algebra = real_algebra_1 + division_ring - -class real_field = real_div_algebra + field - -instance real :: real_field -apply (intro_classes, unfold real_scaleR_def) -apply (rule right_distrib) -apply (rule left_distrib) -apply (rule mult_assoc [symmetric]) -apply (rule 
mult_1_left) -apply (rule mult_assoc) -apply (rule mult_left_commute) -done - -interpretation scaleR_left!: additive "(\a. scaleR a x::'a::real_vector)" -proof qed (rule scaleR_left_distrib) - -interpretation scaleR_right!: additive "(\x. scaleR a x::'a::real_vector)" -proof qed (rule scaleR_right_distrib) - -lemma nonzero_inverse_scaleR_distrib: - fixes x :: "'a::real_div_algebra" shows - "\a \ 0; x \ 0\ \ inverse (scaleR a x) = scaleR (inverse a) (inverse x)" -by (rule inverse_unique, simp) - -lemma inverse_scaleR_distrib: - fixes x :: "'a::{real_div_algebra,division_by_zero}" - shows "inverse (scaleR a x) = scaleR (inverse a) (inverse x)" -apply (case_tac "a = 0", simp) -apply (case_tac "x = 0", simp) -apply (erule (1) nonzero_inverse_scaleR_distrib) -done - - -subsection {* Embedding of the Reals into any @{text real_algebra_1}: -@{term of_real} *} - -definition - of_real :: "real \ 'a::real_algebra_1" where - "of_real r = scaleR r 1" - -lemma scaleR_conv_of_real: "scaleR r x = of_real r * x" -by (simp add: of_real_def) - -lemma of_real_0 [simp]: "of_real 0 = 0" -by (simp add: of_real_def) - -lemma of_real_1 [simp]: "of_real 1 = 1" -by (simp add: of_real_def) - -lemma of_real_add [simp]: "of_real (x + y) = of_real x + of_real y" -by (simp add: of_real_def scaleR_left_distrib) - -lemma of_real_minus [simp]: "of_real (- x) = - of_real x" -by (simp add: of_real_def) - -lemma of_real_diff [simp]: "of_real (x - y) = of_real x - of_real y" -by (simp add: of_real_def scaleR_left_diff_distrib) - -lemma of_real_mult [simp]: "of_real (x * y) = of_real x * of_real y" -by (simp add: of_real_def mult_commute) - -lemma nonzero_of_real_inverse: - "x \ 0 \ of_real (inverse x) = - inverse (of_real x :: 'a::real_div_algebra)" -by (simp add: of_real_def nonzero_inverse_scaleR_distrib) - -lemma of_real_inverse [simp]: - "of_real (inverse x) = - inverse (of_real x :: 'a::{real_div_algebra,division_by_zero})" -by (simp add: of_real_def inverse_scaleR_distrib) - -lemma nonzero_of_real_divide: - "y \ 0 \ of_real (x / y) = - (of_real x / of_real y :: 'a::real_field)" -by (simp add: divide_inverse nonzero_of_real_inverse) - -lemma of_real_divide [simp]: - "of_real (x / y) = - (of_real x / of_real y :: 'a::{real_field,division_by_zero})" -by (simp add: divide_inverse) - -lemma of_real_power [simp]: - "of_real (x ^ n) = (of_real x :: 'a::{real_algebra_1,recpower}) ^ n" -by (induct n) (simp_all add: power_Suc) - -lemma of_real_eq_iff [simp]: "(of_real x = of_real y) = (x = y)" -by (simp add: of_real_def scaleR_cancel_right) - -lemmas of_real_eq_0_iff [simp] = of_real_eq_iff [of _ 0, simplified] - -lemma of_real_eq_id [simp]: "of_real = (id :: real \ real)" -proof - fix r - show "of_real r = id r" - by (simp add: of_real_def) -qed - -text{*Collapse nested embeddings*} -lemma of_real_of_nat_eq [simp]: "of_real (of_nat n) = of_nat n" -by (induct n) auto - -lemma of_real_of_int_eq [simp]: "of_real (of_int z) = of_int z" -by (cases z rule: int_diff_cases, simp) - -lemma of_real_number_of_eq: - "of_real (number_of w) = (number_of w :: 'a::{number_ring,real_algebra_1})" -by (simp add: number_of_eq) - -text{*Every real algebra has characteristic zero*} -instance real_algebra_1 < ring_char_0 -proof - fix m n :: nat - have "(of_real (of_nat m) = (of_real (of_nat n)::'a)) = (m = n)" - by (simp only: of_real_eq_iff of_nat_eq_iff) - thus "(of_nat m = (of_nat n::'a)) = (m = n)" - by (simp only: of_real_of_nat_eq) -qed - -instance real_field < field_char_0 .. 
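The map of_real defined above just scales the unit, of_real r = scaleR r 1, and the surrounding lemmas make it an embedding of the reals: it preserves 0, 1, addition and multiplication, e.g.
\[
  \mathrm{of\_real}(x + y) = \mathrm{of\_real}\,x + \mathrm{of\_real}\,y ,
  \qquad
  \mathrm{of\_real}(x\,y) = \mathrm{of\_real}\,x \cdot \mathrm{of\_real}\,y ,
\]
and it is injective (of_real_eq_iff), which is what yields characteristic zero.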
- - -subsection {* The Set of Real Numbers *} - -definition - Reals :: "'a::real_algebra_1 set" where - [code del]: "Reals \ range of_real" - -notation (xsymbols) - Reals ("\") - -lemma Reals_of_real [simp]: "of_real r \ Reals" -by (simp add: Reals_def) - -lemma Reals_of_int [simp]: "of_int z \ Reals" -by (subst of_real_of_int_eq [symmetric], rule Reals_of_real) - -lemma Reals_of_nat [simp]: "of_nat n \ Reals" -by (subst of_real_of_nat_eq [symmetric], rule Reals_of_real) - -lemma Reals_number_of [simp]: - "(number_of w::'a::{number_ring,real_algebra_1}) \ Reals" -by (subst of_real_number_of_eq [symmetric], rule Reals_of_real) - -lemma Reals_0 [simp]: "0 \ Reals" -apply (unfold Reals_def) -apply (rule range_eqI) -apply (rule of_real_0 [symmetric]) -done - -lemma Reals_1 [simp]: "1 \ Reals" -apply (unfold Reals_def) -apply (rule range_eqI) -apply (rule of_real_1 [symmetric]) -done - -lemma Reals_add [simp]: "\a \ Reals; b \ Reals\ \ a + b \ Reals" -apply (auto simp add: Reals_def) -apply (rule range_eqI) -apply (rule of_real_add [symmetric]) -done - -lemma Reals_minus [simp]: "a \ Reals \ - a \ Reals" -apply (auto simp add: Reals_def) -apply (rule range_eqI) -apply (rule of_real_minus [symmetric]) -done - -lemma Reals_diff [simp]: "\a \ Reals; b \ Reals\ \ a - b \ Reals" -apply (auto simp add: Reals_def) -apply (rule range_eqI) -apply (rule of_real_diff [symmetric]) -done - -lemma Reals_mult [simp]: "\a \ Reals; b \ Reals\ \ a * b \ Reals" -apply (auto simp add: Reals_def) -apply (rule range_eqI) -apply (rule of_real_mult [symmetric]) -done - -lemma nonzero_Reals_inverse: - fixes a :: "'a::real_div_algebra" - shows "\a \ Reals; a \ 0\ \ inverse a \ Reals" -apply (auto simp add: Reals_def) -apply (rule range_eqI) -apply (erule nonzero_of_real_inverse [symmetric]) -done - -lemma Reals_inverse [simp]: - fixes a :: "'a::{real_div_algebra,division_by_zero}" - shows "a \ Reals \ inverse a \ Reals" -apply (auto simp add: Reals_def) -apply (rule range_eqI) -apply (rule of_real_inverse [symmetric]) -done - -lemma nonzero_Reals_divide: - fixes a b :: "'a::real_field" - shows "\a \ Reals; b \ Reals; b \ 0\ \ a / b \ Reals" -apply (auto simp add: Reals_def) -apply (rule range_eqI) -apply (erule nonzero_of_real_divide [symmetric]) -done - -lemma Reals_divide [simp]: - fixes a b :: "'a::{real_field,division_by_zero}" - shows "\a \ Reals; b \ Reals\ \ a / b \ Reals" -apply (auto simp add: Reals_def) -apply (rule range_eqI) -apply (rule of_real_divide [symmetric]) -done - -lemma Reals_power [simp]: - fixes a :: "'a::{real_algebra_1,recpower}" - shows "a \ Reals \ a ^ n \ Reals" -apply (auto simp add: Reals_def) -apply (rule range_eqI) -apply (rule of_real_power [symmetric]) -done - -lemma Reals_cases [cases set: Reals]: - assumes "q \ \" - obtains (of_real) r where "q = of_real r" - unfolding Reals_def -proof - - from `q \ \` have "q \ range of_real" unfolding Reals_def . - then obtain r where "q = of_real r" .. - then show thesis .. -qed - -lemma Reals_induct [case_names of_real, induct set: Reals]: - "q \ \ \ (\r. P (of_real r)) \ P q" - by (rule Reals_cases) auto - - -subsection {* Real normed vector spaces *} - -class norm = type + - fixes norm :: "'a \ real" - -instantiation real :: norm -begin - -definition - real_norm_def [simp]: "norm r \ \r\" - -instance .. 
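The set Reals introduced above is simply the range of of_real, so each closure lemma reduces to the corresponding homomorphism property read backwards, for instance
\[
  \mathrm{of\_real}\,a + \mathrm{of\_real}\,b = \mathrm{of\_real}(a + b) \in \mathbb{R} .
\]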
- -end - -class sgn_div_norm = scaleR + norm + sgn + - assumes sgn_div_norm: "sgn x = x /\<^sub>R norm x" - -class real_normed_vector = real_vector + sgn_div_norm + - assumes norm_ge_zero [simp]: "0 \ norm x" - and norm_eq_zero [simp]: "norm x = 0 \ x = 0" - and norm_triangle_ineq: "norm (x + y) \ norm x + norm y" - and norm_scaleR: "norm (scaleR a x) = \a\ * norm x" - -class real_normed_algebra = real_algebra + real_normed_vector + - assumes norm_mult_ineq: "norm (x * y) \ norm x * norm y" - -class real_normed_algebra_1 = real_algebra_1 + real_normed_algebra + - assumes norm_one [simp]: "norm 1 = 1" - -class real_normed_div_algebra = real_div_algebra + real_normed_vector + - assumes norm_mult: "norm (x * y) = norm x * norm y" - -class real_normed_field = real_field + real_normed_div_algebra - -instance real_normed_div_algebra < real_normed_algebra_1 -proof - fix x y :: 'a - show "norm (x * y) \ norm x * norm y" - by (simp add: norm_mult) -next - have "norm (1 * 1::'a) = norm (1::'a) * norm (1::'a)" - by (rule norm_mult) - thus "norm (1::'a) = 1" by simp -qed - -instance real :: real_normed_field -apply (intro_classes, unfold real_norm_def real_scaleR_def) -apply (simp add: real_sgn_def) -apply (rule abs_ge_zero) -apply (rule abs_eq_0) -apply (rule abs_triangle_ineq) -apply (rule abs_mult) -apply (rule abs_mult) -done - -lemma norm_zero [simp]: "norm (0::'a::real_normed_vector) = 0" -by simp - -lemma zero_less_norm_iff [simp]: - fixes x :: "'a::real_normed_vector" - shows "(0 < norm x) = (x \ 0)" -by (simp add: order_less_le) - -lemma norm_not_less_zero [simp]: - fixes x :: "'a::real_normed_vector" - shows "\ norm x < 0" -by (simp add: linorder_not_less) - -lemma norm_le_zero_iff [simp]: - fixes x :: "'a::real_normed_vector" - shows "(norm x \ 0) = (x = 0)" -by (simp add: order_le_less) - -lemma norm_minus_cancel [simp]: - fixes x :: "'a::real_normed_vector" - shows "norm (- x) = norm x" -proof - - have "norm (- x) = norm (scaleR (- 1) x)" - by (simp only: scaleR_minus_left scaleR_one) - also have "\ = \- 1\ * norm x" - by (rule norm_scaleR) - finally show ?thesis by simp -qed - -lemma norm_minus_commute: - fixes a b :: "'a::real_normed_vector" - shows "norm (a - b) = norm (b - a)" -proof - - have "norm (- (b - a)) = norm (b - a)" - by (rule norm_minus_cancel) - thus ?thesis by simp -qed - -lemma norm_triangle_ineq2: - fixes a b :: "'a::real_normed_vector" - shows "norm a - norm b \ norm (a - b)" -proof - - have "norm (a - b + b) \ norm (a - b) + norm b" - by (rule norm_triangle_ineq) - thus ?thesis by simp -qed - -lemma norm_triangle_ineq3: - fixes a b :: "'a::real_normed_vector" - shows "\norm a - norm b\ \ norm (a - b)" -apply (subst abs_le_iff) -apply auto -apply (rule norm_triangle_ineq2) -apply (subst norm_minus_commute) -apply (rule norm_triangle_ineq2) -done - -lemma norm_triangle_ineq4: - fixes a b :: "'a::real_normed_vector" - shows "norm (a - b) \ norm a + norm b" -proof - - have "norm (a + - b) \ norm a + norm (- b)" - by (rule norm_triangle_ineq) - thus ?thesis - by (simp only: diff_minus norm_minus_cancel) -qed - -lemma norm_diff_ineq: - fixes a b :: "'a::real_normed_vector" - shows "norm a - norm b \ norm (a + b)" -proof - - have "norm a - norm (- b) \ norm (a - - b)" - by (rule norm_triangle_ineq2) - thus ?thesis by simp -qed - -lemma norm_diff_triangle_ineq: - fixes a b c d :: "'a::real_normed_vector" - shows "norm ((a + b) - (c + d)) \ norm (a - c) + norm (b - d)" -proof - - have "norm ((a + b) - (c + d)) = norm ((a - c) + (b - d))" - by (simp add: diff_minus add_ac) - 
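The corollaries norm_triangle_ineq2 and norm_triangle_ineq3 above are the usual reverse triangle inequality, obtained from the class axiom norm_triangle_ineq by
\[
  \lVert a \rVert = \lVert (a - b) + b \rVert \le \lVert a - b \rVert + \lVert b \rVert
  \;\Longrightarrow\;
  \lVert a \rVert - \lVert b \rVert \le \lVert a - b \rVert ,
\]
and symmetrising via norm_minus_commute gives the absolute-value form.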
also have "\ \ norm (a - c) + norm (b - d)" - by (rule norm_triangle_ineq) - finally show ?thesis . -qed - -lemma abs_norm_cancel [simp]: - fixes a :: "'a::real_normed_vector" - shows "\norm a\ = norm a" -by (rule abs_of_nonneg [OF norm_ge_zero]) - -lemma norm_add_less: - fixes x y :: "'a::real_normed_vector" - shows "\norm x < r; norm y < s\ \ norm (x + y) < r + s" -by (rule order_le_less_trans [OF norm_triangle_ineq add_strict_mono]) - -lemma norm_mult_less: - fixes x y :: "'a::real_normed_algebra" - shows "\norm x < r; norm y < s\ \ norm (x * y) < r * s" -apply (rule order_le_less_trans [OF norm_mult_ineq]) -apply (simp add: mult_strict_mono') -done - -lemma norm_of_real [simp]: - "norm (of_real r :: 'a::real_normed_algebra_1) = \r\" -unfolding of_real_def by (simp add: norm_scaleR) - -lemma norm_number_of [simp]: - "norm (number_of w::'a::{number_ring,real_normed_algebra_1}) - = \number_of w\" -by (subst of_real_number_of_eq [symmetric], rule norm_of_real) - -lemma norm_of_int [simp]: - "norm (of_int z::'a::real_normed_algebra_1) = \of_int z\" -by (subst of_real_of_int_eq [symmetric], rule norm_of_real) - -lemma norm_of_nat [simp]: - "norm (of_nat n::'a::real_normed_algebra_1) = of_nat n" -apply (subst of_real_of_nat_eq [symmetric]) -apply (subst norm_of_real, simp) -done - -lemma nonzero_norm_inverse: - fixes a :: "'a::real_normed_div_algebra" - shows "a \ 0 \ norm (inverse a) = inverse (norm a)" -apply (rule inverse_unique [symmetric]) -apply (simp add: norm_mult [symmetric]) -done - -lemma norm_inverse: - fixes a :: "'a::{real_normed_div_algebra,division_by_zero}" - shows "norm (inverse a) = inverse (norm a)" -apply (case_tac "a = 0", simp) -apply (erule nonzero_norm_inverse) -done - -lemma nonzero_norm_divide: - fixes a b :: "'a::real_normed_field" - shows "b \ 0 \ norm (a / b) = norm a / norm b" -by (simp add: divide_inverse norm_mult nonzero_norm_inverse) - -lemma norm_divide: - fixes a b :: "'a::{real_normed_field,division_by_zero}" - shows "norm (a / b) = norm a / norm b" -by (simp add: divide_inverse norm_mult norm_inverse) - -lemma norm_power_ineq: - fixes x :: "'a::{real_normed_algebra_1,recpower}" - shows "norm (x ^ n) \ norm x ^ n" -proof (induct n) - case 0 show "norm (x ^ 0) \ norm x ^ 0" by simp -next - case (Suc n) - have "norm (x * x ^ n) \ norm x * norm (x ^ n)" - by (rule norm_mult_ineq) - also from Suc have "\ \ norm x * norm x ^ n" - using norm_ge_zero by (rule mult_left_mono) - finally show "norm (x ^ Suc n) \ norm x ^ Suc n" - by (simp add: power_Suc) -qed - -lemma norm_power: - fixes x :: "'a::{real_normed_div_algebra,recpower}" - shows "norm (x ^ n) = norm x ^ n" -by (induct n) (simp_all add: power_Suc norm_mult) - - -subsection {* Sign function *} - -lemma norm_sgn: - "norm (sgn(x::'a::real_normed_vector)) = (if x = 0 then 0 else 1)" -by (simp add: sgn_div_norm norm_scaleR) - -lemma sgn_zero [simp]: "sgn(0::'a::real_normed_vector) = 0" -by (simp add: sgn_div_norm) - -lemma sgn_zero_iff: "(sgn(x::'a::real_normed_vector) = 0) = (x = 0)" -by (simp add: sgn_div_norm) - -lemma sgn_minus: "sgn (- x) = - sgn(x::'a::real_normed_vector)" -by (simp add: sgn_div_norm) - -lemma sgn_scaleR: - "sgn (scaleR r x) = scaleR (sgn r) (sgn(x::'a::real_normed_vector))" -by (simp add: sgn_div_norm norm_scaleR mult_ac) - -lemma sgn_one [simp]: "sgn (1::'a::real_normed_algebra_1) = 1" -by (simp add: sgn_div_norm) - -lemma sgn_of_real: - "sgn (of_real r::'a::real_normed_algebra_1) = of_real (sgn r)" -unfolding of_real_def by (simp only: sgn_scaleR sgn_one) - -lemma sgn_mult: - fixes x 
y :: "'a::real_normed_div_algebra" - shows "sgn (x * y) = sgn x * sgn y" -by (simp add: sgn_div_norm norm_mult mult_commute) - -lemma real_sgn_eq: "sgn (x::real) = x / \x\" -by (simp add: sgn_div_norm divide_inverse) - -lemma real_sgn_pos: "0 < (x::real) \ sgn x = 1" -unfolding real_sgn_eq by simp - -lemma real_sgn_neg: "(x::real) < 0 \ sgn x = -1" -unfolding real_sgn_eq by simp - - -subsection {* Bounded Linear and Bilinear Operators *} - -locale bounded_linear = additive + - constrains f :: "'a::real_normed_vector \ 'b::real_normed_vector" - assumes scaleR: "f (scaleR r x) = scaleR r (f x)" - assumes bounded: "\K. \x. norm (f x) \ norm x * K" -begin - -lemma pos_bounded: - "\K>0. \x. norm (f x) \ norm x * K" -proof - - obtain K where K: "\x. norm (f x) \ norm x * K" - using bounded by fast - show ?thesis - proof (intro exI impI conjI allI) - show "0 < max 1 K" - by (rule order_less_le_trans [OF zero_less_one le_maxI1]) - next - fix x - have "norm (f x) \ norm x * K" using K . - also have "\ \ norm x * max 1 K" - by (rule mult_left_mono [OF le_maxI2 norm_ge_zero]) - finally show "norm (f x) \ norm x * max 1 K" . - qed -qed - -lemma nonneg_bounded: - "\K\0. \x. norm (f x) \ norm x * K" -proof - - from pos_bounded - show ?thesis by (auto intro: order_less_imp_le) -qed - -end - -locale bounded_bilinear = - fixes prod :: "['a::real_normed_vector, 'b::real_normed_vector] - \ 'c::real_normed_vector" - (infixl "**" 70) - assumes add_left: "prod (a + a') b = prod a b + prod a' b" - assumes add_right: "prod a (b + b') = prod a b + prod a b'" - assumes scaleR_left: "prod (scaleR r a) b = scaleR r (prod a b)" - assumes scaleR_right: "prod a (scaleR r b) = scaleR r (prod a b)" - assumes bounded: "\K. \a b. norm (prod a b) \ norm a * norm b * K" -begin - -lemma pos_bounded: - "\K>0. \a b. norm (a ** b) \ norm a * norm b * K" -apply (cut_tac bounded, erule exE) -apply (rule_tac x="max 1 K" in exI, safe) -apply (rule order_less_le_trans [OF zero_less_one le_maxI1]) -apply (drule spec, drule spec, erule order_trans) -apply (rule mult_left_mono [OF le_maxI2]) -apply (intro mult_nonneg_nonneg norm_ge_zero) -done - -lemma nonneg_bounded: - "\K\0. \a b. norm (a ** b) \ norm a * norm b * K" -proof - - from pos_bounded - show ?thesis by (auto intro: order_less_imp_le) -qed - -lemma additive_right: "additive (\b. prod a b)" -by (rule additive.intro, rule add_right) - -lemma additive_left: "additive (\a. prod a b)" -by (rule additive.intro, rule add_left) - -lemma zero_left: "prod 0 b = 0" -by (rule additive.zero [OF additive_left]) - -lemma zero_right: "prod a 0 = 0" -by (rule additive.zero [OF additive_right]) - -lemma minus_left: "prod (- a) b = - prod a b" -by (rule additive.minus [OF additive_left]) - -lemma minus_right: "prod a (- b) = - prod a b" -by (rule additive.minus [OF additive_right]) - -lemma diff_left: - "prod (a - a') b = prod a b - prod a' b" -by (rule additive.diff [OF additive_left]) - -lemma diff_right: - "prod a (b - b') = prod a b - prod a b'" -by (rule additive.diff [OF additive_right]) - -lemma bounded_linear_left: - "bounded_linear (\a. a ** b)" -apply (unfold_locales) -apply (rule add_left) -apply (rule scaleR_left) -apply (cut_tac bounded, safe) -apply (rule_tac x="norm b * K" in exI) -apply (simp add: mult_ac) -done - -lemma bounded_linear_right: - "bounded_linear (\b. 
a ** b)" -apply (unfold_locales) -apply (rule add_right) -apply (rule scaleR_right) -apply (cut_tac bounded, safe) -apply (rule_tac x="norm a * K" in exI) -apply (simp add: mult_ac) -done - -lemma prod_diff_prod: - "(x ** y - a ** b) = (x - a) ** (y - b) + (x - a) ** b + a ** (y - b)" -by (simp add: diff_left diff_right) - -end - -interpretation mult!: - bounded_bilinear "op * :: 'a \ 'a \ 'a::real_normed_algebra" -apply (rule bounded_bilinear.intro) -apply (rule left_distrib) -apply (rule right_distrib) -apply (rule mult_scaleR_left) -apply (rule mult_scaleR_right) -apply (rule_tac x="1" in exI) -apply (simp add: norm_mult_ineq) -done - -interpretation mult_left!: - bounded_linear "(\x::'a::real_normed_algebra. x * y)" -by (rule mult.bounded_linear_left) - -interpretation mult_right!: - bounded_linear "(\y::'a::real_normed_algebra. x * y)" -by (rule mult.bounded_linear_right) - -interpretation divide!: - bounded_linear "(\x::'a::real_normed_field. x / y)" -unfolding divide_inverse by (rule mult.bounded_linear_left) - -interpretation scaleR!: bounded_bilinear "scaleR" -apply (rule bounded_bilinear.intro) -apply (rule scaleR_left_distrib) -apply (rule scaleR_right_distrib) -apply simp -apply (rule scaleR_left_commute) -apply (rule_tac x="1" in exI) -apply (simp add: norm_scaleR) -done - -interpretation scaleR_left!: bounded_linear "\r. scaleR r x" -by (rule scaleR.bounded_linear_left) - -interpretation scaleR_right!: bounded_linear "\x. scaleR r x" -by (rule scaleR.bounded_linear_right) - -interpretation of_real!: bounded_linear "\r. of_real r" -unfolding of_real_def by (rule scaleR.bounded_linear_left) - -end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/RealVector.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/RealVector.thy Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,841 @@ +(* Title: HOL/RealVector.thy + Author: Brian Huffman +*) + +header {* Vector Spaces and Algebras over the Reals *} + +theory RealVector +imports RealPow +begin + +subsection {* Locale for additive functions *} + +locale additive = + fixes f :: "'a::ab_group_add \ 'b::ab_group_add" + assumes add: "f (x + y) = f x + f y" +begin + +lemma zero: "f 0 = 0" +proof - + have "f 0 = f (0 + 0)" by simp + also have "\ = f 0 + f 0" by (rule add) + finally show "f 0 = 0" by simp +qed + +lemma minus: "f (- x) = - f x" +proof - + have "f (- x) + f x = f (- x + x)" by (rule add [symmetric]) + also have "\ = - f x + f x" by (simp add: zero) + finally show "f (- x) = - f x" by (rule add_right_imp_eq) +qed + +lemma diff: "f (x - y) = f x - f y" +by (simp add: diff_def add minus) + +lemma setsum: "f (setsum g A) = (\x\A. f (g x))" +apply (cases "finite A") +apply (induct set: finite) +apply (simp add: zero) +apply (simp add: add) +apply (simp add: zero) +done + +end + +subsection {* Vector spaces *} + +locale vector_space = + fixes scale :: "'a::field \ 'b::ab_group_add \ 'b" + assumes scale_right_distrib: "scale a (x + y) = scale a x + scale a y" + and scale_left_distrib: "scale (a + b) x = scale a x + scale b x" + and scale_scale [simp]: "scale a (scale b x) = scale (a * b) x" + and scale_one [simp]: "scale 1 x = x" +begin + +lemma scale_left_commute: + "scale a (scale b x) = scale b (scale a x)" +by (simp add: mult_commute) + +lemma scale_zero_left [simp]: "scale 0 x = 0" + and scale_minus_left [simp]: "scale (- a) x = - (scale a x)" + and scale_left_diff_distrib: "scale (a - b) x = scale a x - scale b x" +proof - + interpret s: additive "\a. 
scale a x" + proof qed (rule scale_left_distrib) + show "scale 0 x = 0" by (rule s.zero) + show "scale (- a) x = - (scale a x)" by (rule s.minus) + show "scale (a - b) x = scale a x - scale b x" by (rule s.diff) +qed + +lemma scale_zero_right [simp]: "scale a 0 = 0" + and scale_minus_right [simp]: "scale a (- x) = - (scale a x)" + and scale_right_diff_distrib: "scale a (x - y) = scale a x - scale a y" +proof - + interpret s: additive "\x. scale a x" + proof qed (rule scale_right_distrib) + show "scale a 0 = 0" by (rule s.zero) + show "scale a (- x) = - (scale a x)" by (rule s.minus) + show "scale a (x - y) = scale a x - scale a y" by (rule s.diff) +qed + +lemma scale_eq_0_iff [simp]: + "scale a x = 0 \ a = 0 \ x = 0" +proof cases + assume "a = 0" thus ?thesis by simp +next + assume anz [simp]: "a \ 0" + { assume "scale a x = 0" + hence "scale (inverse a) (scale a x) = 0" by simp + hence "x = 0" by simp } + thus ?thesis by force +qed + +lemma scale_left_imp_eq: + "\a \ 0; scale a x = scale a y\ \ x = y" +proof - + assume nonzero: "a \ 0" + assume "scale a x = scale a y" + hence "scale a (x - y) = 0" + by (simp add: scale_right_diff_distrib) + hence "x - y = 0" by (simp add: nonzero) + thus "x = y" by (simp only: right_minus_eq) +qed + +lemma scale_right_imp_eq: + "\x \ 0; scale a x = scale b x\ \ a = b" +proof - + assume nonzero: "x \ 0" + assume "scale a x = scale b x" + hence "scale (a - b) x = 0" + by (simp add: scale_left_diff_distrib) + hence "a - b = 0" by (simp add: nonzero) + thus "a = b" by (simp only: right_minus_eq) +qed + +lemma scale_cancel_left: + "scale a x = scale a y \ x = y \ a = 0" +by (auto intro: scale_left_imp_eq) + +lemma scale_cancel_right: + "scale a x = scale b x \ a = b \ x = 0" +by (auto intro: scale_right_imp_eq) + +end + +subsection {* Real vector spaces *} + +class scaleR = type + + fixes scaleR :: "real \ 'a \ 'a" (infixr "*\<^sub>R" 75) +begin + +abbreviation + divideR :: "'a \ real \ 'a" (infixl "'/\<^sub>R" 70) +where + "x /\<^sub>R r == scaleR (inverse r) x" + +end + +instantiation real :: scaleR +begin + +definition + real_scaleR_def [simp]: "scaleR a x = a * x" + +instance .. 
+ +end + +class real_vector = scaleR + ab_group_add + + assumes scaleR_right_distrib: "scaleR a (x + y) = scaleR a x + scaleR a y" + and scaleR_left_distrib: "scaleR (a + b) x = scaleR a x + scaleR b x" + and scaleR_scaleR [simp]: "scaleR a (scaleR b x) = scaleR (a * b) x" + and scaleR_one [simp]: "scaleR 1 x = x" + +interpretation real_vector!: + vector_space "scaleR :: real \ 'a \ 'a::real_vector" +apply unfold_locales +apply (rule scaleR_right_distrib) +apply (rule scaleR_left_distrib) +apply (rule scaleR_scaleR) +apply (rule scaleR_one) +done + +text {* Recover original theorem names *} + +lemmas scaleR_left_commute = real_vector.scale_left_commute +lemmas scaleR_zero_left = real_vector.scale_zero_left +lemmas scaleR_minus_left = real_vector.scale_minus_left +lemmas scaleR_left_diff_distrib = real_vector.scale_left_diff_distrib +lemmas scaleR_zero_right = real_vector.scale_zero_right +lemmas scaleR_minus_right = real_vector.scale_minus_right +lemmas scaleR_right_diff_distrib = real_vector.scale_right_diff_distrib +lemmas scaleR_eq_0_iff = real_vector.scale_eq_0_iff +lemmas scaleR_left_imp_eq = real_vector.scale_left_imp_eq +lemmas scaleR_right_imp_eq = real_vector.scale_right_imp_eq +lemmas scaleR_cancel_left = real_vector.scale_cancel_left +lemmas scaleR_cancel_right = real_vector.scale_cancel_right + +class real_algebra = real_vector + ring + + assumes mult_scaleR_left [simp]: "scaleR a x * y = scaleR a (x * y)" + and mult_scaleR_right [simp]: "x * scaleR a y = scaleR a (x * y)" + +class real_algebra_1 = real_algebra + ring_1 + +class real_div_algebra = real_algebra_1 + division_ring + +class real_field = real_div_algebra + field + +instance real :: real_field +apply (intro_classes, unfold real_scaleR_def) +apply (rule right_distrib) +apply (rule left_distrib) +apply (rule mult_assoc [symmetric]) +apply (rule mult_1_left) +apply (rule mult_assoc) +apply (rule mult_left_commute) +done + +interpretation scaleR_left!: additive "(\a. scaleR a x::'a::real_vector)" +proof qed (rule scaleR_left_distrib) + +interpretation scaleR_right!: additive "(\x. 
scaleR a x::'a::real_vector)" +proof qed (rule scaleR_right_distrib) + +lemma nonzero_inverse_scaleR_distrib: + fixes x :: "'a::real_div_algebra" shows + "\a \ 0; x \ 0\ \ inverse (scaleR a x) = scaleR (inverse a) (inverse x)" +by (rule inverse_unique, simp) + +lemma inverse_scaleR_distrib: + fixes x :: "'a::{real_div_algebra,division_by_zero}" + shows "inverse (scaleR a x) = scaleR (inverse a) (inverse x)" +apply (case_tac "a = 0", simp) +apply (case_tac "x = 0", simp) +apply (erule (1) nonzero_inverse_scaleR_distrib) +done + + +subsection {* Embedding of the Reals into any @{text real_algebra_1}: +@{term of_real} *} + +definition + of_real :: "real \ 'a::real_algebra_1" where + "of_real r = scaleR r 1" + +lemma scaleR_conv_of_real: "scaleR r x = of_real r * x" +by (simp add: of_real_def) + +lemma of_real_0 [simp]: "of_real 0 = 0" +by (simp add: of_real_def) + +lemma of_real_1 [simp]: "of_real 1 = 1" +by (simp add: of_real_def) + +lemma of_real_add [simp]: "of_real (x + y) = of_real x + of_real y" +by (simp add: of_real_def scaleR_left_distrib) + +lemma of_real_minus [simp]: "of_real (- x) = - of_real x" +by (simp add: of_real_def) + +lemma of_real_diff [simp]: "of_real (x - y) = of_real x - of_real y" +by (simp add: of_real_def scaleR_left_diff_distrib) + +lemma of_real_mult [simp]: "of_real (x * y) = of_real x * of_real y" +by (simp add: of_real_def mult_commute) + +lemma nonzero_of_real_inverse: + "x \ 0 \ of_real (inverse x) = + inverse (of_real x :: 'a::real_div_algebra)" +by (simp add: of_real_def nonzero_inverse_scaleR_distrib) + +lemma of_real_inverse [simp]: + "of_real (inverse x) = + inverse (of_real x :: 'a::{real_div_algebra,division_by_zero})" +by (simp add: of_real_def inverse_scaleR_distrib) + +lemma nonzero_of_real_divide: + "y \ 0 \ of_real (x / y) = + (of_real x / of_real y :: 'a::real_field)" +by (simp add: divide_inverse nonzero_of_real_inverse) + +lemma of_real_divide [simp]: + "of_real (x / y) = + (of_real x / of_real y :: 'a::{real_field,division_by_zero})" +by (simp add: divide_inverse) + +lemma of_real_power [simp]: + "of_real (x ^ n) = (of_real x :: 'a::{real_algebra_1,recpower}) ^ n" +by (induct n) (simp_all add: power_Suc) + +lemma of_real_eq_iff [simp]: "(of_real x = of_real y) = (x = y)" +by (simp add: of_real_def scaleR_cancel_right) + +lemmas of_real_eq_0_iff [simp] = of_real_eq_iff [of _ 0, simplified] + +lemma of_real_eq_id [simp]: "of_real = (id :: real \ real)" +proof + fix r + show "of_real r = id r" + by (simp add: of_real_def) +qed + +text{*Collapse nested embeddings*} +lemma of_real_of_nat_eq [simp]: "of_real (of_nat n) = of_nat n" +by (induct n) auto + +lemma of_real_of_int_eq [simp]: "of_real (of_int z) = of_int z" +by (cases z rule: int_diff_cases, simp) + +lemma of_real_number_of_eq: + "of_real (number_of w) = (number_of w :: 'a::{number_ring,real_algebra_1})" +by (simp add: number_of_eq) + +text{*Every real algebra has characteristic zero*} +instance real_algebra_1 < ring_char_0 +proof + fix m n :: nat + have "(of_real (of_nat m) = (of_real (of_nat n)::'a)) = (m = n)" + by (simp only: of_real_eq_iff of_nat_eq_iff) + thus "(of_nat m = (of_nat n::'a)) = (m = n)" + by (simp only: of_real_of_nat_eq) +qed + +instance real_field < field_char_0 .. 
+ + +subsection {* The Set of Real Numbers *} + +definition + Reals :: "'a::real_algebra_1 set" where + [code del]: "Reals \ range of_real" + +notation (xsymbols) + Reals ("\") + +lemma Reals_of_real [simp]: "of_real r \ Reals" +by (simp add: Reals_def) + +lemma Reals_of_int [simp]: "of_int z \ Reals" +by (subst of_real_of_int_eq [symmetric], rule Reals_of_real) + +lemma Reals_of_nat [simp]: "of_nat n \ Reals" +by (subst of_real_of_nat_eq [symmetric], rule Reals_of_real) + +lemma Reals_number_of [simp]: + "(number_of w::'a::{number_ring,real_algebra_1}) \ Reals" +by (subst of_real_number_of_eq [symmetric], rule Reals_of_real) + +lemma Reals_0 [simp]: "0 \ Reals" +apply (unfold Reals_def) +apply (rule range_eqI) +apply (rule of_real_0 [symmetric]) +done + +lemma Reals_1 [simp]: "1 \ Reals" +apply (unfold Reals_def) +apply (rule range_eqI) +apply (rule of_real_1 [symmetric]) +done + +lemma Reals_add [simp]: "\a \ Reals; b \ Reals\ \ a + b \ Reals" +apply (auto simp add: Reals_def) +apply (rule range_eqI) +apply (rule of_real_add [symmetric]) +done + +lemma Reals_minus [simp]: "a \ Reals \ - a \ Reals" +apply (auto simp add: Reals_def) +apply (rule range_eqI) +apply (rule of_real_minus [symmetric]) +done + +lemma Reals_diff [simp]: "\a \ Reals; b \ Reals\ \ a - b \ Reals" +apply (auto simp add: Reals_def) +apply (rule range_eqI) +apply (rule of_real_diff [symmetric]) +done + +lemma Reals_mult [simp]: "\a \ Reals; b \ Reals\ \ a * b \ Reals" +apply (auto simp add: Reals_def) +apply (rule range_eqI) +apply (rule of_real_mult [symmetric]) +done + +lemma nonzero_Reals_inverse: + fixes a :: "'a::real_div_algebra" + shows "\a \ Reals; a \ 0\ \ inverse a \ Reals" +apply (auto simp add: Reals_def) +apply (rule range_eqI) +apply (erule nonzero_of_real_inverse [symmetric]) +done + +lemma Reals_inverse [simp]: + fixes a :: "'a::{real_div_algebra,division_by_zero}" + shows "a \ Reals \ inverse a \ Reals" +apply (auto simp add: Reals_def) +apply (rule range_eqI) +apply (rule of_real_inverse [symmetric]) +done + +lemma nonzero_Reals_divide: + fixes a b :: "'a::real_field" + shows "\a \ Reals; b \ Reals; b \ 0\ \ a / b \ Reals" +apply (auto simp add: Reals_def) +apply (rule range_eqI) +apply (erule nonzero_of_real_divide [symmetric]) +done + +lemma Reals_divide [simp]: + fixes a b :: "'a::{real_field,division_by_zero}" + shows "\a \ Reals; b \ Reals\ \ a / b \ Reals" +apply (auto simp add: Reals_def) +apply (rule range_eqI) +apply (rule of_real_divide [symmetric]) +done + +lemma Reals_power [simp]: + fixes a :: "'a::{real_algebra_1,recpower}" + shows "a \ Reals \ a ^ n \ Reals" +apply (auto simp add: Reals_def) +apply (rule range_eqI) +apply (rule of_real_power [symmetric]) +done + +lemma Reals_cases [cases set: Reals]: + assumes "q \ \" + obtains (of_real) r where "q = of_real r" + unfolding Reals_def +proof - + from `q \ \` have "q \ range of_real" unfolding Reals_def . + then obtain r where "q = of_real r" .. + then show thesis .. +qed + +lemma Reals_induct [case_names of_real, induct set: Reals]: + "q \ \ \ (\r. P (of_real r)) \ P q" + by (rule Reals_cases) auto + + +subsection {* Real normed vector spaces *} + +class norm = type + + fixes norm :: "'a \ real" + +instantiation real :: norm +begin + +definition + real_norm_def [simp]: "norm r \ \r\" + +instance .. 
+ +end + +class sgn_div_norm = scaleR + norm + sgn + + assumes sgn_div_norm: "sgn x = x /\<^sub>R norm x" + +class real_normed_vector = real_vector + sgn_div_norm + + assumes norm_ge_zero [simp]: "0 \ norm x" + and norm_eq_zero [simp]: "norm x = 0 \ x = 0" + and norm_triangle_ineq: "norm (x + y) \ norm x + norm y" + and norm_scaleR: "norm (scaleR a x) = \a\ * norm x" + +class real_normed_algebra = real_algebra + real_normed_vector + + assumes norm_mult_ineq: "norm (x * y) \ norm x * norm y" + +class real_normed_algebra_1 = real_algebra_1 + real_normed_algebra + + assumes norm_one [simp]: "norm 1 = 1" + +class real_normed_div_algebra = real_div_algebra + real_normed_vector + + assumes norm_mult: "norm (x * y) = norm x * norm y" + +class real_normed_field = real_field + real_normed_div_algebra + +instance real_normed_div_algebra < real_normed_algebra_1 +proof + fix x y :: 'a + show "norm (x * y) \ norm x * norm y" + by (simp add: norm_mult) +next + have "norm (1 * 1::'a) = norm (1::'a) * norm (1::'a)" + by (rule norm_mult) + thus "norm (1::'a) = 1" by simp +qed + +instance real :: real_normed_field +apply (intro_classes, unfold real_norm_def real_scaleR_def) +apply (simp add: real_sgn_def) +apply (rule abs_ge_zero) +apply (rule abs_eq_0) +apply (rule abs_triangle_ineq) +apply (rule abs_mult) +apply (rule abs_mult) +done + +lemma norm_zero [simp]: "norm (0::'a::real_normed_vector) = 0" +by simp + +lemma zero_less_norm_iff [simp]: + fixes x :: "'a::real_normed_vector" + shows "(0 < norm x) = (x \ 0)" +by (simp add: order_less_le) + +lemma norm_not_less_zero [simp]: + fixes x :: "'a::real_normed_vector" + shows "\ norm x < 0" +by (simp add: linorder_not_less) + +lemma norm_le_zero_iff [simp]: + fixes x :: "'a::real_normed_vector" + shows "(norm x \ 0) = (x = 0)" +by (simp add: order_le_less) + +lemma norm_minus_cancel [simp]: + fixes x :: "'a::real_normed_vector" + shows "norm (- x) = norm x" +proof - + have "norm (- x) = norm (scaleR (- 1) x)" + by (simp only: scaleR_minus_left scaleR_one) + also have "\ = \- 1\ * norm x" + by (rule norm_scaleR) + finally show ?thesis by simp +qed + +lemma norm_minus_commute: + fixes a b :: "'a::real_normed_vector" + shows "norm (a - b) = norm (b - a)" +proof - + have "norm (- (b - a)) = norm (b - a)" + by (rule norm_minus_cancel) + thus ?thesis by simp +qed + +lemma norm_triangle_ineq2: + fixes a b :: "'a::real_normed_vector" + shows "norm a - norm b \ norm (a - b)" +proof - + have "norm (a - b + b) \ norm (a - b) + norm b" + by (rule norm_triangle_ineq) + thus ?thesis by simp +qed + +lemma norm_triangle_ineq3: + fixes a b :: "'a::real_normed_vector" + shows "\norm a - norm b\ \ norm (a - b)" +apply (subst abs_le_iff) +apply auto +apply (rule norm_triangle_ineq2) +apply (subst norm_minus_commute) +apply (rule norm_triangle_ineq2) +done + +lemma norm_triangle_ineq4: + fixes a b :: "'a::real_normed_vector" + shows "norm (a - b) \ norm a + norm b" +proof - + have "norm (a + - b) \ norm a + norm (- b)" + by (rule norm_triangle_ineq) + thus ?thesis + by (simp only: diff_minus norm_minus_cancel) +qed + +lemma norm_diff_ineq: + fixes a b :: "'a::real_normed_vector" + shows "norm a - norm b \ norm (a + b)" +proof - + have "norm a - norm (- b) \ norm (a - - b)" + by (rule norm_triangle_ineq2) + thus ?thesis by simp +qed + +lemma norm_diff_triangle_ineq: + fixes a b c d :: "'a::real_normed_vector" + shows "norm ((a + b) - (c + d)) \ norm (a - c) + norm (b - d)" +proof - + have "norm ((a + b) - (c + d)) = norm ((a - c) + (b - d))" + by (simp add: diff_minus add_ac) + 
also have "\ \ norm (a - c) + norm (b - d)" + by (rule norm_triangle_ineq) + finally show ?thesis . +qed + +lemma abs_norm_cancel [simp]: + fixes a :: "'a::real_normed_vector" + shows "\norm a\ = norm a" +by (rule abs_of_nonneg [OF norm_ge_zero]) + +lemma norm_add_less: + fixes x y :: "'a::real_normed_vector" + shows "\norm x < r; norm y < s\ \ norm (x + y) < r + s" +by (rule order_le_less_trans [OF norm_triangle_ineq add_strict_mono]) + +lemma norm_mult_less: + fixes x y :: "'a::real_normed_algebra" + shows "\norm x < r; norm y < s\ \ norm (x * y) < r * s" +apply (rule order_le_less_trans [OF norm_mult_ineq]) +apply (simp add: mult_strict_mono') +done + +lemma norm_of_real [simp]: + "norm (of_real r :: 'a::real_normed_algebra_1) = \r\" +unfolding of_real_def by (simp add: norm_scaleR) + +lemma norm_number_of [simp]: + "norm (number_of w::'a::{number_ring,real_normed_algebra_1}) + = \number_of w\" +by (subst of_real_number_of_eq [symmetric], rule norm_of_real) + +lemma norm_of_int [simp]: + "norm (of_int z::'a::real_normed_algebra_1) = \of_int z\" +by (subst of_real_of_int_eq [symmetric], rule norm_of_real) + +lemma norm_of_nat [simp]: + "norm (of_nat n::'a::real_normed_algebra_1) = of_nat n" +apply (subst of_real_of_nat_eq [symmetric]) +apply (subst norm_of_real, simp) +done + +lemma nonzero_norm_inverse: + fixes a :: "'a::real_normed_div_algebra" + shows "a \ 0 \ norm (inverse a) = inverse (norm a)" +apply (rule inverse_unique [symmetric]) +apply (simp add: norm_mult [symmetric]) +done + +lemma norm_inverse: + fixes a :: "'a::{real_normed_div_algebra,division_by_zero}" + shows "norm (inverse a) = inverse (norm a)" +apply (case_tac "a = 0", simp) +apply (erule nonzero_norm_inverse) +done + +lemma nonzero_norm_divide: + fixes a b :: "'a::real_normed_field" + shows "b \ 0 \ norm (a / b) = norm a / norm b" +by (simp add: divide_inverse norm_mult nonzero_norm_inverse) + +lemma norm_divide: + fixes a b :: "'a::{real_normed_field,division_by_zero}" + shows "norm (a / b) = norm a / norm b" +by (simp add: divide_inverse norm_mult norm_inverse) + +lemma norm_power_ineq: + fixes x :: "'a::{real_normed_algebra_1,recpower}" + shows "norm (x ^ n) \ norm x ^ n" +proof (induct n) + case 0 show "norm (x ^ 0) \ norm x ^ 0" by simp +next + case (Suc n) + have "norm (x * x ^ n) \ norm x * norm (x ^ n)" + by (rule norm_mult_ineq) + also from Suc have "\ \ norm x * norm x ^ n" + using norm_ge_zero by (rule mult_left_mono) + finally show "norm (x ^ Suc n) \ norm x ^ Suc n" + by (simp add: power_Suc) +qed + +lemma norm_power: + fixes x :: "'a::{real_normed_div_algebra,recpower}" + shows "norm (x ^ n) = norm x ^ n" +by (induct n) (simp_all add: power_Suc norm_mult) + + +subsection {* Sign function *} + +lemma norm_sgn: + "norm (sgn(x::'a::real_normed_vector)) = (if x = 0 then 0 else 1)" +by (simp add: sgn_div_norm norm_scaleR) + +lemma sgn_zero [simp]: "sgn(0::'a::real_normed_vector) = 0" +by (simp add: sgn_div_norm) + +lemma sgn_zero_iff: "(sgn(x::'a::real_normed_vector) = 0) = (x = 0)" +by (simp add: sgn_div_norm) + +lemma sgn_minus: "sgn (- x) = - sgn(x::'a::real_normed_vector)" +by (simp add: sgn_div_norm) + +lemma sgn_scaleR: + "sgn (scaleR r x) = scaleR (sgn r) (sgn(x::'a::real_normed_vector))" +by (simp add: sgn_div_norm norm_scaleR mult_ac) + +lemma sgn_one [simp]: "sgn (1::'a::real_normed_algebra_1) = 1" +by (simp add: sgn_div_norm) + +lemma sgn_of_real: + "sgn (of_real r::'a::real_normed_algebra_1) = of_real (sgn r)" +unfolding of_real_def by (simp only: sgn_scaleR sgn_one) + +lemma sgn_mult: + fixes x 
y :: "'a::real_normed_div_algebra" + shows "sgn (x * y) = sgn x * sgn y" +by (simp add: sgn_div_norm norm_mult mult_commute) + +lemma real_sgn_eq: "sgn (x::real) = x / \x\" +by (simp add: sgn_div_norm divide_inverse) + +lemma real_sgn_pos: "0 < (x::real) \ sgn x = 1" +unfolding real_sgn_eq by simp + +lemma real_sgn_neg: "(x::real) < 0 \ sgn x = -1" +unfolding real_sgn_eq by simp + + +subsection {* Bounded Linear and Bilinear Operators *} + +locale bounded_linear = additive + + constrains f :: "'a::real_normed_vector \ 'b::real_normed_vector" + assumes scaleR: "f (scaleR r x) = scaleR r (f x)" + assumes bounded: "\K. \x. norm (f x) \ norm x * K" +begin + +lemma pos_bounded: + "\K>0. \x. norm (f x) \ norm x * K" +proof - + obtain K where K: "\x. norm (f x) \ norm x * K" + using bounded by fast + show ?thesis + proof (intro exI impI conjI allI) + show "0 < max 1 K" + by (rule order_less_le_trans [OF zero_less_one le_maxI1]) + next + fix x + have "norm (f x) \ norm x * K" using K . + also have "\ \ norm x * max 1 K" + by (rule mult_left_mono [OF le_maxI2 norm_ge_zero]) + finally show "norm (f x) \ norm x * max 1 K" . + qed +qed + +lemma nonneg_bounded: + "\K\0. \x. norm (f x) \ norm x * K" +proof - + from pos_bounded + show ?thesis by (auto intro: order_less_imp_le) +qed + +end + +locale bounded_bilinear = + fixes prod :: "['a::real_normed_vector, 'b::real_normed_vector] + \ 'c::real_normed_vector" + (infixl "**" 70) + assumes add_left: "prod (a + a') b = prod a b + prod a' b" + assumes add_right: "prod a (b + b') = prod a b + prod a b'" + assumes scaleR_left: "prod (scaleR r a) b = scaleR r (prod a b)" + assumes scaleR_right: "prod a (scaleR r b) = scaleR r (prod a b)" + assumes bounded: "\K. \a b. norm (prod a b) \ norm a * norm b * K" +begin + +lemma pos_bounded: + "\K>0. \a b. norm (a ** b) \ norm a * norm b * K" +apply (cut_tac bounded, erule exE) +apply (rule_tac x="max 1 K" in exI, safe) +apply (rule order_less_le_trans [OF zero_less_one le_maxI1]) +apply (drule spec, drule spec, erule order_trans) +apply (rule mult_left_mono [OF le_maxI2]) +apply (intro mult_nonneg_nonneg norm_ge_zero) +done + +lemma nonneg_bounded: + "\K\0. \a b. norm (a ** b) \ norm a * norm b * K" +proof - + from pos_bounded + show ?thesis by (auto intro: order_less_imp_le) +qed + +lemma additive_right: "additive (\b. prod a b)" +by (rule additive.intro, rule add_right) + +lemma additive_left: "additive (\a. prod a b)" +by (rule additive.intro, rule add_left) + +lemma zero_left: "prod 0 b = 0" +by (rule additive.zero [OF additive_left]) + +lemma zero_right: "prod a 0 = 0" +by (rule additive.zero [OF additive_right]) + +lemma minus_left: "prod (- a) b = - prod a b" +by (rule additive.minus [OF additive_left]) + +lemma minus_right: "prod a (- b) = - prod a b" +by (rule additive.minus [OF additive_right]) + +lemma diff_left: + "prod (a - a') b = prod a b - prod a' b" +by (rule additive.diff [OF additive_left]) + +lemma diff_right: + "prod a (b - b') = prod a b - prod a b'" +by (rule additive.diff [OF additive_right]) + +lemma bounded_linear_left: + "bounded_linear (\a. a ** b)" +apply (unfold_locales) +apply (rule add_left) +apply (rule scaleR_left) +apply (cut_tac bounded, safe) +apply (rule_tac x="norm b * K" in exI) +apply (simp add: mult_ac) +done + +lemma bounded_linear_right: + "bounded_linear (\b. 
a ** b)" +apply (unfold_locales) +apply (rule add_right) +apply (rule scaleR_right) +apply (cut_tac bounded, safe) +apply (rule_tac x="norm a * K" in exI) +apply (simp add: mult_ac) +done + +lemma prod_diff_prod: + "(x ** y - a ** b) = (x - a) ** (y - b) + (x - a) ** b + a ** (y - b)" +by (simp add: diff_left diff_right) + +end + +interpretation mult!: + bounded_bilinear "op * :: 'a \ 'a \ 'a::real_normed_algebra" +apply (rule bounded_bilinear.intro) +apply (rule left_distrib) +apply (rule right_distrib) +apply (rule mult_scaleR_left) +apply (rule mult_scaleR_right) +apply (rule_tac x="1" in exI) +apply (simp add: norm_mult_ineq) +done + +interpretation mult_left!: + bounded_linear "(\x::'a::real_normed_algebra. x * y)" +by (rule mult.bounded_linear_left) + +interpretation mult_right!: + bounded_linear "(\y::'a::real_normed_algebra. x * y)" +by (rule mult.bounded_linear_right) + +interpretation divide!: + bounded_linear "(\x::'a::real_normed_field. x / y)" +unfolding divide_inverse by (rule mult.bounded_linear_left) + +interpretation scaleR!: bounded_bilinear "scaleR" +apply (rule bounded_bilinear.intro) +apply (rule scaleR_left_distrib) +apply (rule scaleR_right_distrib) +apply simp +apply (rule scaleR_left_commute) +apply (rule_tac x="1" in exI) +apply (simp add: norm_scaleR) +done + +interpretation scaleR_left!: bounded_linear "\r. scaleR r x" +by (rule scaleR.bounded_linear_left) + +interpretation scaleR_right!: bounded_linear "\x. scaleR r x" +by (rule scaleR.bounded_linear_right) + +interpretation of_real!: bounded_linear "\r. of_real r" +unfolding of_real_def by (rule scaleR.bounded_linear_left) + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/SEQ.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/SEQ.thy Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,1136 @@ +(* Title : SEQ.thy + Author : Jacques D. Fleuriot + Copyright : 1998 University of Cambridge + Description : Convergence of sequences and series + Conversion to Isar and new proofs by Lawrence C Paulson, 2004 + Additional contributions by Jeremy Avigad and Brian Huffman +*) + +header {* Sequences and Convergence *} + +theory SEQ +imports RealVector RComplete +begin + +definition + Zseq :: "[nat \ 'a::real_normed_vector] \ bool" where + --{*Standard definition of sequence converging to zero*} + [code del]: "Zseq X = (\r>0. \no. \n\no. norm (X n) < r)" + +definition + LIMSEQ :: "[nat => 'a::real_normed_vector, 'a] => bool" + ("((_)/ ----> (_))" [60, 60] 60) where + --{*Standard definition of convergence of sequence*} + [code del]: "X ----> L = (\r. 0 < r --> (\no. \n. no \ n --> norm (X n - L) < r))" + +definition + lim :: "(nat => 'a::real_normed_vector) => 'a" where + --{*Standard definition of limit using choice operator*} + "lim X = (THE L. X ----> L)" + +definition + convergent :: "(nat => 'a::real_normed_vector) => bool" where + --{*Standard definition of convergence*} + "convergent X = (\L. X ----> L)" + +definition + Bseq :: "(nat => 'a::real_normed_vector) => bool" where + --{*Standard definition for bounded sequence*} + [code del]: "Bseq X = (\K>0.\n. norm (X n) \ K)" + +definition + monoseq :: "(nat=>real)=>bool" where + --{*Definition for monotonicity*} + [code del]: "monoseq X = ((\m. \n\m. X m \ X n) | (\m. \n\m. X n \ X m))" + +definition + subseq :: "(nat => nat) => bool" where + --{*Definition of subsequence*} + [code del]: "subseq f = (\m. \n>m. 
(f m) < (f n))" + +definition + Cauchy :: "(nat => 'a::real_normed_vector) => bool" where + --{*Standard definition of the Cauchy condition*} + [code del]: "Cauchy X = (\e>0. \M. \m \ M. \n \ M. norm (X m - X n) < e)" + + +subsection {* Bounded Sequences *} + +lemma BseqI': assumes K: "\n. norm (X n) \ K" shows "Bseq X" +unfolding Bseq_def +proof (intro exI conjI allI) + show "0 < max K 1" by simp +next + fix n::nat + have "norm (X n) \ K" by (rule K) + thus "norm (X n) \ max K 1" by simp +qed + +lemma BseqE: "\Bseq X; \K. \0 < K; \n. norm (X n) \ K\ \ Q\ \ Q" +unfolding Bseq_def by auto + +lemma BseqI2': assumes K: "\n\N. norm (X n) \ K" shows "Bseq X" +proof (rule BseqI') + let ?A = "norm ` X ` {..N}" + have 1: "finite ?A" by simp + fix n::nat + show "norm (X n) \ max K (Max ?A)" + proof (cases rule: linorder_le_cases) + assume "n \ N" + hence "norm (X n) \ K" using K by simp + thus "norm (X n) \ max K (Max ?A)" by simp + next + assume "n \ N" + hence "norm (X n) \ ?A" by simp + with 1 have "norm (X n) \ Max ?A" by (rule Max_ge) + thus "norm (X n) \ max K (Max ?A)" by simp + qed +qed + +lemma Bseq_ignore_initial_segment: "Bseq X \ Bseq (\n. X (n + k))" +unfolding Bseq_def by auto + +lemma Bseq_offset: "Bseq (\n. X (n + k)) \ Bseq X" +apply (erule BseqE) +apply (rule_tac N="k" and K="K" in BseqI2') +apply clarify +apply (drule_tac x="n - k" in spec, simp) +done + + +subsection {* Sequences That Converge to Zero *} + +lemma ZseqI: + "(\r. 0 < r \ \no. \n\no. norm (X n) < r) \ Zseq X" +unfolding Zseq_def by simp + +lemma ZseqD: + "\Zseq X; 0 < r\ \ \no. \n\no. norm (X n) < r" +unfolding Zseq_def by simp + +lemma Zseq_zero: "Zseq (\n. 0)" +unfolding Zseq_def by simp + +lemma Zseq_const_iff: "Zseq (\n. k) = (k = 0)" +unfolding Zseq_def by force + +lemma Zseq_norm_iff: "Zseq (\n. norm (X n)) = Zseq (\n. X n)" +unfolding Zseq_def by simp + +lemma Zseq_imp_Zseq: + assumes X: "Zseq X" + assumes Y: "\n. norm (Y n) \ norm (X n) * K" + shows "Zseq (\n. Y n)" +proof (cases) + assume K: "0 < K" + show ?thesis + proof (rule ZseqI) + fix r::real assume "0 < r" + hence "0 < r / K" + using K by (rule divide_pos_pos) + then obtain N where "\n\N. norm (X n) < r / K" + using ZseqD [OF X] by fast + hence "\n\N. norm (X n) * K < r" + by (simp add: pos_less_divide_eq K) + hence "\n\N. norm (Y n) < r" + by (simp add: order_le_less_trans [OF Y]) + thus "\N. \n\N. norm (Y n) < r" .. + qed +next + assume "\ 0 < K" + hence K: "K \ 0" by (simp only: linorder_not_less) + { + fix n::nat + have "norm (Y n) \ norm (X n) * K" by (rule Y) + also have "\ \ norm (X n) * 0" + using K norm_ge_zero by (rule mult_left_mono) + finally have "norm (Y n) = 0" by simp + } + thus ?thesis by (simp add: Zseq_zero) +qed + +lemma Zseq_le: "\Zseq Y; \n. norm (X n) \ norm (Y n)\ \ Zseq X" +by (erule_tac K="1" in Zseq_imp_Zseq, simp) + +lemma Zseq_add: + assumes X: "Zseq X" + assumes Y: "Zseq Y" + shows "Zseq (\n. X n + Y n)" +proof (rule ZseqI) + fix r::real assume "0 < r" + hence r: "0 < r / 2" by simp + obtain M where M: "\n\M. norm (X n) < r/2" + using ZseqD [OF X r] by fast + obtain N where N: "\n\N. norm (Y n) < r/2" + using ZseqD [OF Y r] by fast + show "\N. \n\N. 
norm (X n + Y n) < r" + proof (intro exI allI impI) + fix n assume n: "max M N \ n" + have "norm (X n + Y n) \ norm (X n) + norm (Y n)" + by (rule norm_triangle_ineq) + also have "\ < r/2 + r/2" + proof (rule add_strict_mono) + from M n show "norm (X n) < r/2" by simp + from N n show "norm (Y n) < r/2" by simp + qed + finally show "norm (X n + Y n) < r" by simp + qed +qed + +lemma Zseq_minus: "Zseq X \ Zseq (\n. - X n)" +unfolding Zseq_def by simp + +lemma Zseq_diff: "\Zseq X; Zseq Y\ \ Zseq (\n. X n - Y n)" +by (simp only: diff_minus Zseq_add Zseq_minus) + +lemma (in bounded_linear) Zseq: + assumes X: "Zseq X" + shows "Zseq (\n. f (X n))" +proof - + obtain K where "\x. norm (f x) \ norm x * K" + using bounded by fast + with X show ?thesis + by (rule Zseq_imp_Zseq) +qed + +lemma (in bounded_bilinear) Zseq: + assumes X: "Zseq X" + assumes Y: "Zseq Y" + shows "Zseq (\n. X n ** Y n)" +proof (rule ZseqI) + fix r::real assume r: "0 < r" + obtain K where K: "0 < K" + and norm_le: "\x y. norm (x ** y) \ norm x * norm y * K" + using pos_bounded by fast + from K have K': "0 < inverse K" + by (rule positive_imp_inverse_positive) + obtain M where M: "\n\M. norm (X n) < r" + using ZseqD [OF X r] by fast + obtain N where N: "\n\N. norm (Y n) < inverse K" + using ZseqD [OF Y K'] by fast + show "\N. \n\N. norm (X n ** Y n) < r" + proof (intro exI allI impI) + fix n assume n: "max M N \ n" + have "norm (X n ** Y n) \ norm (X n) * norm (Y n) * K" + by (rule norm_le) + also have "norm (X n) * norm (Y n) * K < r * inverse K * K" + proof (intro mult_strict_right_mono mult_strict_mono' norm_ge_zero K) + from M n show Xn: "norm (X n) < r" by simp + from N n show Yn: "norm (Y n) < inverse K" by simp + qed + also from K have "r * inverse K * K = r" by simp + finally show "norm (X n ** Y n) < r" . + qed +qed + +lemma (in bounded_bilinear) Zseq_prod_Bseq: + assumes X: "Zseq X" + assumes Y: "Bseq Y" + shows "Zseq (\n. X n ** Y n)" +proof - + obtain K where K: "0 \ K" + and norm_le: "\x y. norm (x ** y) \ norm x * norm y * K" + using nonneg_bounded by fast + obtain B where B: "0 < B" + and norm_Y: "\n. norm (Y n) \ B" + using Y [unfolded Bseq_def] by fast + from X show ?thesis + proof (rule Zseq_imp_Zseq) + fix n::nat + have "norm (X n ** Y n) \ norm (X n) * norm (Y n) * K" + by (rule norm_le) + also have "\ \ norm (X n) * B * K" + by (intro mult_mono' order_refl norm_Y norm_ge_zero + mult_nonneg_nonneg K) + also have "\ = norm (X n) * (B * K)" + by (rule mult_assoc) + finally show "norm (X n ** Y n) \ norm (X n) * (B * K)" . + qed +qed + +lemma (in bounded_bilinear) Bseq_prod_Zseq: + assumes X: "Bseq X" + assumes Y: "Zseq Y" + shows "Zseq (\n. X n ** Y n)" +proof - + obtain K where K: "0 \ K" + and norm_le: "\x y. norm (x ** y) \ norm x * norm y * K" + using nonneg_bounded by fast + obtain B where B: "0 < B" + and norm_X: "\n. norm (X n) \ B" + using X [unfolded Bseq_def] by fast + from Y show ?thesis + proof (rule Zseq_imp_Zseq) + fix n::nat + have "norm (X n ** Y n) \ norm (X n) * norm (Y n) * K" + by (rule norm_le) + also have "\ \ B * norm (Y n) * K" + by (intro mult_mono' order_refl norm_X norm_ge_zero + mult_nonneg_nonneg K) + also have "\ = norm (Y n) * (B * K)" + by (simp only: mult_ac) + finally show "norm (X n ** Y n) \ norm (Y n) * (B * K)" . + qed +qed + +lemma (in bounded_bilinear) Zseq_left: + "Zseq X \ Zseq (\n. X n ** a)" +by (rule bounded_linear_left [THEN bounded_linear.Zseq]) + +lemma (in bounded_bilinear) Zseq_right: + "Zseq X \ Zseq (\n. 
a ** X n)" +by (rule bounded_linear_right [THEN bounded_linear.Zseq]) + +lemmas Zseq_mult = mult.Zseq +lemmas Zseq_mult_right = mult.Zseq_right +lemmas Zseq_mult_left = mult.Zseq_left + + +subsection {* Limits of Sequences *} + +lemma LIMSEQ_iff: + "(X ----> L) = (\r>0. \no. \n \ no. norm (X n - L) < r)" +by (rule LIMSEQ_def) + +lemma LIMSEQ_Zseq_iff: "((\n. X n) ----> L) = Zseq (\n. X n - L)" +by (simp only: LIMSEQ_def Zseq_def) + +lemma LIMSEQ_I: + "(\r. 0 < r \ \no. \n\no. norm (X n - L) < r) \ X ----> L" +by (simp add: LIMSEQ_def) + +lemma LIMSEQ_D: + "\X ----> L; 0 < r\ \ \no. \n\no. norm (X n - L) < r" +by (simp add: LIMSEQ_def) + +lemma LIMSEQ_const: "(\n. k) ----> k" +by (simp add: LIMSEQ_def) + +lemma LIMSEQ_const_iff: "(\n. k) ----> l = (k = l)" +by (simp add: LIMSEQ_Zseq_iff Zseq_const_iff) + +lemma LIMSEQ_norm: "X ----> a \ (\n. norm (X n)) ----> norm a" +apply (simp add: LIMSEQ_def, safe) +apply (drule_tac x="r" in spec, safe) +apply (rule_tac x="no" in exI, safe) +apply (drule_tac x="n" in spec, safe) +apply (erule order_le_less_trans [OF norm_triangle_ineq3]) +done + +lemma LIMSEQ_ignore_initial_segment: + "f ----> a \ (\n. f (n + k)) ----> a" +apply (rule LIMSEQ_I) +apply (drule (1) LIMSEQ_D) +apply (erule exE, rename_tac N) +apply (rule_tac x=N in exI) +apply simp +done + +lemma LIMSEQ_offset: + "(\n. f (n + k)) ----> a \ f ----> a" +apply (rule LIMSEQ_I) +apply (drule (1) LIMSEQ_D) +apply (erule exE, rename_tac N) +apply (rule_tac x="N + k" in exI) +apply clarify +apply (drule_tac x="n - k" in spec) +apply (simp add: le_diff_conv2) +done + +lemma LIMSEQ_Suc: "f ----> l \ (\n. f (Suc n)) ----> l" +by (drule_tac k="1" in LIMSEQ_ignore_initial_segment, simp) + +lemma LIMSEQ_imp_Suc: "(\n. f (Suc n)) ----> l \ f ----> l" +by (rule_tac k="1" in LIMSEQ_offset, simp) + +lemma LIMSEQ_Suc_iff: "(\n. f (Suc n)) ----> l = f ----> l" +by (blast intro: LIMSEQ_imp_Suc LIMSEQ_Suc) + +lemma add_diff_add: + fixes a b c d :: "'a::ab_group_add" + shows "(a + c) - (b + d) = (a - b) + (c - d)" +by simp + +lemma minus_diff_minus: + fixes a b :: "'a::ab_group_add" + shows "(- a) - (- b) = - (a - b)" +by simp + +lemma LIMSEQ_add: "\X ----> a; Y ----> b\ \ (\n. X n + Y n) ----> a + b" +by (simp only: LIMSEQ_Zseq_iff add_diff_add Zseq_add) + +lemma LIMSEQ_minus: "X ----> a \ (\n. - X n) ----> - a" +by (simp only: LIMSEQ_Zseq_iff minus_diff_minus Zseq_minus) + +lemma LIMSEQ_minus_cancel: "(\n. - X n) ----> - a \ X ----> a" +by (drule LIMSEQ_minus, simp) + +lemma LIMSEQ_diff: "\X ----> a; Y ----> b\ \ (\n. X n - Y n) ----> a - b" +by (simp add: diff_minus LIMSEQ_add LIMSEQ_minus) + +lemma LIMSEQ_unique: "\X ----> a; X ----> b\ \ a = b" +by (drule (1) LIMSEQ_diff, simp add: LIMSEQ_const_iff) + +lemma (in bounded_linear) LIMSEQ: + "X ----> a \ (\n. f (X n)) ----> f a" +by (simp only: LIMSEQ_Zseq_iff diff [symmetric] Zseq) + +lemma (in bounded_bilinear) LIMSEQ: + "\X ----> a; Y ----> b\ \ (\n. X n ** Y n) ----> a ** b" +by (simp only: LIMSEQ_Zseq_iff prod_diff_prod + Zseq_add Zseq Zseq_left Zseq_right) + +lemma LIMSEQ_mult: + fixes a b :: "'a::real_normed_algebra" + shows "[| X ----> a; Y ----> b |] ==> (%n. 
X n * Y n) ----> a * b" +by (rule mult.LIMSEQ) + +lemma inverse_diff_inverse: + "\(a::'a::division_ring) \ 0; b \ 0\ + \ inverse a - inverse b = - (inverse a * (a - b) * inverse b)" +by (simp add: ring_simps) + +lemma Bseq_inverse_lemma: + fixes x :: "'a::real_normed_div_algebra" + shows "\r \ norm x; 0 < r\ \ norm (inverse x) \ inverse r" +apply (subst nonzero_norm_inverse, clarsimp) +apply (erule (1) le_imp_inverse_le) +done + +lemma Bseq_inverse: + fixes a :: "'a::real_normed_div_algebra" + assumes X: "X ----> a" + assumes a: "a \ 0" + shows "Bseq (\n. inverse (X n))" +proof - + from a have "0 < norm a" by simp + hence "\r>0. r < norm a" by (rule dense) + then obtain r where r1: "0 < r" and r2: "r < norm a" by fast + obtain N where N: "\n. N \ n \ norm (X n - a) < r" + using LIMSEQ_D [OF X r1] by fast + show ?thesis + proof (rule BseqI2' [rule_format]) + fix n assume n: "N \ n" + hence 1: "norm (X n - a) < r" by (rule N) + hence 2: "X n \ 0" using r2 by auto + hence "norm (inverse (X n)) = inverse (norm (X n))" + by (rule nonzero_norm_inverse) + also have "\ \ inverse (norm a - r)" + proof (rule le_imp_inverse_le) + show "0 < norm a - r" using r2 by simp + next + have "norm a - norm (X n) \ norm (a - X n)" + by (rule norm_triangle_ineq2) + also have "\ = norm (X n - a)" + by (rule norm_minus_commute) + also have "\ < r" using 1 . + finally show "norm a - r \ norm (X n)" by simp + qed + finally show "norm (inverse (X n)) \ inverse (norm a - r)" . + qed +qed + +lemma LIMSEQ_inverse_lemma: + fixes a :: "'a::real_normed_div_algebra" + shows "\X ----> a; a \ 0; \n. X n \ 0\ + \ (\n. inverse (X n)) ----> inverse a" +apply (subst LIMSEQ_Zseq_iff) +apply (simp add: inverse_diff_inverse nonzero_imp_inverse_nonzero) +apply (rule Zseq_minus) +apply (rule Zseq_mult_left) +apply (rule mult.Bseq_prod_Zseq) +apply (erule (1) Bseq_inverse) +apply (simp add: LIMSEQ_Zseq_iff) +done + +lemma LIMSEQ_inverse: + fixes a :: "'a::real_normed_div_algebra" + assumes X: "X ----> a" + assumes a: "a \ 0" + shows "(\n. inverse (X n)) ----> inverse a" +proof - + from a have "0 < norm a" by simp + then obtain k where "\n\k. norm (X n - a) < norm a" + using LIMSEQ_D [OF X] by fast + hence "\n\k. X n \ 0" by auto + hence k: "\n. X (n + k) \ 0" by simp + + from X have "(\n. X (n + k)) ----> a" + by (rule LIMSEQ_ignore_initial_segment) + hence "(\n. inverse (X (n + k))) ----> inverse a" + using a k by (rule LIMSEQ_inverse_lemma) + thus "(\n. inverse (X n)) ----> inverse a" + by (rule LIMSEQ_offset) +qed + +lemma LIMSEQ_divide: + fixes a b :: "'a::real_normed_field" + shows "\X ----> a; Y ----> b; b \ 0\ \ (\n. X n / Y n) ----> a / b" +by (simp add: LIMSEQ_mult LIMSEQ_inverse divide_inverse) + +lemma LIMSEQ_pow: + fixes a :: "'a::{real_normed_algebra,recpower}" + shows "X ----> a \ (\n. (X n) ^ m) ----> a ^ m" +by (induct m) (simp_all add: power_Suc LIMSEQ_const LIMSEQ_mult) + +lemma LIMSEQ_setsum: + assumes n: "\n. n \ S \ X n ----> L n" + shows "(\m. \n\S. X n m) ----> (\n\S. L n)" +proof (cases "finite S") + case True + thus ?thesis using n + proof (induct) + case empty + show ?case + by (simp add: LIMSEQ_const) + next + case insert + thus ?case + by (simp add: LIMSEQ_add) + qed +next + case False + thus ?thesis + by (simp add: LIMSEQ_const) +qed + +lemma LIMSEQ_setprod: + fixes L :: "'a \ 'b::{real_normed_algebra,comm_ring_1}" + assumes n: "\n. n \ S \ X n ----> L n" + shows "(\m. \n\S. X n m) ----> (\n\S. 
L n)" +proof (cases "finite S") + case True + thus ?thesis using n + proof (induct) + case empty + show ?case + by (simp add: LIMSEQ_const) + next + case insert + thus ?case + by (simp add: LIMSEQ_mult) + qed +next + case False + thus ?thesis + by (simp add: setprod_def LIMSEQ_const) +qed + +lemma LIMSEQ_add_const: "f ----> a ==> (%n.(f n + b)) ----> a + b" +by (simp add: LIMSEQ_add LIMSEQ_const) + +(* FIXME: delete *) +lemma LIMSEQ_add_minus: + "[| X ----> a; Y ----> b |] ==> (%n. X n + -Y n) ----> a + -b" +by (simp only: LIMSEQ_add LIMSEQ_minus) + +lemma LIMSEQ_diff_const: "f ----> a ==> (%n.(f n - b)) ----> a - b" +by (simp add: LIMSEQ_diff LIMSEQ_const) + +lemma LIMSEQ_diff_approach_zero: + "g ----> L ==> (%x. f x - g x) ----> 0 ==> + f ----> L" + apply (drule LIMSEQ_add) + apply assumption + apply simp +done + +lemma LIMSEQ_diff_approach_zero2: + "f ----> L ==> (%x. f x - g x) ----> 0 ==> + g ----> L"; + apply (drule LIMSEQ_diff) + apply assumption + apply simp +done + +text{*A sequence tends to zero iff its abs does*} +lemma LIMSEQ_norm_zero: "((\n. norm (X n)) ----> 0) = (X ----> 0)" +by (simp add: LIMSEQ_def) + +lemma LIMSEQ_rabs_zero: "((%n. \f n\) ----> 0) = (f ----> (0::real))" +by (simp add: LIMSEQ_def) + +lemma LIMSEQ_imp_rabs: "f ----> (l::real) ==> (%n. \f n\) ----> \l\" +by (drule LIMSEQ_norm, simp) + +text{*An unbounded sequence's inverse tends to 0*} + +lemma LIMSEQ_inverse_zero: + "\r::real. \N. \n\N. r < X n \ (\n. inverse (X n)) ----> 0" +apply (rule LIMSEQ_I) +apply (drule_tac x="inverse r" in spec, safe) +apply (rule_tac x="N" in exI, safe) +apply (drule_tac x="n" in spec, safe) +apply (frule positive_imp_inverse_positive) +apply (frule (1) less_imp_inverse_less) +apply (subgoal_tac "0 < X n", simp) +apply (erule (1) order_less_trans) +done + +text{*The sequence @{term "1/n"} tends to 0 as @{term n} tends to infinity*} + +lemma LIMSEQ_inverse_real_of_nat: "(%n. inverse(real(Suc n))) ----> 0" +apply (rule LIMSEQ_inverse_zero, safe) +apply (cut_tac x = r in reals_Archimedean2) +apply (safe, rule_tac x = n in exI) +apply (auto simp add: real_of_nat_Suc) +done + +text{*The sequence @{term "r + 1/n"} tends to @{term r} as @{term n} tends to +infinity is now easily proved*} + +lemma LIMSEQ_inverse_real_of_nat_add: + "(%n. r + inverse(real(Suc n))) ----> r" +by (cut_tac LIMSEQ_add [OF LIMSEQ_const LIMSEQ_inverse_real_of_nat], auto) + +lemma LIMSEQ_inverse_real_of_nat_add_minus: + "(%n. r + -inverse(real(Suc n))) ----> r" +by (cut_tac LIMSEQ_add_minus [OF LIMSEQ_const LIMSEQ_inverse_real_of_nat], auto) + +lemma LIMSEQ_inverse_real_of_nat_add_minus_mult: + "(%n. r*( 1 + -inverse(real(Suc n)))) ----> r" +by (cut_tac b=1 in + LIMSEQ_mult [OF LIMSEQ_const LIMSEQ_inverse_real_of_nat_add_minus], auto) + +lemma LIMSEQ_le_const: + "\X ----> (x::real); \N. \n\N. a \ X n\ \ a \ x" +apply (rule ccontr, simp only: linorder_not_le) +apply (drule_tac r="a - x" in LIMSEQ_D, simp) +apply clarsimp +apply (drule_tac x="max N no" in spec, drule mp, rule le_maxI1) +apply (drule_tac x="max N no" in spec, drule mp, rule le_maxI2) +apply simp +done + +lemma LIMSEQ_le_const2: + "\X ----> (x::real); \N. \n\N. X n \ a\ \ x \ a" +apply (subgoal_tac "- a \ - x", simp) +apply (rule LIMSEQ_le_const) +apply (erule LIMSEQ_minus) +apply simp +done + +lemma LIMSEQ_le: + "\X ----> x; Y ----> y; \N. \n\N. 
X n \ Y n\ \ x \ (y::real)" +apply (subgoal_tac "0 \ y - x", simp) +apply (rule LIMSEQ_le_const) +apply (erule (1) LIMSEQ_diff) +apply (simp add: le_diff_eq) +done + + +subsection {* Convergence *} + +lemma limI: "X ----> L ==> lim X = L" +apply (simp add: lim_def) +apply (blast intro: LIMSEQ_unique) +done + +lemma convergentD: "convergent X ==> \L. (X ----> L)" +by (simp add: convergent_def) + +lemma convergentI: "(X ----> L) ==> convergent X" +by (auto simp add: convergent_def) + +lemma convergent_LIMSEQ_iff: "convergent X = (X ----> lim X)" +by (auto intro: theI LIMSEQ_unique simp add: convergent_def lim_def) + +lemma convergent_minus_iff: "(convergent X) = (convergent (%n. -(X n)))" +apply (simp add: convergent_def) +apply (auto dest: LIMSEQ_minus) +apply (drule LIMSEQ_minus, auto) +done + + +subsection {* Bounded Monotonic Sequences *} + +text{*Subsequence (alternative definition, (e.g. Hoskins)*} + +lemma subseq_Suc_iff: "subseq f = (\n. (f n) < (f (Suc n)))" +apply (simp add: subseq_def) +apply (auto dest!: less_imp_Suc_add) +apply (induct_tac k) +apply (auto intro: less_trans) +done + +lemma monoseq_Suc: + "monoseq X = ((\n. X n \ X (Suc n)) + | (\n. X (Suc n) \ X n))" +apply (simp add: monoseq_def) +apply (auto dest!: le_imp_less_or_eq) +apply (auto intro!: lessI [THEN less_imp_le] dest!: less_imp_Suc_add) +apply (induct_tac "ka") +apply (auto intro: order_trans) +apply (erule contrapos_np) +apply (induct_tac "k") +apply (auto intro: order_trans) +done + +lemma monoI1: "\m. \ n \ m. X m \ X n ==> monoseq X" +by (simp add: monoseq_def) + +lemma monoI2: "\m. \ n \ m. X n \ X m ==> monoseq X" +by (simp add: monoseq_def) + +lemma mono_SucI1: "\n. X n \ X (Suc n) ==> monoseq X" +by (simp add: monoseq_Suc) + +lemma mono_SucI2: "\n. X (Suc n) \ X n ==> monoseq X" +by (simp add: monoseq_Suc) + +text{*Bounded Sequence*} + +lemma BseqD: "Bseq X ==> \K. 0 < K & (\n. norm (X n) \ K)" +by (simp add: Bseq_def) + +lemma BseqI: "[| 0 < K; \n. norm (X n) \ K |] ==> Bseq X" +by (auto simp add: Bseq_def) + +lemma lemma_NBseq_def: + "(\K > 0. \n. norm (X n) \ K) = + (\N. \n. norm (X n) \ real(Suc N))" +apply auto + prefer 2 apply force +apply (cut_tac x = K in reals_Archimedean2, clarify) +apply (rule_tac x = n in exI, clarify) +apply (drule_tac x = na in spec) +apply (auto simp add: real_of_nat_Suc) +done + +text{* alternative definition for Bseq *} +lemma Bseq_iff: "Bseq X = (\N. \n. norm (X n) \ real(Suc N))" +apply (simp add: Bseq_def) +apply (simp (no_asm) add: lemma_NBseq_def) +done + +lemma lemma_NBseq_def2: + "(\K > 0. \n. norm (X n) \ K) = (\N. \n. norm (X n) < real(Suc N))" +apply (subst lemma_NBseq_def, auto) +apply (rule_tac x = "Suc N" in exI) +apply (rule_tac [2] x = N in exI) +apply (auto simp add: real_of_nat_Suc) + prefer 2 apply (blast intro: order_less_imp_le) +apply (drule_tac x = n in spec, simp) +done + +(* yet another definition for Bseq *) +lemma Bseq_iff1a: "Bseq X = (\N. \n. norm (X n) < real(Suc N))" +by (simp add: Bseq_def lemma_NBseq_def2) + +subsubsection{*Upper Bounds and Lubs of Bounded Sequences*} + +lemma Bseq_isUb: + "!!(X::nat=>real). Bseq X ==> \U. isUb (UNIV::real set) {x. \n. X n = x} U" +by (auto intro: isUbI setleI simp add: Bseq_def abs_le_iff) + + +text{* Use completeness of reals (supremum property) + to show that any bounded sequence has a least upper bound*} + +lemma Bseq_isLub: + "!!(X::nat=>real). Bseq X ==> + \U. isLub (UNIV::real set) {x. \n. 
X n = x} U" +by (blast intro: reals_complete Bseq_isUb) + +subsubsection{*A Bounded and Monotonic Sequence Converges*} + +lemma lemma_converg1: + "!!(X::nat=>real). [| \m. \ n \ m. X m \ X n; + isLub (UNIV::real set) {x. \n. X n = x} (X ma) + |] ==> \n \ ma. X n = X ma" +apply safe +apply (drule_tac y = "X n" in isLubD2) +apply (blast dest: order_antisym)+ +done + +text{* The best of both worlds: Easier to prove this result as a standard + theorem and then use equivalence to "transfer" it into the + equivalent nonstandard form if needed!*} + +lemma Bmonoseq_LIMSEQ: "\n. m \ n --> X n = X m ==> \L. (X ----> L)" +apply (simp add: LIMSEQ_def) +apply (rule_tac x = "X m" in exI, safe) +apply (rule_tac x = m in exI, safe) +apply (drule spec, erule impE, auto) +done + +lemma lemma_converg2: + "!!(X::nat=>real). + [| \m. X m ~= U; isLub UNIV {x. \n. X n = x} U |] ==> \m. X m < U" +apply safe +apply (drule_tac y = "X m" in isLubD2) +apply (auto dest!: order_le_imp_less_or_eq) +done + +lemma lemma_converg3: "!!(X ::nat=>real). \m. X m \ U ==> isUb UNIV {x. \n. X n = x} U" +by (rule setleI [THEN isUbI], auto) + +text{* FIXME: @{term "U - T < U"} is redundant *} +lemma lemma_converg4: "!!(X::nat=> real). + [| \m. X m ~= U; + isLub UNIV {x. \n. X n = x} U; + 0 < T; + U + - T < U + |] ==> \m. U + -T < X m & X m < U" +apply (drule lemma_converg2, assumption) +apply (rule ccontr, simp) +apply (simp add: linorder_not_less) +apply (drule lemma_converg3) +apply (drule isLub_le_isUb, assumption) +apply (auto dest: order_less_le_trans) +done + +text{*A standard proof of the theorem for monotone increasing sequence*} + +lemma Bseq_mono_convergent: + "[| Bseq X; \m. \n \ m. X m \ X n |] ==> convergent (X::nat=>real)" +apply (simp add: convergent_def) +apply (frule Bseq_isLub, safe) +apply (case_tac "\m. X m = U", auto) +apply (blast dest: lemma_converg1 Bmonoseq_LIMSEQ) +(* second case *) +apply (rule_tac x = U in exI) +apply (subst LIMSEQ_iff, safe) +apply (frule lemma_converg2, assumption) +apply (drule lemma_converg4, auto) +apply (rule_tac x = m in exI, safe) +apply (subgoal_tac "X m \ X n") + prefer 2 apply blast +apply (drule_tac x=n and P="%m. X m < U" in spec, arith) +done + +lemma Bseq_minus_iff: "Bseq (%n. -(X n)) = Bseq X" +by (simp add: Bseq_def) + +text{*Main monotonicity theorem*} +lemma Bseq_monoseq_convergent: "[| Bseq X; monoseq X |] ==> convergent X" +apply (simp add: monoseq_def, safe) +apply (rule_tac [2] convergent_minus_iff [THEN ssubst]) +apply (drule_tac [2] Bseq_minus_iff [THEN ssubst]) +apply (auto intro!: Bseq_mono_convergent) +done + +subsubsection{*A Few More Equivalence Theorems for Boundedness*} + +text{*alternative formulation for boundedness*} +lemma Bseq_iff2: "Bseq X = (\k > 0. \x. \n. norm (X(n) + -x) \ k)" +apply (unfold Bseq_def, safe) +apply (rule_tac [2] x = "k + norm x" in exI) +apply (rule_tac x = K in exI, simp) +apply (rule exI [where x = 0], auto) +apply (erule order_less_le_trans, simp) +apply (drule_tac x=n in spec, fold diff_def) +apply (drule order_trans [OF norm_triangle_ineq2]) +apply simp +done + +text{*alternative formulation for boundedness*} +lemma Bseq_iff3: "Bseq X = (\k > 0. \N. \n. norm(X(n) + -X(N)) \ k)" +apply safe +apply (simp add: Bseq_def, safe) +apply (rule_tac x = "K + norm (X N)" in exI) +apply auto +apply (erule order_less_le_trans, simp) +apply (rule_tac x = N in exI, safe) +apply (drule_tac x = n in spec) +apply (rule order_trans [OF norm_triangle_ineq], simp) +apply (auto simp add: Bseq_iff2) +done + +lemma BseqI2: "(\n. 
k \ f n & f n \ (K::real)) ==> Bseq f" +apply (simp add: Bseq_def) +apply (rule_tac x = " (\k\ + \K\) + 1" in exI, auto) +apply (drule_tac x = n in spec, arith) +done + + +subsection {* Cauchy Sequences *} + +lemma CauchyI: + "(\e. 0 < e \ \M. \m\M. \n\M. norm (X m - X n) < e) \ Cauchy X" +by (simp add: Cauchy_def) + +lemma CauchyD: + "\Cauchy X; 0 < e\ \ \M. \m\M. \n\M. norm (X m - X n) < e" +by (simp add: Cauchy_def) + +subsubsection {* Cauchy Sequences are Bounded *} + +text{*A Cauchy sequence is bounded -- this is the standard + proof mechanization rather than the nonstandard proof*} + +lemma lemmaCauchy: "\n \ M. norm (X M - X n) < (1::real) + ==> \n \ M. norm (X n :: 'a::real_normed_vector) < 1 + norm (X M)" +apply (clarify, drule spec, drule (1) mp) +apply (simp only: norm_minus_commute) +apply (drule order_le_less_trans [OF norm_triangle_ineq2]) +apply simp +done + +lemma Cauchy_Bseq: "Cauchy X ==> Bseq X" +apply (simp add: Cauchy_def) +apply (drule spec, drule mp, rule zero_less_one, safe) +apply (drule_tac x="M" in spec, simp) +apply (drule lemmaCauchy) +apply (rule_tac k="M" in Bseq_offset) +apply (simp add: Bseq_def) +apply (rule_tac x="1 + norm (X M)" in exI) +apply (rule conjI, rule order_less_le_trans [OF zero_less_one], simp) +apply (simp add: order_less_imp_le) +done + +subsubsection {* Cauchy Sequences are Convergent *} + +axclass banach \ real_normed_vector + Cauchy_convergent: "Cauchy X \ convergent X" + +theorem LIMSEQ_imp_Cauchy: + assumes X: "X ----> a" shows "Cauchy X" +proof (rule CauchyI) + fix e::real assume "0 < e" + hence "0 < e/2" by simp + with X have "\N. \n\N. norm (X n - a) < e/2" by (rule LIMSEQ_D) + then obtain N where N: "\n\N. norm (X n - a) < e/2" .. + show "\N. \m\N. \n\N. norm (X m - X n) < e" + proof (intro exI allI impI) + fix m assume "N \ m" + hence m: "norm (X m - a) < e/2" using N by fast + fix n assume "N \ n" + hence n: "norm (X n - a) < e/2" using N by fast + have "norm (X m - X n) = norm ((X m - a) - (X n - a))" by simp + also have "\ \ norm (X m - a) + norm (X n - a)" + by (rule norm_triangle_ineq4) + also from m n have "\ < e" by(simp add:field_simps) + finally show "norm (X m - X n) < e" . + qed +qed + +lemma convergent_Cauchy: "convergent X \ Cauchy X" +unfolding convergent_def +by (erule exE, erule LIMSEQ_imp_Cauchy) + +text {* +Proof that Cauchy sequences converge based on the one from +http://pirate.shu.edu/~wachsmut/ira/numseq/proofs/cauconv.html +*} + +text {* + If sequence @{term "X"} is Cauchy, then its limit is the lub of + @{term "{r::real. \N. \n\N. r < X n}"} +*} + +lemma isUb_UNIV_I: "(\y. y \ S \ y \ u) \ isUb UNIV S u" +by (simp add: isUbI setleI) + +lemma real_abs_diff_less_iff: + "(\x - a\ < (r::real)) = (a - r < x \ x < a + r)" +by auto + +locale real_Cauchy = + fixes X :: "nat \ real" + assumes X: "Cauchy X" + fixes S :: "real set" + defines S_def: "S \ {x::real. \N. \n\N. x < X n}" + +lemma real_CauchyI: + assumes "Cauchy X" + shows "real_Cauchy X" + proof qed (fact assms) + +lemma (in real_Cauchy) mem_S: "\n\N. x < X n \ x \ S" +by (unfold S_def, auto) + +lemma (in real_Cauchy) bound_isUb: + assumes N: "\n\N. X n < x" + shows "isUb UNIV S x" +proof (rule isUb_UNIV_I) + fix y::real assume "y \ S" + hence "\M. \n\M. y < X n" + by (simp add: S_def) + then obtain M where "\n\M. y < X n" .. + hence "y < X (max M N)" by simp + also have "\ < x" using N by simp + finally show "y \ x" + by (rule order_less_imp_le) +qed + +lemma (in real_Cauchy) isLub_ex: "\u. 
isLub UNIV S u" +proof (rule reals_complete) + obtain N where "\m\N. \n\N. norm (X m - X n) < 1" + using CauchyD [OF X zero_less_one] by fast + hence N: "\n\N. norm (X n - X N) < 1" by simp + show "\x. x \ S" + proof + from N have "\n\N. X N - 1 < X n" + by (simp add: real_abs_diff_less_iff) + thus "X N - 1 \ S" by (rule mem_S) + qed + show "\u. isUb UNIV S u" + proof + from N have "\n\N. X n < X N + 1" + by (simp add: real_abs_diff_less_iff) + thus "isUb UNIV S (X N + 1)" + by (rule bound_isUb) + qed +qed + +lemma (in real_Cauchy) isLub_imp_LIMSEQ: + assumes x: "isLub UNIV S x" + shows "X ----> x" +proof (rule LIMSEQ_I) + fix r::real assume "0 < r" + hence r: "0 < r/2" by simp + obtain N where "\n\N. \m\N. norm (X n - X m) < r/2" + using CauchyD [OF X r] by fast + hence "\n\N. norm (X n - X N) < r/2" by simp + hence N: "\n\N. X N - r/2 < X n \ X n < X N + r/2" + by (simp only: real_norm_def real_abs_diff_less_iff) + + from N have "\n\N. X N - r/2 < X n" by fast + hence "X N - r/2 \ S" by (rule mem_S) + hence 1: "X N - r/2 \ x" using x isLub_isUb isUbD by fast + + from N have "\n\N. X n < X N + r/2" by fast + hence "isUb UNIV S (X N + r/2)" by (rule bound_isUb) + hence 2: "x \ X N + r/2" using x isLub_le_isUb by fast + + show "\N. \n\N. norm (X n - x) < r" + proof (intro exI allI impI) + fix n assume n: "N \ n" + from N n have "X n < X N + r/2" and "X N - r/2 < X n" by simp+ + thus "norm (X n - x) < r" using 1 2 + by (simp add: real_abs_diff_less_iff) + qed +qed + +lemma (in real_Cauchy) LIMSEQ_ex: "\x. X ----> x" +proof - + obtain x where "isLub UNIV S x" + using isLub_ex by fast + hence "X ----> x" + by (rule isLub_imp_LIMSEQ) + thus ?thesis .. +qed + +lemma real_Cauchy_convergent: + fixes X :: "nat \ real" + shows "Cauchy X \ convergent X" +unfolding convergent_def +by (rule real_Cauchy.LIMSEQ_ex) + (rule real_CauchyI) + +instance real :: banach +by intro_classes (rule real_Cauchy_convergent) + +lemma Cauchy_convergent_iff: + fixes X :: "nat \ 'a::banach" + shows "Cauchy X = convergent X" +by (fast intro: Cauchy_convergent convergent_Cauchy) + + +subsection {* Power Sequences *} + +text{*The sequence @{term "x^n"} tends to 0 if @{term "0\x"} and @{term +"x<1"}. Proof will use (NS) Cauchy equivalence for convergence and + also fact that bounded and monotonic sequence converges.*} + +lemma Bseq_realpow: "[| 0 \ (x::real); x \ 1 |] ==> Bseq (%n. x ^ n)" +apply (simp add: Bseq_def) +apply (rule_tac x = 1 in exI) +apply (simp add: power_abs) +apply (auto dest: power_mono) +done + +lemma monoseq_realpow: "[| 0 \ x; x \ 1 |] ==> monoseq (%n. x ^ n)" +apply (clarify intro!: mono_SucI2) +apply (cut_tac n = n and N = "Suc n" and a = x in power_decreasing, auto) +done + +lemma convergent_realpow: + "[| 0 \ (x::real); x \ 1 |] ==> convergent (%n. x ^ n)" +by (blast intro!: Bseq_monoseq_convergent Bseq_realpow monoseq_realpow) + +lemma LIMSEQ_inverse_realpow_zero_lemma: + fixes x :: real + assumes x: "0 \ x" + shows "real n * x + 1 \ (x + 1) ^ n" +apply (induct n) +apply simp +apply simp +apply (rule order_trans) +prefer 2 +apply (erule mult_left_mono) +apply (rule add_increasing [OF x], simp) +apply (simp add: real_of_nat_Suc) +apply (simp add: ring_distribs) +apply (simp add: mult_nonneg_nonneg x) +done + +lemma LIMSEQ_inverse_realpow_zero: + "1 < (x::real) \ (\n. inverse (x ^ n)) ----> 0" +proof (rule LIMSEQ_inverse_zero [rule_format]) + fix y :: real + assume x: "1 < x" + hence "0 < x - 1" by simp + hence "\y. \N::nat. y < real N * (x - 1)" + by (rule reals_Archimedean3) + hence "\N::nat. 
y < real N * (x - 1)" .. + then obtain N::nat where "y < real N * (x - 1)" .. + also have "\ \ real N * (x - 1) + 1" by simp + also have "\ \ (x - 1 + 1) ^ N" + by (rule LIMSEQ_inverse_realpow_zero_lemma, cut_tac x, simp) + also have "\ = x ^ N" by simp + finally have "y < x ^ N" . + hence "\n\N. y < x ^ n" + apply clarify + apply (erule order_less_le_trans) + apply (erule power_increasing) + apply (rule order_less_imp_le [OF x]) + done + thus "\N. \n\N. y < x ^ n" .. +qed + +lemma LIMSEQ_realpow_zero: + "\0 \ (x::real); x < 1\ \ (\n. x ^ n) ----> 0" +proof (cases) + assume "x = 0" + hence "(\n. x ^ Suc n) ----> 0" by (simp add: LIMSEQ_const) + thus ?thesis by (rule LIMSEQ_imp_Suc) +next + assume "0 \ x" and "x \ 0" + hence x0: "0 < x" by simp + assume x1: "x < 1" + from x0 x1 have "1 < inverse x" + by (rule real_inverse_gt_one) + hence "(\n. inverse (inverse x ^ n)) ----> 0" + by (rule LIMSEQ_inverse_realpow_zero) + thus ?thesis by (simp add: power_inverse) +qed + +lemma LIMSEQ_power_zero: + fixes x :: "'a::{real_normed_algebra_1,recpower}" + shows "norm x < 1 \ (\n. x ^ n) ----> 0" +apply (drule LIMSEQ_realpow_zero [OF norm_ge_zero]) +apply (simp only: LIMSEQ_Zseq_iff, erule Zseq_le) +apply (simp add: power_abs norm_power_ineq) +done + +lemma LIMSEQ_divide_realpow_zero: + "1 < (x::real) ==> (%n. a / (x ^ n)) ----> 0" +apply (cut_tac a = a and x1 = "inverse x" in + LIMSEQ_mult [OF LIMSEQ_const LIMSEQ_realpow_zero]) +apply (auto simp add: divide_inverse power_inverse) +apply (simp add: inverse_eq_divide pos_divide_less_eq) +done + +text{*Limit of @{term "c^n"} for @{term"\c\ < 1"}*} + +lemma LIMSEQ_rabs_realpow_zero: "\c\ < (1::real) ==> (%n. \c\ ^ n) ----> 0" +by (rule LIMSEQ_realpow_zero [OF abs_ge_zero]) + +lemma LIMSEQ_rabs_realpow_zero2: "\c\ < (1::real) ==> (%n. c ^ n) ----> 0" +apply (rule LIMSEQ_rabs_zero [THEN iffD1]) +apply (auto intro: LIMSEQ_rabs_realpow_zero simp add: power_abs) +done + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Series.thy --- a/src/HOL/Series.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Series.thy Tue Dec 30 11:10:01 2008 +0100 @@ -10,7 +10,7 @@ header{*Finite Summation and Infinite Series*} theory Series -imports "~~/src/HOL/Hyperreal/SEQ" +imports SEQ begin definition diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Sum_Type.thy --- a/src/HOL/Sum_Type.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Sum_Type.thy Tue Dec 30 11:10:01 2008 +0100 @@ -120,29 +120,6 @@ by (blast dest!: Inr_inject) -subsection {* Projections *} - -definition - "sum_case f g x = - (if (\!y. x = Inl y) - then f (THE y. x = Inl y) - else g (THE y. x = Inr y))" -definition "Projl x = sum_case id undefined x" -definition "Projr x = sum_case undefined id x" - -lemma sum_cases[simp]: - "sum_case f g (Inl x) = f x" - "sum_case f g (Inr y) = g y" - unfolding sum_case_def - by auto - -lemma Projl_Inl[simp]: "Projl (Inl x) = x" - unfolding Projl_def by simp - -lemma Projr_Inr[simp]: "Projr (Inr x) = x" - unfolding Projr_def by simp - - subsection{*The Disjoint Sum of Sets*} (** Introduction rules for the injections **) diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Tools/atp_manager.ML --- a/src/HOL/Tools/atp_manager.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Tools/atp_manager.ML Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ (* Title: HOL/Tools/atp_manager.ML - ID: $Id$ Author: Fabian Immler, TU Muenchen ATP threads are registered here. 
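
Illustrative sketch only, not part of this changeset, and referring back to the power-sequence material at the end of SEQ.thy above: combining LIMSEQ_rabs_realpow_zero2 with LIMSEQ_mult and LIMSEQ_const gives the usual geometric-sequence limit. The lemma name geometric_LIMSEQ_example is hypothetical.

(* hypothetical corollary of the SEQ.thy lemmas above *)
lemma geometric_LIMSEQ_example:
  fixes a c :: real
  assumes c: "\<bar>c\<bar> < 1"
  shows "(%n. a * c ^ n) ----> 0"
proof -
  have "(%n. c ^ n) ----> 0"
    using c by (rule LIMSEQ_rabs_realpow_zero2)
  hence "(%n. a * c ^ n) ----> a * 0"
    by (rule LIMSEQ_mult [OF LIMSEQ_const])
  thus ?thesis by simp
qed
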
@@ -19,6 +18,7 @@ val set_timeout: int -> unit val kill: unit -> unit val info: unit -> unit + val messages: int option -> unit type prover = int -> Proof.state -> bool * string val add_prover: string -> prover -> theory -> theory val print_provers: theory -> unit @@ -30,6 +30,9 @@ (** preferences **) +val message_store_limit = 20; +val message_display_limit = 5; + local val atps = ref "e"; @@ -85,13 +88,14 @@ {timeout_heap: ThreadHeap.T, oldest_heap: ThreadHeap.T, active: (Thread.thread * (Time.time * Time.time * string)) list, - cancelling: (Thread.thread * (Time.time * Time.time * string)) list}; + cancelling: (Thread.thread * (Time.time * Time.time * string)) list, + messages: string list}; -fun make_state timeout_heap oldest_heap active cancelling = +fun make_state timeout_heap oldest_heap active cancelling messages = State {timeout_heap = timeout_heap, oldest_heap = oldest_heap, - active = active, cancelling = cancelling}; + active = active, cancelling = cancelling, messages = messages}; -val state = Synchronized.var "atp_manager" (make_state ThreadHeap.empty ThreadHeap.empty [] []); +val state = Synchronized.var "atp_manager" (make_state ThreadHeap.empty ThreadHeap.empty [] [] []); (* the managing thread *) @@ -100,34 +104,31 @@ val managing_thread = ref (NONE: Thread.thread option); -(* unregister thread from thread manager -- move to cancelling *) +(* unregister thread *) fun unregister (success, message) thread = Synchronized.change_result state - (fn State {timeout_heap, oldest_heap, active, cancelling} => - let - val info = lookup_thread active thread + (fn state as State {timeout_heap, oldest_heap, active, cancelling, messages} => + (case lookup_thread active thread of + SOME (birthtime, _, description) => + let + val (group, active') = + if success then List.partition (fn (_, (tb, _, _)) => tb = birthtime) active + else List.partition (fn (th, _) => Thread.equal (th, thread)) active + val others = delete_thread thread group - (* get birthtime of unregistering thread if successful - for group-killing*) - val birthtime = case info of NONE => Time.zeroTime - | SOME (tb, _, _) => if success then tb else Time.zeroTime + val now = Time.now () + val cancelling' = + fold (fn (th, (tb, _, desc)) => update_thread (th, (tb, now, desc))) others cancelling - (* move unregistering thread to cancelling *) - val active' = delete_thread thread active - val cancelling' = case info of NONE => cancelling - | SOME (tb, _, desc) => update_thread (thread, (tb, Time.now (), desc)) cancelling - - (* move all threads of the same group to cancelling *) - val group_threads = active |> map_filter (fn (th, (tb, _, desc)) => - if tb = birthtime then SOME (th, (tb, Time.now (), desc)) else NONE) - val active'' = filter_out (fn (_, (tb, _, _)) => tb = birthtime) active' - val cancelling'' = append group_threads cancelling' - - (* message for user *) - val message' = case info of NONE => "" - | SOME (_, _, desc) => "Sledgehammer: " ^ desc ^ "\n" ^ message ^ - (if null group_threads then "" - else "\nInterrupted " ^ string_of_int (length group_threads - 1) ^ " other group members") - in (message', make_state timeout_heap oldest_heap active'' cancelling'') end); + val msg = description ^ "\n" ^ message + val message' = "Sledgehammer: " ^ msg ^ + (if null others then "" + else "\nInterrupted " ^ string_of_int (length others) ^ " other group members") + val messages' = msg :: + (if length messages <= message_store_limit then messages + else #1 (chop message_store_limit messages)) + in (message', make_state 
timeout_heap oldest_heap active' cancelling' messages') end + | NONE => ("", state))); (* kill excessive atp threads *) @@ -140,12 +141,13 @@ fun kill_oldest () = let exception Unchanged in - Synchronized.change_result state (fn State {timeout_heap, oldest_heap, active, cancelling} => + Synchronized.change_result state + (fn State {timeout_heap, oldest_heap, active, cancelling, messages} => if ThreadHeap.is_empty oldest_heap orelse not (excessive_atps active) then raise Unchanged else let val ((_, oldest_thread), oldest_heap') = ThreadHeap.min_elem oldest_heap - in (oldest_thread, make_state timeout_heap oldest_heap' active cancelling) end) + in (oldest_thread, make_state timeout_heap oldest_heap' active cancelling messages) end) |> (priority o unregister (false, "Interrupted (maximum number of ATPs exceeded)")) handle Unchanged => () end; @@ -175,7 +177,7 @@ | SOME (time, _) => SOME time) (* action: cancel find threads whose timeout is reached, and interrupt cancelling threads *) - fun action (State {timeout_heap, oldest_heap, active, cancelling}) = + fun action (State {timeout_heap, oldest_heap, active, cancelling, messages}) = let val (timeout_threads, timeout_heap') = ThreadHeap.upto (Time.now (), Thread.self ()) timeout_heap in @@ -185,7 +187,7 @@ let val _ = List.app (SimpleThread.interrupt o #1) cancelling val cancelling' = filter (Thread.isActive o #1) cancelling - val state' = make_state timeout_heap' oldest_heap active cancelling' + val state' = make_state timeout_heap' oldest_heap active cancelling' messages in SOME (map #2 timeout_threads, state') end end in @@ -203,12 +205,13 @@ fun register birthtime deadtime (thread, desc) = (check_thread_manager (); - Synchronized.change state (fn State {timeout_heap, oldest_heap, active, cancelling} => - let - val timeout_heap' = ThreadHeap.insert (deadtime, thread) timeout_heap - val oldest_heap' = ThreadHeap.insert (birthtime, thread) oldest_heap - val active' = update_thread (thread, (birthtime, deadtime, desc)) active - in make_state timeout_heap' oldest_heap' active' cancelling end)); + Synchronized.change state + (fn State {timeout_heap, oldest_heap, active, cancelling, messages} => + let + val timeout_heap' = ThreadHeap.insert (deadtime, thread) timeout_heap + val oldest_heap' = ThreadHeap.insert (birthtime, thread) oldest_heap + val active' = update_thread (thread, (birthtime, deadtime, desc)) active + in make_state timeout_heap' oldest_heap' active' cancelling messages end)); @@ -217,16 +220,17 @@ (* kill: move all threads to cancelling *) fun kill () = Synchronized.change state - (fn State {timeout_heap, oldest_heap, active, cancelling} => + (fn State {timeout_heap, oldest_heap, active, cancelling, messages} => let val formerly_active = map (fn (th, (tb, _, desc)) => (th, (tb, Time.now (), desc))) active - in make_state timeout_heap oldest_heap [] (formerly_active @ cancelling) end); + in make_state timeout_heap oldest_heap [] (formerly_active @ cancelling) messages end); -(* info: information on running threads *) +(* ATP info *) fun info () = let - val State {timeout_heap, oldest_heap, active, cancelling} = Synchronized.value state + val State {active, cancelling, ...} = Synchronized.value state + fun running_info (_, (birth_time, dead_time, desc)) = "Running: " ^ (string_of_int o Time.toSeconds) (Time.- (Time.now (), birth_time)) ^ " s -- " @@ -235,6 +239,7 @@ fun cancelling_info (_, (_, dead_time, desc)) = "Trying to interrupt thread since " ^ (string_of_int o Time.toSeconds) (Time.- (Time.now (), dead_time)) ^ " s:\n" ^ desc 
+ val running = if null active then "No ATPs running." else space_implode "\n\n" ("Running ATPs:" :: map running_info active) @@ -242,8 +247,17 @@ if null cancelling then "" else space_implode "\n\n" ("Trying to interrupt the following ATPs:" :: map cancelling_info cancelling) + in writeln (running ^ "\n" ^ interrupting) end; +fun messages opt_limit = + let + val limit = the_default message_display_limit opt_limit; + val State {messages = msgs, ...} = Synchronized.value state + val header = "Recent ATP messages" ^ + (if length msgs <= limit then ":" else " (" ^ string_of_int limit ^ " displayed):"); + in writeln (space_implode "\n\n" (header :: #1 (chop limit msgs))) end; + (** The Sledgehammer **) @@ -322,6 +336,11 @@ (Scan.succeed (Toplevel.no_timing o Toplevel.imperative info)); val _ = + OuterSyntax.improper_command "atp_messages" "print recent messages issued by managed provers" K.diag + (Scan.option (P.$$$ "(" |-- P.nat --| P.$$$ ")") >> + (fn limit => Toplevel.no_timing o Toplevel.imperative (fn () => messages limit))); + +val _ = OuterSyntax.improper_command "print_atps" "print external provers" K.diag (Scan.succeed (Toplevel.no_timing o Toplevel.unknown_theory o Toplevel.keep (print_provers o Toplevel.theory_of))); @@ -329,7 +348,7 @@ val _ = OuterSyntax.command "sledgehammer" "call all automatic theorem provers" K.diag (Scan.repeat P.xname >> (fn names => Toplevel.no_timing o Toplevel.unknown_proof o - Toplevel.keep ((sledgehammer names) o Toplevel.proof_of))); + Toplevel.keep ((sledgehammer names) o Toplevel.proof_of))); end; diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Tools/function_package/decompose.ML --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Tools/function_package/decompose.ML Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,105 @@ +(* Title: HOL/Tools/function_package/decompose.ML + Author: Alexander Krauss, TU Muenchen + +Graph decomposition using "Shallow Dependency Pairs". 
+*) + +signature DECOMPOSE = +sig + + val derive_chains : Proof.context -> tactic + -> (Termination.data -> int -> tactic) + -> Termination.data -> int -> tactic + + val decompose_tac : Proof.context -> tactic + -> Termination.ttac + +end + +structure Decompose : DECOMPOSE = +struct + +structure TermGraph = GraphFun(type key = term val ord = Term.fast_term_ord); + + +fun derive_chains ctxt chain_tac cont D = Termination.CALLS (fn (cs, i) => + let + val thy = ProofContext.theory_of ctxt + + fun prove_chain c1 c2 D = + if is_some (Termination.get_chain D c1 c2) then D else + let + val goal = HOLogic.mk_eq (HOLogic.mk_binop @{const_name "Relation.rel_comp"} (c1, c2), + Const (@{const_name "{}"}, fastype_of c1)) + |> HOLogic.mk_Trueprop (* "C1 O C2 = {}" *) + + val chain = case FundefLib.try_proof (cterm_of thy goal) chain_tac of + FundefLib.Solved thm => SOME thm + | _ => NONE + in + Termination.note_chain c1 c2 chain D + end + in + cont (fold_product prove_chain cs cs D) i + end) + + +fun mk_dgraph D cs = + TermGraph.empty + |> fold (fn c => TermGraph.new_node (c,())) cs + |> fold_product (fn c1 => fn c2 => + if is_none (Termination.get_chain D c1 c2 |> the_default NONE) + then TermGraph.add_edge (c1, c2) else I) + cs cs + + +fun ucomp_empty_tac T = + REPEAT_ALL_NEW (rtac @{thm union_comp_emptyR} + ORELSE' rtac @{thm union_comp_emptyL} + ORELSE' SUBGOAL (fn (_ $ (_ $ (_ $ c1 $ c2) $ _), i) => rtac (T c1 c2) i)) + +fun regroup_calls_tac cs = Termination.CALLS (fn (cs', i) => + let + val is = map (fn c => find_index (curry op aconv c) cs') cs + in + CONVERSION (Conv.arg_conv (Conv.arg_conv (FundefLib.regroup_union_conv is))) i + end) + + +fun solve_trivial_tac D = Termination.CALLS +(fn ([c], i) => + (case Termination.get_chain D c c of + SOME (SOME thm) => rtac @{thm wf_no_loop} i + THEN rtac thm i + | _ => no_tac) + | _ => no_tac) + +fun decompose_tac' ctxt cont err_cont D = Termination.CALLS (fn (cs, i) => + let + val G = mk_dgraph D cs + val sccs = TermGraph.strong_conn G + + fun split [SCC] i = (solve_trivial_tac D i ORELSE cont D i) + | split (SCC::rest) i = + regroup_calls_tac SCC i + THEN rtac @{thm wf_union_compatible} i + THEN rtac @{thm less_by_empty} (i + 2) + THEN ucomp_empty_tac (the o the oo Termination.get_chain D) (i + 2) + THEN split rest (i + 1) + THEN (solve_trivial_tac D i ORELSE cont D i) + in + if length sccs > 1 then split sccs i + else solve_trivial_tac D i ORELSE err_cont D i + end) + +fun decompose_tac ctxt chain_tac cont err_cont = + derive_chains ctxt chain_tac + (decompose_tac' ctxt cont err_cont) + +fun auto_decompose_tac ctxt = + Termination.TERMINATION ctxt + (decompose_tac ctxt (auto_tac (local_clasimpset_of ctxt)) + (K (K all_tac)) (K (K no_tac))) + + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Tools/function_package/descent.ML --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Tools/function_package/descent.ML Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,44 @@ +(* Title: HOL/Tools/function_package/descent.ML + Author: Alexander Krauss, TU Muenchen + +Descent proofs for termination +*) + + +signature DESCENT = +sig + + val derive_diag : Proof.context -> tactic -> (Termination.data -> int -> tactic) + -> Termination.data -> int -> tactic + + val derive_all : Proof.context -> tactic -> (Termination.data -> int -> tactic) + -> Termination.data -> int -> tactic + +end + + +structure Descent : DESCENT = +struct + +fun gen_descent diag ctxt tac cont D = Termination.CALLS (fn (cs, i) => + let + val thy = ProofContext.theory_of ctxt + val measures_of = 
Termination.get_measures D + + fun derive c D = + let + val (_, p, _, q, _, _) = Termination.dest_call D c + in + if diag andalso p = q + then fold (fn m => Termination.derive_descent thy tac c m m) (measures_of p) D + else fold_product (Termination.derive_descent thy tac c) + (measures_of p) (measures_of q) D + end + in + cont (FundefCommon.PROFILE "deriving descents" (fold derive cs) D) i + end) + +val derive_diag = gen_descent true +val derive_all = gen_descent false + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Tools/function_package/fundef_lib.ML --- a/src/HOL/Tools/function_package/fundef_lib.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Tools/function_package/fundef_lib.ML Tue Dec 30 11:10:01 2008 +0100 @@ -130,4 +130,50 @@ | SOME st => if Thm.no_prems st then Solved (Goal.finish st) else Stuck st +fun dest_binop_list cn (t as (Const (n, _) $ a $ b)) = + if cn = n then dest_binop_list cn a @ dest_binop_list cn b else [ t ] + | dest_binop_list _ t = [ t ] + + +(* separate two parts in a +-expression: + "a + b + c + d + e" --> "(a + b + d) + (c + e)" + + Here, + can be any binary operation that is AC. + + cn - The name of the binop-constructor (e.g. @{const_name "op Un"}) + ac - the AC rewrite rules for cn + is - the list of indices of the expressions that should become the first part + (e.g. [0,1,3] in the above example) +*) + +fun regroup_conv neu cn ac is ct = + let + val mk = HOLogic.mk_binop cn + val t = term_of ct + val xs = dest_binop_list cn t + val js = 0 upto (length xs) - 1 \\ is + val ty = fastype_of t + val thy = theory_of_cterm ct + in + Goal.prove_internal [] + (cterm_of thy + (Logic.mk_equals (t, + if is = [] + then mk (Const (neu, ty), foldr1 mk (map (nth xs) js)) + else if js = [] + then mk (foldr1 mk (map (nth xs) is), Const (neu, ty)) + else mk (foldr1 mk (map (nth xs) is), foldr1 mk (map (nth xs) js))))) + (K (MetaSimplifier.rewrite_goals_tac ac + THEN rtac Drule.reflexive_thm 1)) + end + +(* instance for unions *) +fun regroup_union_conv t = + regroup_conv (@{const_name "{}"}) + @{const_name "op Un"} + (map (fn t => t RS eq_reflection) (@{thms "Un_ac"} @ + @{thms "Un_empty_right"} @ + @{thms "Un_empty_left"})) t + + end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Tools/function_package/induction_scheme.ML --- a/src/HOL/Tools/function_package/induction_scheme.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Tools/function_package/induction_scheme.ML Tue Dec 30 11:10:01 2008 +0100 @@ -55,7 +55,7 @@ fun meta thm = thm RS eq_reflection val sum_prod_conv = MetaSimplifier.rewrite true - (map meta (@{thm split_conv} :: @{thms sum_cases})) + (map meta (@{thm split_conv} :: @{thms sum.cases})) fun term_conv thy cv t = cv (cterm_of thy t) @@ -320,7 +320,7 @@ val Pxs = cert (HOLogic.mk_Trueprop (P_comp $ x)) |> Goal.init - |> (MetaSimplifier.rewrite_goals_tac (map meta (branch_hyp :: @{thm split_conv} :: @{thms sum_cases})) + |> (MetaSimplifier.rewrite_goals_tac (map meta (branch_hyp :: @{thm split_conv} :: @{thms sum.cases})) THEN CONVERSION ind_rulify 1) |> Seq.hd |> Thm.elim_implies (Conv.fconv_rule Drule.beta_eta_conversion bstep) diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Tools/function_package/scnp_reconstruct.ML --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Tools/function_package/scnp_reconstruct.ML Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,426 @@ +(* Title: HOL/Tools/function_package/scnp_reconstruct.ML + Author: Armin Heller, TU Muenchen + Author: Alexander Krauss, TU Muenchen + +Proof reconstruction for SCNP +*) + +signature SCNP_RECONSTRUCT 
= +sig + + val decomp_scnp : ScnpSolve.label list -> Proof.context -> method + + val setup : theory -> theory + + datatype multiset_setup = + Multiset of + { + msetT : typ -> typ, + mk_mset : typ -> term list -> term, + mset_regroup_conv : int list -> conv, + mset_member_tac : int -> int -> tactic, + mset_nonempty_tac : int -> tactic, + mset_pwleq_tac : int -> tactic, + set_of_simps : thm list, + smsI' : thm, + wmsI2'' : thm, + wmsI1 : thm, + reduction_pair : thm + } + + + val multiset_setup : multiset_setup -> theory -> theory + +end + +structure ScnpReconstruct : SCNP_RECONSTRUCT = +struct + +val PROFILE = FundefCommon.PROFILE +fun TRACE x = if ! FundefCommon.profile then Output.tracing x else () + +open ScnpSolve + +val natT = HOLogic.natT +val nat_pairT = HOLogic.mk_prodT (natT, natT) + +(* Theory dependencies *) + +datatype multiset_setup = + Multiset of + { + msetT : typ -> typ, + mk_mset : typ -> term list -> term, + mset_regroup_conv : int list -> conv, + mset_member_tac : int -> int -> tactic, + mset_nonempty_tac : int -> tactic, + mset_pwleq_tac : int -> tactic, + set_of_simps : thm list, + smsI' : thm, + wmsI2'' : thm, + wmsI1 : thm, + reduction_pair : thm + } + +structure MultisetSetup = TheoryDataFun +( + type T = multiset_setup option + val empty = NONE + val copy = I; + val extend = I; + fun merge _ (v1, v2) = if is_some v2 then v2 else v1 +) + +val multiset_setup = MultisetSetup.put o SOME + +fun undef x = error "undef" +fun get_multiset_setup thy = MultisetSetup.get thy + |> the_default (Multiset +{ msetT = undef, mk_mset=undef, + mset_regroup_conv=undef, mset_member_tac = undef, + mset_nonempty_tac = undef, mset_pwleq_tac = undef, + set_of_simps = [],reduction_pair = refl, + smsI'=refl, wmsI2''=refl, wmsI1=refl }) + +fun order_rpair _ MAX = @{thm max_rpair_set} + | order_rpair msrp MS = msrp + | order_rpair _ MIN = @{thm min_rpair_set} + +fun ord_intros_max true = + (@{thm smax_emptyI}, @{thm smax_insertI}) + | ord_intros_max false = + (@{thm wmax_emptyI}, @{thm wmax_insertI}) +fun ord_intros_min true = + (@{thm smin_emptyI}, @{thm smin_insertI}) + | ord_intros_min false = + (@{thm wmin_emptyI}, @{thm wmin_insertI}) + +fun gen_probl D cs = + let + val n = Termination.get_num_points D + val arity = length o Termination.get_measures D + fun measure p i = nth (Termination.get_measures D p) i + + fun mk_graph c = + let + val (_, p, _, q, _, _) = Termination.dest_call D c + + fun add_edge i j = + case Termination.get_descent D c (measure p i) (measure q j) + of SOME (Termination.Less _) => cons (i, GTR, j) + | SOME (Termination.LessEq _) => cons (i, GEQ, j) + | _ => I + + val edges = + fold_product add_edge (0 upto arity p - 1) (0 upto arity q - 1) [] + in + G (p, q, edges) + end + in + GP (map arity (0 upto n - 1), map mk_graph cs) + end + +(* General reduction pair application *) +fun rem_inv_img ctxt = + let + val unfold_tac = LocalDefs.unfold_tac ctxt + in + rtac @{thm subsetI} 1 + THEN etac @{thm CollectE} 1 + THEN REPEAT (etac @{thm exE} 1) + THEN unfold_tac @{thms inv_image_def} + THEN rtac @{thm CollectI} 1 + THEN etac @{thm conjE} 1 + THEN etac @{thm ssubst} 1 + THEN unfold_tac (@{thms split_conv} @ @{thms triv_forall_equality} + @ @{thms sum.cases}) + end + +(* Sets *) + +val setT = HOLogic.mk_setT + +fun mk_set T [] = Const (@{const_name "{}"}, setT T) + | mk_set T (x :: xs) = + Const (@{const_name insert}, T --> setT T --> setT T) $ + x $ mk_set T xs + +fun set_member_tac m i = + if m = 0 then rtac @{thm insertI1} i + else rtac @{thm insertI2} i THEN set_member_tac 
(m - 1) i + +val set_nonempty_tac = rtac @{thm insert_not_empty} + +fun set_finite_tac i = + rtac @{thm finite.emptyI} i + ORELSE (rtac @{thm finite.insertI} i THEN (fn st => set_finite_tac i st)) + + +(* Reconstruction *) + +fun reconstruct_tac ctxt D cs (gp as GP (_, gs)) certificate = + let + val thy = ProofContext.theory_of ctxt + val Multiset + { msetT, mk_mset, + mset_regroup_conv, mset_member_tac, + mset_nonempty_tac, mset_pwleq_tac, set_of_simps, + smsI', wmsI2'', wmsI1, reduction_pair=ms_rp } + = get_multiset_setup thy + + fun measure_fn p = nth (Termination.get_measures D p) + + fun get_desc_thm cidx m1 m2 bStrict = + case Termination.get_descent D (nth cs cidx) m1 m2 + of SOME (Termination.Less thm) => + if bStrict then thm + else (thm COMP (Thm.lift_rule (cprop_of thm) @{thm less_imp_le})) + | SOME (Termination.LessEq (thm, _)) => + if not bStrict then thm + else sys_error "get_desc_thm" + | _ => sys_error "get_desc_thm" + + val (label, lev, sl, covering) = certificate + + fun prove_lev strict g = + let + val G (p, q, el) = nth gs g + + fun less_proof strict (j, b) (i, a) = + let + val tag_flag = b < a orelse (not strict andalso b <= a) + + val stored_thm = + get_desc_thm g (measure_fn p i) (measure_fn q j) + (not tag_flag) + |> Conv.fconv_rule (Thm.beta_conversion true) + + val rule = if strict + then if b < a then @{thm pair_lessI2} else @{thm pair_lessI1} + else if b <= a then @{thm pair_leqI2} else @{thm pair_leqI1} + in + rtac rule 1 THEN PRIMITIVE (Thm.elim_implies stored_thm) + THEN (if tag_flag then arith_tac ctxt 1 else all_tac) + end + + fun steps_tac MAX strict lq lp = + let + val (empty, step) = ord_intros_max strict + in + if length lq = 0 + then rtac empty 1 THEN set_finite_tac 1 + THEN (if strict then set_nonempty_tac 1 else all_tac) + else + let + val (j, b) :: rest = lq + val (i, a) = the (covering g strict j) + fun choose xs = set_member_tac (Library.find_index (curry op = (i, a)) xs) 1 + val solve_tac = choose lp THEN less_proof strict (j, b) (i, a) + in + rtac step 1 THEN solve_tac THEN steps_tac MAX strict rest lp + end + end + | steps_tac MIN strict lq lp = + let + val (empty, step) = ord_intros_min strict + in + if length lp = 0 + then rtac empty 1 + THEN (if strict then set_nonempty_tac 1 else all_tac) + else + let + val (i, a) :: rest = lp + val (j, b) = the (covering g strict i) + fun choose xs = set_member_tac (Library.find_index (curry op = (j, b)) xs) 1 + val solve_tac = choose lq THEN less_proof strict (j, b) (i, a) + in + rtac step 1 THEN solve_tac THEN steps_tac MIN strict lq rest + end + end + | steps_tac MS strict lq lp = + let + fun get_str_cover (j, b) = + if is_some (covering g true j) then SOME (j, b) else NONE + fun get_wk_cover (j, b) = the (covering g false j) + + val qs = lq \\ map_filter get_str_cover lq + val ps = map get_wk_cover qs + + fun indices xs ys = map (fn y => Library.find_index (curry op = y) xs) ys + val iqs = indices lq qs + val ips = indices lp ps + + local open Conv in + fun t_conv a C = + params_conv ~1 (K ((concl_conv ~1 o arg_conv o arg1_conv o a) C)) ctxt + val goal_rewrite = + t_conv arg1_conv (mset_regroup_conv iqs) + then_conv t_conv arg_conv (mset_regroup_conv ips) + end + in + CONVERSION goal_rewrite 1 + THEN (if strict then rtac smsI' 1 + else if qs = lq then rtac wmsI2'' 1 + else rtac wmsI1 1) + THEN mset_pwleq_tac 1 + THEN EVERY (map2 (less_proof false) qs ps) + THEN (if strict orelse qs <> lq + then LocalDefs.unfold_tac ctxt set_of_simps + THEN steps_tac MAX true (lq \\ qs) (lp \\ ps) + else all_tac) + end 
+ in + rem_inv_img ctxt + THEN steps_tac label strict (nth lev q) (nth lev p) + end + + val (mk_set, setT) = if label = MS then (mk_mset, msetT) else (mk_set, setT) + + fun tag_pair p (i, tag) = + HOLogic.pair_const natT natT $ + (measure_fn p i $ Bound 0) $ HOLogic.mk_number natT tag + + fun pt_lev (p, lm) = Abs ("x", Termination.get_types D p, + mk_set nat_pairT (map (tag_pair p) lm)) + + val level_mapping = + map_index pt_lev lev + |> Termination.mk_sumcases D (setT nat_pairT) + |> cterm_of thy + in + PROFILE "Proof Reconstruction" + (CONVERSION (Conv.arg_conv (Conv.arg_conv (FundefLib.regroup_union_conv sl))) 1 + THEN (rtac @{thm reduction_pair_lemma} 1) + THEN (rtac @{thm rp_inv_image_rp} 1) + THEN (rtac (order_rpair ms_rp label) 1) + THEN PRIMITIVE (instantiate' [] [SOME level_mapping]) + THEN unfold_tac @{thms rp_inv_image_def} (simpset_of thy) + THEN LocalDefs.unfold_tac ctxt + (@{thms split_conv} @ @{thms fst_conv} @ @{thms snd_conv}) + THEN REPEAT (SOMEGOAL (resolve_tac [@{thm Un_least}, @{thm empty_subsetI}])) + THEN EVERY (map (prove_lev true) sl) + THEN EVERY (map (prove_lev false) ((0 upto length cs - 1) \\ sl))) + end + + + +local open Termination in +fun print_cell (SOME (Less _)) = "<" + | print_cell (SOME (LessEq _)) = "\" + | print_cell (SOME (None _)) = "-" + | print_cell (SOME (False _)) = "-" + | print_cell (NONE) = "?" + +fun print_error ctxt D = CALLS (fn (cs, i) => + let + val np = get_num_points D + val ms = map (get_measures D) (0 upto np - 1) + val tys = map (get_types D) (0 upto np - 1) + fun index xs = (1 upto length xs) ~~ xs + fun outp s t f xs = map (fn (x, y) => s ^ Int.toString x ^ t ^ f y ^ "\n") xs + val ims = index (map index ms) + val _ = Output.tracing (concat (outp "fn #" ":\n" (concat o outp "\tmeasure #" ": " (Syntax.string_of_term ctxt)) ims)) + fun print_call (k, c) = + let + val (_, p, _, q, _, _) = dest_call D c + val _ = Output.tracing ("call table for call #" ^ Int.toString k ^ ": fn " ^ + Int.toString (p + 1) ^ " ~> fn " ^ Int.toString (q + 1)) + val caller_ms = nth ms p + val callee_ms = nth ms q + val entries = map (fn x => map (pair x) (callee_ms)) (caller_ms) + fun print_ln (i : int, l) = concat (Int.toString i :: " " :: map (enclose " " " " o print_cell o (uncurry (get_descent D c))) l) + val _ = Output.tracing (concat (Int.toString (p + 1) ^ "|" ^ Int.toString (q + 1) ^ + " " :: map (enclose " " " " o Int.toString) (1 upto length callee_ms)) ^ "\n" + ^ cat_lines (map print_ln ((1 upto (length entries)) ~~ entries))) + in + true + end + fun list_call (k, c) = + let + val (_, p, _, q, _, _) = dest_call D c + val _ = Output.tracing ("call #" ^ (Int.toString k) ^ ": fn " ^ + Int.toString (p + 1) ^ " ~> fn " ^ Int.toString (q + 1) ^ "\n" ^ + (Syntax.string_of_term ctxt c)) + in true end + val _ = forall list_call ((1 upto length cs) ~~ cs) + val _ = forall print_call ((1 upto length cs) ~~ cs) + in + all_tac + end) +end + + +fun single_scnp_tac use_tags orders ctxt cont err_cont D = Termination.CALLS (fn (cs, i) => + let + val gp = gen_probl D cs +(* val _ = TRACE ("SCNP instance: " ^ makestring gp)*) + val certificate = generate_certificate use_tags orders gp +(* val _ = TRACE ("Certificate: " ^ makestring certificate)*) + + val ms_configured = is_some (MultisetSetup.get (ProofContext.theory_of ctxt)) + in + case certificate + of NONE => err_cont D i + | SOME cert => + if not ms_configured andalso #1 cert = MS + then err_cont D i + else SELECT_GOAL (reconstruct_tac ctxt D cs gp cert) i + THEN (rtac @{thm wf_empty} i ORELSE cont D i) + 
end) + +fun decomp_scnp_tac orders autom_tac ctxt err_cont = + let + open Termination + val derive_diag = Descent.derive_diag ctxt autom_tac + val derive_all = Descent.derive_all ctxt autom_tac + val decompose = Decompose.decompose_tac ctxt autom_tac + val scnp_no_tags = single_scnp_tac false orders ctxt + val scnp_full = single_scnp_tac true orders ctxt + + fun first_round c e = + derive_diag (REPEAT scnp_no_tags c e) + + val second_round = + REPEAT (fn c => fn e => decompose (scnp_no_tags c c) e) + + val third_round = + derive_all oo + REPEAT (fn c => fn e => + scnp_full (decompose c c) e) + + fun Then s1 s2 c e = s1 (s2 c c) (s2 c e) + + val strategy = Then (Then first_round second_round) third_round + + in + TERMINATION ctxt (strategy err_cont err_cont) + end + +fun decomp_scnp orders ctxt = + let + val extra_simps = FundefCommon.TerminationSimps.get ctxt + val autom_tac = auto_tac (local_clasimpset_of ctxt addsimps2 extra_simps) + in + Method.SIMPLE_METHOD + (TRY (FundefCommon.apply_termination_rule ctxt 1) + THEN TRY Termination.wf_union_tac + THEN + (rtac @{thm wf_empty} 1 + ORELSE decomp_scnp_tac orders autom_tac ctxt (print_error ctxt) 1)) + end + + +(* Method setup *) + +val orders = + (Scan.repeat1 + ((Args.$$$ "max" >> K MAX) || + (Args.$$$ "min" >> K MIN) || + (Args.$$$ "ms" >> K MS)) + || Scan.succeed [MAX, MS, MIN]) + +val setup = Method.add_method + ("sizechange", Method.sectioned_args (Scan.lift orders) clasimp_modifiers decomp_scnp, + "termination prover with graph decomposition and the NP subset of size change termination") + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Tools/function_package/scnp_solve.ML --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Tools/function_package/scnp_solve.ML Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,257 @@ +(* Title: HOL/Tools/function_package/scnp_solve.ML + Author: Armin Heller, TU Muenchen + Author: Alexander Krauss, TU Muenchen + +Generate certificates for SCNP using a SAT solver +*) + + +signature SCNP_SOLVE = +sig + + datatype edge = GTR | GEQ + datatype graph = G of int * int * (int * edge * int) list + datatype graph_problem = GP of int list * graph list + + datatype label = MIN | MAX | MS + + type certificate = + label (* which order *) + * (int * int) list list (* (multi)sets *) + * int list (* strictly ordered calls *) + * (int -> bool -> int -> (int * int) option) (* covering function *) + + val generate_certificate : bool -> label list -> graph_problem -> certificate option + + val solver : string ref +end + +structure ScnpSolve : SCNP_SOLVE = +struct + +(** Graph problems **) + +datatype edge = GTR | GEQ ; +datatype graph = G of int * int * (int * edge * int) list ; +datatype graph_problem = GP of int list * graph list ; + +datatype label = MIN | MAX | MS ; +type certificate = + label + * (int * int) list list + * int list + * (int -> bool -> int -> (int * int) option) + +fun graph_at (GP (_, gs), i) = nth gs i ; +fun num_prog_pts (GP (arities, _)) = length arities ; +fun num_graphs (GP (_, gs)) = length gs ; +fun arity (GP (arities, gl)) i = nth arities i ; +fun ndigits (GP (arities, _)) = IntInf.log2 (foldl (op +) 0 arities) + 1 + + +(** Propositional formulas **) + +val Not = PropLogic.Not and And = PropLogic.And and Or = PropLogic.Or +val BoolVar = PropLogic.BoolVar +fun Implies (p, q) = Or (Not p, q) +fun Equiv (p, q) = And (Implies (p, q), Implies (q, p)) +val all = PropLogic.all + +(* finite indexed quantifiers: + +iforall n f <==> /\ + / \ f i + 0<=i Equiv (TAG x i, TAG y i))) + + fun encode_graph (g, 
p, q, n, m, edges) = + let + fun encode_edge i j = + if exists (fn x => x = (i, GTR, j)) edges then + And (ES (g, i, j), EW (g, i, j)) + else if not (exists (fn x => x = (i, GEQ, j)) edges) then + And (Not (ES (g, i, j)), Not (EW (g, i, j))) + else + And ( + Equiv (ES (g, i, j), + encode_constraint_strict bits ((p, i), (q, j))), + Equiv (EW (g, i, j), + encode_constraint_weak bits ((p, i), (q, j)))) + in + iforall2 n m encode_edge + end + in + iforall ng (encode_graph o graph_info gp) + end + + +(* Order-specific part of encoding *) + +fun encode bits gp mu = + let + val ng = num_graphs gp + val (ES,EW,WEAK,STRICT,P,GAM,EPS,_) = var_constrs gp + + fun encode_graph MAX (g, p, q, n, m, _) = + all [ + Equiv (WEAK g, + iforall m (fn j => + Implies (P (q, j), + iexists n (fn i => + And (P (p, i), EW (g, i, j)))))), + Equiv (STRICT g, + iforall m (fn j => + Implies (P (q, j), + iexists n (fn i => + And (P (p, i), ES (g, i, j)))))), + iexists n (fn i => P (p, i)) + ] + | encode_graph MIN (g, p, q, n, m, _) = + all [ + Equiv (WEAK g, + iforall n (fn i => + Implies (P (p, i), + iexists m (fn j => + And (P (q, j), EW (g, i, j)))))), + Equiv (STRICT g, + iforall n (fn i => + Implies (P (p, i), + iexists m (fn j => + And (P (q, j), ES (g, i, j)))))), + iexists m (fn j => P (q, j)) + ] + | encode_graph MS (g, p, q, n, m, _) = + all [ + Equiv (WEAK g, + iforall m (fn j => + Implies (P (q, j), + iexists n (fn i => GAM (g, i, j))))), + Equiv (STRICT g, + iexists n (fn i => + And (P (p, i), Not (EPS (g, i))))), + iforall2 n m (fn i => fn j => + Implies (GAM (g, i, j), + all [ + P (p, i), + P (q, j), + EW (g, i, j), + Equiv (Not (EPS (g, i)), ES (g, i, j))])), + iforall n (fn i => + Implies (And (P (p, i), EPS (g, i)), + exactly_one m (fn j => GAM (g, i, j)))) + ] + in + all [ + encode_graphs bits gp, + iforall ng (encode_graph mu o graph_info gp), + iforall ng (fn x => WEAK x), + iexists ng (fn x => STRICT x) + ] + end + + +(*Generieren des level-mapping und diverser output*) +fun mk_certificate bits label gp f = + let + val (ES,EW,WEAK,STRICT,P,GAM,EPS,TAG) = var_constrs gp + fun assign (PropLogic.BoolVar v) = the_default false (f v) + fun assignTag i j = + (fold (fn x => fn y => 2 * y + (if assign (TAG (i, j) x) then 1 else 0)) + (bits - 1 downto 0) 0) + + val level_mapping = + let fun prog_pt_mapping p = + map_filter (fn x => if assign (P(p, x)) then SOME (x, assignTag p x) else NONE) + (0 upto (arity gp p) - 1) + in map prog_pt_mapping (0 upto num_prog_pts gp - 1) end + + val strict_list = filter (assign o STRICT) (0 upto num_graphs gp - 1) + + fun covering_pair g bStrict j = + let + val (_, p, q, n, m, _) = graph_info gp g + + fun cover MAX j = find_index (fn i => assign (P (p, i)) andalso assign (EW (g, i, j))) (0 upto n - 1) + | cover MS k = find_index (fn i => assign (GAM (g, i, k))) (0 upto n - 1) + | cover MIN i = find_index (fn j => assign (P (q, j)) andalso assign (EW (g, i, j))) (0 upto m - 1) + fun cover_strict MAX j = find_index (fn i => assign (P (p, i)) andalso assign (ES (g, i, j))) (0 upto n - 1) + | cover_strict MS k = find_index (fn i => assign (GAM (g, i, k)) andalso not (assign (EPS (g, i) ))) (0 upto n - 1) + | cover_strict MIN i = find_index (fn j => assign (P (q, j)) andalso assign (ES (g, i, j))) (0 upto m - 1) + val i = if bStrict then cover_strict label j else cover label j + in + find_first (fn x => fst x = i) (nth level_mapping (if label = MIN then q else p)) + end + in + (label, level_mapping, strict_list, covering_pair) + end + +(*interface for the proof reconstruction*) 
+fun generate_certificate use_tags labels gp = + let + val bits = if use_tags then ndigits gp else 0 + in + get_first + (fn l => case sat_solver (encode bits gp l) of + SatSolver.SATISFIABLE f => SOME (mk_certificate bits l gp f) + | _ => NONE) + labels + end +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Tools/function_package/sum_tree.ML --- a/src/HOL/Tools/function_package/sum_tree.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Tools/function_package/sum_tree.ML Tue Dec 30 11:10:01 2008 +0100 @@ -9,8 +9,8 @@ struct (* Theory dependencies *) -val proj_in_rules = [thm "Sum_Type.Projl_Inl", thm "Sum_Type.Projr_Inr"] -val sumcase_split_ss = HOL_basic_ss addsimps (@{thm "Product_Type.split"} :: @{thms "Sum_Type.sum_cases"}) +val proj_in_rules = [@{thm "Datatype.Projl_Inl"}, @{thm "Datatype.Projr_Inr"}] +val sumcase_split_ss = HOL_basic_ss addsimps (@{thm "Product_Type.split"} :: @{thms "sum.cases"}) (* top-down access in balanced tree *) fun access_top_down {left, right, init} len i = @@ -18,7 +18,7 @@ (* Sum types *) fun mk_sumT LT RT = Type ("+", [LT, RT]) -fun mk_sumcase TL TR T l r = Const (@{const_name "Sum_Type.sum_case"}, (TL --> T) --> (TR --> T) --> mk_sumT TL TR --> T) $ l $ r +fun mk_sumcase TL TR T l r = Const (@{const_name "sum.sum_case"}, (TL --> T) --> (TR --> T) --> mk_sumT TL TR --> T) $ l $ r val App = curry op $ @@ -32,8 +32,8 @@ fun mk_proj ST n i = access_top_down { init = (ST, I : term -> term), - left = (fn (T as Type ("+", [LT, RT]), proj) => (LT, App (Const (@{const_name "Projl"}, T --> LT)) o proj)), - right =(fn (T as Type ("+", [LT, RT]), proj) => (RT, App (Const (@{const_name "Projr"}, T --> RT)) o proj))} n i + left = (fn (T as Type ("+", [LT, RT]), proj) => (LT, App (Const (@{const_name "Datatype.Projl"}, T --> LT)) o proj)), + right =(fn (T as Type ("+", [LT, RT]), proj) => (RT, App (Const (@{const_name "Datatype.Projr"}, T --> RT)) o proj))} n i |> snd fun mk_sumcases T fs = diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Tools/function_package/termination.ML --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Tools/function_package/termination.ML Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,324 @@ +(* Title: HOL/Tools/function_package/termination_data.ML + Author: Alexander Krauss, TU Muenchen + +Context data for termination proofs +*) + + +signature TERMINATION = +sig + + type data + datatype cell = Less of thm | LessEq of (thm * thm) | None of (thm * thm) | False of thm + + val mk_sumcases : data -> typ -> term list -> term + + val note_measure : int -> term -> data -> data + val note_chain : term -> term -> thm option -> data -> data + val note_descent : term -> term -> term -> cell -> data -> data + + val get_num_points : data -> int + val get_types : data -> int -> typ + val get_measures : data -> int -> term list + + (* read from cache *) + val get_chain : data -> term -> term -> thm option option + val get_descent : data -> term -> term -> term -> cell option + + (* writes *) + val derive_descent : theory -> tactic -> term -> term -> term -> data -> data + val derive_descents : theory -> tactic -> term -> data -> data + + val dest_call : data -> term -> ((string * typ) list * int * term * int * term * term) + + val CALLS : (term list * int -> tactic) -> int -> tactic + + (* Termination tactics. Sequential composition via continuations. 
(2nd argument is the error continuation) *) + type ttac = (data -> int -> tactic) -> (data -> int -> tactic) -> data -> int -> tactic + + val TERMINATION : Proof.context -> (data -> int -> tactic) -> int -> tactic + + val REPEAT : ttac -> ttac + + val wf_union_tac : tactic +end + + + +structure Termination : TERMINATION = +struct + +open FundefLib + +val term2_ord = prod_ord Term.fast_term_ord Term.fast_term_ord +structure Term2tab = TableFun(type key = term * term val ord = term2_ord); +structure Term3tab = TableFun(type key = term * (term * term) val ord = prod_ord Term.fast_term_ord term2_ord); + +(** Analyzing binary trees **) + +(* Skeleton of a tree structure *) + +datatype skel = + SLeaf of int (* index *) +| SBranch of (skel * skel) + + +(* abstract make and dest functions *) +fun mk_tree leaf branch = + let fun mk (SLeaf i) = leaf i + | mk (SBranch (s, t)) = branch (mk s, mk t) + in mk end + + +fun dest_tree split = + let fun dest (SLeaf i) x = [(i, x)] + | dest (SBranch (s, t)) x = + let val (l, r) = split x + in dest s l @ dest t r end + in dest end + + +(* concrete versions for sum types *) +fun is_inj (Const ("Sum_Type.Inl", _) $ _) = true + | is_inj (Const ("Sum_Type.Inr", _) $ _) = true + | is_inj _ = false + +fun dest_inl (Const ("Sum_Type.Inl", _) $ t) = SOME t + | dest_inl _ = NONE + +fun dest_inr (Const ("Sum_Type.Inr", _) $ t) = SOME t + | dest_inr _ = NONE + + +fun mk_skel ps = + let + fun skel i ps = + if forall is_inj ps andalso not (null ps) + then let + val (j, s) = skel i (map_filter dest_inl ps) + val (k, t) = skel j (map_filter dest_inr ps) + in (k, SBranch (s, t)) end + else (i + 1, SLeaf i) + in + snd (skel 0 ps) + end + +(* compute list of types for nodes *) +fun node_types sk T = dest_tree (fn Type ("+", [LT, RT]) => (LT, RT)) sk T |> map snd + +(* find index and raw term *) +fun dest_inj (SLeaf i) trm = (i, trm) + | dest_inj (SBranch (s, t)) trm = + case dest_inl trm of + SOME trm' => dest_inj s trm' + | _ => dest_inj t (the (dest_inr trm)) + + + +(** Matrix cell datatype **) + +datatype cell = Less of thm | LessEq of (thm * thm) | None of (thm * thm) | False of thm; + + +type data = + skel (* structure of the sum type encoding "program points" *) + * (int -> typ) (* types of program points *) + * (term list Inttab.table) (* measures for program points *) + * (thm option Term2tab.table) (* which calls form chains? 
*) + * (cell Term3tab.table) (* local descents *) + + +fun map_measures f (p, T, M, C, D) = (p, T, f M, C, D) +fun map_chains f (p, T, M, C, D) = (p, T, M, f C, D) +fun map_descent f (p, T, M, C, D) = (p, T, M, C, f D) + +fun note_measure p m = map_measures (Inttab.insert_list (op aconv) (p, m)) +fun note_chain c1 c2 res = map_chains (Term2tab.update ((c1, c2), res)) +fun note_descent c m1 m2 res = map_descent (Term3tab.update ((c,(m1, m2)), res)) + +(* Build case expression *) +fun mk_sumcases (sk, _, _, _, _) T fs = + mk_tree (fn i => (nth fs i, domain_type (fastype_of (nth fs i)))) + (fn ((f, fT), (g, gT)) => (SumTree.mk_sumcase fT gT T f g, SumTree.mk_sumT fT gT)) + sk + |> fst + +fun mk_sum_skel rel = + let + val cs = FundefLib.dest_binop_list @{const_name "op Un"} rel + fun collect_pats (Const ("Collect", _) $ Abs (_, _, c)) = + let + val (Const ("op &", _) $ (Const ("op =", _) $ _ $ (Const ("Pair", _) $ r $ l)) $ Gam) + = Term.strip_qnt_body "Ex" c + in cons r o cons l end + in + mk_skel (fold collect_pats cs []) + end + +fun create ctxt T rel = + let + val sk = mk_sum_skel rel + val Ts = node_types sk T + val M = Inttab.make (map_index (apsnd (MeasureFunctions.get_measure_functions ctxt)) Ts) + in + (sk, nth Ts, M, Term2tab.empty, Term3tab.empty) + end + +fun get_num_points (sk, _, _, _, _) = + let + fun num (SLeaf i) = i + 1 + | num (SBranch (s, t)) = num t + in num sk end + +fun get_types (_, T, _, _, _) = T +fun get_measures (_, _, M, _, _) = Inttab.lookup_list M + +fun get_chain (_, _, _, C, _) c1 c2 = + Term2tab.lookup C (c1, c2) + +fun get_descent (_, _, _, _, D) c m1 m2 = + Term3tab.lookup D (c, (m1, m2)) + +fun dest_call D (Const ("Collect", _) $ Abs (_, _, c)) = + let + val n = get_num_points D + val (sk, _, _, _, _) = D + val vs = Term.strip_qnt_vars "Ex" c + + (* FIXME: throw error "dest_call" for malformed terms *) + val (Const ("op &", _) $ (Const ("op =", _) $ _ $ (Const ("Pair", _) $ r $ l)) $ Gam) + = Term.strip_qnt_body "Ex" c + val (p, l') = dest_inj sk l + val (q, r') = dest_inj sk r + in + (vs, p, l', q, r', Gam) + end + | dest_call D t = error "dest_call" + + +fun derive_desc_aux thy tac c (vs, p, l', q, r', Gam) m1 m2 D = + case get_descent D c m1 m2 of + SOME _ => D + | NONE => let + fun cgoal rel = + Term.list_all (vs, + Logic.mk_implies (HOLogic.mk_Trueprop Gam, + HOLogic.mk_Trueprop (Const (rel, @{typ "nat => nat => bool"}) + $ (m2 $ r') $ (m1 $ l')))) + |> cterm_of thy + in + note_descent c m1 m2 + (case try_proof (cgoal @{const_name HOL.less}) tac of + Solved thm => Less thm + | Stuck thm => + (case try_proof (cgoal @{const_name HOL.less_eq}) tac of + Solved thm2 => LessEq (thm2, thm) + | Stuck thm2 => + if prems_of thm2 = [HOLogic.Trueprop $ HOLogic.false_const] + then False thm2 else None (thm2, thm) + | _ => raise Match) (* FIXME *) + | _ => raise Match) D + end + +fun derive_descent thy tac c m1 m2 D = + derive_desc_aux thy tac c (dest_call D c) m1 m2 D + +(* all descents in one go *) +fun derive_descents thy tac c D = + let val cdesc as (vs, p, l', q, r', Gam) = dest_call D c + in fold_product (derive_desc_aux thy tac c cdesc) + (get_measures D p) (get_measures D q) D + end + +fun CALLS tac i st = + if Thm.no_prems st then all_tac st + else case Thm.term_of (Thm.cprem_of st i) of + (_ $ (_ $ rel)) => tac (FundefLib.dest_binop_list @{const_name "op Un"} rel, i) st + |_ => no_tac st + +type ttac = (data -> int -> tactic) -> (data -> int -> tactic) -> data -> int -> tactic + +fun TERMINATION ctxt tac = + SUBGOAL (fn (_ $ (Const (@{const_name "wf"}, 
wfT) $ rel), i) => + let + val (T, _) = HOLogic.dest_prodT (HOLogic.dest_setT (domain_type wfT)) + in + tac (create ctxt T rel) i + end) + + +(* A tactic to convert open to closed termination goals *) +local +fun dest_term (t : term) = (* FIXME, cf. Lexicographic order *) + let + val (vars, prop) = FundefLib.dest_all_all t + val (prems, concl) = Logic.strip_horn prop + val (lhs, rhs) = concl + |> HOLogic.dest_Trueprop + |> HOLogic.dest_mem |> fst + |> HOLogic.dest_prod + in + (vars, prems, lhs, rhs) + end + +fun mk_pair_compr (T, qs, l, r, conds) = + let + val pT = HOLogic.mk_prodT (T, T) + val n = length qs + val peq = HOLogic.eq_const pT $ Bound n $ (HOLogic.pair_const T T $ l $ r) + val conds' = if null conds then [HOLogic.true_const] else conds + in + HOLogic.Collect_const pT $ + Abs ("uu_", pT, + (foldr1 HOLogic.mk_conj (peq :: conds') + |> fold_rev (fn v => fn t => HOLogic.exists_const (fastype_of v) $ lambda v t) qs)) + end + +in + +fun wf_union_tac st = + let + val thy = theory_of_thm st + val cert = cterm_of (theory_of_thm st) + val ((trueprop $ (wf $ rel)) :: ineqs) = prems_of st + + fun mk_compr ineq = + let + val (vars, prems, lhs, rhs) = dest_term ineq + in + mk_pair_compr (fastype_of lhs, vars, lhs, rhs, map (ObjectLogic.atomize_term thy) prems) + end + + val relation = + if null ineqs then + Const (@{const_name "{}"}, fastype_of rel) + else + foldr1 (HOLogic.mk_binop @{const_name "op Un"}) (map mk_compr ineqs) + + fun solve_membership_tac i = + (EVERY' (replicate (i - 2) (rtac @{thm UnI2})) (* pick the right component of the union *) + THEN' (fn j => TRY (rtac @{thm UnI1} j)) + THEN' (rtac @{thm CollectI}) (* unfold comprehension *) + THEN' (fn i => REPEAT (rtac @{thm exI} i)) (* Turn existentials into schematic Vars *) + THEN' ((rtac @{thm refl}) (* unification instantiates all Vars *) + ORELSE' ((rtac @{thm conjI}) + THEN' (rtac @{thm refl}) + THEN' (CLASET' blast_tac))) (* Solve rest of context... not very elegant *) + ) i + in + ((PRIMITIVE (Drule.cterm_instantiate [(cert rel, cert relation)]) + THEN ALLGOALS (fn i => if i = 1 then all_tac else solve_membership_tac i))) st + end + + +end + + +(* continuation passing repeat combinator *) +fun REPEAT ttac cont err_cont = + ttac (fn D => fn i => (REPEAT ttac cont cont D i)) err_cont + + + + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Transcendental.thy --- a/src/HOL/Transcendental.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Transcendental.thy Tue Dec 30 11:10:01 2008 +0100 @@ -11,7 +11,7 @@ imports Fact Series Deriv NthRoot begin -subsection{*Properties of Power Series*} +subsection {* Properties of Power Series *} lemma lemma_realpow_diff: fixes y :: "'a::recpower" @@ -26,8 +26,8 @@ fixes y :: "'a::{recpower,comm_semiring_0}" shows "(\p=0..p=0.. 'a::ring_1) => nat => 'a" where @@ -124,33 +124,22 @@ lemma diffs_minus: "diffs (%n. - c n) = (%n. - diffs c n)" by (simp add: diffs_def) -text{*Show that we can shift the terms down one*} -lemma lemma_diffs: - "(\n=0..n=0..n. f (Suc n)) sums s \ (\n. f n) sums s" +unfolding sums_def +apply (rule LIMSEQ_imp_Suc) +apply (subst setsum_shift_lb_Suc0_0_upt [where f=f, OF f, symmetric]) +apply (simp only: setsum_shift_bounds_Suc_ivl) done -lemma lemma_diffs2: - "(\n=0..n=0.. (%n. of_nat n * c(n) * (x ^ (n - Suc 0))) sums (\n. (diffs c)(n) * (x ^ n))" -apply (subgoal_tac " (%n. of_nat n * c (n) * (x ^ (n - Suc 0))) ----> 0") -apply (rule_tac [2] LIMSEQ_imp_Suc) -apply (drule summable_sums) -apply (auto simp add: sums_def) -apply (drule_tac X="(\n. \n = 0.. (\d. 
n = m + d + Suc 0)" -by (simp add: less_iff_Suc_add) - -lemma sumdiff: "a + b - (c + d) = a - c + b - (d::real)" -by arith - lemma sumr_diff_mult_const2: "setsum f {0..i = 0..h. \h \ 0; norm h < k\ \ norm (f h) \ K * norm h" shows "f -- 0 --> 0" -proof (simp add: LIM_def, safe) +unfolding LIM_def diff_0_right +proof (safe) + let ?h = "of_real (k / 2)::'a" + have "?h \ 0" and "norm ?h < k" using k by simp_all + hence "norm (f ?h) \ K * norm ?h" by (rule le) + hence "0 \ K * norm ?h" by (rule order_trans [OF norm_ge_zero]) + hence zero_le_K: "0 \ K" using k by (simp add: zero_le_mult_iff) + fix r::real assume r: "0 < r" - have zero_le_K: "0 \ K" - apply (cut_tac k) - apply (cut_tac h="of_real (k/2)" in le, simp) - apply (simp del: of_real_divide) - apply (drule order_trans [OF norm_ge_zero]) - apply (simp add: zero_le_mult_iff) - done show "\s. 0 < s \ (\x. x \ 0 \ norm x < s \ norm (f x) < r)" proof (cases) assume "K = 0" @@ -392,11 +375,12 @@ assumes 3: "summable (\n. (diffs (diffs c)) n * K ^ n)" assumes 4: "norm x < norm K" shows "DERIV (\x. \n. c n * x ^ n) x :> (\n. (diffs c) n * x ^ n)" -proof (simp add: deriv_def, rule LIM_zero_cancel) +unfolding deriv_def +proof (rule LIM_zero_cancel) show "(\h. (suminf (\n. c n * (x + h) ^ n) - suminf (\n. c n * x ^ n)) / h - suminf (\n. diffs c n * x ^ n)) -- 0 --> 0" proof (rule LIM_equal2) - show "0 < norm K - norm x" by (simp add: less_diff_eq 4) + show "0 < norm K - norm x" using 4 by (simp add: less_diff_eq) next fix h :: 'a assume "h \ 0" @@ -421,8 +405,7 @@ apply (rule summable_divide) apply (rule summable_diff [OF B A]) apply (rule sums_summable [OF diffs_equiv [OF C]]) - apply (rule_tac f="suminf" in arg_cong) - apply (rule ext) + apply (rule arg_cong [where f="suminf"], rule ext) apply (simp add: ring_simps) done next @@ -433,22 +416,12 @@ qed -subsection{*Exponential Function*} +subsection {* Exponential Function *} definition exp :: "'a \ 'a::{recpower,real_normed_field,banach}" where "exp x = (\n. x ^ n /\<^sub>R real (fact n))" -definition - sin :: "real => real" where - "sin x = (\n. (if even(n) then 0 else - (-1 ^ ((n - Suc 0) div 2))/(real (fact n))) * x ^ n)" - -definition - cos :: "real => real" where - "cos x = (\n. (if even(n) then (-1 ^ (n div 2))/(real (fact n)) - else 0) * x ^ n)" - lemma summable_exp_generic: fixes x :: "'a::{real_normed_algebra_1,recpower,banach}" defines S_def: "S \ \n. x ^ n /\<^sub>R real (fact n)" @@ -493,66 +466,9 @@ lemma summable_exp: "summable (%n. inverse (real (fact n)) * x ^ n)" by (insert summable_exp_generic [where x=x], simp) -lemma summable_sin: - "summable (%n. - (if even n then 0 - else -1 ^ ((n - Suc 0) div 2)/(real (fact n))) * - x ^ n)" -apply (rule_tac g = "(%n. inverse (real (fact n)) * \x\ ^ n)" in summable_comparison_test) -apply (rule_tac [2] summable_exp) -apply (rule_tac x = 0 in exI) -apply (auto simp add: divide_inverse abs_mult power_abs [symmetric] zero_le_mult_iff) -done - -lemma summable_cos: - "summable (%n. - (if even n then - -1 ^ (n div 2)/(real (fact n)) else 0) * x ^ n)" -apply (rule_tac g = "(%n. 
inverse (real (fact n)) * \x\ ^ n)" in summable_comparison_test) -apply (rule_tac [2] summable_exp) -apply (rule_tac x = 0 in exI) -apply (auto simp add: divide_inverse abs_mult power_abs [symmetric] zero_le_mult_iff) -done - -lemma lemma_STAR_sin: - "(if even n then 0 - else -1 ^ ((n - Suc 0) div 2)/(real (fact n))) * 0 ^ n = 0" -by (induct "n", auto) - -lemma lemma_STAR_cos: - "0 < n --> - -1 ^ (n div 2)/(real (fact n)) * 0 ^ n = 0" -by (induct "n", auto) - -lemma lemma_STAR_cos1: - "0 < n --> - (-1) ^ (n div 2)/(real (fact n)) * 0 ^ n = 0" -by (induct "n", auto) - -lemma lemma_STAR_cos2: - "(\n=1..n. x ^ n /\<^sub>R real (fact n)) sums exp x" unfolding exp_def by (rule summable_exp_generic [THEN summable_sums]) -lemma sin_converges: - "(%n. (if even n then 0 - else -1 ^ ((n - Suc 0) div 2)/(real (fact n))) * - x ^ n) sums sin(x)" -unfolding sin_def by (rule summable_sin [THEN summable_sums]) - -lemma cos_converges: - "(%n. (if even n then - -1 ^ (n div 2)/(real (fact n)) - else 0) * x ^ n) sums cos(x)" -unfolding cos_def by (rule summable_cos [THEN summable_sums]) - - -subsection{*Formal Derivatives of Exp, Sin, and Cos Series*} lemma exp_fdiffs: "diffs (%n. inverse(real (fact n))) = (%n. inverse(real (fact n)))" @@ -562,48 +478,6 @@ lemma diffs_of_real: "diffs (\n. of_real (f n)) = (\n. of_real (diffs f n))" by (simp add: diffs_def) -lemma sin_fdiffs: - "diffs(%n. if even n then 0 - else -1 ^ ((n - Suc 0) div 2)/(real (fact n))) - = (%n. if even n then - -1 ^ (n div 2)/(real (fact n)) - else 0)" -by (auto intro!: ext - simp add: diffs_def divide_inverse real_of_nat_def of_nat_mult - simp del: mult_Suc of_nat_Suc) - -lemma sin_fdiffs2: - "diffs(%n. if even n then 0 - else -1 ^ ((n - Suc 0) div 2)/(real (fact n))) n - = (if even n then - -1 ^ (n div 2)/(real (fact n)) - else 0)" -by (simp only: sin_fdiffs) - -lemma cos_fdiffs: - "diffs(%n. if even n then - -1 ^ (n div 2)/(real (fact n)) else 0) - = (%n. - (if even n then 0 - else -1 ^ ((n - Suc 0)div 2)/(real (fact n))))" -by (auto intro!: ext - simp add: diffs_def divide_inverse odd_Suc_mult_two_ex real_of_nat_def of_nat_mult - simp del: mult_Suc of_nat_Suc) - - -lemma cos_fdiffs2: - "diffs(%n. if even n then - -1 ^ (n div 2)/(real (fact n)) else 0) n - = - (if even n then 0 - else -1 ^ ((n - Suc 0)div 2)/(real (fact n)))" -by (simp only: cos_fdiffs) - -text{*Now at last we can get the derivatives of exp, sin and cos*} - -lemma lemma_sin_minus: - "- sin x = (\n. - ((if even n then 0 - else -1 ^ ((n - Suc 0) div 2)/(real (fact n))) * x ^ n))" -by (auto intro!: sums_unique sums_minus sin_converges) - lemma lemma_exp_ext: "exp = (\x. \n. x ^ n /\<^sub>R real (fact n))" by (auto intro!: ext simp add: exp_def) @@ -617,45 +491,11 @@ apply (simp del: of_real_add) done -lemma lemma_sin_ext: - "sin = (%x. \n. - (if even n then 0 - else -1 ^ ((n - Suc 0) div 2)/(real (fact n))) * - x ^ n)" -by (auto intro!: ext simp add: sin_def) - -lemma lemma_cos_ext: - "cos = (%x. \n. 
- (if even n then -1 ^ (n div 2)/(real (fact n)) else 0) * - x ^ n)" -by (auto intro!: ext simp add: cos_def) - -lemma DERIV_sin [simp]: "DERIV sin x :> cos(x)" -apply (simp add: cos_def) -apply (subst lemma_sin_ext) -apply (auto simp add: sin_fdiffs2 [symmetric]) -apply (rule_tac K = "1 + \x\" in termdiffs) -apply (auto intro: sin_converges cos_converges sums_summable intro!: sums_minus [THEN sums_summable] simp add: cos_fdiffs sin_fdiffs) -done - -lemma DERIV_cos [simp]: "DERIV cos x :> -sin(x)" -apply (subst lemma_cos_ext) -apply (auto simp add: lemma_sin_minus cos_fdiffs2 [symmetric] minus_mult_left) -apply (rule_tac K = "1 + \x\" in termdiffs) -apply (auto intro: sin_converges cos_converges sums_summable intro!: sums_minus [THEN sums_summable] simp add: cos_fdiffs sin_fdiffs diffs_minus) -done - lemma isCont_exp [simp]: "isCont exp x" by (rule DERIV_exp [THEN DERIV_isCont]) -lemma isCont_sin [simp]: "isCont sin x" -by (rule DERIV_sin [THEN DERIV_isCont]) -lemma isCont_cos [simp]: "isCont cos x" -by (rule DERIV_cos [THEN DERIV_isCont]) - - -subsection{*Properties of the Exponential Function*} +subsubsection {* Properties of the Exponential Function *} lemma powser_zero: fixes f :: "nat \ 'a::{real_normed_algebra_1,recpower}" @@ -724,6 +564,9 @@ unfolding exp_def by (simp only: Cauchy_product summable_norm_exp exp_series_add) +lemma mult_exp_exp: "exp x * exp y = exp (x + y)" +by (rule exp_add [symmetric]) + lemma exp_of_real: "exp (of_real x) = of_real (exp x)" unfolding exp_def apply (subst of_real.suminf) @@ -731,6 +574,51 @@ apply (simp add: scaleR_conv_of_real) done +lemma exp_not_eq_zero [simp]: "exp x \ 0" +proof + have "exp x * exp (- x) = 1" by (simp add: mult_exp_exp) + also assume "exp x = 0" + finally show "False" by simp +qed + +lemma exp_minus: "exp (- x) = inverse (exp x)" +by (rule inverse_unique [symmetric], simp add: mult_exp_exp) + +lemma exp_diff: "exp (x - y) = exp x / exp y" + unfolding diff_minus divide_inverse + by (simp add: exp_add exp_minus) + + +subsubsection {* Properties of the Exponential Function on Reals *} + +text {* Comparisons of @{term "exp x"} with zero. *} + +text{*Proof: because every exponential can be seen as a square.*} +lemma exp_ge_zero [simp]: "0 \ exp (x::real)" +proof - + have "0 \ exp (x/2) * exp (x/2)" by simp + thus ?thesis by (simp add: exp_add [symmetric]) +qed + +lemma exp_gt_zero [simp]: "0 < exp (x::real)" +by (simp add: order_less_le) + +lemma not_exp_less_zero [simp]: "\ exp (x::real) < 0" +by (simp add: not_less) + +lemma not_exp_le_zero [simp]: "\ exp (x::real) \ 0" +by (simp add: not_le) + +lemma abs_exp_cancel [simp]: "\exp x::real\ = exp x" +by simp + +lemma exp_real_of_nat_mult: "exp(real n * x) = exp(x) ^ n" +apply (induct "n") +apply (auto simp add: real_of_nat_Suc right_distrib exp_add mult_commute) +done + +text {* Strict monotonicity of exponential. *} + lemma exp_ge_add_one_self_aux: "0 \ (x::real) ==> (1 + x) \ exp(x)" apply (drule order_le_imp_less_or_eq, auto) apply (simp add: exp_def) @@ -739,114 +627,61 @@ apply (auto intro: summable_exp simp add: numeral_2_eq_2 zero_le_mult_iff) done -lemma exp_gt_one [simp]: "0 < (x::real) ==> 1 < exp x" -apply (rule order_less_le_trans) -apply (rule_tac [2] exp_ge_add_one_self_aux, auto) -done - -lemma DERIV_exp_add_const: "DERIV (%x. exp (x + y)) x :> exp(x + y)" +lemma exp_gt_one: "0 < (x::real) \ 1 < exp x" proof - - have "DERIV (exp \ (\x. 
x + y)) x :> exp (x + y) * (1+0)" - by (fast intro: DERIV_chain DERIV_add DERIV_exp DERIV_ident DERIV_const) - thus ?thesis by (simp add: o_def) + assume x: "0 < x" + hence "1 < 1 + x" by simp + also from x have "1 + x \ exp x" + by (simp add: exp_ge_add_one_self_aux) + finally show ?thesis . qed -lemma DERIV_exp_minus [simp]: "DERIV (%x. exp (-x)) x :> - exp(-x)" -proof - - have "DERIV (exp \ uminus) x :> exp (- x) * - 1" - by (fast intro: DERIV_chain DERIV_minus DERIV_exp DERIV_ident) - thus ?thesis by (simp add: o_def) -qed - -lemma DERIV_exp_exp_zero [simp]: "DERIV (%x. exp (x + y) * exp (- x)) x :> 0" -proof - - have "DERIV (\x. exp (x + y) * exp (- x)) x - :> exp (x + y) * exp (- x) + - exp (- x) * exp (x + y)" - by (fast intro: DERIV_exp_add_const DERIV_exp_minus DERIV_mult) - thus ?thesis by (simp add: mult_commute) -qed - -lemma exp_add_mult_minus [simp]: "exp(x + y)*exp(-x) = exp(y::real)" -proof - - have "\x. DERIV (%x. exp (x + y) * exp (- x)) x :> 0" by simp - hence "exp (x + y) * exp (- x) = exp (0 + y) * exp (- 0)" - by (rule DERIV_isconst_all) - thus ?thesis by simp -qed - -lemma exp_mult_minus [simp]: "exp x * exp(-x) = 1" -by (simp add: exp_add [symmetric]) - -lemma exp_mult_minus2 [simp]: "exp(-x)*exp(x) = 1" -by (simp add: mult_commute) - - -lemma exp_minus: "exp(-x) = inverse(exp(x))" -by (auto intro: inverse_unique [symmetric]) - -text{*Proof: because every exponential can be seen as a square.*} -lemma exp_ge_zero [simp]: "0 \ exp (x::real)" -apply (rule_tac t = x in real_sum_of_halves [THEN subst]) -apply (subst exp_add, auto) -done - -lemma exp_not_eq_zero [simp]: "exp x \ 0" -apply (cut_tac x = x in exp_mult_minus2) -apply (auto simp del: exp_mult_minus2) -done - -lemma exp_gt_zero [simp]: "0 < exp (x::real)" -by (simp add: order_less_le) - -lemma inv_exp_gt_zero [simp]: "0 < inverse(exp x::real)" -by (auto intro: positive_imp_inverse_positive) - -lemma abs_exp_cancel [simp]: "\exp x::real\ = exp x" -by auto - -lemma exp_real_of_nat_mult: "exp(real n * x) = exp(x) ^ n" -apply (induct "n") -apply (auto simp add: real_of_nat_Suc right_distrib exp_add mult_commute) -done - -lemma exp_diff: "exp(x - y) = exp(x)/(exp y)" -apply (simp add: diff_minus divide_inverse) -apply (simp (no_asm) add: exp_add exp_minus) -done - - lemma exp_less_mono: fixes x y :: real - assumes xy: "x < y" shows "exp x < exp y" + assumes "x < y" shows "exp x < exp y" proof - - from xy have "1 < exp (y + - x)" - by (rule real_less_sum_gt_zero [THEN exp_gt_one]) - hence "exp x * inverse (exp x) < exp y * inverse (exp x)" - by (auto simp add: exp_add exp_minus) - thus ?thesis - by (simp add: divide_inverse [symmetric] pos_less_divide_eq - del: divide_self_if) + from `x < y` have "0 < y - x" by simp + hence "1 < exp (y - x)" by (rule exp_gt_one) + hence "1 < exp y / exp x" by (simp only: exp_diff) + thus "exp x < exp y" by simp qed lemma exp_less_cancel: "exp (x::real) < exp y ==> x < y" -apply (simp add: linorder_not_le [symmetric]) -apply (auto simp add: order_le_less exp_less_mono) +apply (simp add: linorder_not_le [symmetric]) +apply (auto simp add: order_le_less exp_less_mono) done -lemma exp_less_cancel_iff [iff]: "(exp(x::real) < exp(y)) = (x < y)" +lemma exp_less_cancel_iff [iff]: "exp (x::real) < exp y \ x < y" by (auto intro: exp_less_mono exp_less_cancel) -lemma exp_le_cancel_iff [iff]: "(exp(x::real) \ exp(y)) = (x \ y)" +lemma exp_le_cancel_iff [iff]: "exp (x::real) \ exp y \ x \ y" by (auto simp add: linorder_not_less [symmetric]) -lemma exp_inj_iff [iff]: "(exp (x::real) = exp y) = (x 
= y)" +lemma exp_inj_iff [iff]: "exp (x::real) = exp y \ x = y" by (simp add: order_eq_iff) +text {* Comparisons of @{term "exp x"} with one. *} + +lemma one_less_exp_iff [simp]: "1 < exp (x::real) \ 0 < x" + using exp_less_cancel_iff [where x=0 and y=x] by simp + +lemma exp_less_one_iff [simp]: "exp (x::real) < 1 \ x < 0" + using exp_less_cancel_iff [where x=x and y=0] by simp + +lemma one_le_exp_iff [simp]: "1 \ exp (x::real) \ 0 \ x" + using exp_le_cancel_iff [where x=0 and y=x] by simp + +lemma exp_le_one_iff [simp]: "exp (x::real) \ 1 \ x \ 0" + using exp_le_cancel_iff [where x=x and y=0] by simp + +lemma exp_eq_one_iff [simp]: "exp (x::real) = 1 \ x = 0" + using exp_inj_iff [where x=x and y=0] by simp + lemma lemma_exp_total: "1 \ y ==> \x. 0 \ x & x \ y - 1 & exp(x::real) = y" apply (rule IVT) apply (auto intro: isCont_exp simp add: le_diff_eq) apply (subgoal_tac "1 + (y - 1) \ exp (y - 1)") -apply simp +apply simp apply (rule exp_ge_add_one_self_aux, simp) done @@ -861,7 +696,7 @@ done -subsection{*Properties of the Logarithmic Function*} +subsection {* Natural Logarithm *} definition ln :: "real => real" where @@ -873,59 +708,46 @@ lemma exp_ln [simp]: "0 < x \ exp (ln x) = x" by (auto dest: exp_total) -lemma exp_ln_iff [simp]: "(exp (ln x) = x) = (0 < x)" -apply (auto dest: exp_total) -apply (erule subst, simp) +lemma exp_ln_iff [simp]: "exp (ln x) = x \ 0 < x" +apply (rule iffI) +apply (erule subst, rule exp_gt_zero) +apply (erule exp_ln) done -lemma ln_mult: "[| 0 < x; 0 < y |] ==> ln(x * y) = ln(x) + ln(y)" -apply (rule exp_inj_iff [THEN iffD1]) -apply (simp add: exp_add exp_ln mult_pos_pos) +lemma ln_unique: "exp y = x \ ln x = y" +by (erule subst, rule ln_exp) + +lemma ln_one [simp]: "ln 1 = 0" +by (rule ln_unique, simp) + +lemma ln_mult: "\0 < x; 0 < y\ \ ln (x * y) = ln x + ln y" +by (rule ln_unique, simp add: exp_add) + +lemma ln_inverse: "0 < x \ ln (inverse x) = - ln x" +by (rule ln_unique, simp add: exp_minus) + +lemma ln_div: "\0 < x; 0 < y\ \ ln (x / y) = ln x - ln y" +by (rule ln_unique, simp add: exp_diff) + +lemma ln_realpow: "0 < x \ ln (x ^ n) = real n * ln x" +by (rule ln_unique, simp add: exp_real_of_nat_mult) + +lemma ln_less_cancel_iff [simp]: "\0 < x; 0 < y\ \ ln x < ln y \ x < y" +by (subst exp_less_cancel_iff [symmetric], simp) + +lemma ln_le_cancel_iff [simp]: "\0 < x; 0 < y\ \ ln x \ ln y \ x \ y" +by (simp add: linorder_not_less [symmetric]) + +lemma ln_inj_iff [simp]: "\0 < x; 0 < y\ \ ln x = ln y \ x = y" +by (simp add: order_eq_iff) + +lemma ln_add_one_self_le_self [simp]: "0 \ x \ ln (1 + x) \ x" +apply (rule exp_le_cancel_iff [THEN iffD1]) +apply (simp add: exp_ge_add_one_self_aux) done -lemma ln_inj_iff[simp]: "[| 0 < x; 0 < y |] ==> (ln x = ln y) = (x = y)" -apply (simp only: exp_ln_iff [symmetric]) -apply (erule subst)+ -apply simp -done - -lemma ln_one[simp]: "ln 1 = 0" -by (rule exp_inj_iff [THEN iffD1], auto) - -lemma ln_inverse: "0 < x ==> ln(inverse x) = - ln x" -apply (rule_tac a1 = "ln x" in add_left_cancel [THEN iffD1]) -apply (auto simp add: positive_imp_inverse_positive ln_mult [symmetric]) -done - -lemma ln_div: - "[|0 < x; 0 < y|] ==> ln(x/y) = ln x - ln y" -apply (simp add: divide_inverse) -apply (auto simp add: positive_imp_inverse_positive ln_mult ln_inverse) -done - -lemma ln_less_cancel_iff[simp]: "[| 0 < x; 0 < y|] ==> (ln x < ln y) = (x < y)" -apply (simp only: exp_ln_iff [symmetric]) -apply (erule subst)+ -apply simp -done - -lemma ln_le_cancel_iff[simp]: "[| 0 < x; 0 < y|] ==> (ln x \ ln y) = (x \ y)" -by (auto simp add: 
linorder_not_less [symmetric]) - -lemma ln_realpow: "0 < x ==> ln(x ^ n) = real n * ln(x)" -by (auto dest!: exp_total simp add: exp_real_of_nat_mult [symmetric]) - -lemma ln_add_one_self_le_self [simp]: "0 \ x ==> ln(1 + x) \ x" -apply (rule ln_exp [THEN subst]) -apply (rule ln_le_cancel_iff [THEN iffD2]) -apply (auto simp add: exp_ge_add_one_self_aux) -done - -lemma ln_less_self [simp]: "0 < x ==> ln x < x" -apply (rule order_less_le_trans) -apply (rule_tac [2] ln_add_one_self_le_self) -apply (rule ln_less_cancel_iff [THEN iffD2], auto) -done +lemma ln_less_self [simp]: "0 < x \ ln x < x" +by (rule order_less_le_trans [where y="ln (1 + x)"]) simp_all lemma ln_ge_zero [simp]: assumes x: "1 \ x" shows "0 \ ln x" @@ -992,7 +814,151 @@ done -subsection{*Basic Properties of the Trigonometric Functions*} +subsection {* Sine and Cosine *} + +definition + sin :: "real => real" where + "sin x = (\n. (if even(n) then 0 else + (-1 ^ ((n - Suc 0) div 2))/(real (fact n))) * x ^ n)" + +definition + cos :: "real => real" where + "cos x = (\n. (if even(n) then (-1 ^ (n div 2))/(real (fact n)) + else 0) * x ^ n)" + +lemma summable_sin: + "summable (%n. + (if even n then 0 + else -1 ^ ((n - Suc 0) div 2)/(real (fact n))) * + x ^ n)" +apply (rule_tac g = "(%n. inverse (real (fact n)) * \x\ ^ n)" in summable_comparison_test) +apply (rule_tac [2] summable_exp) +apply (rule_tac x = 0 in exI) +apply (auto simp add: divide_inverse abs_mult power_abs [symmetric] zero_le_mult_iff) +done + +lemma summable_cos: + "summable (%n. + (if even n then + -1 ^ (n div 2)/(real (fact n)) else 0) * x ^ n)" +apply (rule_tac g = "(%n. inverse (real (fact n)) * \x\ ^ n)" in summable_comparison_test) +apply (rule_tac [2] summable_exp) +apply (rule_tac x = 0 in exI) +apply (auto simp add: divide_inverse abs_mult power_abs [symmetric] zero_le_mult_iff) +done + +lemma lemma_STAR_sin: + "(if even n then 0 + else -1 ^ ((n - Suc 0) div 2)/(real (fact n))) * 0 ^ n = 0" +by (induct "n", auto) + +lemma lemma_STAR_cos: + "0 < n --> + -1 ^ (n div 2)/(real (fact n)) * 0 ^ n = 0" +by (induct "n", auto) + +lemma lemma_STAR_cos1: + "0 < n --> + (-1) ^ (n div 2)/(real (fact n)) * 0 ^ n = 0" +by (induct "n", auto) + +lemma lemma_STAR_cos2: + "(\n=1..n. - ((if even n then 0 + else -1 ^ ((n - Suc 0) div 2)/(real (fact n))) * x ^ n))" +by (auto intro!: sums_unique sums_minus sin_converges) + +lemma lemma_sin_ext: + "sin = (%x. \n. + (if even n then 0 + else -1 ^ ((n - Suc 0) div 2)/(real (fact n))) * + x ^ n)" +by (auto intro!: ext simp add: sin_def) + +lemma lemma_cos_ext: + "cos = (%x. \n. 
+ (if even n then -1 ^ (n div 2)/(real (fact n)) else 0) * + x ^ n)" +by (auto intro!: ext simp add: cos_def) + +lemma DERIV_sin [simp]: "DERIV sin x :> cos(x)" +apply (simp add: cos_def) +apply (subst lemma_sin_ext) +apply (auto simp add: sin_fdiffs2 [symmetric]) +apply (rule_tac K = "1 + \x\" in termdiffs) +apply (auto intro: sin_converges cos_converges sums_summable intro!: sums_minus [THEN sums_summable] simp add: cos_fdiffs sin_fdiffs) +done + +lemma DERIV_cos [simp]: "DERIV cos x :> -sin(x)" +apply (subst lemma_cos_ext) +apply (auto simp add: lemma_sin_minus cos_fdiffs2 [symmetric] minus_mult_left) +apply (rule_tac K = "1 + \x\" in termdiffs) +apply (auto intro: sin_converges cos_converges sums_summable intro!: sums_minus [THEN sums_summable] simp add: cos_fdiffs sin_fdiffs diffs_minus) +done + +lemma isCont_sin [simp]: "isCont sin x" +by (rule DERIV_sin [THEN DERIV_isCont]) + +lemma isCont_cos [simp]: "isCont cos x" +by (rule DERIV_cos [THEN DERIV_isCont]) + + +subsection {* Properties of Sine and Cosine *} lemma sin_zero [simp]: "sin 0 = 0" unfolding sin_def by (simp add: powser_zero) @@ -1088,9 +1054,6 @@ apply (simp del: realpow_Suc) done -lemma real_gt_one_ge_zero_add_less: "[| 1 < x; 0 \ y |] ==> 1 < x + (y::real)" -by arith - lemma abs_sin_le_one [simp]: "\sin x\ \ 1" by (rule power2_le_imp_le, simp_all add: sin_squared_eq) @@ -1187,7 +1150,7 @@ apply (auto simp add: diff_minus left_distrib right_distrib mult_ac add_ac) done -lemma sin_cos_minus [simp]: +lemma sin_cos_minus: "(sin(-x) + (sin x)) ^ 2 + (cos(-x) - (cos x)) ^ 2 = 0" apply (cut_tac y = 0 and x = x in lemma_DERIV_sin_cos_minus [THEN DERIV_isconst_all]) @@ -1195,14 +1158,10 @@ done lemma sin_minus [simp]: "sin (-x) = -sin(x)" -apply (cut_tac x = x in sin_cos_minus) -apply (simp del: sin_cos_minus) -done + using sin_cos_minus [where x=x] by simp lemma cos_minus [simp]: "cos (-x) = cos(x)" -apply (cut_tac x = x in sin_cos_minus) -apply (simp del: sin_cos_minus) -done + using sin_cos_minus [where x=x] by simp lemma sin_diff: "sin (x - y) = sin x * cos y - cos x * sin y" by (simp add: diff_minus sin_add) @@ -1217,16 +1176,14 @@ by (simp add: cos_diff mult_commute) lemma sin_double [simp]: "sin(2 * x) = 2* sin x * cos x" -by (cut_tac x = x and y = x in sin_add, auto) - + using sin_add [where x=x and y=x] by simp lemma cos_double: "cos(2* x) = ((cos x)\) - ((sin x)\)" -apply (cut_tac x = x and y = x in cos_add) -apply (simp add: power2_eq_square) -done + using cos_add [where x=x and y=x] + by (simp add: power2_eq_square) -subsection{*The Constant Pi*} +subsection {* The Constant Pi *} definition pi :: "real" where @@ -1401,8 +1358,8 @@ lemma pi_not_less_zero [simp]: "\ pi < 0" by (simp add: linorder_not_less) -lemma minus_pi_half_less_zero [simp]: "-(pi/2) < 0" -by auto +lemma minus_pi_half_less_zero: "-(pi/2) < 0" +by simp lemma sin_pi_half [simp]: "sin(pi/2) = 1" apply (cut_tac x = "pi/2" in sin_cos_squared_add2) @@ -1614,7 +1571,7 @@ done -subsection{*Tangent*} +subsection {* Tangent *} definition tan :: "real => real" where @@ -2139,11 +2096,6 @@ lemma sin_zero_abs_cos_one: "sin x = 0 ==> \cos x\ = 1" by (auto simp add: sin_zero_iff even_mult_two_ex) -lemma exp_eq_one_iff [simp]: "(exp (x::real) = 1) = (x = 0)" -apply auto -apply (drule_tac f = ln in arg_cong, auto) -done - lemma cos_one_sin_zero: "cos x = 1 ==> sin x = 0" by (cut_tac x = x in sin_cos_squared_add3, auto) @@ -2190,60 +2142,4 @@ apply (erule polar_ex2) done - -subsection {* Theorems about Limits *} - -(* need to rename second isCont_inverse *) - -lemma 
isCont_inv_fun: - fixes f g :: "real \ real" - shows "[| 0 < d; \z. \z - x\ \ d --> g(f(z)) = z; - \z. \z - x\ \ d --> isCont f z |] - ==> isCont g (f x)" -by (rule isCont_inverse_function) - -lemma isCont_inv_fun_inv: - fixes f g :: "real \ real" - shows "[| 0 < d; - \z. \z - x\ \ d --> g(f(z)) = z; - \z. \z - x\ \ d --> isCont f z |] - ==> \e. 0 < e & - (\y. 0 < \y - f(x)\ & \y - f(x)\ < e --> f(g(y)) = y)" -apply (drule isCont_inj_range) -prefer 2 apply (assumption, assumption, auto) -apply (rule_tac x = e in exI, auto) -apply (rotate_tac 2) -apply (drule_tac x = y in spec, auto) -done - - -text{*Bartle/Sherbert: Introduction to Real Analysis, Theorem 4.2.9, p. 110*} -lemma LIM_fun_gt_zero: - "[| f -- c --> (l::real); 0 < l |] - ==> \r. 0 < r & (\x::real. x \ c & \c - x\ < r --> 0 < f x)" -apply (auto simp add: LIM_def) -apply (drule_tac x = "l/2" in spec, safe, force) -apply (rule_tac x = s in exI) -apply (auto simp only: abs_less_iff) -done - -lemma LIM_fun_less_zero: - "[| f -- c --> (l::real); l < 0 |] - ==> \r. 0 < r & (\x::real. x \ c & \c - x\ < r --> f x < 0)" -apply (auto simp add: LIM_def) -apply (drule_tac x = "-l/2" in spec, safe, force) -apply (rule_tac x = s in exI) -apply (auto simp only: abs_less_iff) -done - - -lemma LIM_fun_not_zero: - "[| f -- c --> (l::real); l \ 0 |] - ==> \r. 0 < r & (\x::real. x \ c & \c - x\ < r --> f x \ 0)" -apply (cut_tac x = l and y = 0 in linorder_less_linear, auto) -apply (drule LIM_fun_less_zero) -apply (drule_tac [3] LIM_fun_gt_zero) -apply force+ -done - end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/Wellfounded.thy --- a/src/HOL/Wellfounded.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/Wellfounded.thy Tue Dec 30 11:10:01 2008 +0100 @@ -842,6 +842,11 @@ qed qed +lemma max_ext_additive: + "(A, B) \ max_ext R \ (C, D) \ max_ext R \ + (A \ C, B \ D) \ max_ext R" +by (force elim!: max_ext.cases) + definition min_ext :: "('a \ 'a) set \ ('a set \ 'a set) set" diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/ex/CodegenSML_Test.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/ex/CodegenSML_Test.thy Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,54 @@ +(* Title: Test file for Stefan Berghofer's SML code generator + Author: Tobias Nipkow, TU Muenchen +*) + +theory CodegenSML_Test +imports Executable_Set +begin + +lemma "True : {False, True} & False ~: {True}" +by evaluation + +lemma +"eq_set ({1::nat,2,3,2} \ {3,1,2,1}) {2,2,3,1} & + eq_set ({1::nat,2,3,2} \ {4,1,5,1}) {4,4,5,1,2,3}" +by evaluation + +lemma +"eq_set ({1::nat,2,3,2} \ {3,1,2,1}) {2,2,3,1} & + eq_set ({1::nat,2,3,2} \ {4,1,5,2}) {2,1,2}" +by evaluation + +lemma +"eq_set ({1::nat,2,3,2} - {3,1,2,1}) {} & + eq_set ({1::nat,2,3,2} - {4,1,5,2}) {3}" +by evaluation + +lemma +"eq_set (Union{{1::nat,2,3,2}, {3,1,2,1}}) {2,2,3,1} & + eq_set (Union{{1::nat,2,3,2}, {4,1,5,1}}) {4,4,5,1,2,3}" +by evaluation + +lemma +"eq_set (Inter{{1::nat,2,3,2}, {3,1,2,1}}) {2,2,3,1} & + eq_set (Inter{{1::nat,2,3,2}, {4,1,5,2}}) {2,1,2}" +by evaluation + +lemma "eq_set ((%x. x+2) ` {1::nat,2,3,2}) {4,5,3,3}" +by evaluation + +lemma +"(ALL x:{1::nat,2,3,2}. EX y : {4,5,2}. x < y) & + (EX x:{1::nat,2,3,2}. ALL y : {4,5,6}. x < y)" +by evaluation + +lemma +"eq_set {x : {4::nat,7,10}. 
2 dvd x } {4,10}" +by evaluation + +lemma +"fold (op +) (5::int) {3,7,9} = 24 & + fold_image (op *) id (2::int) {3,4,5} = 120" +by evaluation + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/ex/ExecutableContent.thy --- a/src/HOL/ex/ExecutableContent.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/ex/ExecutableContent.thy Tue Dec 30 11:10:01 2008 +0100 @@ -24,4 +24,11 @@ "~~/src/HOL/ex/Records" begin +text {* However, some aren't executable *} + +declare pair_leq_def[code del] +declare max_weak_def[code del] +declare min_weak_def[code del] +declare ms_weak_def[code del] + end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/ex/LexOrds.thy --- a/src/HOL/ex/LexOrds.thy Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,182 +0,0 @@ -(* Title: HOL/ex/LexOrds.thy - ID: $Id$ - Author: Lukas Bulwahn, TU Muenchen -*) - -header {* Examples and regression tests for method lexicographic order. *} - -theory LexOrds -imports Main -begin - -subsection {* Trivial examples *} - -fun identity :: "nat \ nat" -where - "identity n = n" - -fun yaSuc :: "nat \ nat" -where - "yaSuc 0 = 0" -| "yaSuc (Suc n) = Suc (yaSuc n)" - - -subsection {* Examples on natural numbers *} - -fun bin :: "(nat * nat) \ nat" -where - "bin (0, 0) = 1" -| "bin (Suc n, 0) = 0" -| "bin (0, Suc m) = 0" -| "bin (Suc n, Suc m) = bin (n, m) + bin (Suc n, m)" - - -fun t :: "(nat * nat) \ nat" -where - "t (0,n) = 0" -| "t (n,0) = 0" -| "t (Suc n, Suc m) = (if (n mod 2 = 0) then (t (Suc n, m)) else (t (n, Suc m)))" - - -fun k :: "(nat * nat) * (nat * nat) \ nat" -where - "k ((0,0),(0,0)) = 0" -| "k ((Suc z, y), (u,v)) = k((z, y), (u, v))" (* z is descending *) -| "k ((0, Suc y), (u,v)) = k((1, y), (u, v))" (* y is descending *) -| "k ((0,0), (Suc u, v)) = k((1, 1), (u, v))" (* u is descending *) -| "k ((0,0), (0, Suc v)) = k((1,1), (1,v))" (* v is descending *) - - -fun gcd2 :: "nat \ nat \ nat" -where - "gcd2 x 0 = x" -| "gcd2 0 y = y" -| "gcd2 (Suc x) (Suc y) = (if x < y then gcd2 (Suc x) (y - x) - else gcd2 (x - y) (Suc y))" - -fun ack :: "(nat * nat) \ nat" -where - "ack (0, m) = Suc m" -| "ack (Suc n, 0) = ack(n, 1)" -| "ack (Suc n, Suc m) = ack (n, ack (Suc n, m))" - - -fun greedy :: "nat * nat * nat * nat * nat => nat" -where - "greedy (Suc a, Suc b, Suc c, Suc d, Suc e) = - (if (a < 10) then greedy (Suc a, Suc b, c, d + 2, Suc e) else - (if (a < 20) then greedy (Suc a, b, Suc c, d, Suc e) else - (if (a < 30) then greedy (Suc a, b, Suc c, d, Suc e) else - (if (a < 40) then greedy (Suc a, b, Suc c, d, Suc e) else - (if (a < 50) then greedy (Suc a, b, Suc c, d, Suc e) else - (if (a < 60) then greedy (a, Suc b, Suc c, d, Suc e) else - (if (a < 70) then greedy (a, Suc b, Suc c, d, Suc e) else - (if (a < 80) then greedy (a, Suc b, Suc c, d, Suc e) else - (if (a < 90) then greedy (Suc a, Suc b, Suc c, d, e) else - greedy (Suc a, Suc b, Suc c, d, e))))))))))" -| "greedy (a, b, c, d, e) = 0" - - -fun blowup :: "nat => nat => nat => nat => nat => nat => nat => nat => nat => nat" -where - "blowup 0 0 0 0 0 0 0 0 0 = 0" -| "blowup 0 0 0 0 0 0 0 0 (Suc i) = Suc (blowup i i i i i i i i i)" -| "blowup 0 0 0 0 0 0 0 (Suc h) i = Suc (blowup h h h h h h h h i)" -| "blowup 0 0 0 0 0 0 (Suc g) h i = Suc (blowup g g g g g g g h i)" -| "blowup 0 0 0 0 0 (Suc f) g h i = Suc (blowup f f f f f f g h i)" -| "blowup 0 0 0 0 (Suc e) f g h i = Suc (blowup e e e e e f g h i)" -| "blowup 0 0 0 (Suc d) e f g h i = Suc (blowup d d d d e f g h i)" -| "blowup 0 0 (Suc c) d e f g h i = Suc (blowup c c c d e f g h i)" -| "blowup 0 (Suc b) c 
d e f g h i = Suc (blowup b b c d e f g h i)" -| "blowup (Suc a) b c d e f g h i = Suc (blowup a b c d e f g h i)" - - -subsection {* Simple examples with other datatypes than nat, e.g. trees and lists *} - -datatype tree = Node | Branch tree tree - -fun g_tree :: "tree * tree \ tree" -where - "g_tree (Node, Node) = Node" -| "g_tree (Node, Branch a b) = Branch Node (g_tree (a,b))" -| "g_tree (Branch a b, Node) = Branch (g_tree (a,Node)) b" -| "g_tree (Branch a b, Branch c d) = Branch (g_tree (a,c)) (g_tree (b,d))" - - -fun acklist :: "'a list * 'a list \ 'a list" -where - "acklist ([], m) = ((hd m)#m)" -| "acklist (n#ns, []) = acklist (ns, [n])" -| "acklist ((n#ns), (m#ms)) = acklist (ns, acklist ((n#ns), ms))" - - -subsection {* Examples with mutual recursion *} - -fun evn od :: "nat \ bool" -where - "evn 0 = True" -| "od 0 = False" -| "evn (Suc n) = od (Suc n)" -| "od (Suc n) = evn n" - - -fun sizechange_f :: "'a list => 'a list => 'a list" and -sizechange_g :: "'a list => 'a list => 'a list => 'a list" -where - "sizechange_f i x = (if i=[] then x else sizechange_g (tl i) x i)" -| "sizechange_g a b c = sizechange_f a (b @ c)" - - -fun - prod :: "nat => nat => nat => nat" and - eprod :: "nat => nat => nat => nat" and - oprod :: "nat => nat => nat => nat" -where - "prod x y z = (if y mod 2 = 0 then eprod x y z else oprod x y z)" -| "oprod x y z = eprod x (y - 1) (z+x)" -| "eprod x y z = (if y=0 then z else prod (2*x) (y div 2) z)" - - -fun - pedal :: "nat => nat => nat => nat" -and - coast :: "nat => nat => nat => nat" -where - "pedal 0 m c = c" -| "pedal n 0 c = c" -| "pedal n m c = - (if n < m then coast (n - 1) (m - 1) (c + m) - else pedal (n - 1) m (c + m))" - -| "coast n m c = - (if n < m then coast n (m - 1) (c + n) - else pedal n m (c + n))" - - -subsection {*Examples for an unprovable termination *} - -text {* If termination cannot be proven, the tactic gives further information about unprovable subgoals on the arguments *} - -function noterm :: "(nat * nat) \ nat" -where - "noterm (a,b) = noterm(b,a)" -by pat_completeness auto -(* termination by apply lexicographic_order*) - -function term_but_no_prove :: "nat * nat \ nat" -where - "term_but_no_prove (0,0) = 1" -| "term_but_no_prove (0, Suc b) = 0" -| "term_but_no_prove (Suc a, 0) = 0" -| "term_but_no_prove (Suc a, Suc b) = term_but_no_prove (b, a)" -by pat_completeness auto -(* termination by lexicographic_order *) - -text{* The tactic distinguishes between N = not provable AND F = False *} -function no_proof :: "nat \ nat" -where - "no_proof m = no_proof (Suc m)" -by pat_completeness auto -(* termination by lexicographic_order *) - -end \ No newline at end of file diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/ex/Quickcheck.thy --- a/src/HOL/ex/Quickcheck.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/ex/Quickcheck.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,11 +1,9 @@ -(* ID: $Id$ - Author: Florian Haftmann, TU Muenchen -*) +(* Author: Florian Haftmann, TU Muenchen *) header {* A simple counterexample generator *} theory Quickcheck -imports Random Code_Eval +imports Random Code_Eval Map begin subsection {* The @{text random} class *} @@ -25,166 +23,6 @@ end -text {* Datatypes *} - -definition - collapse :: "('a \ ('a \ 'b \ 'a) \ 'a) \ 'a \ 'b \ 'a" where - "collapse f = (do g \ f; g done)" - -ML {* -structure StateMonad = -struct - -fun liftT T sT = sT --> HOLogic.mk_prodT (T, sT); -fun liftT' sT = sT --> sT; - -fun return T sT x = Const (@{const_name return}, T --> liftT T sT) $ x; - -fun scomp T1 T2 sT f g = Const (@{const_name 
scomp}, - liftT T1 sT --> (T1 --> liftT T2 sT) --> liftT T2 sT) $ f $ g; - -end; -*} - -lemma random'_if: - fixes random' :: "index \ index \ seed \ ('a \ (unit \ term)) \ seed" - assumes "random' 0 j = (\s. undefined)" - and "\i. random' (Suc_index i) j = rhs2 i" - shows "random' i j s = (if i = 0 then undefined else rhs2 (i - 1) s)" - by (cases i rule: index.exhaust) (insert assms, simp_all) - -setup {* -let - exception REC of string; - fun mk_collapse thy ty = Sign.mk_const thy - (@{const_name collapse}, [@{typ seed}, ty]); - fun term_ty ty = HOLogic.mk_prodT (ty, @{typ "unit \ term"}); - fun mk_split thy ty ty' = Sign.mk_const thy - (@{const_name split}, [ty, @{typ "unit \ term"}, StateMonad.liftT (term_ty ty') @{typ seed}]); - fun mk_scomp_split thy ty ty' t t' = - StateMonad.scomp (term_ty ty) (term_ty ty') @{typ seed} t - (mk_split thy ty ty' $ Abs ("", ty, Abs ("", @{typ "unit \ term"}, t'))) - fun mk_cons thy this_ty (c, args) = - let - val tys = map (fst o fst) args; - val c_ty = tys ---> this_ty; - val c = Const (c, tys ---> this_ty); - val t_indices = map (curry ( op * ) 2) (length tys - 1 downto 0); - val c_indices = map (curry ( op + ) 1) t_indices; - val c_t = list_comb (c, map Bound c_indices); - val t_t = Abs ("", @{typ unit}, Eval.mk_term Free Typerep.typerep - (list_comb (c, map (fn k => Bound (k + 1)) t_indices)) - |> map_aterms (fn t as Bound _ => t $ @{term "()"} | t => t)); - val return = StateMonad.return (term_ty this_ty) @{typ seed} - (HOLogic.mk_prod (c_t, t_t)); - val t = fold_rev (fn ((ty, _), random) => - mk_scomp_split thy ty this_ty random) - args return; - val is_rec = exists (snd o fst) args; - in (is_rec, t) end; - fun mk_conss thy ty [] = NONE - | mk_conss thy ty [(_, t)] = SOME t - | mk_conss thy ty ts = SOME (mk_collapse thy (term_ty ty) $ - (Sign.mk_const thy (@{const_name select}, [StateMonad.liftT (term_ty ty) @{typ seed}]) $ - HOLogic.mk_list (StateMonad.liftT (term_ty ty) @{typ seed}) (map snd ts))); - fun mk_clauses thy ty (tyco, (ts_rec, ts_atom)) = - let - val SOME t_atom = mk_conss thy ty ts_atom; - in case mk_conss thy ty ts_rec - of SOME t_rec => mk_collapse thy (term_ty ty) $ - (Sign.mk_const thy (@{const_name select_default}, [StateMonad.liftT (term_ty ty) @{typ seed}]) $ - @{term "i\index"} $ t_rec $ t_atom) - | NONE => t_atom - end; - fun mk_random_eqs thy vs tycos = - let - val this_ty = Type (hd tycos, map TFree vs); - val this_ty' = StateMonad.liftT (term_ty this_ty) @{typ seed}; - val random_name = NameSpace.base @{const_name random}; - val random'_name = random_name ^ "_" ^ Class.type_name (hd tycos) ^ "'"; - fun random ty = Sign.mk_const thy (@{const_name random}, [ty]); - val random' = Free (random'_name, - @{typ index} --> @{typ index} --> this_ty'); - fun atom ty = ((ty, false), random ty $ @{term "j\index"}); - fun dtyp tyco = ((this_ty, true), random' $ @{term "i\index"} $ @{term "j\index"}); - fun rtyp tyco tys = raise REC - ("Will not generate random elements for mutual recursive type " ^ quote (hd tycos)); - val rhss = DatatypePackage.construction_interpretation thy - { atom = atom, dtyp = dtyp, rtyp = rtyp } vs tycos - |> (map o apsnd o map) (mk_cons thy this_ty) - |> (map o apsnd) (List.partition fst) - |> map (mk_clauses thy this_ty) - val eqss = map ((apsnd o map) (HOLogic.mk_Trueprop o HOLogic.mk_eq) o (fn rhs => ((this_ty, random'), [ - (random' $ @{term "0\index"} $ @{term "j\index"}, Abs ("s", @{typ seed}, - Const (@{const_name undefined}, HOLogic.mk_prodT (term_ty this_ty, @{typ seed})))), - (random' $ @{term 
"Suc_index i"} $ @{term "j\index"}, rhs) - ]))) rhss; - in eqss end; - fun random_inst [tyco] thy = - let - val (raw_vs, _) = DatatypePackage.the_datatype_spec thy tyco; - val vs = (map o apsnd) - (curry (Sorts.inter_sort (Sign.classes_of thy)) @{sort random}) raw_vs; - val { descr, index, ... } = DatatypePackage.the_datatype thy tyco; - val ((this_ty, random'), eqs') = singleton (mk_random_eqs thy vs) tyco; - val eq = (HOLogic.mk_Trueprop o HOLogic.mk_eq) - (Sign.mk_const thy (@{const_name random}, [this_ty]) $ @{term "i\index"}, - random' $ @{term "i\index"} $ @{term "i\index"}) - val del_func = Attrib.internal (fn _ => Thm.declaration_attribute - (fn thm => Context.mapping (Code.del_eqn thm) I)); - fun add_code simps lthy = - let - val thy = ProofContext.theory_of lthy; - val thm = @{thm random'_if} - |> Drule.instantiate' [SOME (Thm.ctyp_of thy this_ty)] [SOME (Thm.cterm_of thy random')] - |> (fn thm => thm OF simps) - |> singleton (ProofContext.export lthy (ProofContext.init thy)); - val c = (fst o dest_Const o fst o strip_comb o fst - o HOLogic.dest_eq o HOLogic.dest_Trueprop o Thm.prop_of) thm; - in - lthy - |> LocalTheory.theory (Code.del_eqns c - #> PureThy.add_thm ((fst (dest_Free random') ^ "_code", thm), [Thm.kind_internal]) - #-> Code.add_eqn) - end; - in - thy - |> TheoryTarget.instantiation ([tyco], vs, @{sort random}) - |> PrimrecPackage.add_primrec - [(Binding.name (fst (dest_Free random')), SOME (snd (dest_Free random')), NoSyn)] - (map (fn eq => ((Binding.empty, [del_func]), eq)) eqs') - |-> add_code - |> `(fn lthy => Syntax.check_term lthy eq) - |-> (fn eq => Specification.definition (NONE, (Attrib.empty_binding, eq))) - |> snd - |> Class.prove_instantiation_instance (K (Class.intro_classes_tac [])) - |> LocalTheory.exit_global - end - | random_inst tycos thy = raise REC - ("Will not generate random elements for mutual recursive type(s) " ^ commas (map quote tycos)); - fun add_random_inst tycos thy = random_inst tycos thy - handle REC msg => (warning msg; thy); -in DatatypePackage.interpretation add_random_inst end -*} - -text {* Type @{typ int} *} - -instantiation int :: random -begin - -definition - "random n = (do - (b, _) \ random n; - (m, t) \ random n; - return (if b then (int m, \u. Code_Eval.App (Code_Eval.Const (STR ''Int.int'') TYPEREP(nat \ int)) (t ())) - else (- int m, \u. Code_Eval.App (Code_Eval.Const (STR ''HOL.uminus_class.uminus'') TYPEREP(int \ int)) - (Code_Eval.App (Code_Eval.Const (STR ''Int.int'') TYPEREP(nat \ int)) (t ())))) - done)" - -instance .. - -end - text {* Type @{typ "'a \ 'b"} *} ML {* @@ -240,6 +78,170 @@ code_reserved SML Random_Engine +text {* Datatypes *} + +definition + collapse :: "('a \ ('a \ 'b \ 'a) \ 'a) \ 'a \ 'b \ 'a" where + "collapse f = (do g \ f; g done)" + +ML {* +structure StateMonad = +struct + +fun liftT T sT = sT --> HOLogic.mk_prodT (T, sT); +fun liftT' sT = sT --> sT; + +fun return T sT x = Const (@{const_name return}, T --> liftT T sT) $ x; + +fun scomp T1 T2 sT f g = Const (@{const_name scomp}, + liftT T1 sT --> (T1 --> liftT T2 sT) --> liftT T2 sT) $ f $ g; + +end; +*} + +lemma random'_if: + fixes random' :: "index \ index \ seed \ ('a \ (unit \ term)) \ seed" + assumes "random' 0 j = (\s. undefined)" + and "\i. 
random' (Suc_index i) j = rhs2 i" + shows "random' i j s = (if i = 0 then undefined else rhs2 (i - 1) s)" + by (cases i rule: index.exhaust) (insert assms, simp_all) + +setup {* +let + exception REC of string; + exception TYP of string; + fun mk_collapse thy ty = Sign.mk_const thy + (@{const_name collapse}, [@{typ seed}, ty]); + fun term_ty ty = HOLogic.mk_prodT (ty, @{typ "unit \ term"}); + fun mk_split thy ty ty' = Sign.mk_const thy + (@{const_name split}, [ty, @{typ "unit \ term"}, StateMonad.liftT (term_ty ty') @{typ seed}]); + fun mk_scomp_split thy ty ty' t t' = + StateMonad.scomp (term_ty ty) (term_ty ty') @{typ seed} t + (mk_split thy ty ty' $ Abs ("", ty, Abs ("", @{typ "unit \ term"}, t'))) + fun mk_cons thy this_ty (c, args) = + let + val tys = map (fst o fst) args; + val c_ty = tys ---> this_ty; + val c = Const (c, tys ---> this_ty); + val t_indices = map (curry ( op * ) 2) (length tys - 1 downto 0); + val c_indices = map (curry ( op + ) 1) t_indices; + val c_t = list_comb (c, map Bound c_indices); + val t_t = Abs ("", @{typ unit}, Eval.mk_term Free Typerep.typerep + (list_comb (c, map (fn k => Bound (k + 1)) t_indices)) + |> map_aterms (fn t as Bound _ => t $ @{term "()"} | t => t)); + val return = StateMonad.return (term_ty this_ty) @{typ seed} + (HOLogic.mk_prod (c_t, t_t)); + val t = fold_rev (fn ((ty, _), random) => + mk_scomp_split thy ty this_ty random) + args return; + val is_rec = exists (snd o fst) args; + in (is_rec, t) end; + fun mk_conss thy ty [] = NONE + | mk_conss thy ty [(_, t)] = SOME t + | mk_conss thy ty ts = SOME (mk_collapse thy (term_ty ty) $ + (Sign.mk_const thy (@{const_name select}, [StateMonad.liftT (term_ty ty) @{typ seed}]) $ + HOLogic.mk_list (StateMonad.liftT (term_ty ty) @{typ seed}) (map snd ts))); + fun mk_clauses thy ty (tyco, (ts_rec, ts_atom)) = + let + val SOME t_atom = mk_conss thy ty ts_atom; + in case mk_conss thy ty ts_rec + of SOME t_rec => mk_collapse thy (term_ty ty) $ + (Sign.mk_const thy (@{const_name select_default}, [StateMonad.liftT (term_ty ty) @{typ seed}]) $ + @{term "i\index"} $ t_rec $ t_atom) + | NONE => t_atom + end; + fun mk_random_eqs thy vs tycos = + let + val this_ty = Type (hd tycos, map TFree vs); + val this_ty' = StateMonad.liftT (term_ty this_ty) @{typ seed}; + val random_name = NameSpace.base @{const_name random}; + val random'_name = random_name ^ "_" ^ Class.type_name (hd tycos) ^ "'"; + fun random ty = Sign.mk_const thy (@{const_name random}, [ty]); + val random' = Free (random'_name, + @{typ index} --> @{typ index} --> this_ty'); + fun atom ty = if Sign.of_sort thy (ty, @{sort random}) + then ((ty, false), random ty $ @{term "j\index"}) + else raise TYP + ("Will not generate random elements for type(s) " ^ quote (hd tycos)); + fun dtyp tyco = ((this_ty, true), random' $ @{term "i\index"} $ @{term "j\index"}); + fun rtyp tyco tys = raise REC + ("Will not generate random elements for mutual recursive type " ^ quote (hd tycos)); + val rhss = DatatypePackage.construction_interpretation thy + { atom = atom, dtyp = dtyp, rtyp = rtyp } vs tycos + |> (map o apsnd o map) (mk_cons thy this_ty) + |> (map o apsnd) (List.partition fst) + |> map (mk_clauses thy this_ty) + val eqss = map ((apsnd o map) (HOLogic.mk_Trueprop o HOLogic.mk_eq) o (fn rhs => ((this_ty, random'), [ + (random' $ @{term "0\index"} $ @{term "j\index"}, Abs ("s", @{typ seed}, + Const (@{const_name undefined}, HOLogic.mk_prodT (term_ty this_ty, @{typ seed})))), + (random' $ @{term "Suc_index i"} $ @{term "j\index"}, rhs) + ]))) rhss; + in eqss end; + 
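+  (* single-datatype case: the equations from mk_random_eqs become a primrec
+     definition plus a derived code equation, and the class instance is proved;
+     mutually recursive or otherwise unsupported types fall back to a warning
+     via the REC/TYP exceptions handled in add_random_inst below *)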
fun random_inst [tyco] thy = + let + val (raw_vs, _) = DatatypePackage.the_datatype_spec thy tyco; + val vs = (map o apsnd) + (curry (Sorts.inter_sort (Sign.classes_of thy)) @{sort random}) raw_vs; + val ((this_ty, random'), eqs') = singleton (mk_random_eqs thy vs) tyco; + val eq = (HOLogic.mk_Trueprop o HOLogic.mk_eq) + (Sign.mk_const thy (@{const_name random}, [this_ty]) $ @{term "i\index"}, + random' $ @{term "i\index"} $ @{term "i\index"}) + val del_func = Attrib.internal (fn _ => Thm.declaration_attribute + (fn thm => Context.mapping (Code.del_eqn thm) I)); + fun add_code simps lthy = + let + val thy = ProofContext.theory_of lthy; + val thm = @{thm random'_if} + |> Drule.instantiate' [SOME (Thm.ctyp_of thy this_ty)] [SOME (Thm.cterm_of thy random')] + |> (fn thm => thm OF simps) + |> singleton (ProofContext.export lthy (ProofContext.init thy)); + val c = (fst o dest_Const o fst o strip_comb o fst + o HOLogic.dest_eq o HOLogic.dest_Trueprop o Thm.prop_of) thm; + in + lthy + |> LocalTheory.theory (Code.del_eqns c + #> PureThy.add_thm ((fst (dest_Free random') ^ "_code", thm), [Thm.kind_internal]) + #-> Code.add_eqn) + end; + in + thy + |> TheoryTarget.instantiation ([tyco], vs, @{sort random}) + |> PrimrecPackage.add_primrec + [(Binding.name (fst (dest_Free random')), SOME (snd (dest_Free random')), NoSyn)] + (map (fn eq => ((Binding.empty, [del_func]), eq)) eqs') + |-> add_code + |> `(fn lthy => Syntax.check_term lthy eq) + |-> (fn eq => Specification.definition (NONE, (Attrib.empty_binding, eq))) + |> snd + |> Class.prove_instantiation_instance (K (Class.intro_classes_tac [])) + |> LocalTheory.exit_global + end + | random_inst tycos thy = raise REC + ("Will not generate random elements for mutual recursive type(s) " ^ commas (map quote tycos)); + fun add_random_inst tycos thy = random_inst tycos thy + handle REC msg => (warning msg; thy) + | TYP msg => (warning msg; thy) +in DatatypePackage.interpretation add_random_inst end +*} + +text {* Type @{typ int} *} + +instantiation int :: random +begin + +definition + "random n = (do + (b, _) \ random n; + (m, t) \ random n; + return (if b then (int m, \u. Code_Eval.App (Code_Eval.Const (STR ''Int.int'') TYPEREP(nat \ int)) (t ())) + else (- int m, \u. Code_Eval.App (Code_Eval.Const (STR ''HOL.uminus_class.uminus'') TYPEREP(int \ int)) + (Code_Eval.App (Code_Eval.Const (STR ''Int.int'') TYPEREP(nat \ int)) (t ())))) + done)" + +instance .. + +end + subsection {* Quickcheck generator *} diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/ex/ROOT.ML --- a/src/HOL/ex/ROOT.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOL/ex/ROOT.ML Tue Dec 30 11:10:01 2008 +0100 @@ -56,7 +56,7 @@ "set", "Meson_Test", "Code_Antiq", - "LexOrds", + "Termination", "Coherent" ]; diff -r 8f84a608883d -r ea97aa6aeba2 src/HOL/ex/Termination.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/ex/Termination.thy Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,212 @@ +(* Title: HOL/ex/Termination.thy + ID: $Id$ + Author: Lukas Bulwahn, TU Muenchen + Author: Alexander Krauss, TU Muenchen +*) + +header {* Examples and regression tests for automated termination proofs *} + +theory Termination +imports Main Multiset +begin + +text {* + The @{text fun} command uses the method @{text lexicographic_order} by default. 
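+  Examples that are beyond @{text lexicographic_order} are handled by the
+  @{text sizechange} method in the refined analysis section below.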
+*} + +subsection {* Trivial examples *} + +fun identity :: "nat \ nat" +where + "identity n = n" + +fun yaSuc :: "nat \ nat" +where + "yaSuc 0 = 0" +| "yaSuc (Suc n) = Suc (yaSuc n)" + + +subsection {* Examples on natural numbers *} + +fun bin :: "(nat * nat) \ nat" +where + "bin (0, 0) = 1" +| "bin (Suc n, 0) = 0" +| "bin (0, Suc m) = 0" +| "bin (Suc n, Suc m) = bin (n, m) + bin (Suc n, m)" + + +fun t :: "(nat * nat) \ nat" +where + "t (0,n) = 0" +| "t (n,0) = 0" +| "t (Suc n, Suc m) = (if (n mod 2 = 0) then (t (Suc n, m)) else (t (n, Suc m)))" + + +fun k :: "(nat * nat) * (nat * nat) \ nat" +where + "k ((0,0),(0,0)) = 0" +| "k ((Suc z, y), (u,v)) = k((z, y), (u, v))" (* z is descending *) +| "k ((0, Suc y), (u,v)) = k((1, y), (u, v))" (* y is descending *) +| "k ((0,0), (Suc u, v)) = k((1, 1), (u, v))" (* u is descending *) +| "k ((0,0), (0, Suc v)) = k((1,1), (1,v))" (* v is descending *) + + +fun gcd2 :: "nat \ nat \ nat" +where + "gcd2 x 0 = x" +| "gcd2 0 y = y" +| "gcd2 (Suc x) (Suc y) = (if x < y then gcd2 (Suc x) (y - x) + else gcd2 (x - y) (Suc y))" + +fun ack :: "(nat * nat) \ nat" +where + "ack (0, m) = Suc m" +| "ack (Suc n, 0) = ack(n, 1)" +| "ack (Suc n, Suc m) = ack (n, ack (Suc n, m))" + + +fun greedy :: "nat * nat * nat * nat * nat => nat" +where + "greedy (Suc a, Suc b, Suc c, Suc d, Suc e) = + (if (a < 10) then greedy (Suc a, Suc b, c, d + 2, Suc e) else + (if (a < 20) then greedy (Suc a, b, Suc c, d, Suc e) else + (if (a < 30) then greedy (Suc a, b, Suc c, d, Suc e) else + (if (a < 40) then greedy (Suc a, b, Suc c, d, Suc e) else + (if (a < 50) then greedy (Suc a, b, Suc c, d, Suc e) else + (if (a < 60) then greedy (a, Suc b, Suc c, d, Suc e) else + (if (a < 70) then greedy (a, Suc b, Suc c, d, Suc e) else + (if (a < 80) then greedy (a, Suc b, Suc c, d, Suc e) else + (if (a < 90) then greedy (Suc a, Suc b, Suc c, d, e) else + greedy (Suc a, Suc b, Suc c, d, e))))))))))" +| "greedy (a, b, c, d, e) = 0" + + +fun blowup :: "nat => nat => nat => nat => nat => nat => nat => nat => nat => nat" +where + "blowup 0 0 0 0 0 0 0 0 0 = 0" +| "blowup 0 0 0 0 0 0 0 0 (Suc i) = Suc (blowup i i i i i i i i i)" +| "blowup 0 0 0 0 0 0 0 (Suc h) i = Suc (blowup h h h h h h h h i)" +| "blowup 0 0 0 0 0 0 (Suc g) h i = Suc (blowup g g g g g g g h i)" +| "blowup 0 0 0 0 0 (Suc f) g h i = Suc (blowup f f f f f f g h i)" +| "blowup 0 0 0 0 (Suc e) f g h i = Suc (blowup e e e e e f g h i)" +| "blowup 0 0 0 (Suc d) e f g h i = Suc (blowup d d d d e f g h i)" +| "blowup 0 0 (Suc c) d e f g h i = Suc (blowup c c c d e f g h i)" +| "blowup 0 (Suc b) c d e f g h i = Suc (blowup b b c d e f g h i)" +| "blowup (Suc a) b c d e f g h i = Suc (blowup a b c d e f g h i)" + + +subsection {* Simple examples with other datatypes than nat, e.g. 
trees and lists *} + +datatype tree = Node | Branch tree tree + +fun g_tree :: "tree * tree \ tree" +where + "g_tree (Node, Node) = Node" +| "g_tree (Node, Branch a b) = Branch Node (g_tree (a,b))" +| "g_tree (Branch a b, Node) = Branch (g_tree (a,Node)) b" +| "g_tree (Branch a b, Branch c d) = Branch (g_tree (a,c)) (g_tree (b,d))" + + +fun acklist :: "'a list * 'a list \ 'a list" +where + "acklist ([], m) = ((hd m)#m)" +| "acklist (n#ns, []) = acklist (ns, [n])" +| "acklist ((n#ns), (m#ms)) = acklist (ns, acklist ((n#ns), ms))" + + +subsection {* Examples with mutual recursion *} + +fun evn od :: "nat \ bool" +where + "evn 0 = True" +| "od 0 = False" +| "evn (Suc n) = od (Suc n)" +| "od (Suc n) = evn n" + + +fun sizechange_f :: "'a list => 'a list => 'a list" and +sizechange_g :: "'a list => 'a list => 'a list => 'a list" +where + "sizechange_f i x = (if i=[] then x else sizechange_g (tl i) x i)" +| "sizechange_g a b c = sizechange_f a (b @ c)" + +fun + pedal :: "nat => nat => nat => nat" +and + coast :: "nat => nat => nat => nat" +where + "pedal 0 m c = c" +| "pedal n 0 c = c" +| "pedal n m c = + (if n < m then coast (n - 1) (m - 1) (c + m) + else pedal (n - 1) m (c + m))" + +| "coast n m c = + (if n < m then coast n (m - 1) (c + n) + else pedal n m (c + n))" + + + +subsection {* Refined analysis: The @{text sizechange} method *} + +text {* Unsolvable for @{text lexicographic_order} *} + +function fun1 :: "nat * nat \ nat" +where + "fun1 (0,0) = 1" +| "fun1 (0, Suc b) = 0" +| "fun1 (Suc a, 0) = 0" +| "fun1 (Suc a, Suc b) = fun1 (b, a)" +by pat_completeness auto +termination by sizechange + + +text {* + @{text lexicographic_order} can do the following, but it is much slower. +*} + +function + prod :: "nat => nat => nat => nat" and + eprod :: "nat => nat => nat => nat" and + oprod :: "nat => nat => nat => nat" +where + "prod x y z = (if y mod 2 = 0 then eprod x y z else oprod x y z)" +| "oprod x y z = eprod x (y - 1) (z+x)" +| "eprod x y z = (if y=0 then z else prod (2*x) (y div 2) z)" +by pat_completeness auto +termination by sizechange + +text {* + Permutations of arguments: +*} + +function perm :: "nat \ nat \ nat \ nat" +where + "perm m n r = (if r > 0 then perm m (r - 1) n + else if n > 0 then perm r (n - 1) m + else m)" +by auto +termination by sizechange + +text {* + Artificial examples and regression tests: +*} + +function + fun2 :: "nat \ nat \ nat \ nat" +where + "fun2 x y z = + (if x > 1000 \ z > 0 then + fun2 (min x y) y (z - 1) + else if y > 0 \ x > 100 then + fun2 x (y - 1) (2 * z) + else if z > 0 then + fun2 (min y (z - 1)) x x + else + 0 + )" +by pat_completeness auto +termination by sizechange -- {* requires Multiset *} + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/Adm.thy --- a/src/HOLCF/Adm.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/Adm.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ (* Title: HOLCF/Adm.thy - ID: $Id$ Author: Franz Regensburger and Brian Huffman *) diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/Cfun.thy --- a/src/HOLCF/Cfun.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/Cfun.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ (* Title: HOLCF/Cfun.thy - ID: $Id$ Author: Franz Regensburger Definition of the type -> of continuous functions. diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/Cont.thy --- a/src/HOLCF/Cont.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/Cont.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,8 +1,5 @@ (* Title: HOLCF/Cont.thy - ID: $Id$ Author: Franz Regensburger - -Results about continuity and monotonicity. 
*) header {* Continuity and monotonicity *} diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/Cprod.thy --- a/src/HOLCF/Cprod.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/Cprod.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,8 +1,5 @@ (* Title: HOLCF/Cprod.thy - ID: $Id$ Author: Franz Regensburger - -Partial ordering for cartesian product of HOL products. *) header {* The cpo of cartesian products *} diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/Discrete.thy --- a/src/HOLCF/Discrete.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/Discrete.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,8 +1,5 @@ (* Title: HOLCF/Discrete.thy - ID: $Id$ Author: Tobias Nipkow - -Discrete CPOs. *) header {* Discrete cpo types *} diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/Domain.thy --- a/src/HOLCF/Domain.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/Domain.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ (* Title: HOLCF/Domain.thy - ID: $Id$ Author: Brian Huffman *) diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/Dsum.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOLCF/Dsum.thy Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,251 @@ +(* Title: HOLCF/Dsum.thy + Author: Brian Huffman +*) + +header {* The cpo of disjoint sums *} + +theory Dsum +imports Bifinite +begin + +subsection {* Ordering on type @{typ "'a + 'b"} *} + +instantiation "+" :: (sq_ord, sq_ord) sq_ord +begin + +definition + less_sum_def: "x \ y \ case x of + Inl a \ (case y of Inl b \ a \ b | Inr b \ False) | + Inr a \ (case y of Inl b \ False | Inr b \ a \ b)" + +instance .. +end + +lemma Inl_less_iff [simp]: "Inl x \ Inl y = x \ y" +unfolding less_sum_def by simp + +lemma Inr_less_iff [simp]: "Inr x \ Inr y = x \ y" +unfolding less_sum_def by simp + +lemma Inl_less_Inr [simp]: "\ Inl x \ Inr y" +unfolding less_sum_def by simp + +lemma Inr_less_Inl [simp]: "\ Inr x \ Inl y" +unfolding less_sum_def by simp + +lemma Inl_mono: "x \ y \ Inl x \ Inl y" +by simp + +lemma Inr_mono: "x \ y \ Inr x \ Inr y" +by simp + +lemma Inl_lessE: "\Inl a \ x; \b. \x = Inl b; a \ b\ \ R\ \ R" +by (cases x, simp_all) + +lemma Inr_lessE: "\Inr a \ x; \b. \x = Inr b; a \ b\ \ R\ \ R" +by (cases x, simp_all) + +lemmas sum_less_elims = Inl_lessE Inr_lessE + +lemma sum_less_cases: + "\x \ y; + \a b. \x = Inl a; y = Inl b; a \ b\ \ R; + \a b. \x = Inr a; y = Inr b; a \ b\ \ R\ + \ R" +by (cases x, safe elim!: sum_less_elims, auto) + +subsection {* Sum type is a complete partial order *} + +instance "+" :: (po, po) po +proof + fix x :: "'a + 'b" + show "x \ x" + by (induct x, simp_all) +next + fix x y :: "'a + 'b" + assume "x \ y" and "y \ x" thus "x = y" + by (induct x, auto elim!: sum_less_elims intro: antisym_less) +next + fix x y z :: "'a + 'b" + assume "x \ y" and "y \ z" thus "x \ z" + by (induct x, auto elim!: sum_less_elims intro: trans_less) +qed + +lemma monofun_inv_Inl: "monofun (\p. THE a. p = Inl a)" +by (rule monofunI, erule sum_less_cases, simp_all) + +lemma monofun_inv_Inr: "monofun (\p. THE b. p = Inr b)" +by (rule monofunI, erule sum_less_cases, simp_all) + +lemma sum_chain_cases: + assumes Y: "chain Y" + assumes A: "\A. \chain A; Y = (\i. Inl (A i))\ \ R" + assumes B: "\B. \chain B; Y = (\i. 
Inr (B i))\ \ R" + shows "R" + apply (cases "Y 0") + apply (rule A) + apply (rule ch2ch_monofun [OF monofun_inv_Inl Y]) + apply (rule ext) + apply (cut_tac j=i in chain_mono [OF Y le0], simp) + apply (erule Inl_lessE, simp) + apply (rule B) + apply (rule ch2ch_monofun [OF monofun_inv_Inr Y]) + apply (rule ext) + apply (cut_tac j=i in chain_mono [OF Y le0], simp) + apply (erule Inr_lessE, simp) +done + +lemma is_lub_Inl: "range S <<| x \ range (\i. Inl (S i)) <<| Inl x" + apply (rule is_lubI) + apply (rule ub_rangeI) + apply (simp add: is_ub_lub) + apply (frule ub_rangeD [where i=arbitrary]) + apply (erule Inl_lessE, simp) + apply (erule is_lub_lub) + apply (rule ub_rangeI) + apply (drule ub_rangeD, simp) +done + +lemma is_lub_Inr: "range S <<| x \ range (\i. Inr (S i)) <<| Inr x" + apply (rule is_lubI) + apply (rule ub_rangeI) + apply (simp add: is_ub_lub) + apply (frule ub_rangeD [where i=arbitrary]) + apply (erule Inr_lessE, simp) + apply (erule is_lub_lub) + apply (rule ub_rangeI) + apply (drule ub_rangeD, simp) +done + +instance "+" :: (cpo, cpo) cpo + apply intro_classes + apply (erule sum_chain_cases, safe) + apply (rule exI) + apply (rule is_lub_Inl) + apply (erule cpo_lubI) + apply (rule exI) + apply (rule is_lub_Inr) + apply (erule cpo_lubI) +done + +subsection {* Continuity of @{term Inl}, @{term Inr}, @{term sum_case} *} + +lemma cont2cont_Inl [simp]: "cont f \ cont (\x. Inl (f x))" +by (fast intro: contI is_lub_Inl elim: contE) + +lemma cont2cont_Inr [simp]: "cont f \ cont (\x. Inr (f x))" +by (fast intro: contI is_lub_Inr elim: contE) + +lemma cont_Inl: "cont Inl" +by (rule cont2cont_Inl [OF cont_id]) + +lemma cont_Inr: "cont Inr" +by (rule cont2cont_Inr [OF cont_id]) + +lemmas ch2ch_Inl [simp] = ch2ch_cont [OF cont_Inl] +lemmas ch2ch_Inr [simp] = ch2ch_cont [OF cont_Inr] + +lemmas lub_Inl = cont2contlubE [OF cont_Inl, symmetric] +lemmas lub_Inr = cont2contlubE [OF cont_Inr, symmetric] + +lemma cont_sum_case1: + assumes f: "\a. cont (\x. f x a)" + assumes g: "\b. cont (\x. g x b)" + shows "cont (\x. case y of Inl a \ f x a | Inr b \ g x b)" +by (induct y, simp add: f, simp add: g) + +lemma cont_sum_case2: "\cont f; cont g\ \ cont (sum_case f g)" +apply (rule contI) +apply (erule sum_chain_cases) +apply (simp add: cont2contlubE [OF cont_Inl, symmetric] contE) +apply (simp add: cont2contlubE [OF cont_Inr, symmetric] contE) +done + +lemma cont2cont_sum_case [simp]: + assumes f1: "\a. cont (\x. f x a)" and f2: "\x. cont (\a. f x a)" + assumes g1: "\b. cont (\x. g x b)" and g2: "\x. cont (\b. g x b)" + assumes h: "cont (\x. h x)" + shows "cont (\x. 
case h x of Inl a \ f x a | Inr b \ g x b)" +apply (rule cont2cont_app2 [OF cont2cont_lambda _ h]) +apply (rule cont_sum_case1 [OF f1 g1]) +apply (rule cont_sum_case2 [OF f2 g2]) +done + +subsection {* Compactness and chain-finiteness *} + +lemma compact_Inl: "compact a \ compact (Inl a)" +apply (rule compactI2) +apply (erule sum_chain_cases, safe) +apply (simp add: lub_Inl) +apply (erule (2) compactD2) +apply (simp add: lub_Inr) +done + +lemma compact_Inr: "compact a \ compact (Inr a)" +apply (rule compactI2) +apply (erule sum_chain_cases, safe) +apply (simp add: lub_Inl) +apply (simp add: lub_Inr) +apply (erule (2) compactD2) +done + +lemma compact_Inl_rev: "compact (Inl a) \ compact a" +unfolding compact_def +by (drule adm_subst [OF cont_Inl], simp) + +lemma compact_Inr_rev: "compact (Inr a) \ compact a" +unfolding compact_def +by (drule adm_subst [OF cont_Inr], simp) + +lemma compact_Inl_iff [simp]: "compact (Inl a) = compact a" +by (safe elim!: compact_Inl compact_Inl_rev) + +lemma compact_Inr_iff [simp]: "compact (Inr a) = compact a" +by (safe elim!: compact_Inr compact_Inr_rev) + +instance "+" :: (chfin, chfin) chfin +apply intro_classes +apply (erule compact_imp_max_in_chain) +apply (case_tac "\i. Y i", simp_all) +done + +instance "+" :: (finite_po, finite_po) finite_po .. + +instance "+" :: (discrete_cpo, discrete_cpo) discrete_cpo +by intro_classes (simp add: less_sum_def split: sum.split) + +subsection {* Sum type is a bifinite domain *} + +instantiation "+" :: (profinite, profinite) profinite +begin + +definition + approx_sum_def: "approx = + (\n. \ x. case x of Inl a \ Inl (approx n\a) | Inr b \ Inr (approx n\b))" + +lemma approx_Inl [simp]: "approx n\(Inl x) = Inl (approx n\x)" + unfolding approx_sum_def by simp + +lemma approx_Inr [simp]: "approx n\(Inr x) = Inr (approx n\x)" + unfolding approx_sum_def by simp + +instance proof + fix i :: nat and x :: "'a + 'b" + show "chain (approx :: nat \ 'a + 'b \ 'a + 'b)" + unfolding approx_sum_def + by (rule ch2ch_LAM, case_tac x, simp_all) + show "(\i. approx i\x) = x" + by (induct x, simp_all add: lub_Inl lub_Inr) + show "approx i\(approx i\x) = approx i\x" + by (induct x, simp_all) + have "{x::'a + 'b. approx i\x = x} \ + {x::'a. approx i\x = x} <+> {x::'b. approx i\x = x}" + by (rule subsetI, case_tac x, simp_all add: InlI InrI) + thus "finite {x::'a + 'b. approx i\x = x}" + by (rule finite_subset, + intro finite_Plus finite_fixes_approx) +qed + +end + +end diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/Ffun.thy --- a/src/HOLCF/Ffun.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/Ffun.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,10 +1,5 @@ (* Title: HOLCF/FunCpo.thy - ID: $Id$ Author: Franz Regensburger - -Definition of the partial ordering for the type of all functions => (fun) - -Class instance of => (fun) for class pcpo. *) header {* Class instances for the full function space *} diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/Fix.thy --- a/src/HOLCF/Fix.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/Fix.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,8 +1,5 @@ (* Title: HOLCF/Fix.thy - ID: $Id$ Author: Franz Regensburger - -Definitions for fixed point operator and admissibility. 
*) header {* Fixed point operator and admissibility *} diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/Fixrec.thy --- a/src/HOLCF/Fixrec.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/Fixrec.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ (* Title: HOLCF/Fixrec.thy - ID: $Id$ Author: Amber Telfer and Brian Huffman *) @@ -17,13 +16,13 @@ pcpodef (open) 'a maybe = "UNIV::(one ++ 'a u) set" by simp_all -constdefs - fail :: "'a maybe" - "fail \ Abs_maybe (sinl\ONE)" +definition + fail :: "'a maybe" where + "fail = Abs_maybe (sinl\ONE)" -constdefs +definition return :: "'a \ 'a maybe" where - "return \ \ x. Abs_maybe (sinr\(up\x))" + "return = (\ x. Abs_maybe (sinr\(up\x)))" definition maybe_when :: "'b \ ('a \ 'b) \ 'a maybe \ 'b::pcpo" where @@ -59,7 +58,8 @@ cont_Abs_maybe Abs_maybe_inverse Rep_maybe_strict) translations - "case m of fail \ t1 | return\x \ t2" == "CONST maybe_when\t1\(\ x. t2)\m" + "case m of XCONST fail \ t1 | XCONST return\x \ t2" + == "CONST maybe_when\t1\(\ x. t2)\m" subsubsection {* Monadic bind operator *} @@ -164,8 +164,8 @@ subsection {* Case branch combinator *} -constdefs - branch :: "('a \ 'b maybe) \ ('b \ 'c) \ ('a \ 'c maybe)" +definition + branch :: "('a \ 'b maybe) \ ('b \ 'c) \ ('a \ 'c maybe)" where "branch p \ \ r x. bind\(p\x)\(\ y. return\(r\y))" lemma branch_rews: diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/HOLCF.thy --- a/src/HOLCF/HOLCF.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/HOLCF.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,12 +1,12 @@ (* Title: HOLCF/HOLCF.thy - ID: $Id$ Author: Franz Regensburger HOLCF -- a semantic extension of HOL by the LCF logic. *) theory HOLCF -imports Sprod Ssum Up Lift Discrete One Tr Domain ConvexPD Algebraic Universal Main +imports + Domain ConvexPD Algebraic Universal Dsum Main uses "holcf_logic.ML" "Tools/cont_consts.ML" diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/IsaMakefile --- a/src/HOLCF/IsaMakefile Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/IsaMakefile Tue Dec 30 11:10:01 2008 +0100 @@ -41,6 +41,7 @@ Discrete.thy \ Deflation.thy \ Domain.thy \ + Dsum.thy \ Eventual.thy \ Ffun.thy \ Fixrec.thy \ diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/Lift.thy --- a/src/HOLCF/Lift.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/Lift.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ (* Title: HOLCF/Lift.thy - ID: $Id$ Author: Olaf Mueller *) diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/One.thy --- a/src/HOLCF/One.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/One.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,8 +1,5 @@ (* Title: HOLCF/One.thy - ID: $Id$ Author: Oscar Slotosch - -The unit domain. 
*) header {* The unit domain *} @@ -15,8 +12,9 @@ translations "one" <= (type) "unit lift" -constdefs +definition ONE :: "one" +where "ONE == Def ()" text {* Exhaustion and Elimination for type @{typ one} *} diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/Pcpo.thy --- a/src/HOLCF/Pcpo.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/Pcpo.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ (* Title: HOLCF/Pcpo.thy - ID: $Id$ Author: Franz Regensburger *) diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/Pcpodef.thy --- a/src/HOLCF/Pcpodef.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/Pcpodef.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ (* Title: HOLCF/Pcpodef.thy - ID: $Id$ Author: Brian Huffman *) diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/Porder.thy --- a/src/HOLCF/Porder.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/Porder.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ (* Title: HOLCF/Porder.thy - ID: $Id$ Author: Franz Regensburger and Brian Huffman *) diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/Sprod.thy --- a/src/HOLCF/Sprod.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/Sprod.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,8 +1,5 @@ (* Title: HOLCF/Sprod.thy - ID: $Id$ Author: Franz Regensburger and Brian Huffman - -Strict product with typedef. *) header {* The type of strict products *} diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/Ssum.thy --- a/src/HOLCF/Ssum.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/Ssum.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,8 +1,5 @@ (* Title: HOLCF/Ssum.thy - ID: $Id$ Author: Franz Regensburger and Brian Huffman - -Strict sum with typedef. *) header {* The type of strict sums *} diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/Tr.thy --- a/src/HOLCF/Tr.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/Tr.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,8 +1,5 @@ (* Title: HOLCF/Tr.thy - ID: $Id$ Author: Franz Regensburger - -Introduce infix if_then_else_fi and boolean connectives andalso, orelse. *) header {* The type of lifted booleans *} diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/Up.thy --- a/src/HOLCF/Up.thy Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/Up.thy Tue Dec 30 11:10:01 2008 +0100 @@ -1,8 +1,5 @@ (* Title: HOLCF/Up.thy - ID: $Id$ Author: Franz Regensburger and Brian Huffman - -Lifting. *) header {* The type of lifted values *} diff -r 8f84a608883d -r ea97aa6aeba2 src/HOLCF/document/root.tex --- a/src/HOLCF/document/root.tex Tue Dec 30 08:18:54 2008 +0100 +++ b/src/HOLCF/document/root.tex Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,5 @@ -% $Id$ +% HOLCF/document/root.tex \documentclass[11pt,a4paper]{article} \usepackage{graphicx,isabelle,isabellesym,latexsym} @@ -21,7 +21,7 @@ \tableofcontents \begin{center} - \includegraphics[scale=0.7]{session_graph} + \includegraphics[scale=0.5]{session_graph} \end{center} \newpage diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/Concurrent/ROOT.ML --- a/src/Pure/Concurrent/ROOT.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/Concurrent/ROOT.ML Tue Dec 30 11:10:01 2008 +0100 @@ -1,15 +1,12 @@ (* Title: Pure/Concurrent/ROOT.ML - ID: $Id$ + Author: Makarius Concurrency within the ML runtime. 
*) -val future_scheduler = ref true; - use "simple_thread.ML"; use "synchronized.ML"; use "mailbox.ML"; -use "schedule.ML"; use "task_queue.ML"; use "future.ML"; use "par_list.ML"; diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/Concurrent/future.ML --- a/src/Pure/Concurrent/future.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/Concurrent/future.ML Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ (* Title: Pure/Concurrent/future.ML - ID: $Id$ Author: Makarius Future values. @@ -28,8 +27,8 @@ signature FUTURE = sig val enabled: unit -> bool - type task = TaskQueue.task - type group = TaskQueue.group + type task = Task_Queue.task + type group = Task_Queue.group val thread_data: unit -> (string * task) option type 'a future val task_of: 'a future -> task @@ -40,12 +39,11 @@ val fork: (unit -> 'a) -> 'a future val fork_group: group -> (unit -> 'a) -> 'a future val fork_deps: 'b future list -> (unit -> 'a) -> 'a future - val fork_background: (unit -> 'a) -> 'a future + val fork_pri: int -> (unit -> 'a) -> 'a future val join_results: 'a future list -> 'a Exn.result list val join_result: 'a future -> 'a Exn.result val join: 'a future -> 'a val map: ('a -> 'b) -> 'a future -> 'b future - val focus: task list -> unit val interrupt_task: string -> unit val cancel: 'a future -> unit val shutdown: unit -> unit @@ -57,14 +55,14 @@ (** future values **) fun enabled () = - ! future_scheduler andalso Multithreading.enabled () andalso + Multithreading.enabled () andalso not (Multithreading.self_critical ()); (* identifiers *) -type task = TaskQueue.task; -type group = TaskQueue.group; +type task = Task_Queue.task; +type group = Task_Queue.group; local val tag = Universal.tag () : (string * task) option Universal.tag in fun thread_data () = the_default NONE (Thread.getLocal tag); @@ -86,8 +84,8 @@ fun is_finished x = is_some (peek x); fun value x = Future - {task = TaskQueue.new_task (), - group = TaskQueue.new_group (), + {task = Task_Queue.new_task 0, + group = Task_Queue.new_group (), result = ref (SOME (Exn.Result x))}; @@ -96,12 +94,12 @@ (* global state *) -val queue = ref TaskQueue.empty; +val queue = ref Task_Queue.empty; val next = ref 0; val workers = ref ([]: (Thread.thread * bool) list); val scheduler = ref (NONE: Thread.thread option); val excessive = ref 0; -val canceled = ref ([]: TaskQueue.group list); +val canceled = ref ([]: Task_Queue.group list); val do_shutdown = ref false; @@ -114,15 +112,11 @@ fun SYNCHRONIZED name = SimpleThread.synchronized name lock; -fun wait name = (*requires SYNCHRONIZED*) - (Multithreading.tracing 3 (fn () => name ^ ": wait ..."); +fun wait () = (*requires SYNCHRONIZED*) ConditionVar.wait (cond, lock); - Multithreading.tracing 3 (fn () => name ^ ": ... continue")); -fun wait_timeout name timeout = (*requires SYNCHRONIZED*) - (Multithreading.tracing 3 (fn () => name ^ ": wait ..."); +fun wait_timeout timeout = (*requires SYNCHRONIZED*) ConditionVar.waitUntil (cond, lock, Time.+ (Time.now (), timeout)); - Multithreading.tracing 3 (fn () => name ^ ": ... continue")); fun notify_all () = (*requires SYNCHRONIZED*) ConditionVar.broadcast cond; @@ -150,9 +144,9 @@ val _ = trace_active (); val ok = setmp_thread_data (name, task) run (); val _ = SYNCHRONIZED "execute" (fn () => - (change queue (TaskQueue.finish task); + (change queue (Task_Queue.finish task); if ok then () - else if TaskQueue.cancel (! queue) group then () + else if Task_Queue.cancel (! 
queue) group then () else change canceled (cons group); notify_all ())); in () end; @@ -160,23 +154,23 @@ (* worker threads *) -fun worker_wait name = (*requires SYNCHRONIZED*) - (change_active false; wait name; change_active true); +fun worker_wait () = (*requires SYNCHRONIZED*) + (change_active false; wait (); change_active true); -fun worker_next name = (*requires SYNCHRONIZED*) +fun worker_next () = (*requires SYNCHRONIZED*) if ! excessive > 0 then (dec excessive; change workers (filter_out (fn (thread, _) => Thread.equal (thread, Thread.self ()))); notify_all (); NONE) else - (case change_result queue TaskQueue.dequeue of - NONE => (worker_wait name; worker_next name) + (case change_result queue Task_Queue.dequeue of + NONE => (worker_wait (); worker_next ()) | some => some); fun worker_loop name = - (case SYNCHRONIZED name (fn () => worker_next name) of - NONE => Multithreading.tracing 3 (fn () => name ^ ": exit") + (case SYNCHRONIZED name worker_next of + NONE => () | SOME work => (execute name work; worker_loop name)); fun worker_start name = (*requires SYNCHRONIZED*) @@ -204,27 +198,25 @@ else (); (*canceled groups*) - val _ = change canceled (filter_out (TaskQueue.cancel (! queue))); + val _ = change canceled (filter_out (Task_Queue.cancel (! queue))); (*shutdown*) val continue = not (! do_shutdown andalso null (! workers)); val _ = if continue then () else scheduler := NONE; val _ = notify_all (); - val _ = wait_timeout "scheduler" (Time.fromSeconds 3); + val _ = wait_timeout (Time.fromSeconds 3); in continue end; fun scheduler_loop () = - (while SYNCHRONIZED "scheduler" scheduler_next do (); - Multithreading.tracing 3 (fn () => "scheduler: exit")); + while SYNCHRONIZED "scheduler" scheduler_next do (); fun scheduler_active () = (*requires SYNCHRONIZED*) (case ! scheduler of NONE => false | SOME thread => Thread.isActive thread); fun scheduler_check name = SYNCHRONIZED name (fn () => if not (scheduler_active ()) then - (Multithreading.tracing 3 (fn () => "scheduler: fork"); - do_shutdown := false; scheduler := SOME (SimpleThread.fork false scheduler_loop)) + (do_shutdown := false; scheduler := SOME (SimpleThread.fork false scheduler_loop)) else if ! 
do_shutdown then error "Scheduler shutdown in progress" else ()); @@ -235,7 +227,7 @@ let val _ = scheduler_check "future check"; - val group = (case opt_group of SOME group => group | NONE => TaskQueue.new_group ()); + val group = (case opt_group of SOME group => group | NONE => Task_Queue.new_group ()); val result = ref (NONE: 'a Exn.result option); val run = Multithreading.with_attributes (Thread.getAttributes ()) @@ -246,18 +238,18 @@ val res_ok = (case res of Exn.Result _ => true - | Exn.Exn Exn.Interrupt => (TaskQueue.invalidate_group group; true) + | Exn.Exn Exn.Interrupt => (Task_Queue.invalidate_group group; true) | _ => false); in res_ok end); val task = SYNCHRONIZED "future" (fn () => - change_result queue (TaskQueue.enqueue group deps pri run) before notify_all ()); + change_result queue (Task_Queue.enqueue group deps pri run) before notify_all ()); in Future {task = task, group = group, result = result} end; -fun fork e = future NONE [] true e; -fun fork_group group e = future (SOME group) [] true e; -fun fork_deps deps e = future NONE (map task_of deps) true e; -fun fork_background e = future NONE [] false e; +fun fork e = future NONE [] 0 e; +fun fork_group group e = future (SOME group) [] 0 e; +fun fork_deps deps e = future NONE (map task_of deps) 0 e; +fun fork_pri pri e = future NONE [] pri e; (* join: retrieve results *) @@ -273,7 +265,7 @@ fun join_loop _ [] = () | join_loop name tasks = (case SYNCHRONIZED name (fn () => - change_result queue (TaskQueue.dequeue_towards tasks)) of + change_result queue (Task_Queue.dequeue_towards tasks)) of NONE => () | SOME (work, tasks') => (execute name work; join_loop name tasks')); val _ = @@ -281,18 +273,18 @@ NONE => (*alien thread -- refrain from contending for resources*) while exists (not o is_finished) xs - do SYNCHRONIZED "join_thread" (fn () => wait "join_thread") + do SYNCHRONIZED "join_thread" (fn () => wait ()) | SOME (name, task) => (*proper task -- actively work towards results*) let val unfinished = xs |> map_filter (fn Future {task, result = ref NONE, ...} => SOME task | _ => NONE); val _ = SYNCHRONIZED "join" (fn () => - (change queue (TaskQueue.depend unfinished task); notify_all ())); + (change queue (Task_Queue.depend unfinished task); notify_all ())); val _ = join_loop ("join_loop: " ^ name) unfinished; val _ = while exists (not o is_finished) xs - do SYNCHRONIZED "join_task" (fn () => worker_wait "join_task"); + do SYNCHRONIZED "join_task" (fn () => worker_wait ()); in () end); in xs |> map (fn Future {result = ref (SOME res), ...} => res) end) (); @@ -300,18 +292,16 @@ fun join_result x = singleton join_results x; fun join x = Exn.release (join_result x); -fun map f x = fork_deps [x] (fn () => f (join x)); +fun map f x = + let val task = task_of x + in future NONE [task] (Task_Queue.pri_of_task task) (fn () => f (join x)) end; (* misc operations *) -(*focus: collection of high-priority task*) -fun focus tasks = SYNCHRONIZED "focus" (fn () => - change queue (TaskQueue.focus tasks)); - (*interrupt: permissive signal, may get ignored*) fun interrupt_task id = SYNCHRONIZED "interrupt" - (fn () => TaskQueue.interrupt_external (! queue) id); + (fn () => Task_Queue.interrupt_external (! queue) id); (*cancel: present and future group members will be interrupted eventually*) fun cancel x = @@ -324,12 +314,12 @@ if Multithreading.available then (scheduler_check "shutdown check"; SYNCHRONIZED "shutdown" (fn () => - (while not (scheduler_active ()) do wait "shutdown: scheduler inactive"; - while not (TaskQueue.is_empty (! 
queue)) do wait "shutdown: join"; + (while not (scheduler_active ()) do wait (); + while not (Task_Queue.is_empty (! queue)) do wait (); do_shutdown := true; notify_all (); - while not (null (! workers)) do wait "shutdown: workers"; - while scheduler_active () do wait "shutdown: scheduler still active"; + while not (null (! workers)) do wait (); + while scheduler_active () do wait (); OS.Process.sleep (Time.fromMilliseconds 300)))) else (); diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/Concurrent/par_list.ML --- a/src/Pure/Concurrent/par_list.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/Concurrent/par_list.ML Tue Dec 30 11:10:01 2008 +0100 @@ -30,7 +30,7 @@ fun raw_map f xs = if Future.enabled () then let - val group = TaskQueue.new_group (); + val group = Task_Queue.new_group (); val futures = map (fn x => Future.fork_group group (fn () => f x)) xs; val _ = List.app (ignore o Future.join_result) futures; in Future.join_results futures end diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/Concurrent/schedule.ML --- a/src/Pure/Concurrent/schedule.ML Tue Dec 30 08:18:54 2008 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,85 +0,0 @@ -(* Title: Pure/Concurrent/schedule.ML - ID: $Id$ - Author: Makarius - -Scheduling -- multiple threads working on a queue of tasks. -*) - -signature SCHEDULE = -sig - datatype 'a task = - Task of {body: unit -> unit, cont: 'a -> 'a, fail: 'a -> 'a} | Wait | Terminate; - val schedule: int -> ('a -> 'a task * 'a) -> 'a -> exn list -end; - -structure Schedule: SCHEDULE = -struct - -datatype 'a task = - Task of {body: unit -> unit, cont: 'a -> 'a, fail: 'a -> 'a} | Wait | Terminate; - -fun schedule n next_task = uninterruptible (fn restore_attributes => fn tasks => - let - (*synchronized execution*) - val lock = Mutex.mutex (); - fun SYNCHRONIZED e = - let - val _ = Mutex.lock lock; - val res = Exn.capture e (); - val _ = Mutex.unlock lock; - in Exn.release res end; - - (*wakeup condition*) - val wakeup = ConditionVar.conditionVar (); - fun wakeup_all () = ConditionVar.broadcast wakeup; - fun wait () = ConditionVar.wait (wakeup, lock); - fun wait_timeout () = - ConditionVar.waitUntil (wakeup, lock, Time.+ (Time.now (), Time.fromSeconds 1)); - - (*queue of tasks*) - val queue = ref tasks; - val active = ref 0; - fun trace_active () = Multithreading.tracing 1 (fn () => - "SCHEDULE: " ^ string_of_int (! active) ^ " active"); - fun dequeue () = - (case change_result queue next_task of - Wait => - (dec active; trace_active (); - wait (); - inc active; trace_active (); - dequeue ()) - | next => next); - - (*pool of running threads*) - val status = ref ([]: exn list); - val running = ref ([]: Thread.thread list); - fun start f = (inc active; change running (cons (SimpleThread.fork false f))); - fun stop () = (dec active; change running (remove Thread.equal (Thread.self ()))); - - (*worker thread*) - fun worker () = - (case SYNCHRONIZED dequeue of - Task {body, cont, fail} => - (case Exn.capture (restore_attributes body) () of - Exn.Result () => - (SYNCHRONIZED (fn () => (change queue cont; wakeup_all ())); worker ()) - | Exn.Exn exn => - SYNCHRONIZED (fn () => - (change status (cons exn); change queue fail; stop (); wakeup_all ()))) - | Terminate => SYNCHRONIZED (fn () => (stop (); wakeup_all ()))); - - (*main control: fork and wait*) - fun fork 0 = () - | fork k = (start worker; fork (k - 1)); - val _ = SYNCHRONIZED (fn () => - (fork (Int.max (n, 1)); - while not (null (! running)) do - (trace_active (); - if not (null (! 
status)) - then (List.app SimpleThread.interrupt (! running)) - else (); - wait_timeout ()))); - - in ! status end); - -end; diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/Concurrent/task_queue.ML --- a/src/Pure/Concurrent/task_queue.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/Concurrent/task_queue.ML Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ (* Title: Pure/Concurrent/task_queue.ML - ID: $Id$ Author: Makarius Ordered queue of grouped tasks. @@ -8,7 +7,8 @@ signature TASK_QUEUE = sig eqtype task - val new_task: unit -> task + val new_task: int -> task + val pri_of_task: task -> int val str_of_task: task -> string eqtype group val new_group: unit -> group @@ -17,9 +17,8 @@ type queue val empty: queue val is_empty: queue -> bool - val enqueue: group -> task list -> bool -> (bool -> bool) -> queue -> task * queue + val enqueue: group -> task list -> int -> (bool -> bool) -> queue -> task * queue val depend: task list -> task -> queue -> queue - val focus: task list -> queue -> queue val dequeue: queue -> (task * group * (unit -> bool)) option * queue val dequeue_towards: task list -> queue -> (((task * group * (unit -> bool)) * task list) option * queue) @@ -29,20 +28,27 @@ val cancel: queue -> group -> bool end; -structure TaskQueue: TASK_QUEUE = +structure Task_Queue: TASK_QUEUE = struct -(* identifiers *) +(* tasks *) -datatype task = Task of serial; -fun new_task () = Task (serial ()); +datatype task = Task of int * serial; +fun new_task pri = Task (pri, serial ()); -fun str_of_task (Task i) = string_of_int i; +fun pri_of_task (Task (pri, _)) = pri; +fun str_of_task (Task (_, i)) = string_of_int i; +fun task_ord (Task t1, Task t2) = prod_ord (rev_order o int_ord) int_ord (t1, t2); +structure Task_Graph = GraphFun(type key = task val ord = task_ord); + + +(* groups *) datatype group = Group of serial * bool ref; fun new_group () = Group (serial (), ref true); + fun invalidate_group (Group (_, ok)) = ok := false; fun str_of_group (Group (i, ref ok)) = @@ -52,50 +58,46 @@ (* jobs *) datatype job = - Job of bool * (bool -> bool) | (*priority, job: status -> status*) + Job of bool -> bool | Running of Thread.thread; -type jobs = (group * job) IntGraph.T; +type jobs = (group * job) Task_Graph.T; -fun get_group (jobs: jobs) (Task id) = #1 (IntGraph.get_node jobs id); -fun get_job (jobs: jobs) (Task id) = #2 (IntGraph.get_node jobs id); -fun map_job (Task id) f (jobs: jobs) = IntGraph.map_node id (apsnd f) jobs; +fun get_group (jobs: jobs) task = #1 (Task_Graph.get_node jobs task); +fun get_job (jobs: jobs) task = #2 (Task_Graph.get_node jobs task); +fun map_job task f (jobs: jobs) = Task_Graph.map_node task (apsnd f) jobs; -fun add_job (Task id) (Task dep) (jobs: jobs) = - IntGraph.add_edge_acyclic (dep, id) jobs handle IntGraph.UNDEF _ => jobs; +fun add_job task dep (jobs: jobs) = + Task_Graph.add_edge (dep, task) jobs handle Task_Graph.UNDEF _ => jobs; -fun check_job (jobs: jobs) (task as Task id) = - if can (IntGraph.get_node jobs) id then SOME task else NONE; +fun add_job_acyclic task dep (jobs: jobs) = + Task_Graph.add_edge_acyclic (dep, task) jobs handle Task_Graph.UNDEF _ => jobs; (* queue of grouped jobs *) datatype queue = Queue of {groups: task list Inttab.table, (*groups with presently active members*) - jobs: jobs, (*job dependency graph*) - focus: task list}; (*particular collection of high-priority tasks*) + jobs: jobs}; (*job dependency graph*) -fun make_queue groups jobs focus = Queue {groups = groups, jobs = jobs, focus = focus}; +fun make_queue groups jobs = Queue 
{groups = groups, jobs = jobs}; -val empty = make_queue Inttab.empty IntGraph.empty []; -fun is_empty (Queue {jobs, ...}) = IntGraph.is_empty jobs; +val empty = make_queue Inttab.empty Task_Graph.empty; +fun is_empty (Queue {jobs, ...}) = Task_Graph.is_empty jobs; (* enqueue *) -fun enqueue (group as Group (gid, _)) deps pri job (Queue {groups, jobs, focus}) = +fun enqueue (group as Group (gid, _)) deps pri job (Queue {groups, jobs}) = let - val task as Task id = new_task (); + val task = new_task pri; val groups' = Inttab.cons_list (gid, task) groups; val jobs' = jobs - |> IntGraph.new_node (id, (group, Job (pri, job))) |> fold (add_job task) deps; - in (task, make_queue groups' jobs' focus) end; + |> Task_Graph.new_node (task, (group, Job job)) |> fold (add_job task) deps; + in (task, make_queue groups' jobs') end; -fun depend deps task (Queue {groups, jobs, focus}) = - make_queue groups (fold (add_job task) deps jobs) focus; - -fun focus tasks (Queue {groups, jobs, ...}) = - make_queue groups jobs (map_filter (check_job jobs) tasks); +fun depend deps task (Queue {groups, jobs}) = + make_queue groups (fold (add_job_acyclic task) deps jobs); (* dequeue *) @@ -103,38 +105,30 @@ local fun dequeue_result NONE queue = (NONE, queue) - | dequeue_result (SOME (result as (task, _, _))) (Queue {groups, jobs, focus}) = - (SOME result, make_queue groups (map_job task (K (Running (Thread.self ()))) jobs) focus); - -fun dequeue_global req_pri (queue as Queue {jobs, ...}) = - let - fun ready (id, ((group as Group (_, ref ok), Job (pri, job)), ([], _))) = - if pri = req_pri then SOME (Task id, group, (fn () => job ok)) else NONE - | ready _ = NONE; - in dequeue_result (IntGraph.get_first ready jobs) queue end; - -fun dequeue_local focus (queue as Queue {jobs, ...}) = - let - fun ready id = - (case IntGraph.get_node jobs id of - (group as Group (_, ref ok), Job (_, job)) => - if null (IntGraph.imm_preds jobs id) then SOME (Task id, group, (fn () => job ok)) - else NONE - | _ => NONE); - val ids = map (fn Task id => id) focus; - in dequeue_result (get_first ready (IntGraph.all_preds jobs ids)) queue end; + | dequeue_result (SOME (result as (task, _, _))) (Queue {groups, jobs}) = + (SOME result, make_queue groups (map_job task (K (Running (Thread.self ()))) jobs)); in -fun dequeue (queue as Queue {focus, ...}) = - (case dequeue_local focus queue of - (NONE, _) => - (case dequeue_global true queue of (NONE, _) => dequeue_global false queue | res => res) - | res => res); +fun dequeue (queue as Queue {jobs, ...}) = + let + fun ready (task, ((group as Group (_, ref ok), Job job), ([], _))) = + SOME (task, group, (fn () => job ok)) + | ready _ = NONE; + in dequeue_result (Task_Graph.get_first ready jobs) queue end; fun dequeue_towards tasks (queue as Queue {jobs, ...}) = - let val tasks' = map_filter (check_job jobs) tasks in - (case dequeue_local tasks' queue of + let + val tasks' = filter (can (Task_Graph.get_node jobs)) tasks; + + fun ready task = + (case Task_Graph.get_node jobs task of + (group as Group (_, ref ok), Job job) => + if null (Task_Graph.imm_preds jobs task) then SOME (task, group, (fn () => job ok)) + else NONE + | _ => NONE); + in + (case dequeue_result (get_first ready (Task_Graph.all_preds jobs tasks')) queue of (NONE, queue') => (NONE, queue') | (SOME work, queue') => (SOME (work, tasks'), queue')) end; @@ -147,8 +141,13 @@ fun interrupt (Queue {jobs, ...}) task = (case try (get_job jobs) task of SOME (Running thread) => SimpleThread.interrupt thread | _ => ()); -fun interrupt_external 
queue str = - (case Int.fromString str of SOME id => interrupt queue (Task id) | NONE => ()); +fun interrupt_external (queue as Queue {jobs, ...}) str = + (case Int.fromString str of + SOME i => + (case Task_Graph.get_first + (fn (task as Task (_, j), _) => if i = j then SOME task else NONE) jobs + of SOME task => interrupt queue task | NONE => ()) + | NONE => ()); (* misc operations *) @@ -161,12 +160,11 @@ val _ = List.app SimpleThread.interrupt running; in null running end; -fun finish (task as Task id) (Queue {groups, jobs, focus}) = +fun finish task (Queue {groups, jobs}) = let val Group (gid, _) = get_group jobs task; val groups' = Inttab.remove_list (op =) (gid, task) groups; - val jobs' = IntGraph.del_node id jobs; - val focus' = remove (op =) task focus; - in make_queue groups' jobs' focus' end; + val jobs' = Task_Graph.del_node task jobs; + in make_queue groups' jobs' end; end; diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/General/event_bus.scala --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/Pure/General/event_bus.scala Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,38 @@ +/* Title: Pure/General/event_bus.scala + Author: Makarius + +Generic event bus with multiple handlers and optional exception +logging. +*/ + +package isabelle + +import scala.collection.mutable.ListBuffer + + +class EventBus[Event] +{ + /* event handlers */ + + type Handler = Event => Unit + private val handlers = new ListBuffer[Handler] + + def += (h: Handler) = synchronized { handlers += h } + def + (h: Handler) = { this += h; this } + + def -= (h: Handler) = synchronized { handlers -= h } + def - (h: Handler) = { this -= h; this } + + + /* event invocation */ + + var logger: Throwable => Unit = throw _ + + def event(x: Event) = { + val log = logger + for (h <- synchronized { handlers.toList }) { + try { h(x) } + catch { case e: Throwable => log(e) } + } + } +} diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/General/markup.scala --- a/src/Pure/General/markup.scala Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/General/markup.scala Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ /* Title: Pure/General/markup.scala - ID: $Id$ Author: Makarius Common markup elements. 
@@ -9,6 +8,12 @@ object Markup { + /* name */ + + val NAME = "name" + val KIND = "kind" + + /* position */ val LINE = "line" @@ -20,12 +25,105 @@ val FILE = "file" val ID = "id" + val POSITION_PROPERTIES = Set(LINE, COLUMN, OFFSET, END_LINE, END_COLUMN, END_OFFSET, FILE, ID) + + val POSITION = "position" + val LOCATION = "location" + + + /* logical entities */ + + val TCLASS = "tclass" + val TYCON = "tycon" + val FIXED_DECL = "fixed_decl" + val FIXED = "fixed" + val CONST_DECL = "const_decl" + val CONST = "const" + val FACT_DECL = "fact_decl" + val FACT = "fact" + val DYNAMIC_FACT = "dynamic_fact" + val LOCAL_FACT_DECL = "local_fact_decl" + val LOCAL_FACT = "local_fact" + + + /* inner syntax */ + + val TFREE = "tfree" + val TVAR = "tvar" + val FREE = "free" + val SKOLEM = "skolem" + val BOUND = "bound" + val VAR = "var" + val NUM = "num" + val FLOAT = "float" + val XNUM = "xnum" + val XSTR = "xstr" + val LITERAL = "literal" + val INNER_COMMENT = "inner_comment" + + val SORT = "sort" + val TYP = "typ" + val TERM = "term" + val PROP = "prop" + + val ATTRIBUTE = "attribute" + val METHOD = "method" + + + /* embedded source text */ + + val ML_SOURCE = "ML_source" + val DOC_SOURCE = "doc_source" + + val ANTIQ = "antiq" + val ML_ANTIQ = "ML_antiq" + val DOC_ANTIQ = "doc_antiq" + + + /* outer syntax */ + + val KEYWORD_DECL = "keyword_decl" + val COMMAND_DECL = "command_decl" + + val KEYWORD = "keyword" + val COMMAND = "command" + val IDENT = "ident" + val STRING = "string" + val ALTSTRING = "altstring" + val VERBATIM = "verbatim" + val COMMENT = "comment" + val CONTROL = "control" + val MALFORMED = "malformed" + + val COMMAND_SPAN = "command_span" + val IGNORED_SPAN = "ignored_span" + val MALFORMED_SPAN = "malformed_span" + + + /* toplevel */ + + val STATE = "state" + val SUBGOAL = "subgoal" + val SENDBACK = "sendback" + val HILITE = "hilite" + + + /* command status */ + + val UNPROCESSED = "unprocessed" + val RUNNING = "running" + val FAILED = "failed" + val FINISHED = "finished" + val DISPOSED = "disposed" + /* messages */ val PID = "pid" val SESSION = "session" + val MESSAGE = "message" + /* content */ diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/General/position.scala --- a/src/Pure/General/position.scala Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/General/position.scala Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ /* Title: Pure/General/position.scala - ID: $Id$ Author: Makarius Position properties. diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/General/swing.scala --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/Pure/General/swing.scala Tue Dec 30 11:10:01 2008 +0100 @@ -0,0 +1,18 @@ +/* Title: Pure/General/swing.scala + Author: Makarius + +Swing utilities. +*/ + +package isabelle + +import javax.swing.SwingUtilities + +object Swing +{ + def now(body: => Unit) = + SwingUtilities.invokeAndWait(new Runnable { def run = body }) + + def later(body: => Unit) = + SwingUtilities.invokeLater(new Runnable { def run = body }) +} diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/General/symbol.scala --- a/src/Pure/General/symbol.scala Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/General/symbol.scala Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ /* Title: Pure/General/symbol.scala - ID: $Id$ Author: Makarius Detecting and recoding Isabelle symbols. 
@@ -79,7 +78,7 @@ /** Symbol interpretation **/ - class Interpretation { + class Interpretation(isabelle_system: IsabelleSystem) { private var symbols = new HashMap[String, HashMap[String, String]] private var decoder: Recoder = null @@ -126,7 +125,7 @@ } private def read_symbols(path: String) = { - val file = new File(IsabelleSystem.platform_path(path)) + val file = new File(isabelle_system.platform_path(path)) if (file.canRead) { for (line <- Source.fromFile(file).getLines) read_line(line) } diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/General/xml.scala --- a/src/Pure/General/xml.scala Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/General/xml.scala Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ /* Title: Pure/General/xml.scala - ID: $Id$ Author: Makarius Simple XML tree values. @@ -11,16 +10,56 @@ import javax.xml.parsers.DocumentBuilderFactory -object XML { +object XML +{ /* datatype representation */ type Attributes = List[(String, String)] - abstract class Tree + abstract class Tree { + override def toString = { + val s = new StringBuilder + append_tree(this, s) + s.toString + } + } case class Elem(name: String, attributes: Attributes, body: List[Tree]) extends Tree case class Text(content: String) extends Tree + + /* string representation */ + + private def append_text(text: String, s: StringBuilder) { + for (c <- text.elements) c match { + case '<' => s.append("&lt;") + case '>' => s.append("&gt;") + case '&' => s.append("&amp;") + case '"' => s.append("&quot;") + case '\'' => s.append("&apos;") + case _ => s.append(c) + } + } + + private def append_elem(name: String, atts: Attributes, s: StringBuilder) { + s.append(name) + for ((a, x) <- atts) { + s.append(" "); s.append(a); s.append("=\""); append_text(x, s); s.append("\"") + } + } + + private def append_tree(tree: Tree, s: StringBuilder) { + tree match { + case Elem(name, atts, Nil) => + s.append("<"); append_elem(name, atts, s); s.append("/>") + case Elem(name, atts, ts) => + s.append("<"); append_elem(name, atts, s); s.append(">") + for (t <- ts) append_tree(t, s) + s.append("</"); s.append(name); s.append(">") + case Text(text) => append_text(text, s) + } + } + + /* iterate over content */ private type State = Option[(String, List[Tree])] diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/General/yxml.scala --- a/src/Pure/General/yxml.scala Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/General/yxml.scala Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ /* Title: Pure/General/yxml.scala - ID: $Id$ Author: Makarius Efficient text representation of XML trees. 
@@ -70,7 +69,7 @@ var stack: List[((String, XML.Attributes), List[XML.Tree])] = null - def add(x: XML.Tree) = stack match { + def add(x: XML.Tree) = (stack: @unchecked) match { case ((elem, body) :: pending) => stack = (elem, x :: body) :: pending } @@ -78,7 +77,7 @@ if (name == "") err_element() else stack = ((name, atts), Nil) :: stack - def pop() = stack match { + def pop() = (stack: @unchecked) match { case ((("", _), _) :: _) => err_unbalanced("") case (((name, atts), body) :: pending) => stack = pending; add(XML.Elem(name, atts, body.reverse)) diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/IsaMakefile --- a/src/Pure/IsaMakefile Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/IsaMakefile Tue Dec 30 11:10:01 2008 +0100 @@ -23,27 +23,24 @@ $(OUT)/Pure: Concurrent/ROOT.ML Concurrent/future.ML \ Concurrent/mailbox.ML Concurrent/par_list.ML \ - Concurrent/par_list_dummy.ML Concurrent/schedule.ML \ - Concurrent/simple_thread.ML Concurrent/synchronized.ML \ - Concurrent/task_queue.ML General/ROOT.ML General/alist.ML \ - General/balanced_tree.ML General/basics.ML General/binding.ML \ - General/buffer.ML \ - General/file.ML General/graph.ML General/heap.ML General/integer.ML \ - General/lazy.ML General/markup.ML General/name_space.ML \ - General/ord_list.ML General/output.ML General/path.ML \ - General/position.ML General/pretty.ML General/print_mode.ML \ - General/properties.ML General/queue.ML General/scan.ML \ - General/secure.ML General/seq.ML General/source.ML General/stack.ML \ - General/symbol.ML General/symbol_pos.ML General/table.ML \ - General/url.ML General/xml.ML General/yxml.ML Isar/ROOT.ML \ - Isar/antiquote.ML Isar/args.ML Isar/attrib.ML Isar/auto_bind.ML \ - Isar/calculation.ML Isar/class.ML Isar/code.ML Isar/code_unit.ML \ - Isar/constdefs.ML Isar/context_rules.ML Isar/element.ML \ - Isar/expression.ML \ - Isar/find_theorems.ML Isar/instance.ML Isar/isar.ML Isar/isar_cmd.ML \ - Isar/isar_syn.ML Isar/local_defs.ML Isar/local_syntax.ML \ - Isar/local_theory.ML Isar/locale.ML Isar/method.ML Isar/net_rules.ML \ - Isar/new_locale.ML \ + Concurrent/par_list_dummy.ML Concurrent/simple_thread.ML \ + Concurrent/synchronized.ML Concurrent/task_queue.ML General/ROOT.ML \ + General/alist.ML General/balanced_tree.ML General/basics.ML \ + General/buffer.ML General/file.ML General/graph.ML General/heap.ML \ + General/integer.ML General/lazy.ML General/markup.ML \ + General/name_space.ML General/ord_list.ML General/output.ML \ + General/path.ML General/position.ML General/pretty.ML \ + General/print_mode.ML General/properties.ML General/queue.ML \ + General/scan.ML General/secure.ML General/seq.ML General/source.ML \ + General/stack.ML General/symbol.ML General/symbol_pos.ML \ + General/table.ML General/url.ML General/xml.ML General/yxml.ML \ + Isar/ROOT.ML Isar/antiquote.ML Isar/args.ML Isar/attrib.ML \ + Isar/auto_bind.ML Isar/calculation.ML Isar/class.ML Isar/code.ML \ + Isar/code_unit.ML Isar/constdefs.ML Isar/context_rules.ML \ + Isar/element.ML Isar/expression.ML Isar/find_theorems.ML \ + Isar/instance.ML Isar/isar.ML Isar/isar_cmd.ML Isar/isar_syn.ML \ + Isar/local_defs.ML Isar/local_syntax.ML Isar/local_theory.ML \ + Isar/locale.ML Isar/method.ML Isar/net_rules.ML Isar/new_locale.ML \ Isar/object_logic.ML Isar/obtain.ML Isar/outer_keyword.ML \ Isar/outer_lex.ML Isar/outer_parse.ML Isar/outer_syntax.ML \ Isar/overloading.ML Isar/proof.ML Isar/proof_context.ML \ @@ -77,17 +74,16 @@ Syntax/syn_trans.ML Syntax/syntax.ML Syntax/type_ext.ML Thy/html.ML \ Thy/latex.ML Thy/present.ML 
Thy/term_style.ML Thy/thm_deps.ML \ Thy/thy_edit.ML Thy/thy_header.ML Thy/thy_info.ML Thy/thy_load.ML \ - Thy/thy_output.ML Tools/ROOT.ML Tools/invoke.ML \ - Tools/isabelle_process.ML Tools/named_thms.ML \ - Tools/xml_syntax.ML assumption.ML axclass.ML codegen.ML config.ML \ - conjunction.ML consts.ML context.ML context_position.ML conv.ML \ - defs.ML display.ML drule.ML envir.ML facts.ML goal.ML \ - interpretation.ML library.ML logic.ML meta_simplifier.ML more_thm.ML \ - morphism.ML name.ML net.ML old_goals.ML pattern.ML primitive_defs.ML \ - proofterm.ML pure_setup.ML pure_thy.ML search.ML sign.ML \ - simplifier.ML sorts.ML subgoal.ML tactic.ML tctical.ML term.ML \ - term_subst.ML theory.ML thm.ML type.ML type_infer.ML unify.ML \ - variable.ML ../Tools/value.ML ../Tools/quickcheck.ML + Thy/thy_output.ML Tools/ROOT.ML Tools/invoke.ML \ + Tools/isabelle_process.ML Tools/named_thms.ML Tools/xml_syntax.ML \ + assumption.ML axclass.ML codegen.ML config.ML conjunction.ML \ + consts.ML context.ML context_position.ML conv.ML defs.ML display.ML \ + drule.ML envir.ML facts.ML goal.ML interpretation.ML library.ML \ + logic.ML meta_simplifier.ML more_thm.ML morphism.ML name.ML net.ML \ + old_goals.ML pattern.ML primitive_defs.ML proofterm.ML pure_setup.ML \ + pure_thy.ML search.ML sign.ML simplifier.ML sorts.ML subgoal.ML \ + tactic.ML tctical.ML term.ML term_subst.ML theory.ML thm.ML type.ML \ + type_infer.ML unify.ML variable.ML ../Tools/quickcheck.ML @./mk @@ -125,9 +121,10 @@ ## Scala material -SCALA_FILES = General/markup.scala General/position.scala \ - General/symbol.scala General/xml.scala General/yxml.scala \ - Isar/isar.scala Thy/thy_header.scala Tools/isabelle_process.scala \ +SCALA_FILES = General/event_bus.scala General/markup.scala \ + General/position.scala General/swing.scala General/symbol.scala \ + General/xml.scala General/yxml.scala Isar/isar.scala \ + Thy/thy_header.scala Tools/isabelle_process.scala \ Tools/isabelle_syntax.scala Tools/isabelle_system.scala diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/Isar/class.ML --- a/src/Pure/Isar/class.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/Isar/class.ML Tue Dec 30 11:10:01 2008 +0100 @@ -60,6 +60,59 @@ structure Class : CLASS = struct +(*temporary adaption code to mediate between old and new locale code*) + +structure Old_Locale = +struct + +val intro_locales_tac = Locale.intro_locales_tac; (*already forked!*) + +val interpretation = Locale.interpretation; +val interpretation_in_locale = Locale.interpretation_in_locale; +val get_interpret_morph = Locale.get_interpret_morph; +val Locale = Locale.Locale; +val extern = Locale.extern; +val intros = Locale.intros; +val dests = Locale.dests; +val init = Locale.init; +val Merge = Locale.Merge; +val parameters_of_expr = Locale.parameters_of_expr; +val empty = Locale.empty; +val cert_expr = Locale.cert_expr; +val read_expr = Locale.read_expr; +val parameters_of = Locale.parameters_of; +val add_locale = Locale.add_locale; + +end; + +structure New_Locale = +struct + +val intro_locales_tac = Locale.intro_locales_tac; (*already forked!*) + +val interpretation = Locale.interpretation; (*!*) +val interpretation_in_locale = Locale.interpretation_in_locale; (*!*) +val get_interpret_morph = Locale.get_interpret_morph; (*!*) +fun Locale loc = ([(loc, ("", Expression.Positional []))], []); +val extern = NewLocale.extern; +val intros = Locale.intros; (*!*) +val dests = Locale.dests; (*!*) +val init = NewLocale.init; +fun Merge locs = (map (fn loc => (loc, ("", Expression.Positional []))) locs, []); 
+val parameters_of_expr = Locale.parameters_of_expr; (*!*) +val empty = ([], []); +val cert_expr = Locale.cert_expr; (*!"*) +val read_expr = Locale.read_expr; (*!"*) +val parameters_of = NewLocale.params_of; (*why typ option?*) +val add_locale = Expression.add_locale; + +end; + +structure Locale = Old_Locale; + +(*proper code again*) + + (** auxiliary **) fun prove_interpretation tac prfx_atts expr inst = @@ -542,7 +595,7 @@ val suplocales = map Locale.Locale sups; val supexpr = Locale.Merge suplocales; val supparams = (map fst o Locale.parameters_of_expr thy) supexpr; - val mergeexpr = Locale.Merge (suplocales); + val mergeexpr = Locale.Merge suplocales; val constrain = Element.Constrains ((map o apsnd o map_atyps) (K (TFree (Name.aT, base_sort))) supparams); fun fork_syn (Element.Fixes xs) = diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/Isar/isar.scala --- a/src/Pure/Isar/isar.scala Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/Isar/isar.scala Tue Dec 30 11:10:01 2008 +0100 @@ -1,7 +1,5 @@ /* Title: Pure/Isar/isar.scala - ID: $Id$ Author: Makarius - Options: :folding=explicit:collapseFolds=1: Isar toplevel editor model. */ @@ -11,7 +9,9 @@ import java.util.Properties -class Isar(args: String*) extends IsabelleProcess(args: _*) { +class Isar(isabelle_system: IsabelleSystem, results: EventBus[IsabelleProcess.Result], args: String*) + extends IsabelleProcess(isabelle_system, results, args: _*) +{ /* basic editor commands */ diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/Isar/local_theory.ML --- a/src/Pure/Isar/local_theory.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/Isar/local_theory.ML Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ (* Title: Pure/Isar/local_theory.ML - ID: $Id$ Author: Makarius Local theory operations, with abstract target context. diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/Isar/proof.ML --- a/src/Pure/Isar/proof.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/Isar/proof.ML Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ (* Title: Pure/Isar/proof.ML - ID: $Id$ Author: Markus Wenzel, TU Muenchen The Isar/VM proof language interpreter: maintains a structured flow of @@ -826,7 +825,7 @@ |> null props ? 
(refine (Method.Basic (Method.assumption, Position.none)) #> Seq.hd) end; -fun generic_qed state = +fun generic_qed after_ctxt state = let val (goal_ctxt, {statement, goal, after_qed, ...}) = current_goal state; val outer_state = state |> close_block; @@ -845,7 +844,7 @@ fun after_global' x y = Position.setmp_thread_data pos (fn () => after_global x y) (); in outer_state - |> map_context (ProofContext.auto_bind_facts props) + |> map_context (after_ctxt props) |> pair ((after_local', after_global'), results) end; @@ -872,7 +871,8 @@ fun local_qed txt = end_proof false txt #> - Seq.maps (generic_qed #-> (fn ((after_qed, _), results) => after_qed results)); + Seq.maps (generic_qed ProofContext.auto_bind_facts #-> + (fn ((after_qed, _), results) => after_qed results)); (* global goals *) @@ -892,7 +892,7 @@ fun global_qeds txt = end_proof true txt - #> Seq.map (generic_qed #> (fn (((_, after_qed), results), state) => + #> Seq.map (generic_qed (K I) #> (fn (((_, after_qed), results), state) => after_qed results (context_of state))) |> Seq.DETERM; (*backtracking may destroy theory!*) diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/Isar/theory_target.ML --- a/src/Pure/Isar/theory_target.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/Isar/theory_target.ML Tue Dec 30 11:10:01 2008 +0100 @@ -1,6 +1,4 @@ (* Title: Pure/Isar/theory_target.ML - ID: $Id$ - ID: $Id$ Author: Makarius Common theory/locale/class/instantiation/overloading targets. diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/Isar/toplevel.ML --- a/src/Pure/Isar/toplevel.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/Isar/toplevel.ML Tue Dec 30 11:10:01 2008 +0100 @@ -718,7 +718,7 @@ val future_proof = Proof.future_proof (fn prf => - Future.fork_background (fn () => + Future.fork_pri 1 (fn () => let val (states, State (result_node, _)) = (case st' of State (SOME (Proof (_, (_, orig_gthy)), exit), prev) => State (SOME (Proof (ProofNode.init prf, (finish, orig_gthy)), exit), prev)) diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/ROOT.ML --- a/src/Pure/ROOT.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/ROOT.ML Tue Dec 30 11:10:01 2008 +0100 @@ -87,8 +87,6 @@ cd "Tools"; use "ROOT.ML"; cd ".."; -use "../Tools/value.ML"; -use "../Tools/quickcheck.ML"; use "codegen.ML"; (*configuration for Proof General*) diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/Syntax/lexicon.ML --- a/src/Pure/Syntax/lexicon.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/Syntax/lexicon.ML Tue Dec 30 11:10:01 2008 +0100 @@ -145,8 +145,18 @@ val tidT = Type ("tid", []); val tvarT = Type ("tvar", []); -val terminals = - ["id", "longid", "var", "tid", "tvar", "num", "float", "xnum", "xstr"]; +val terminal_kinds = + [("id", IdentSy), + ("longid", LongIdentSy), + ("var", VarSy), + ("tid", TFreeSy), + ("tvar", TVarSy), + ("num", NumSy), + ("float_token", FloatSy), + ("xnum", XNumSy), + ("xstr", StrSy)]; + +val terminals = map #1 terminal_kinds; val is_terminal = member (op =) terminals; @@ -186,16 +196,10 @@ (* predef_term *) -fun predef_term "id" = SOME (Token (IdentSy, "id", Position.no_range)) - | predef_term "longid" = SOME (Token (LongIdentSy, "longid", Position.no_range)) - | predef_term "var" = SOME (Token (VarSy, "var", Position.no_range)) - | predef_term "tid" = SOME (Token (TFreeSy, "tid", Position.no_range)) - | predef_term "tvar" = SOME (Token (TVarSy, "tvar", Position.no_range)) - | predef_term "num" = SOME (Token (NumSy, "num", Position.no_range)) - | predef_term "float" = SOME (Token (FloatSy, "float", Position.no_range)) - | predef_term "xnum" = SOME (Token 
(XNumSy, "xnum", Position.no_range)) - | predef_term "xstr" = SOME (Token (StrSy, "xstr", Position.no_range)) - | predef_term _ = NONE; +fun predef_term s = + (case AList.lookup (op =) terminal_kinds s of + SOME sy => SOME (Token (sy, s, Position.no_range)) + | NONE => NONE); (* xstr tokens *) @@ -382,21 +386,27 @@ | "0" :: "b" :: cs => (1, 2, cs) | "-" :: cs => (~1, 10, cs) | cs => (1, 10, cs)); - val value = sign * #1 (Library.read_radix_int radix digs); - in {radix = radix, leading_zeros = leading_zeros digs, value = value} end; + in + {radix = radix, + leading_zeros = leading_zeros digs, + value = sign * #1 (Library.read_radix_int radix digs)} + end; end; fun read_float str = let val (sign, cs) = - (case Symbol.explode str of "-" :: cs => (~1, cs) | cs => (1, cs)); - val (intpart,fracpart) = + (case Symbol.explode str of + "-" :: cs => (~1, cs) + | cs => (1, cs)); + val (intpart, fracpart) = (case take_prefix Symbol.is_digit cs of - (intpart, "." :: fracpart) => (intpart,fracpart) - | _ => sys_error "read_float") - in {mant = sign * #1 (Library.read_int (intpart@fracpart)), - exp = length fracpart} + (intpart, "." :: fracpart) => (intpart, fracpart) + | _ => raise Fail "read_float"); + in + {mant = sign * #1 (Library.read_int (intpart @ fracpart)), + exp = length fracpart} end end; diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/Syntax/syntax.ML --- a/src/Pure/Syntax/syntax.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/Syntax/syntax.ML Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ (* Title: Pure/Syntax/syntax.ML - ID: $Id$ Author: Tobias Nipkow and Markus Wenzel, TU Muenchen Standard Isabelle syntax, based on arbitrary context-free grammars diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/Thy/thy_header.scala --- a/src/Pure/Thy/thy_header.scala Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/Thy/thy_header.scala Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ /* Title: Pure/Thy/thy_header.scala - ID: $Id$ Author: Makarius Theory header keywords. 
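[Illustration of the lexicon.ML hunk above: the nine hand-written predef_term clauses collapse into a single lookup over the new terminal_kinds table. The following self-contained Standard ML sketch shows that table-driven pattern in isolation; the kind and token datatypes are simplified stand-ins for the real lexicon types, and List.find from the Basis Library is used in place of Isabelle's AList.lookup, so this is not the actual library code.]

(* sketch only: table-driven terminal lookup in the style of lexicon.ML *)
datatype kind =
  IdentSy | LongIdentSy | VarSy | TFreeSy | TVarSy |
  NumSy | FloatSy | XNumSy | StrSy;

datatype token = Token of kind * string;

val terminal_kinds =
  [("id", IdentSy), ("longid", LongIdentSy), ("var", VarSy),
   ("tid", TFreeSy), ("tvar", TVarSy), ("num", NumSy),
   ("float_token", FloatSy), ("xnum", XNumSy), ("xstr", StrSy)];

val terminals = map #1 terminal_kinds;
fun is_terminal s = List.exists (fn t => t = s) terminals;

(* one lookup replaces the former per-terminal clauses of predef_term *)
fun predef_term s =
  (case List.find (fn (name, _) => name = s) terminal_kinds of
    SOME (_, kind) => SOME (Token (kind, s))
  | NONE => NONE);

[For example, predef_term "tid" yields SOME (Token (TFreeSy, "tid")), while predef_term "foo" yields NONE; adding a new terminal now only requires extending terminal_kinds.]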
diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/Thy/thy_info.ML --- a/src/Pure/Thy/thy_info.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/Thy/thy_info.ML Tue Dec 30 11:10:01 2008 +0100 @@ -315,7 +315,13 @@ datatype task = Task of (unit -> unit) | Finished | Running; fun task_finished Finished = true | task_finished _ = false; -fun future_schedule task_graph = +local + +fun schedule_seq tasks = + Graph.topological_order tasks + |> List.app (fn name => (case Graph.get_node tasks name of Task body => body () | _ => ())); + +fun schedule_futures task_graph = let val tasks = Graph.topological_order task_graph |> map_filter (fn name => (case Graph.get_node task_graph name of Task body => SOME (name, body) | _ => NONE)); @@ -339,45 +345,14 @@ val proof_results = PureThy.join_proofs (map_filter (try get_theory o #1) tasks); in ignore (Exn.release_all (thy_results @ proof_results)) end; -local - -fun max_task (name, (Task body, m)) NONE = SOME (name: string, (body, m)) - | max_task (name, (Task body, m)) (task' as SOME (name', (_, m'))) = - if m > m' orelse m = m' andalso name < name' then SOME (name, (body, m)) else task' - | max_task _ task' = task'; - -fun next_task G = - let - val tasks = Graph.minimals G |> map (fn name => - (name, (Graph.get_node G name, length (Graph.imm_succs G name)))); - val finished = filter (task_finished o fst o snd) tasks; - in - if not (null finished) then next_task (Graph.del_nodes (map fst finished) G) - else if null tasks then (Schedule.Terminate, G) - else - (case fold max_task tasks NONE of - NONE => (Schedule.Wait, G) - | SOME (name, (body, _)) => - (Schedule.Task {body = PrintMode.closure body, - cont = Graph.del_nodes [name], fail = K Graph.empty}, - Graph.map_node name (K Running) G)) - end; - -fun schedule_seq tasks = - Graph.topological_order tasks - |> List.app (fn name => (case Graph.get_node tasks name of Task body => body () | _ => ())); - in fun schedule_tasks tasks n = - let val m = Multithreading.max_threads_value () in - if m <= 1 then schedule_seq tasks - else if Multithreading.self_critical () then + if not (Multithreading.enabled ()) then schedule_seq tasks + else if Multithreading.self_critical () then (warning (loader_msg "no multithreading within critical section" []); schedule_seq tasks) - else if Future.enabled () then future_schedule tasks - else ignore (Exn.release_all (map Exn.Exn (Schedule.schedule (Int.min (m, n)) next_task tasks))) - end; + else schedule_futures tasks; end; diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/Tools/ROOT.ML --- a/src/Pure/Tools/ROOT.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/Tools/ROOT.ML Tue Dec 30 11:10:01 2008 +0100 @@ -11,3 +11,6 @@ (*derived theory and proof elements*) use "invoke.ML"; + +(*quickcheck needed here because of pg preferences*) +use "../../Tools/quickcheck.ML" diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/Tools/isabelle_process.scala --- a/src/Pure/Tools/isabelle_process.scala Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/Tools/isabelle_process.scala Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ /* Title: Pure/Tools/isabelle_process.ML - ID: $Id$ Author: Makarius Options: :folding=explicit:collapseFolds=1: @@ -13,8 +12,6 @@ import java.io.{BufferedReader, BufferedWriter, InputStreamReader, OutputStreamWriter, InputStream, OutputStream, IOException} -import isabelle.{Symbol, XML} - object IsabelleProcess { @@ -70,21 +67,28 @@ class Result(val kind: Kind.Value, val props: Properties, val result: String) { override def toString = { - val res = 
XML.content(YXML.parse_failsafe(result)).mkString("") + val res = XML.content(YXML.parse_failsafe(result)).mkString if (props == null) kind.toString + " [[" + res + "]]" else kind.toString + " " + props.toString + " [[" + res + "]]" } - def is_raw() = Kind.is_raw(kind) - def is_control() = Kind.is_control(kind) - def is_system() = Kind.is_system(kind) + def is_raw = Kind.is_raw(kind) + def is_control = Kind.is_control(kind) + def is_system = Kind.is_system(kind) } } -class IsabelleProcess(args: String*) { +class IsabelleProcess(isabelle_system: IsabelleSystem, + results: EventBus[IsabelleProcess.Result], args: String*) +{ + import IsabelleProcess._ - import IsabelleProcess._ + + /* demo constructor */ + + def this(args: String*) = + this(new IsabelleSystem, new EventBus[IsabelleProcess.Result] + Console.println, args: _*) /* process information */ @@ -98,21 +102,31 @@ /* results */ - private val results = new LinkedBlockingQueue[Result] + private val result_queue = new LinkedBlockingQueue[Result] private def put_result(kind: Kind.Value, props: Properties, result: String) { if (kind == Kind.INIT && props != null) { pid = props.getProperty(Markup.PID) the_session = props.getProperty(Markup.SESSION) } - results.put(new Result(kind, props, result)) + result_queue.put(new Result(kind, props, result)) } - def get_result() = results.take + private class ResultThread extends Thread("isabelle: results") { + override def run() = { + var finished = false + while (!finished) { + val result = + try { result_queue.take } + catch { case _: NullPointerException => null } - def try_result() = { - val res = results.poll - if (res != null) Some(res) else None + if (result != null) { + results.event(result) // FIXME try/catch (!??) + if (result.kind == Kind.EXIT) finished = true + } + else finished = true + } + } } @@ -123,7 +137,7 @@ if (pid == null) put_result(Kind.SYSTEM, null, "Cannot interrupt: unknown pid") else { try { - if (IsabelleSystem.exec("kill", "-INT", pid).waitFor == 0) + if (isabelle_system.execute(true, "kill", "-INT", pid).waitFor == 0) put_result(Kind.SIGNAL, null, "INT") else put_result(Kind.SYSTEM, null, "Cannot interrupt: kill command failed") @@ -186,7 +200,7 @@ private class StdinThread(out_stream: OutputStream) extends Thread("isabelle: stdin") { override def run() = { - val writer = new BufferedWriter(new OutputStreamWriter(out_stream, IsabelleSystem.charset)) + val writer = new BufferedWriter(new OutputStreamWriter(out_stream, isabelle_system.charset)) var finished = false while (!finished) { try { @@ -216,7 +230,7 @@ private class StdoutThread(in_stream: InputStream) extends Thread("isabelle: stdout") { override def run() = { - val reader = new BufferedReader(new InputStreamReader(in_stream, IsabelleSystem.charset)) + val reader = new BufferedReader(new InputStreamReader(in_stream, isabelle_system.charset)) var result = new StringBuilder(100) var finished = false @@ -254,7 +268,7 @@ private class MessageThread(fifo: String) extends Thread("isabelle: messages") { override def run() = { - val reader = IsabelleSystem.fifo_reader(fifo) + val reader = isabelle_system.fifo_reader(fifo) var kind: Kind.Value = null var props: Properties = null var result = new StringBuilder @@ -332,33 +346,36 @@ } + /** main **/ { /* isabelle version */ { - val (msg, rc) = IsabelleSystem.isabelle_tool("version") + val (msg, rc) = isabelle_system.isabelle_tool("version") if (rc != 0) error("Version check failed -- bad Isabelle installation:\n" + msg) put_result(Kind.SYSTEM, null, msg) } - /* message fifo 
*/ + /* messages */ - val message_fifo = IsabelleSystem.mk_fifo() - def rm_fifo() = IsabelleSystem.rm_fifo(message_fifo) + val message_fifo = isabelle_system.mk_fifo() + def rm_fifo() = isabelle_system.rm_fifo(message_fifo) val message_thread = new MessageThread(message_fifo) message_thread.start + new ResultThread().start + /* exec process */ try { val cmdline = - List(IsabelleSystem.getenv_strict("ISABELLE_PROCESS"), "-W", message_fifo) ++ args - proc = IsabelleSystem.exec2(cmdline: _*) + List(isabelle_system.getenv_strict("ISABELLE_PROCESS"), "-W", message_fifo) ++ args + proc = isabelle_system.execute(true, cmdline: _*) } catch { case e: IOException => @@ -386,5 +403,4 @@ }.start } - } diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/Tools/isabelle_syntax.scala --- a/src/Pure/Tools/isabelle_syntax.scala Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/Tools/isabelle_syntax.scala Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ /* Title: Pure/Tools/isabelle_syntax.scala - ID: $Id$ Author: Makarius Isabelle outer syntax. diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/Tools/isabelle_system.scala --- a/src/Pure/Tools/isabelle_system.scala Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/Tools/isabelle_system.scala Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ /* Title: Pure/Tools/isabelle_system.scala - ID: $Id$ Author: Makarius Isabelle system support -- basic Cygwin/Posix compatibility. @@ -13,24 +12,26 @@ import scala.io.Source -object IsabelleSystem { +class IsabelleSystem { val charset = "UTF-8" /* Isabelle environment settings */ + private val environment = System.getenv + def getenv(name: String) = { - val value = System.getenv(if (name == "HOME") "HOME_JVM" else name) + val value = environment.get(if (name == "HOME") "HOME_JVM" else name) if (value != null) value else "" } def getenv_strict(name: String) = { - val value = getenv(name) + val value = environment.get(name) if (value != "") value else error("Undefined environment variable: " + name) } - def is_cygwin() = Pattern.matches(".*-cygwin", getenv_strict("ML_PLATFORM")) + val is_cygwin = Pattern.matches(".*-cygwin", getenv_strict("ML_PLATFORM")) /* file path specifications */ @@ -75,17 +76,22 @@ result_path.toString } + def platform_file(path: String) = + new File(platform_path(path)) + /* processes */ - private def posix_prefix() = if (is_cygwin()) List(platform_path("/bin/env")) else Nil + def execute(redirect: Boolean, args: String*): Process = { + val cmdline = new java.util.LinkedList[String] + if (is_cygwin) cmdline.add(platform_path("/bin/env")) + for (s <- args) cmdline.add(s) - def exec(args: String*): Process = Runtime.getRuntime.exec((posix_prefix() ++ args).toArray) - - def exec2(args: String*): Process = { - val cmdline = new java.util.LinkedList[String] - for (s <- posix_prefix() ++ args) cmdline.add(s) - new ProcessBuilder(cmdline).redirectErrorStream(true).start + val proc = new ProcessBuilder(cmdline) + proc.environment.clear + proc.environment.putAll(environment) + proc.redirectErrorStream(redirect) + proc.start } @@ -93,10 +99,10 @@ def isabelle_tool(args: String*) = { val proc = - try { exec2((List(getenv_strict("ISABELLE_TOOL")) ++ args): _*) } + try { execute(true, (List(getenv_strict("ISABELLE_TOOL")) ++ args): _*) } catch { case e: IOException => error(e.getMessage) } proc.getOutputStream.close - val output = Source.fromInputStream(proc.getInputStream, charset).mkString("") + val output = Source.fromInputStream(proc.getInputStream, charset).mkString val rc = proc.waitFor (output, rc) } @@ -115,9 +121,26 @@ if (rc 
!= 0) error(result) } - def fifo_reader(fifo: String) = // blocks until writer is ready - if (is_cygwin()) new BufferedReader(new InputStreamReader(Runtime.getRuntime.exec( - Array(platform_path("/bin/cat"), fifo)).getInputStream, charset)) - else new BufferedReader(new InputStreamReader(new FileInputStream(fifo), charset)) + def fifo_reader(fifo: String) = { + // blocks until writer is ready + val stream = + if (is_cygwin) execute(false, "cat", fifo).getInputStream + else new FileInputStream(fifo) + new BufferedReader(new InputStreamReader(stream, charset)) + } + + /* find logics */ + + def find_logics() = { + val ml_ident = getenv_strict("ML_IDENTIFIER") + var logics: Set[String] = Set() + for (dir <- getenv_strict("ISABELLE_PATH").split(":")) { + val files = platform_file(dir + "/" + ml_ident).listFiles() + if (files != null) { + for (file <- files if file.isFile) logics += file.getName + } + } + logics.toList.sort(_ < _) + } } diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/codegen.ML --- a/src/Pure/codegen.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/codegen.ML Tue Dec 30 11:10:01 2008 +0100 @@ -1025,8 +1025,6 @@ val setup = add_codegen "default" default_codegen #> add_tycodegen "default" default_tycodegen - #> Value.add_evaluator ("SML", eval_term o ProofContext.theory_of) - #> Quickcheck.add_generator ("SML", test_term) #> Code.add_attribute ("unfold", Scan.succeed (Thm.declaration_attribute (fn thm => Context.mapping (add_unfold thm #> Code.add_inline thm) I))) #> add_preprocessor unfold_preprocessor; diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/context.ML --- a/src/Pure/context.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/context.ML Tue Dec 30 11:10:01 2008 +0100 @@ -21,11 +21,10 @@ val ancestors_of: theory -> theory list val theory_name: theory -> string val is_stale: theory -> bool - val PureN: string val is_draft: theory -> bool val reject_draft: theory -> theory - val exists_name: string -> theory -> bool - val names_of: theory -> string list + val PureN: string + val display_names: theory -> string list val pretty_thy: theory -> Pretty.T val string_of_thy: theory -> string val pprint_thy: theory -> pprint_args -> unit @@ -144,17 +143,18 @@ datatype theory = Theory of (*identity*) - {self: theory ref option, (*dynamic self reference -- follows theory changes*) - id: serial * (string * int), (*identifier/name of this theory node*) - ids: (string * int) Inttab.table} * (*ancestors and checkpoints*) + {self: theory ref option, (*dynamic self reference -- follows theory changes*) + draft: bool, (*draft mode -- linear destructive changes*) + id: serial, (*identifier*) + ids: unit Inttab.table} * (*cumulative identifiers of non-drafts -- symbolic body content*) (*data*) - Object.T Datatab.table * + Object.T Datatab.table * (*body content*) (*ancestry*) - {parents: theory list, (*immediate predecessors*) - ancestors: theory list} * (*all predecessors*) + {parents: theory list, (*immediate predecessors*) + ancestors: theory list} * (*all predecessors -- canonical reverse order*) (*history*) - {name: string, (*prospective name of finished theory*) - version: int}; (*checkpoint counter*) + {name: string, (*official theory name*) + stage: int}; (*checkpoint counter*) exception THEORY of string * theory list; @@ -165,9 +165,9 @@ val ancestry_of = #3 o rep_theory; val history_of = #4 o rep_theory; -fun make_identity self id ids = {self = self, id = id, ids = ids}; +fun make_identity self draft id ids = {self = self, draft = draft, id = id, ids = ids}; fun make_ancestry parents 
ancestors = {parents = parents, ancestors = ancestors}; -fun make_history name version = {name = name, version = version}; +fun make_history name stage = {name = name, stage = stage}; val the_self = the o #self o identity_of; val parents_of = #parents o ancestry_of; @@ -177,7 +177,7 @@ (* staleness *) -fun eq_id ((i: int, _), (j, _)) = (i = j); +fun eq_id (i: int, j) = i = j; fun is_stale (Theory ({self = SOME (ref (Theory ({id = id', ...}, _, _, _))), id, ...}, _, _, _)) = @@ -185,47 +185,46 @@ | is_stale (Theory ({self = NONE, ...}, _, _, _)) = true; fun vitalize (thy as Theory ({self = SOME r, ...}, _, _, _)) = (r := thy; thy) - | vitalize (thy as Theory ({self = NONE, id, ids}, data, ancestry, history)) = + | vitalize (thy as Theory ({self = NONE, draft, id, ids}, data, ancestry, history)) = let val r = ref thy; - val thy' = Theory (make_identity (SOME r) id ids, data, ancestry, history); + val thy' = Theory (make_identity (SOME r) draft id ids, data, ancestry, history); in r := thy'; thy' end; -(* names *) +(* draft mode *) -val PureN = "Pure"; - -val draftN = "#"; -val draft_name = (draftN, ~1); - -fun draft_id (_, (name, _)) = (name = draftN); -val is_draft = draft_id o #id o identity_of; +val is_draft = #draft o identity_of; fun reject_draft thy = if is_draft thy then raise THEORY ("Illegal draft theory -- stable checkpoint required", [thy]) else thy; -fun exists_name name (thy as Theory ({id = (_, (a, _)), ids, ...}, _, _, _)) = - name = theory_name thy orelse - name = a orelse - Inttab.exists (fn (_, (b, _)) => b = name) ids; -fun name_of (a, ~1) = a - | name_of (a, i) = a ^ ":" ^ string_of_int i; +(* names *) -fun names_of (Theory ({id = (_, a), ids, ...}, _, _, _)) = - rev (name_of a :: Inttab.fold (fn (_, (b, ~1)) => cons b | _ => I) ids []); +val PureN = "Pure"; +val draftN = "#"; +val finished = ~1; -fun pretty_thy thy = - Pretty.str_list "{" "}" (names_of thy @ (if is_stale thy then ["!"] else [])); +fun display_names thy = + let + val draft = if is_draft thy then [draftN] else []; + val {stage, ...} = history_of thy; + val name = + if stage = finished then theory_name thy + else theory_name thy ^ ":" ^ string_of_int stage; + val ancestor_names = map theory_name (ancestors_of thy); + val stale = if is_stale thy then ["!"] else []; + in rev (stale @ draft @ [name] @ ancestor_names) end; +val pretty_thy = Pretty.str_list "{" "}" o display_names; val string_of_thy = Pretty.string_of o pretty_thy; val pprint_thy = Pretty.pprint o pretty_thy; fun pretty_abbrev_thy thy = let - val names = names_of thy; + val names = display_names thy; val n = length names; val abbrev = if n > 5 then "..." 
:: List.drop (names, n - 5) else names; in Pretty.str_list "{" "}" abbrev end; @@ -252,20 +251,18 @@ val pprint_thy_ref = Pretty.pprint o pretty_thy o deref; -(* consistency *) +(* build ids *) -fun check_insert id ids = - if draft_id id orelse Inttab.defined ids (#1 id) then ids - else if Inttab.exists (fn (_, a) => a = #2 id) ids then - error ("Different versions of theory component " ^ quote (name_of (#2 id))) - else Inttab.update id ids; +fun insert_id draft id ids = + if draft then ids + else Inttab.update (id, ()) ids; -fun check_merge - (Theory ({id = id1, ids = ids1, ...}, _, _, _)) - (Theory ({id = id2, ids = ids2, ...}, _, _, _)) = - Inttab.fold check_insert ids2 ids1 - |> check_insert id1 - |> check_insert id2; +fun merge_ids + (Theory ({draft = draft1, id = id1, ids = ids1, ...}, _, _, _)) + (Theory ({draft = draft2, id = id2, ids = ids2, ...}, _, _, _)) = + Inttab.merge (K true) (ids1, ids2) + |> insert_id draft1 id1 + |> insert_id draft2 id2; (* equality and inclusion *) @@ -273,22 +270,35 @@ val eq_thy = eq_id o pairself (#id o identity_of); fun proper_subthy (Theory ({id, ...}, _, _, _), Theory ({ids, ...}, _, _, _)) = - Inttab.defined ids (#1 id); + Inttab.defined ids id; fun subthy thys = eq_thy thys orelse proper_subthy thys; fun joinable (thy1, thy2) = subthy (thy1, thy2) orelse subthy (thy2, thy1); +(* consistent ancestors *) + +fun extend_ancestors thy thys = + if member eq_thy thys thy then raise THEORY ("Duplicate theory node", thy :: thys) + else thy :: thys; + +fun extend_ancestors_of thy = extend_ancestors thy (ancestors_of thy); + +val merge_ancestors = merge (fn (thy1, thy2) => + eq_thy (thy1, thy2) orelse + theory_name thy1 = theory_name thy2 andalso + raise THEORY ("Inconsistent theory versions", [thy1, thy2])); + + (* trivial merge *) fun merge (thy1, thy2) = if eq_thy (thy1, thy2) then thy1 else if proper_subthy (thy2, thy1) then thy1 else if proper_subthy (thy1, thy2) then thy2 - else (check_merge thy1 thy2; - error (cat_lines ["Attempt to perform non-trivial merge of theories:", - str_of_thy thy1, str_of_thy thy2])); + else error (cat_lines ["Attempt to perform non-trivial merge of theories:", + str_of_thy thy1, str_of_thy thy2]); fun merge_refs (ref1, ref2) = if ref1 = ref2 then ref1 @@ -300,41 +310,38 @@ (* primitives *) -fun create_thy name self id ids data ancestry history = +fun create_thy self draft ids data ancestry history = + let val identity = make_identity self draft (serial ()) ids; + in vitalize (Theory (identity, data, ancestry, history)) end; + +fun change_thy draft' f thy = let - val {version, ...} = history; - val ids' = check_insert id ids; - val id' = (serial (), name); - val _ = check_insert id' ids'; - val identity' = make_identity self id' ids'; - in vitalize (Theory (identity', data, ancestry, history)) end; - -fun change_thy name f thy = - let - val Theory ({self, id, ids}, data, ancestry, history) = thy; + val Theory ({self, draft, id, ids}, data, ancestry, history) = thy; val (self', data', ancestry') = - if is_draft thy then (self, data, ancestry) (*destructive change!*) - else if #version history > 0 + if draft then (self, data, ancestry) (*destructive change!*) + else if #stage history > 0 then (NONE, copy_data data, ancestry) - else (NONE, extend_data data, make_ancestry [thy] (thy :: #ancestors ancestry)); + else (NONE, extend_data data, make_ancestry [thy] (extend_ancestors_of thy)); + val ids' = insert_id draft id ids; val data'' = f data'; val thy' = NAMED_CRITICAL "theory" (fn () => - (check_thy thy; create_thy name 
self' id ids data'' ancestry' history)); + (check_thy thy; create_thy self' draft' ids' data'' ancestry' history)); in thy' end; -fun name_thy name = change_thy name I; -val modify_thy = change_thy draft_name; -val extend_thy = modify_thy I; +val name_thy = change_thy false I; +val extend_thy = change_thy true I; +val modify_thy = change_thy true; fun copy_thy thy = let - val Theory ({id, ids, ...}, data, ancestry, history) = thy; + val Theory ({draft, id, ids, ...}, data, ancestry, history) = thy; + val ids' = insert_id draft id ids; val data' = copy_data data; val thy' = NAMED_CRITICAL "theory" (fn () => - (check_thy thy; create_thy draft_name NONE id ids data' ancestry history)); + (check_thy thy; create_thy NONE true ids' data' ancestry history)); in thy' end; -val pre_pure_thy = create_thy draft_name NONE (serial (), draft_name) Inttab.empty +val pre_pure_thy = create_thy NONE true Inttab.empty Datatab.empty (make_ancestry [] []) (make_history PureN 0); @@ -342,56 +349,56 @@ fun merge_thys pp (thy1, thy2) = let - val ids = check_merge thy1 thy2; + val ids = merge_ids thy1 thy2; val data = merge_data (pp thy1) (data_of thy1, data_of thy2); val ancestry = make_ancestry [] []; val history = make_history "" 0; val thy' = NAMED_CRITICAL "theory" (fn () => - (check_thy thy1; check_thy thy2; - create_thy draft_name NONE (serial (), draft_name) ids data ancestry history)); + (check_thy thy1; check_thy thy2; create_thy NONE true ids data ancestry history)); in thy' end; fun maximal_thys thys = thys |> filter_out (fn thy => exists (fn thy' => proper_subthy (thy, thy')) thys); fun begin_thy pp name imports = - if name = draftN then error ("Illegal theory name: " ^ quote draftN) + if name = "" orelse name = draftN then error ("Bad theory name: " ^ quote name) else let val parents = maximal_thys (distinct eq_thy imports); - val ancestors = distinct eq_thy (parents @ maps ancestors_of parents); - val Theory ({id, ids, ...}, data, _, _) = + val ancestors = + Library.foldl merge_ancestors ([], map ancestors_of parents) + |> fold extend_ancestors parents; + + val Theory ({ids, ...}, data, _, _) = (case parents of [] => error "No parent theories" | [thy] => extend_thy thy | thy :: thys => Library.foldl (merge_thys pp) (thy, thys)); + val ancestry = make_ancestry parents ancestors; val history = make_history name 0; val thy' = NAMED_CRITICAL "theory" (fn () => - (map check_thy imports; create_thy draft_name NONE id ids data ancestry history)); + (map check_thy imports; create_thy NONE true ids data ancestry history)); in thy' end; -(* persistent checkpoints *) +(* history stages *) + +fun history_stage f thy = + let + val {name, stage} = history_of thy; + val _ = stage = finished andalso raise THEORY ("Theory already finished", [thy]); + val history' = make_history name (f stage); + val thy' as Theory (identity', data', ancestry', _) = name_thy thy; + val thy'' = NAMED_CRITICAL "theory" (fn () => + (check_thy thy'; vitalize (Theory (identity', data', ancestry', history')))); + in thy'' end; fun checkpoint_thy thy = - if not (is_draft thy) then thy - else - let - val {name, version} = history_of thy; - val thy' as Theory (identity', data', ancestry', _) = name_thy (name, version) thy; - val history' = make_history name (version + 1); - val thy'' = NAMED_CRITICAL "theory" (fn () => - (check_thy thy'; vitalize (Theory (identity', data', ancestry', history')))); - in thy'' end; + if is_draft thy then history_stage (fn stage => stage + 1) thy + else thy; -fun finish_thy thy = NAMED_CRITICAL "theory" (fn () 
=> - let - val name = theory_name thy; - val Theory (identity', data', ancestry', _) = name_thy (name, ~1) thy; - val history' = make_history name 0; - val thy' = vitalize (Theory (identity', data', ancestry', history')); - in thy' end); +val finish_thy = history_stage (fn _ => finished); (* theory data *) diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/display.ML --- a/src/Pure/display.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/display.ML Tue Dec 30 11:10:01 2008 +0100 @@ -213,7 +213,7 @@ ||> List.partition (Defs.plain_args o #2 o #1); val rests = restricts |> map (apfst (apfst extern_const)) |> sort_wrt (#1 o #1); in - [Pretty.strs ("names:" :: Context.names_of thy)] @ + [Pretty.strs ("names:" :: Context.display_names thy)] @ [Pretty.strs ["name prefix:", NameSpace.path_of naming], Pretty.big_list "classes:" (map pretty_classrel clsses), pretty_default default, diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/goal.ML --- a/src/Pure/goal.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/goal.ML Tue Dec 30 11:10:01 2008 +0100 @@ -179,7 +179,7 @@ val res = if immediate orelse #maxidx (Thm.rep_cterm stmt) >= 0 orelse not (Future.enabled ()) then result () - else future_result ctxt' (Future.fork_background result) (Thm.term_of stmt); + else future_result ctxt' (Future.fork_pri 1 result) (Thm.term_of stmt); in Conjunction.elim_balanced (length props) res |> map (Assumption.export false ctxt' ctxt) diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/old_goals.ML --- a/src/Pure/old_goals.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/old_goals.ML Tue Dec 30 11:10:01 2008 +0100 @@ -127,7 +127,7 @@ (*Generates the list of new theories when the proof state's theory changes*) fun thy_error (thy,thy') = - let val names = Context.names_of thy' \\ Context.names_of thy + let val names = Context.display_names thy' \\ Context.display_names thy in case names of [name] => "\nNew theory: " ^ name | _ => "\nNew theories: " ^ space_implode ", " names diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/pure_setup.ML --- a/src/Pure/pure_setup.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/pure_setup.ML Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ (* Title: Pure/pure_setup.ML - ID: $Id$ Author: Makarius Pure theory and ML toplevel setup. 
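(* Note on the context.ML hunks above: the per-theory "version" counter and the
   special draft name are replaced by an explicit draft flag plus a numeric
   history "stage"; checkpoint_thy bumps the stage of a draft, and finish_thy
   moves the theory to a distinguished "finished" stage, after which no further
   stage change is allowed.  A minimal standalone sketch of that idea in plain
   SML -- simplified types and an assumed sentinel value, not the Isabelle
   sources themselves: *)

type history = {name: string, stage: int};

val finished = ~1;  (*assumed sentinel stage marking a completed theory*)

fun history_stage f ({name, stage}: history) : history =
  if stage = finished then raise Fail ("Theory already finished: " ^ name)
  else {name = name, stage = f stage};

val checkpoint = history_stage (fn stage => stage + 1);  (*next intermediate stage*)
val finish = history_stage (fn _ => finished);           (*seal the theory*)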
@@ -28,12 +27,13 @@ (* ML toplevel pretty printing *) -install_pp (make_pp ["TaskQueue", "task"] (Pretty.pprint o Pretty.str o TaskQueue.str_of_task)); -install_pp (make_pp ["TaskQueue", "group"] (Pretty.pprint o Pretty.str o TaskQueue.str_of_group)); +install_pp (make_pp ["Task_Queue", "task"] (Pretty.pprint o Pretty.str o Task_Queue.str_of_task)); +install_pp (make_pp ["Task_Queue", "group"] (Pretty.pprint o Pretty.str o Task_Queue.str_of_group)); install_pp (make_pp ["Position", "T"] (Pretty.pprint o Pretty.enum "," "{" "}" o map (fn (x, y) => Pretty.str (x ^ "=" ^ y)) o Position.properties_of)); install_pp (make_pp ["Thm", "thm"] ProofDisplay.pprint_thm); install_pp (make_pp ["Thm", "cterm"] ProofDisplay.pprint_cterm); +install_pp (make_pp ["Binding", "T"] (Pretty.pprint o Pretty.str o Binding.display)); install_pp (make_pp ["Thm", "ctyp"] ProofDisplay.pprint_ctyp); install_pp (make_pp ["Context", "theory"] Context.pprint_thy); install_pp (make_pp ["Context", "theory_ref"] Context.pprint_thy_ref); diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/pure_thy.ML --- a/src/Pure/pure_thy.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/pure_thy.ML Tue Dec 30 11:10:01 2008 +0100 @@ -322,7 +322,7 @@ ("", typ "var => logic", Delimfix "_"), ("_DDDOT", typ "logic", Delimfix "..."), ("_constify", typ "num => num_const", Delimfix "_"), - ("_constify", typ "float => float_const", Delimfix "_"), + ("_constify", typ "float_token => float_const", Delimfix "_"), ("_indexnum", typ "num_const => index", Delimfix "\\<^sub>_"), ("_index", typ "logic => index", Delimfix "(00\\<^bsub>_\\<^esub>)"), ("_indexdefault", typ "index", Delimfix ""), diff -r 8f84a608883d -r ea97aa6aeba2 src/Pure/theory.ML --- a/src/Pure/theory.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Pure/theory.ML Tue Dec 30 11:10:01 2008 +0100 @@ -68,7 +68,7 @@ val copy = Context.copy_thy; fun requires thy name what = - if Context.exists_name name thy then () + if exists (fn thy' => Context.theory_name thy' = name) (thy :: ancestors_of thy) then () else error ("Require theory " ^ quote name ^ " as an ancestor for " ^ what); diff -r 8f84a608883d -r ea97aa6aeba2 src/Tools/code/code_haskell.ML --- a/src/Tools/code/code_haskell.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Tools/code/code_haskell.ML Tue Dec 30 11:10:01 2008 +0100 @@ -414,7 +414,10 @@ o NameSpace.explode) modlname; val pathname = Path.append destination filename; val _ = File.mkdir (Path.dir pathname); - in File.write pathname (Code_Target.code_of_pretty content) end + in File.write pathname + ("{-# OPTIONS_GHC -fglasgow-exts #-}\n\n" + ^ Code_Target.code_of_pretty content) + end in Code_Target.mk_serialization target NONE (fn NONE => K () o map (Code_Target.code_writeln o snd) | SOME file => K () o map (write_module file)) diff -r 8f84a608883d -r ea97aa6aeba2 src/Tools/code/code_ml.ML --- a/src/Tools/code/code_ml.ML Tue Dec 30 08:18:54 2008 +0100 +++ b/src/Tools/code/code_ml.ML Tue Dec 30 11:10:01 2008 +0100 @@ -1,5 +1,4 @@ (* Title: Tools/code/code_ml.ML - ID: $Id$ Author: Florian Haftmann, TU Muenchen Serializer for SML and OCaml. 
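(* Note on the code_ml.ML hunks below: function blocks are now split into three
   statement kinds -- MLExc for constants without code equations (emitted as a
   "raise Fail" stub), MLVal for a non-recursive nullary constant (emitted as a
   plain val), and MLFuns carrying a list of "pseudo functions": nullary
   constants that occur inside a (mutually) recursive block.  ML recursive
   bindings only admit functions, so such constants are printed under a unit
   abstraction and forced exactly once after the block.  A hypothetical sketch
   of the emitted SML shapes -- names and right-hand sides invented for
   illustration, not actual generated code: *)

(* MLExc: no defining equations, arguments ignored *)
fun undefined_op _ _ = raise (Fail "Example.undefined_op");

(* MLVal: a non-recursive nullary constant becomes an ordinary value *)
val empty_table : (string * int) list = [];

(* pseudo function: defined under a unit abstraction within its block ... *)
fun default_table () = ("default", 0) :: empty_table;

(* ... and then forced once, so client code sees an ordinary value *)
val default_table = default_table ();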
@@ -25,17 +24,21 @@ val target_OCaml = "OCaml"; datatype ml_stmt = - MLFuns of (string * (typscheme * ((iterm list * iterm) * (thm * bool)) list)) list + MLExc of string * int + | MLVal of string * ((typscheme * iterm) * (thm * bool)) + | MLFuns of (string * (typscheme * ((iterm list * iterm) * (thm * bool)) list)) list * string list | MLDatas of (string * ((vname * sort) list * (string * itype list) list)) list | MLClass of string * (vname * ((class * string) list * (string * itype) list)) | MLClassinst of string * ((class * (string * (vname * sort) list)) * ((class * (string * (string * dict list list))) list * ((string * const) * (thm * bool)) list)); -fun stmt_names_of (MLFuns fs) = map fst fs +fun stmt_names_of (MLExc (name, _)) = [name] + | stmt_names_of (MLVal (name, _)) = [name] + | stmt_names_of (MLFuns (fs, _)) = map fst fs | stmt_names_of (MLDatas ds) = map fst ds - | stmt_names_of (MLClass (c, _)) = [c] - | stmt_names_of (MLClassinst (i, _)) = [i]; + | stmt_names_of (MLClass (name, _)) = [name] + | stmt_names_of (MLClassinst (name, _)) = [name]; (** SML serailizer **) @@ -81,144 +84,159 @@ of NONE => pr_tycoexpr fxy (tyco, tys) | SOME (i, pr) => pr pr_typ fxy tys) | pr_typ fxy (ITyVar v) = str ("'" ^ v); - fun pr_term thm vars fxy (IConst c) = - pr_app thm vars fxy (c, []) - | pr_term thm vars fxy (IVar v) = + fun pr_term is_closure thm vars fxy (IConst c) = + pr_app is_closure thm vars fxy (c, []) + | pr_term is_closure thm vars fxy (IVar v) = str (Code_Name.lookup_var vars v) - | pr_term thm vars fxy (t as t1 `$ t2) = + | pr_term is_closure thm vars fxy (t as t1 `$ t2) = (case Code_Thingol.unfold_const_app t - of SOME c_ts => pr_app thm vars fxy c_ts - | NONE => - brackify fxy [pr_term thm vars NOBR t1, pr_term thm vars BR t2]) - | pr_term thm vars fxy (t as _ `|-> _) = + of SOME c_ts => pr_app is_closure thm vars fxy c_ts + | NONE => brackify fxy + [pr_term is_closure thm vars NOBR t1, pr_term is_closure thm vars BR t2]) + | pr_term is_closure thm vars fxy (t as _ `|-> _) = let val (binds, t') = Code_Thingol.unfold_abs t; fun pr ((v, pat), ty) = - pr_bind thm NOBR ((SOME v, pat), ty) + pr_bind is_closure thm NOBR ((SOME v, pat), ty) #>> (fn p => concat [str "fn", p, str "=>"]); val (ps, vars') = fold_map pr binds vars; - in brackets (ps @ [pr_term thm vars' NOBR t']) end - | pr_term thm vars fxy (ICase (cases as (_, t0))) = + in brackets (ps @ [pr_term is_closure thm vars' NOBR t']) end + | pr_term is_closure thm vars fxy (ICase (cases as (_, t0))) = (case Code_Thingol.unfold_const_app t0 of SOME (c_ts as ((c, _), _)) => if is_none (syntax_const c) - then pr_case thm vars fxy cases - else pr_app thm vars fxy c_ts - | NONE => pr_case thm vars fxy cases) - and pr_app' thm vars (app as ((c, (iss, tys)), ts)) = - if is_cons c then let - val k = length tys - in if k < 2 then - (str o deresolve) c :: map (pr_term thm vars BR) ts - else if k = length ts then - [(str o deresolve) c, Pretty.enum "," "(" ")" (map (pr_term thm vars NOBR) ts)] - else [pr_term thm vars BR (Code_Thingol.eta_expand k app)] end else + then pr_case is_closure thm vars fxy cases + else pr_app is_closure thm vars fxy c_ts + | NONE => pr_case is_closure thm vars fxy cases) + and pr_app' is_closure thm vars (app as ((c, (iss, tys)), ts)) = + if is_cons c then + let + val k = length tys + in if k < 2 then + (str o deresolve) c :: map (pr_term is_closure thm vars BR) ts + else if k = length ts then + [(str o deresolve) c, Pretty.enum "," "(" ")" (map (pr_term is_closure thm vars NOBR) ts)] + else [pr_term 
is_closure thm vars BR (Code_Thingol.eta_expand k app)] end + else if is_closure c + then (str o deresolve) c @@ str "()" + else (str o deresolve) c - :: (map (pr_dicts BR) o filter_out null) iss @ map (pr_term thm vars BR) ts - and pr_app thm vars = gen_pr_app pr_app' pr_term syntax_const naming thm vars + :: (map (pr_dicts BR) o filter_out null) iss @ map (pr_term is_closure thm vars BR) ts + and pr_app is_closure thm vars = gen_pr_app (pr_app' is_closure) (pr_term is_closure) + syntax_const naming thm vars and pr_bind' ((NONE, NONE), _) = str "_" | pr_bind' ((SOME v, NONE), _) = str v | pr_bind' ((NONE, SOME p), _) = p | pr_bind' ((SOME v, SOME p), _) = concat [str v, str "as", p] - and pr_bind thm = gen_pr_bind pr_bind' pr_term thm - and pr_case thm vars fxy (cases as ((_, [_]), _)) = + and pr_bind is_closure = gen_pr_bind pr_bind' (pr_term is_closure) + and pr_case is_closure thm vars fxy (cases as ((_, [_]), _)) = let val (binds, t') = Code_Thingol.unfold_let (ICase cases); fun pr ((pat, ty), t) vars = vars - |> pr_bind thm NOBR ((NONE, SOME pat), ty) - |>> (fn p => semicolon [str "val", p, str "=", pr_term thm vars NOBR t]) + |> pr_bind is_closure thm NOBR ((NONE, SOME pat), ty) + |>> (fn p => semicolon [str "val", p, str "=", pr_term is_closure thm vars NOBR t]) val (ps, vars') = fold_map pr binds vars; in Pretty.chunks [ [str ("let"), Pretty.fbrk, Pretty.chunks ps] |> Pretty.block, - [str ("in"), Pretty.fbrk, pr_term thm vars' NOBR t'] |> Pretty.block, + [str ("in"), Pretty.fbrk, pr_term is_closure thm vars' NOBR t'] |> Pretty.block, str ("end") ] end - | pr_case thm vars fxy (((td, ty), b::bs), _) = + | pr_case is_closure thm vars fxy (((td, ty), b::bs), _) = let fun pr delim (pat, t) = let - val (p, vars') = pr_bind thm NOBR ((NONE, SOME pat), ty) vars; + val (p, vars') = pr_bind is_closure thm NOBR ((NONE, SOME pat), ty) vars; in - concat [str delim, p, str "=>", pr_term thm vars' NOBR t] + concat [str delim, p, str "=>", pr_term is_closure thm vars' NOBR t] end; in (Pretty.enclose "(" ")" o single o brackify fxy) ( str "case" - :: pr_term thm vars NOBR td + :: pr_term is_closure thm vars NOBR td :: pr "of" b :: map (pr "|") bs ) end - | pr_case thm vars fxy ((_, []), _) = str "raise Fail \"empty case\""; - fun pr_stmt (MLFuns (funns as (funn :: funns'))) = + | pr_case is_closure thm vars fxy ((_, []), _) = str "raise Fail \"empty case\""; + fun pr_stmt (MLExc (name, n)) = let - val definer = + val exc_str = + (ML_Syntax.print_string o NameSpace.base o NameSpace.qualifier) name; + in + concat ( + str (if n = 0 then "val" else "fun") + :: (str o deresolve) name + :: map str (replicate n "_") + @ str "=" + :: str "raise" + :: str "(Fail" + @@ str (exc_str ^ ")") + ) + end + | pr_stmt (MLVal (name, (((vs, ty), t), (thm, _)))) = + let + val consts = map_filter + (fn c => if (is_some o syntax_const) c + then NONE else (SOME o NameSpace.base o deresolve) c) + (Code_Thingol.fold_constnames (insert (op =)) t []); + val vars = reserved_names + |> Code_Name.intro_vars consts; + in + concat [ + str "val", + (str o deresolve) name, + str ":", + pr_typ NOBR ty, + str "=", + pr_term (K false) thm vars NOBR t + ] + end + | pr_stmt (MLFuns (funn :: funns, pseudo_funs)) = + let + fun pr_funn definer (name, ((vs, ty), eqs as eq :: eqs')) = let - fun no_args _ (((ts, _), _) :: _) = length ts - | no_args ty [] = (length o fst o Code_Thingol.unfold_fun) ty; - fun mk 0 [] = "val" - | mk 0 vs = if (null o filter_out (null o snd)) vs - then "val" else "fun" - | mk k _ = "fun"; - fun chk (_, ((vs, ty), 
eqs)) NONE = SOME (mk (no_args ty eqs) vs) - | chk (_, ((vs, ty), eqs)) (SOME defi) = - if defi = mk (no_args ty eqs) vs then SOME defi - else error ("Mixing simultaneous vals and funs not implemented: " - ^ commas (map (labelled_name o fst) funns)); - in the (fold chk funns NONE) end; - fun pr_funn definer (name, ((vs, ty), [])) = + val vs_dict = filter_out (null o snd) vs; + val shift = if null eqs' then I else + map (Pretty.block o single o Pretty.block o single); + fun pr_eq definer ((ts, t), (thm, _)) = let - val vs_dict = filter_out (null o snd) vs; - val n = length vs_dict + (length o fst o Code_Thingol.unfold_fun) ty; - val exc_str = - (ML_Syntax.print_string o NameSpace.base o NameSpace.qualifier) name; + val consts = map_filter + (fn c => if (is_some o syntax_const) c + then NONE else (SOME o NameSpace.base o deresolve) c) + ((fold o Code_Thingol.fold_constnames) (insert (op =)) (t :: ts) []); + val vars = reserved_names + |> Code_Name.intro_vars consts + |> Code_Name.intro_vars ((fold o Code_Thingol.fold_unbound_varnames) + (insert (op =)) ts []); in concat ( str definer :: (str o deresolve) name - :: map str (replicate n "_") + :: (if member (op =) pseudo_funs name then [str "()"] + else pr_tyvar_dicts vs_dict + @ map (pr_term (member (op =) pseudo_funs) thm vars BR) ts) @ str "=" - :: str "raise" - :: str "(Fail" - @@ str (exc_str ^ ")") + @@ pr_term (member (op =) pseudo_funs) thm vars NOBR t ) end - | pr_funn definer (name, ((vs, ty), eqs as eq :: eqs')) = - let - val vs_dict = filter_out (null o snd) vs; - val shift = if null eqs' then I else - map (Pretty.block o single o Pretty.block o single); - fun pr_eq definer ((ts, t), (thm, _)) = - let - val consts = map_filter - (fn c => if (is_some o syntax_const) c - then NONE else (SOME o NameSpace.base o deresolve) c) - ((fold o Code_Thingol.fold_constnames) (insert (op =)) (t :: ts) []); - val vars = reserved_names - |> Code_Name.intro_vars consts - |> Code_Name.intro_vars ((fold o Code_Thingol.fold_unbound_varnames) - (insert (op =)) ts []); - in - concat ( - [str definer, (str o deresolve) name] - @ (if null ts andalso null vs_dict - then [str ":", pr_typ NOBR ty] - else - pr_tyvar_dicts vs_dict - @ map (pr_term thm vars BR) ts) - @ [str "=", pr_term thm vars NOBR t] - ) - end - in - (Pretty.block o Pretty.fbreaks o shift) ( - pr_eq definer eq - :: map (pr_eq "|") eqs' - ) - end; - val (ps, p) = split_last (pr_funn definer funn :: map (pr_funn "and") funns'); - in Pretty.chunks (ps @ [Pretty.block ([p, str ";"])]) end + in + (Pretty.block o Pretty.fbreaks o shift) ( + pr_eq definer eq + :: map (pr_eq "|") eqs' + ) + end; + fun pr_pseudo_fun name = concat [ + str "val", + (str o deresolve) name, + str "=", + (str o deresolve) name, + str "();" + ]; + val (ps, p) = split_last (pr_funn "fun" funn :: map (pr_funn "and") funns); + val pseudo_ps = map pr_pseudo_fun pseudo_funs; + in Pretty.chunks (ps @ Pretty.block ([p, str ";"]) :: pseudo_ps) end | pr_stmt (MLDatas (datas as (data :: datas'))) = let fun pr_co (co, []) = @@ -245,7 +263,7 @@ ); val (ps, p) = split_last (pr_data "datatype" data :: map (pr_data "and") datas'); - in Pretty.chunks (ps @ [Pretty.block ([p, str ";"])]) end + in Pretty.chunks (ps @| Pretty.block ([p, str ";"])) end | pr_stmt (MLClass (class, (v, (superclasses, classparams)))) = let val w = Code_Name.first_upper v ^ "_"; @@ -301,7 +319,7 @@ concat [ (str o pr_label_classparam) classparam, str "=", - pr_app thm reserved_names NOBR (c_inst, []) + pr_app (K false) thm reserved_names NOBR (c_inst, []) ]; in 
semicolon ([ @@ -374,68 +392,71 @@ of NONE => pr_tycoexpr fxy (tyco, tys) | SOME (i, pr) => pr pr_typ fxy tys) | pr_typ fxy (ITyVar v) = str ("'" ^ v); - fun pr_term thm vars fxy (IConst c) = - pr_app thm vars fxy (c, []) - | pr_term thm vars fxy (IVar v) = + fun pr_term is_closure thm vars fxy (IConst c) = + pr_app is_closure thm vars fxy (c, []) + | pr_term is_closure thm vars fxy (IVar v) = str (Code_Name.lookup_var vars v) - | pr_term thm vars fxy (t as t1 `$ t2) = + | pr_term is_closure thm vars fxy (t as t1 `$ t2) = (case Code_Thingol.unfold_const_app t - of SOME c_ts => pr_app thm vars fxy c_ts + of SOME c_ts => pr_app is_closure thm vars fxy c_ts | NONE => - brackify fxy [pr_term thm vars NOBR t1, pr_term thm vars BR t2]) - | pr_term thm vars fxy (t as _ `|-> _) = + brackify fxy [pr_term is_closure thm vars NOBR t1, pr_term is_closure thm vars BR t2]) + | pr_term is_closure thm vars fxy (t as _ `|-> _) = let val (binds, t') = Code_Thingol.unfold_abs t; - fun pr ((v, pat), ty) = pr_bind thm BR ((SOME v, pat), ty); + fun pr ((v, pat), ty) = pr_bind is_closure thm BR ((SOME v, pat), ty); val (ps, vars') = fold_map pr binds vars; - in brackets (str "fun" :: ps @ str "->" @@ pr_term thm vars' NOBR t') end - | pr_term thm vars fxy (ICase (cases as (_, t0))) = (case Code_Thingol.unfold_const_app t0 + in brackets (str "fun" :: ps @ str "->" @@ pr_term is_closure thm vars' NOBR t') end + | pr_term is_closure thm vars fxy (ICase (cases as (_, t0))) = (case Code_Thingol.unfold_const_app t0 of SOME (c_ts as ((c, _), _)) => if is_none (syntax_const c) - then pr_case thm vars fxy cases - else pr_app thm vars fxy c_ts - | NONE => pr_case thm vars fxy cases) - and pr_app' thm vars (app as ((c, (iss, tys)), ts)) = + then pr_case is_closure thm vars fxy cases + else pr_app is_closure thm vars fxy c_ts + | NONE => pr_case is_closure thm vars fxy cases) + and pr_app' is_closure thm vars (app as ((c, (iss, tys)), ts)) = if is_cons c then if length tys = length ts then case ts of [] => [(str o deresolve) c] - | [t] => [(str o deresolve) c, pr_term thm vars BR t] + | [t] => [(str o deresolve) c, pr_term is_closure thm vars BR t] | _ => [(str o deresolve) c, Pretty.enum "," "(" ")" - (map (pr_term thm vars NOBR) ts)] - else [pr_term thm vars BR (Code_Thingol.eta_expand (length tys) app)] + (map (pr_term is_closure thm vars NOBR) ts)] + else [pr_term is_closure thm vars BR (Code_Thingol.eta_expand (length tys) app)] + else if is_closure c + then (str o deresolve) c @@ str "()" else (str o deresolve) c - :: ((map (pr_dicts BR) o filter_out null) iss @ map (pr_term thm vars BR) ts) - and pr_app thm vars = gen_pr_app pr_app' pr_term syntax_const naming thm vars + :: ((map (pr_dicts BR) o filter_out null) iss @ map (pr_term is_closure thm vars BR) ts) + and pr_app is_closure = gen_pr_app (pr_app' is_closure) (pr_term is_closure) + syntax_const naming and pr_bind' ((NONE, NONE), _) = str "_" | pr_bind' ((SOME v, NONE), _) = str v | pr_bind' ((NONE, SOME p), _) = p | pr_bind' ((SOME v, SOME p), _) = brackets [p, str "as", str v] - and pr_bind thm = gen_pr_bind pr_bind' pr_term thm - and pr_case thm vars fxy (cases as ((_, [_]), _)) = + and pr_bind is_closure = gen_pr_bind pr_bind' (pr_term is_closure) + and pr_case is_closure thm vars fxy (cases as ((_, [_]), _)) = let val (binds, t') = Code_Thingol.unfold_let (ICase cases); fun pr ((pat, ty), t) vars = vars - |> pr_bind thm NOBR ((NONE, SOME pat), ty) + |> pr_bind is_closure thm NOBR ((NONE, SOME pat), ty) |>> (fn p => concat - [str "let", p, str "=", pr_term 
thm vars NOBR t, str "in"]) + [str "let", p, str "=", pr_term is_closure thm vars NOBR t, str "in"]) val (ps, vars') = fold_map pr binds vars; - in Pretty.chunks (ps @| pr_term thm vars' NOBR t') end - | pr_case thm vars fxy (((td, ty), b::bs), _) = + in Pretty.chunks (ps @| pr_term is_closure thm vars' NOBR t') end + | pr_case is_closure thm vars fxy (((td, ty), b::bs), _) = let fun pr delim (pat, t) = let - val (p, vars') = pr_bind thm NOBR ((NONE, SOME pat), ty) vars; - in concat [str delim, p, str "->", pr_term thm vars' NOBR t] end; + val (p, vars') = pr_bind is_closure thm NOBR ((NONE, SOME pat), ty) vars; + in concat [str delim, p, str "->", pr_term is_closure thm vars' NOBR t] end; in (Pretty.enclose "(" ")" o single o brackify fxy) ( str "match" - :: pr_term thm vars NOBR td + :: pr_term is_closure thm vars NOBR td :: pr "with" b :: map (pr "|") bs ) end - | pr_case thm vars fxy ((_, []), _) = str "failwith \"empty case\""; + | pr_case is_closure thm vars fxy ((_, []), _) = str "failwith \"empty case\""; fun fish_params vars eqs = let fun fish_param _ (w as SOME _) = w @@ -449,7 +470,39 @@ val (fished3, _) = Name.variants fished2 Name.context; val vars' = Code_Name.intro_vars fished3 vars; in map (Code_Name.lookup_var vars') fished3 end; - fun pr_stmt (MLFuns (funns as funn :: funns')) = + fun pr_stmt (MLExc (name, n)) = + let + val exc_str = + (ML_Syntax.print_string o NameSpace.base o NameSpace.qualifier) name; + in + concat ( + str "let" + :: (str o deresolve) name + :: map str (replicate n "_") + @ str "=" + :: str "failwith" + @@ str exc_str + ) + end + | pr_stmt (MLVal (name, (((vs, ty), t), (thm, _)))) = + let + val consts = map_filter + (fn c => if (is_some o syntax_const) c + then NONE else (SOME o NameSpace.base o deresolve) c) + (Code_Thingol.fold_constnames (insert (op =)) t []); + val vars = reserved_names + |> Code_Name.intro_vars consts; + in + concat [ + str "let", + (str o deresolve) name, + str ":", + pr_typ NOBR ty, + str "=", + pr_term (K false) thm vars NOBR t + ] + end + | pr_stmt (MLFuns (funn :: funns, pseudo_funs)) = let fun pr_eq ((ts, t), (thm, _)) = let @@ -462,24 +515,12 @@ |> Code_Name.intro_vars ((fold o Code_Thingol.fold_unbound_varnames) (insert (op =)) ts []); in concat [ - (Pretty.block o Pretty.commas) (map (pr_term thm vars NOBR) ts), + (Pretty.block o Pretty.commas) + (map (pr_term (member (op =) pseudo_funs) thm vars NOBR) ts), str "->", - pr_term thm vars NOBR t + pr_term (member (op =) pseudo_funs) thm vars NOBR t ] end; - fun pr_eqs name ty [] = - let - val n = (length o fst o Code_Thingol.unfold_fun) ty; - val exc_str = - (ML_Syntax.print_string o NameSpace.base o NameSpace.qualifier) name; - in - concat ( - map str (replicate n "_") - @ str "=" - :: str "failwith" - @@ str exc_str - ) - end - | pr_eqs _ _ [((ts, t), (thm, _))] = + fun pr_eqs is_pseudo [((ts, t), (thm, _))] = let val consts = map_filter (fn c => if (is_some o syntax_const) c @@ -491,12 +532,13 @@ (insert (op =)) ts []); in concat ( - map (pr_term thm vars BR) ts + (if is_pseudo then [str "()"] + else map (pr_term (member (op =) pseudo_funs) thm vars BR) ts) @ str "=" - @@ pr_term thm vars NOBR t + @@ pr_term (member (op =) pseudo_funs) thm vars NOBR t ) end - | pr_eqs _ _ (eqs as (eq as (([_], _), _)) :: eqs') = + | pr_eqs _ (eqs as (eq as (([_], _), _)) :: eqs') = Pretty.block ( str "=" :: Pretty.brk 1 @@ -506,7 +548,7 @@ :: maps (append [Pretty.fbrk, str "|", Pretty.brk 1] o single o pr_eq) eqs' ) - | pr_eqs _ _ (eqs as eq :: eqs') = + | pr_eqs _ (eqs as eq :: eqs') = 
let val consts = map_filter (fn c => if (is_some o syntax_const) c @@ -538,11 +580,20 @@ str definer :: (str o deresolve) name :: pr_tyvar_dicts (filter_out (null o snd) vs) - @| pr_eqs name ty eqs + @| pr_eqs (member (op =) pseudo_funs name) eqs ); + fun pr_pseudo_fun name = concat [ + str "let", + (str o deresolve) name, + str "=", + (str o deresolve) name, + str "();;" + ]; + val (ps, p) = split_last (pr_funn "fun" funn :: map (pr_funn "and") funns); val (ps, p) = split_last - (pr_funn "let rec" funn :: map (pr_funn "and") funns'); - in Pretty.chunks (ps @ [Pretty.block ([p, str ";;"])]) end + (pr_funn "let rec" funn :: map (pr_funn "and") funns); + val pseudo_ps = map pr_pseudo_fun pseudo_funs; + in Pretty.chunks (ps @ Pretty.block ([p, str ";;"]) :: pseudo_ps) end | pr_stmt (MLDatas (datas as (data :: datas'))) = let fun pr_co (co, []) = @@ -569,7 +620,7 @@ ); val (ps, p) = split_last (pr_data "type" data :: map (pr_data "and") datas'); - in Pretty.chunks (ps @ [Pretty.block ([p, str ";;"])]) end + in Pretty.chunks (ps @| Pretty.block ([p, str ";;"])) end | pr_stmt (MLClass (class, (v, (superclasses, classparams)))) = let val w = "_" ^ Code_Name.first_upper v; @@ -613,7 +664,7 @@ concat [ (str o deresolve) classparam, str "=", - pr_app thm reserved_names NOBR (c_inst, []) + pr_app (K false) thm reserved_names NOBR (c_inst, []) ]; in concat ( @@ -721,15 +772,33 @@ val base' = if upper then Code_Name.first_upper base else base; val ([base''], nsp') = Name.variants [base'] nsp; in (base'', nsp') end; - fun add_funs stmts = - fold_map + fun rearrange_fun name (tysm as (vs, ty), raw_eqs) = + let + val eqs = filter (snd o snd) raw_eqs; + val (eqs', is_value) = if null (filter_out (null o snd) vs) then case eqs + of [(([], t), thm)] => if (not o null o fst o Code_Thingol.unfold_fun) ty + then ([(([IVar "x"], t `$ IVar "x"), thm)], false) + else (eqs, not (Code_Thingol.fold_constnames + (fn name' => fn b => b orelse name = name') t false)) + | _ => (eqs, false) + else (eqs, false) + in ((name, (tysm, eqs')), is_value) end; + fun check_kind [((name, (tysm, [(([], t), thm)])), true)] = MLVal (name, ((tysm, t), thm)) + | check_kind [((name, ((vs, ty), [])), _)] = + MLExc (name, (length o filter_out (null o snd)) vs + (length o fst o Code_Thingol.unfold_fun) ty) + | check_kind funns = + MLFuns (map fst funns, map_filter + (fn ((name, ((vs, _), [(([], _), _)])), _) => + if null (filter_out (null o snd) vs) then SOME name else NONE + | _ => NONE) funns); + fun add_funs stmts = fold_map (fn (name, Code_Thingol.Fun (_, stmt)) => - map_nsp_fun_yield (mk_name_stmt false name) #>> - rpair (name, stmt |> apsnd (filter (snd o snd))) + map_nsp_fun_yield (mk_name_stmt false name) + #>> rpair (rearrange_fun name stmt) | (name, _) => error ("Function block containing illegal statement: " ^ labelled_name name) ) stmts - #>> (split_list #> apsnd MLFuns); + #>> (split_list #> apsnd check_kind); fun add_datatypes stmts = fold_map (fn (name, Code_Thingol.Datatype (_, stmt)) =>