# HG changeset patch # User wenzelm # Date 1386262683 -3600 # Node ID d64a4ef26edbaeac0ea1d11aecd342382664f7c8 # Parent cfb21e03fe2a2686d74a6a442503ce0dba6f88e5# Parent 30666a281ae3b317dce4c0b70fb3772da863fedd merged, resolving obvious conflicts in NEWS and src/Pure/System/isabelle_process.ML; diff -r cfb21e03fe2a -r d64a4ef26edb .hgignore --- a/.hgignore Thu Dec 05 17:52:12 2013 +0100 +++ b/.hgignore Thu Dec 05 17:58:03 2013 +0100 @@ -5,7 +5,9 @@ *.jar *.orig *.rej +*.pyc .DS_Store +.swp syntax: regexp diff -r cfb21e03fe2a -r d64a4ef26edb Admin/MacOS/App1/README --- a/Admin/MacOS/App1/README Thu Dec 05 17:52:12 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,14 +0,0 @@ -Isabelle application bundle for MacOS -===================================== - -Requirements: - -* CocoaDialog 2.1.1 http://cocoadialog.sourceforge.net/ - -* Platypus 4.7 http://www.sveinbjorn.org/platypus - Preferences: Install command line tool - -* final packaging: - - hdiutil create -srcfolder DIR DMG - diff -r cfb21e03fe2a -r d64a4ef26edb Admin/MacOS/App1/build --- a/Admin/MacOS/App1/build Thu Dec 05 17:52:12 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,23 +0,0 @@ -#!/usr/bin/env bash -# -# Make Isabelle application bundle - -THIS="$(cd "$(dirname "$0")"; pwd)" - -COCOADIALOG_APP="/Applications/CocoaDialog.app" - -/usr/local/bin/platypus \ - -a Isabelle -u Isabelle \ - -I "de.tum.in.isabelle" \ - -i "$THIS/../isabelle.icns" \ - -D -X thy \ - -Q "$THIS/../theory.icns" \ - -p /bin/bash \ - -R \ - -o None \ - -f "$COCOADIALOG_APP" \ - "$THIS/script" \ - "$PWD/Isabelle.app" - -rm -f Contents/Resources/Isabelle -ln -s Contents/Resources/Isabelle Isabelle.app/Isabelle \ No newline at end of file diff -r cfb21e03fe2a -r d64a4ef26edb Admin/MacOS/App1/script --- a/Admin/MacOS/App1/script Thu Dec 05 17:52:12 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,82 +0,0 @@ -#!/usr/bin/env bash -# -# Author: Makarius -# -# Isabelle application wrapper - -THIS="$(cd 
"$(dirname "$0")"; pwd)" -THIS_APP="$(cd "$THIS/../.."; pwd)" -SUPER_APP="$(cd "$THIS/../../.."; pwd)" - - -# global defaults - -ISABELLE_TOOL="$THIS/Isabelle/bin/isabelle" -PROOFGENERAL_EMACS="$THIS/Aquamacs.app/Contents/MacOS/Aquamacs" - - -# environment - -cd "$HOME" -if [ -x /usr/libexec/path_helper ]; then - eval $(/usr/libexec/path_helper -s) -fi - -[ -z "$LANG" ] && export LANG=en_US.UTF-8 - - -# run interface with error feedback - -ISABELLE_INTERFACE_CHOICE="$("$ISABELLE_TOOL" getenv -b ISABELLE_INTERFACE_CHOICE)" -if [ "$ISABELLE_INTERFACE_CHOICE" != emacs -a "$ISABELLE_INTERFACE_CHOICE" != jedit ] -then - declare -a CHOICE - CHOICE=($("$THIS/CocoaDialog.app/Contents/MacOS/CocoaDialog" dropdown \ - --title Isabelle \ - --text "Which Isabelle interface?" \ - --items "Isabelle/jEdit PIDE" "Emacs / Proof General" \ - --button2 "OK, do not ask again" --button1 "OK")) - if [ "${CHOICE[1]}" = 0 ]; then - ISABELLE_INTERFACE_CHOICE=jedit - else - ISABELLE_INTERFACE_CHOICE=emacs - fi - if [ "${CHOICE[0]}" = 2 ]; then - ISABELLE_HOME_USER="$("$ISABELLE_TOOL" getenv -b ISABELLE_HOME_USER)" - mkdir -p "$ISABELLE_HOME_USER/etc" - ( echo; echo "ISABELLE_INTERFACE_CHOICE=$ISABELLE_INTERFACE_CHOICE"; ) \ - >> "$ISABELLE_HOME_USER/etc/settings" - "$THIS/CocoaDialog.app/Contents/MacOS/CocoaDialog" ok-msgbox \ - --title Isabelle \ - --text Note \ - --informative-text "ISABELLE_INTERFACE_CHOICE stored in $ISABELLE_HOME_USER/etc/settings" \ - --no-cancel - fi -fi - -OUTPUT="/tmp/isabelle$$.out" - -if [ "$ISABELLE_INTERFACE_CHOICE" = emacs ]; then - ( "$ISABELLE_TOOL" emacs -p "$PROOFGENERAL_EMACS" "$@" ) > "$OUTPUT" 2>&1 - RC=$? -else - ( "$ISABELLE_TOOL" jedit -s "$@" ) > "$OUTPUT" 2>&1 - RC=$? 
-fi - -if [ "$RC" != 0 ]; then - echo >> "$OUTPUT" - echo "Return code: $RC" >> "$OUTPUT" -fi - -if [ $(stat -f "%z" "$OUTPUT") != 0 ]; then - "$THIS/CocoaDialog.app/Contents/MacOS/CocoaDialog" textbox \ - --title "Isabelle" \ - --informative-text "Isabelle output" \ - --text-from-file "$OUTPUT" \ - --button1 "OK" -fi - -rm -f "$OUTPUT" - -exit "$RC" diff -r cfb21e03fe2a -r d64a4ef26edb Admin/MacOS/App2/Isabelle.app/Contents/Info.plist --- a/Admin/MacOS/App2/Isabelle.app/Contents/Info.plist Thu Dec 05 17:52:12 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,48 +0,0 @@ - - - - - CFBundleDevelopmentRegion - English - CFBundleExecutable - Isabelle - CFBundleGetInfoString - Isabelle - CFBundleIconFile - isabelle.icns - CFBundleIdentifier - de.tum.in.isabelle - CFBundleInfoDictionaryVersion - 6.0 - CFBundleName - Isabelle - CFBundlePackageType - APPL - CFBundleShortVersionString - ???? - CFBundleSignature - ???? - CFBundleVersion - ???? - Java - - JVMVersion - 1.6 - VMOptions - -Xms128m -Xmx512m -Xss2m - ClassPath - $JAVAROOT/isabelle-scala.jar - MainClass - isabelle.GUI_Setup - Properties - - isabelle.home - $APP_PACKAGE/Contents/Resources/Isabelle - apple.laf.useScreenMenuBar - true - com.apple.mrj.application.apple.menu.about.name - Isabelle - - - - diff -r cfb21e03fe2a -r d64a4ef26edb Admin/MacOS/App2/Isabelle.app/Contents/MacOS/Isabelle --- a/Admin/MacOS/App2/Isabelle.app/Contents/MacOS/Isabelle Thu Dec 05 17:52:12 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1 +0,0 @@ -/System/Library/Frameworks/JavaVM.framework/Resources/MacOS/JavaApplicationStub \ No newline at end of file diff -r cfb21e03fe2a -r d64a4ef26edb Admin/MacOS/App2/README --- a/Admin/MacOS/App2/README Thu Dec 05 17:52:12 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,7 +0,0 @@ -Isabelle/JVM application bundle for MacOS -========================================= - -* 
http://developer.apple.com/documentation/Java/Conceptual/Java14Development/03-JavaDeployment/JavaDeployment.html - -* http://developer.apple.com/documentation/Java/Reference/Java_InfoplistRef/Articles/JavaDictionaryInfo.plistKeys.html#//apple_ref/doc/uid/TP40001969 - diff -r cfb21e03fe2a -r d64a4ef26edb Admin/MacOS/App2/mk --- a/Admin/MacOS/App2/mk Thu Dec 05 17:52:12 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,12 +0,0 @@ -#!/usr/bin/env bash -# -# Make Isabelle/JVM application bundle - -THIS="$(cd "$(dirname "$0")"; pwd)" - -APP="$THIS/Isabelle.app" - -mkdir -p "$APP/Contents/Resources/Java" -cp "$THIS/../../../lib/classes/isabelle-scala.jar" "$APP/Contents/Resources/Java" -cp "$THIS/../isabelle.icns" "$APP/Contents/Resources" - diff -r cfb21e03fe2a -r d64a4ef26edb Admin/MacOS/App3/Info.plist-part1 --- a/Admin/MacOS/App3/Info.plist-part1 Thu Dec 05 17:52:12 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,36 +0,0 @@ - - - - -CFBundleDevelopmentRegion -English -CFBundleExecutable -JavaAppLauncher -CFBundleIconFile -isabelle.icns -CFBundleIdentifier -de.tum.in.isabelle -CFBundleDisplayName -{ISABELLE_NAME} -CFBundleInfoDictionaryVersion -6.0 -CFBundleName -{ISABELLE_NAME} -CFBundlePackageType -APPL -CFBundleShortVersionString -1.0 -CFBundleSignature -???? 
-CFBundleVersion -1 -NSHumanReadableCopyright - -LSApplicationCategoryType -public.app-category.developer-tools -JVMRuntime -jdk -JVMMainClassName -isabelle.Main -JVMOptions - diff -r cfb21e03fe2a -r d64a4ef26edb Admin/MacOS/App3/Info.plist-part2 --- a/Admin/MacOS/App3/Info.plist-part2 Thu Dec 05 17:52:12 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,7 +0,0 @@ --Disabelle.home=$APP_ROOT/Contents/Resources/{ISABELLE_NAME} - -JVMArguments - - - - diff -r cfb21e03fe2a -r d64a4ef26edb Admin/MacOS/App3/README --- a/Admin/MacOS/App3/README Thu Dec 05 17:52:12 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,8 +0,0 @@ -Isabelle/JVM application bundle for Mac OS X -============================================ - -* http://java.net/projects/appbundler - - see appbundler-1.0.jar - see com/oracle/appbundler/JavaAppLauncher - diff -r cfb21e03fe2a -r d64a4ef26edb Admin/MacOS/App3/Resources/en.lproj/Localizable.strings --- a/Admin/MacOS/App3/Resources/en.lproj/Localizable.strings Thu Dec 05 17:52:12 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,3 +0,0 @@ -"JRELoadError" = "Unable to load Java Runtime Environment."; -"MainClassNameRequired" = "Main class name is required."; -"JavaDirectoryNotFound" = "Unable to enumerate Java directory contents."; diff -r cfb21e03fe2a -r d64a4ef26edb Admin/MacOS/Info.plist-part1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Admin/MacOS/Info.plist-part1 Thu Dec 05 17:58:03 2013 +0100 @@ -0,0 +1,36 @@ + + + + +CFBundleDevelopmentRegion +English +CFBundleExecutable +JavaAppLauncher +CFBundleIconFile +isabelle.icns +CFBundleIdentifier +de.tum.in.isabelle +CFBundleDisplayName +{ISABELLE_NAME} +CFBundleInfoDictionaryVersion +6.0 +CFBundleName +{ISABELLE_NAME} +CFBundlePackageType +APPL +CFBundleShortVersionString +1.0 +CFBundleSignature +???? 
+CFBundleVersion +1 +NSHumanReadableCopyright + +LSApplicationCategoryType +public.app-category.developer-tools +JVMRuntime +jdk +JVMMainClassName +isabelle.Main +JVMOptions + diff -r cfb21e03fe2a -r d64a4ef26edb Admin/MacOS/Info.plist-part2 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Admin/MacOS/Info.plist-part2 Thu Dec 05 17:58:03 2013 +0100 @@ -0,0 +1,7 @@ +-Disabelle.home=$APP_ROOT/Contents/Resources/{ISABELLE_NAME} + +JVMArguments + + + + diff -r cfb21e03fe2a -r d64a4ef26edb Admin/MacOS/README --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Admin/MacOS/README Thu Dec 05 17:58:03 2013 +0100 @@ -0,0 +1,8 @@ +Isabelle/JVM application bundle for Mac OS X +============================================ + +* http://java.net/projects/appbundler + + see appbundler-1.0.jar + see com/oracle/appbundler/JavaAppLauncher + diff -r cfb21e03fe2a -r d64a4ef26edb Admin/MacOS/Resources/en.lproj/Localizable.strings --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Admin/MacOS/Resources/en.lproj/Localizable.strings Thu Dec 05 17:58:03 2013 +0100 @@ -0,0 +1,3 @@ +"JRELoadError" = "Unable to load Java Runtime Environment."; +"MainClassNameRequired" = "Main class name is required."; +"JavaDirectoryNotFound" = "Unable to enumerate Java directory contents."; diff -r cfb21e03fe2a -r d64a4ef26edb Admin/MacOS/Resources/isabelle.icns Binary file Admin/MacOS/Resources/isabelle.icns has changed diff -r cfb21e03fe2a -r d64a4ef26edb Admin/MacOS/Resources/theory.icns Binary file Admin/MacOS/Resources/theory.icns has changed diff -r cfb21e03fe2a -r d64a4ef26edb Admin/MacOS/isabelle.icns Binary file Admin/MacOS/isabelle.icns has changed diff -r cfb21e03fe2a -r d64a4ef26edb Admin/MacOS/theory.icns Binary file Admin/MacOS/theory.icns has changed diff -r cfb21e03fe2a -r d64a4ef26edb Admin/Windows/launch4j/README --- a/Admin/Windows/launch4j/README Thu Dec 05 17:52:12 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,4 +0,0 @@ -Java application wrapper for Windows 
-==================================== - -* http://launch4j.sourceforge.net diff -r cfb21e03fe2a -r d64a4ef26edb Admin/Windows/launch4j/isabelle.ico Binary file Admin/Windows/launch4j/isabelle.ico has changed diff -r cfb21e03fe2a -r d64a4ef26edb Admin/Windows/launch4j/isabelle.xml --- a/Admin/Windows/launch4j/isabelle.xml Thu Dec 05 17:52:12 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,39 +0,0 @@ - - true - gui - - Isabelle.exe - - - - normal - - - false - true - - isabelle.ico - - isabelle.Main - %EXEDIR%\lib\classes\ext\Pure.jar - %EXEDIR%\lib\classes\ext\scala-compiler.jar - %EXEDIR%\lib\classes\ext\scala-library.jar - %EXEDIR%\lib\classes\ext\scala-swing.jar - %EXEDIR%\lib\classes\ext\scala-actors.jar - %EXEDIR%\lib\classes\ext\scala-reflect.jar - %EXEDIR%\src\Tools\jEdit\dist\jedit.jar - - - %EXEDIR%\contrib\jdk\x86-cygwin - - - jdkOnly - -Dfile.encoding=UTF-8 -server -Xms128m -Xmx1024m -Xss2m -Dactors.corePoolSize=4 -Dactors.enableForkJoin=false -Disabelle.home="%EXEDIR%" - - - isabelle.bmp - false - 10 - false - - \ No newline at end of file diff -r cfb21e03fe2a -r d64a4ef26edb Admin/isatest/isatest-stats --- a/Admin/isatest/isatest-stats Thu Dec 05 17:52:12 2013 +0100 +++ b/Admin/isatest/isatest-stats Thu Dec 05 17:58:03 2013 +0100 @@ -14,11 +14,9 @@ HOL-Auth HOL-BNF HOL-BNF-Examples + HOL-BNF-LFP HOL-BNF-Nitpick_Examples - HOL-BNF-LFP HOL-Bali - HOL-Boogie - HOL-Boogie-Examples HOL-Cardinals HOL-Cardinals-Base HOL-Codegenerator_Test diff -r cfb21e03fe2a -r d64a4ef26edb Admin/lib/Tools/makedist_bundle --- a/Admin/lib/Tools/makedist_bundle Thu Dec 05 17:52:12 2013 +0100 +++ b/Admin/lib/Tools/makedist_bundle Thu Dec 05 17:58:03 2013 +0100 @@ -261,7 +261,7 @@ ( cd "$TMP" - APP_TEMPLATE="$ISABELLE_HOME/Admin/MacOS/App3" + APP_TEMPLATE="$ISABELLE_HOME/Admin/MacOS" APP="${ISABELLE_NAME}.app" for NAME in Java MacOS PlugIns Resources @@ -289,7 +289,6 @@ done cp -R "$APP_TEMPLATE/Resources/." "$APP/Contents/Resources/." 
- cp "$APP_TEMPLATE/../isabelle.icns" "$APP/Contents/Resources/." ln -sf "../Resources/${ISABELLE_NAME}/contrib/jdk/x86_64-darwin" \ "$APP/Contents/PlugIns/jdk" diff -r cfb21e03fe2a -r d64a4ef26edb CONTRIBUTORS --- a/CONTRIBUTORS Thu Dec 05 17:52:12 2013 +0100 +++ b/CONTRIBUTORS Thu Dec 05 17:58:03 2013 +0100 @@ -3,6 +3,10 @@ who is listed as an author in one of the source files of this Isabelle distribution. +Contributions to this Isabelle version +-------------------------------------- + + Contributions to Isabelle2013-1 ------------------------------- diff -r cfb21e03fe2a -r d64a4ef26edb NEWS --- a/NEWS Thu Dec 05 17:52:12 2013 +0100 +++ b/NEWS Thu Dec 05 17:58:03 2013 +0100 @@ -1,6 +1,98 @@ Isabelle NEWS -- history user-relevant changes ============================================== +New in this Isabelle version +---------------------------- + +*** Prover IDE -- Isabelle/Scala/jEdit *** + +* Auxiliary files ('ML_file' etc.) are managed by the Prover IDE. +Open text buffers take precedence over copies within the file-system. + + +*** HOL *** + +* Qualified constant names Wellfounded.acc, Wellfounded.accp. +INCOMPATIBILITY. + +* Fact generalization and consolidation: + neq_one_mod_two, mod_2_not_eq_zero_eq_one_int ~> not_mod_2_eq_0_eq_1 +INCOMPATIBILITY. + +* Purely algebraic definition of even. Fact generalization and consolidation: + nat_even_iff_2_dvd, int_even_iff_2_dvd ~> even_iff_2_dvd + even_zero_(nat|int) ~> even_zero +INCOMPATIBILITY. + +* Abolished neg_numeral. + * Canonical representation for minus one is "- 1". + * Canonical representation for other negative numbers is "- (numeral _)". + * When devising rule sets for number calculation, consider the + following canonical cases: 0, 1, numeral _, - 1, - numeral _. + * HOLogic.dest_number also recognizes numerals in non-canonical forms + like "numeral One", "- numeral One", "- 0" and even "- … - _". + * Syntax for negative numerals is mere input syntax. +INCOMPATBILITY. 
+ +* Elimination of fact duplicates: + equals_zero_I ~> minus_unique + diff_eq_0_iff_eq ~> right_minus_eq + nat_infinite ~> infinite_UNIV_nat + int_infinite ~> infinite_UNIV_int +INCOMPATIBILITY. + +* Fact name consolidation: + diff_def, diff_minus, ab_diff_minus ~> diff_conv_add_uminus + minus_le_self_iff ~> neg_less_eq_nonneg + le_minus_self_iff ~> less_eq_neg_nonpos + neg_less_nonneg ~> neg_less_pos + less_minus_self_iff ~> less_neg_neg [simp] +INCOMPATIBILITY. + +* More simplification rules on unary and binary minus: +add_diff_cancel, add_diff_cancel_left, add_le_same_cancel1, +add_le_same_cancel2, add_less_same_cancel1, add_less_same_cancel2, +add_minus_cancel, diff_add_cancel, le_add_same_cancel1, +le_add_same_cancel2, less_add_same_cancel1, less_add_same_cancel2, +minus_add_cancel, uminus_add_conv_diff. These correspondingly +have been taken away from fact collections algebra_simps and +field_simps. INCOMPATIBILITY. + +To restore proofs, the following patterns are helpful: + +a) Arbitrary failing proof not involving "diff_def": +Consider simplification with algebra_simps or field_simps. + +b) Lifting rules from addition to subtraction: +Try with "using of [… "- _" …]" by simp". + +c) Simplification with "diff_def": just drop "diff_def". +Consider simplification with algebra_simps or field_simps; +or the brute way with +"simp add: diff_conv_add_uminus del: add_uminus_conv_diff". + +* SUP and INF generalized to conditionally_complete_lattice + +* Theory Lubs moved HOL image to HOL-Library. It is replaced by +Conditionally_Complete_Lattices. INCOMPATIBILITY. + +* Introduce bdd_above and bdd_below in Conditionally_Complete_Lattices, use them +instead of explicitly stating boundedness of sets. + +* ccpo.admissible quantifies only over non-empty chains to allow +more syntax-directed proof rules; the case of the empty chain +shows up as additional case in fixpoint induction proofs. 
+INCOMPATIBILITY + +*** ML *** + +* Toplevel function "use" refers to raw ML bootstrap environment, +without Isar context nor antiquotations. Potential INCOMPATIBILITY. +Note that 'ML_file' is the canonical command to load ML files into the +formal context. + + + New in Isabelle2013-2 (December 2013) ------------------------------------- @@ -457,6 +549,10 @@ sets ~> set IMCOMPATIBILITY. +* Nitpick: + - Fixed soundness bug whereby mutually recursive datatypes could take + infinite values. + *** ML *** diff -r cfb21e03fe2a -r d64a4ef26edb etc/isar-keywords.el --- a/etc/isar-keywords.el Thu Dec 05 17:52:12 2013 +0100 +++ b/etc/isar-keywords.el Thu Dec 05 17:58:03 2013 +0100 @@ -1,6 +1,6 @@ ;; ;; Keyword classification tables for Isabelle/Isar. -;; Generated from HOL + HOL-Auth + HOL-BNF + HOL-BNF-LFP + HOL-Bali + HOL-Decision_Procs + HOL-IMP + HOL-Imperative_HOL + HOL-Import + HOL-Library + HOL-Mutabelle + HOL-Nominal + HOL-Proofs + HOL-Proofs-Extraction + HOL-SPARK + HOL-Statespace + HOL-TPTP + HOL-ex + HOLCF + Pure. +;; Generated from HOL + HOL-Auth + HOL-BNF + HOL-BNF-LFP + HOL-Bali + HOL-Decision_Procs + HOL-IMP + HOL-Imperative_HOL + HOL-Import + HOL-Library + HOL-Mutabelle + HOL-Nominal + HOL-Proofs + HOL-Proofs-Extraction + HOL-SPARK + HOL-Statespace + HOL-TPTP + HOL-Word-SMT_Examples + HOL-ex + HOLCF + Pure. 
;; *** DO NOT EDIT *** DO NOT EDIT *** DO NOT EDIT *** ;; @@ -33,6 +33,7 @@ "axiomatization" "back" "bnf" + "boogie_file" "bundle" "by" "cannot_undo" @@ -343,7 +344,6 @@ "module_name" "monos" "morphisms" - "no_discs_sels" "notes" "obtains" "open" @@ -352,7 +352,6 @@ "parametric" "permissive" "pervasive" - "rep_compat" "shows" "structure" "type_class" @@ -487,6 +486,7 @@ "atom_decl" "attribute_setup" "axiomatization" + "boogie_file" "bundle" "case_of_simps" "class" diff -r cfb21e03fe2a -r d64a4ef26edb src/Doc/Datatypes/Datatypes.thy --- a/src/Doc/Datatypes/Datatypes.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/Doc/Datatypes/Datatypes.thy Thu Dec 05 17:58:03 2013 +0100 @@ -8,22 +8,9 @@ *) theory Datatypes -imports Setup -keywords - "primcorec_notyet" :: thy_decl +imports Setup "~~/src/HOL/Library/Simps_Case_Conv" begin -(*<*) -(* FIXME: Temporary setup until "primcorec" and "primcorecursive" are fully implemented. *) -ML_command {* -fun add_dummy_cmd _ _ lthy = lthy; - -val _ = Outer_Syntax.local_theory @{command_spec "primcorec_notyet"} "" - (Parse.fixes -- Parse_Spec.where_alt_specs >> uncurry add_dummy_cmd); -*} -(*>*) - - section {* Introduction \label{sec:introduction} *} @@ -54,17 +41,19 @@ text {* \noindent -The package also provides some convenience, notably automatically generated -discriminators and selectors. - -In addition to plain inductive datatypes, the new package supports coinductive -datatypes, or \emph{codatatypes}, which may have infinite values. For example, -the following command introduces the type of lazy lists, which comprises both -finite and infinite values: +Furthermore, the package provides a lot of convenience, including automatically +generated discriminators, selectors, and relators as well as a wealth of +properties about them. + +In addition to inductive datatypes, the new package supports coinductive +datatypes, or \emph{codatatypes}, which allow infinite values. 
For example, the +following command introduces the type of lazy lists, which comprises both finite +and infinite values: *} (*<*) locale early + locale late (*>*) codatatype (*<*)(in early) (*>*)'a llist = LNil | LCons 'a "'a llist" @@ -80,10 +69,10 @@ codatatype (*<*)(in early) (*>*)'a tree\<^sub>i\<^sub>i = Node\<^sub>i\<^sub>i 'a "'a tree\<^sub>i\<^sub>i llist" text {* -The first two tree types allow only finite branches, whereas the last two allow -branches of infinite length. Orthogonally, the nodes in the first and third -types have finite branching, whereas those of the second and fourth may have -infinitely many direct subtrees. +The first two tree types allow only paths of finite length, whereas the last two +allow infinite paths. Orthogonally, the nodes in the first and third types have +finitely many direct subtrees, whereas those of the second and fourth may have +infinite branching. To use the package, it is necessary to import the @{theory BNF} theory, which can be precompiled into the \texttt{HOL-BNF} image. 
The following commands show @@ -152,15 +141,15 @@ \newbox\boxA -\setbox\boxA=\hbox{\texttt{nospam}} - -\newcommand\authoremaili{\texttt{blan{\color{white}nospam}\kern-\wd\boxA{}chette@\allowbreak +\setbox\boxA=\hbox{\texttt{NOSPAM}} + +\newcommand\authoremaili{\texttt{blan{\color{white}NOSPAM}\kern-\wd\boxA{}chette@\allowbreak in.\allowbreak tum.\allowbreak de}} -\newcommand\authoremailii{\texttt{lore{\color{white}nospam}\kern-\wd\boxA{}nz.panny@\allowbreak +\newcommand\authoremailii{\texttt{lore{\color{white}NOSPAM}\kern-\wd\boxA{}nz.panny@\allowbreak \allowbreak tum.\allowbreak de}} -\newcommand\authoremailiii{\texttt{pope{\color{white}nospam}\kern-\wd\boxA{}scua@\allowbreak +\newcommand\authoremailiii{\texttt{pope{\color{white}NOSPAM}\kern-\wd\boxA{}scua@\allowbreak in.\allowbreak tum.\allowbreak de}} -\newcommand\authoremailiv{\texttt{tray{\color{white}nospam}\kern-\wd\boxA{}tel@\allowbreak +\newcommand\authoremailiv{\texttt{tray{\color{white}NOSPAM}\kern-\wd\boxA{}tel@\allowbreak in.\allowbreak tum.\allowbreak de}} The commands @{command datatype_new} and @{command primrec_new} are expected to @@ -171,13 +160,6 @@ Comments and bug reports concerning either the tool or this tutorial should be directed to the authors at \authoremaili, \authoremailii, \authoremailiii, and \authoremailiv. - -\begin{framed} -\noindent -\textbf{Warning:}\enskip This tutorial and the package it describes are under -construction. Please forgive their appearance. Should you have suggestions -or comments regarding either, please let the authors know. -\end{framed} *} @@ -195,7 +177,7 @@ text {* Datatypes are illustrated through concrete examples featuring different flavors of recursion. More examples can be found in the directory -\verb|~~/src/HOL/BNF/Examples|. +\verb|~~/src/HOL/|\allowbreak\verb|BNF/Examples|. *} subsubsection {* Nonrecursive Types @@ -260,7 +242,8 @@ text {* \noindent -Lists were shown in the introduction. 
Terminated lists are a variant: +Lists were shown in the introduction. Terminated lists are a variant that +stores a value of type @{typ 'b} at the very end: *} datatype_new (*<*)(in early) (*>*)('a, 'b) tlist = TNil 'b | TCons 'a "('a, 'b) tlist" @@ -310,7 +293,7 @@ Not all nestings are admissible. For example, this command will fail: *} - datatype_new 'a wrong = Wrong (*<*)'a + datatype_new 'a wrong = W1 | W2 (*<*)'a typ (*>*)"'a wrong \ 'a" text {* @@ -321,7 +304,7 @@ *} datatype_new ('a, 'b) fn = Fn "'a \ 'b" - datatype_new 'a also_wrong = Also_Wrong (*<*)'a + datatype_new 'a also_wrong = W1 | W2 (*<*)'a typ (*>*)"('a also_wrong, 'a) fn" text {* @@ -344,20 +327,30 @@ datatype_new} and @{command codatatype} commands. Section~\ref{sec:registering-bounded-natural-functors} explains how to register arbitrary type constructors as BNFs. + +Here is another example that fails: *} - -subsubsection {* Custom Names and Syntaxes - \label{sssec:datatype-custom-names-and-syntaxes} *} + datatype_new 'a pow_list = PNil 'a (*<*)'a + datatype_new 'a pow_list' = PNil' 'a (*>*)| PCons "('a * 'a) pow_list" + +text {* +\noindent +This one features a different flavor of nesting, where the recursive call in the +type specification occurs around (rather than inside) another type constructor. +*} + +subsubsection {* Auxiliary Constants and Properties + \label{sssec:datatype-auxiliary-constants-and-properties} *} text {* The @{command datatype_new} command introduces various constants in addition to the constructors. With each datatype are associated set functions, a map function, a relator, discriminators, and selectors, all of which can be given -custom names. In the example below, the traditional names -@{text set}, @{text map}, @{text list_all2}, @{text null}, @{text hd}, and -@{text tl} override the default names @{text list_set}, @{text list_map}, @{text -list_rel}, @{text is_Nil}, @{text un_Cons1}, and @{text un_Cons2}: +custom names. 
In the example below, the familiar names @{text null}, @{text hd}, +@{text tl}, @{text set}, @{text map}, and @{text list_all2}, override the +default names @{text is_Nil}, @{text un_Cons1}, @{text un_Cons2}, +@{text set_list}, @{text map_list}, and @{text rel_list}: *} (*<*) @@ -370,7 +363,7 @@ Cons (infixr "#" 65) hide_type list - hide_const Nil Cons hd tl set map list_all2 list_case list_rec + hide_const Nil Cons hd tl set map list_all2 context early begin (*>*) @@ -380,14 +373,34 @@ text {* \noindent -The command introduces a discriminator @{const null} and a pair of selectors -@{const hd} and @{const tl} characterized as follows: + +\begin{tabular}{@ {}ll@ {}} +Constructors: & + @{text "Nil \ 'a list"} \\ +& + @{text "Cons \ 'a \ 'a list \ 'a list"} \\ +Discriminator: & + @{text "null \ 'a list \ bool"} \\ +Selectors: & + @{text "hd \ 'a list \ 'a"} \\ +& + @{text "tl \ 'a list \ 'a list"} \\ +Set function: & + @{text "set \ 'a list \ 'a set"} \\ +Map function: & + @{text "map \ ('a \ 'b) \ 'a list \ 'b list"} \\ +Relator: & + @{text "list_all2 \ ('a \ 'b \ bool) \ 'a list \ 'b list \ bool"} +\end{tabular} + +The discriminator @{const null} and the selectors @{const hd} and @{const tl} +are characterized as follows: % \[@{thm list.collapse(1)[of xs, no_vars]} \qquad @{thm list.collapse(2)[of xs, no_vars]}\] % -For two-constructor datatypes, a single discriminator constant suffices. The -discriminator associated with @{const Cons} is simply +For two-constructor datatypes, a single discriminator constant is sufficient. +The discriminator associated with @{const Cons} is simply @{term "\xs. \ null xs"}. The @{text defaults} clause following the @{const Nil} constructor specifies a @@ -447,7 +460,7 @@ @@{command datatype_new} target? @{syntax dt_options}? 
\\ (@{syntax dt_name} '=' (@{syntax ctor} + '|') + @'and') ; - @{syntax_def dt_options}: '(' (('no_discs_sels' | 'rep_compat') + ',') ')' + @{syntax_def dt_options}: '(' (('no_discs_sels' | 'no_code' | 'rep_compat') + ',') ')' "} The syntactic entity \synt{target} can be used to specify a local @@ -464,6 +477,10 @@ should be generated. \item +The @{text "no_code"} option indicates that the datatype should not be +registered for code generation. + +\item The @{text "rep_compat"} option indicates that the generated names should contain optional (and normally not displayed) ``@{text "new."}'' components to prevent clashes with a later call to \keyw{rep\_datatype}. See @@ -488,7 +505,7 @@ reference manual \cite{isabelle-isar-ref}. The optional names preceding the type variables allow to override the default -names of the set functions (@{text t_set1}, \ldots, @{text t_setM}). +names of the set functions (@{text set1_t}, \ldots, @{text setM_t}). Inside a mutually recursive specification, all defined datatypes must mention exactly the same type variables in the same order. @@ -589,6 +606,10 @@ or the function type. In principle, it should be possible to support old-style datatypes as well, but the command does not support this yet (and there is currently no way to register old-style datatypes as new-style datatypes). + +\item The recursor produced for types that recurse through functions has a +different signature than with the old package. This makes it impossible to use +the old \keyw{primrec} command. 
\end{itemize} An alternative to @{command datatype_new_compat} is to use the old package's @@ -609,7 +630,7 @@ \begin{itemize} \setlength{\itemsep}{0pt} -\item \relax{Case combinator}: @{text t_case} (rendered using the familiar +\item \relax{Case combinator}: @{text t.case_t} (rendered using the familiar @{text case}--@{text of} syntax) \item \relax{Discriminators}: @{text "t.is_C\<^sub>1"}, \ldots, @@ -621,22 +642,22 @@ \phantom{\relax{Selectors:}} @{text t.un_C\<^sub>n1}$, \ldots, @{text t.un_C\<^sub>nk\<^sub>n}. \item \relax{Set functions} (or \relax{natural transformations}): -@{text t_set1}, \ldots, @{text t_setm} - -\item \relax{Map function} (or \relax{functorial action}): @{text t_map} - -\item \relax{Relator}: @{text t_rel} - -\item \relax{Iterator}: @{text t_fold} - -\item \relax{Recursor}: @{text t_rec} +@{text set1_t}, \ldots, @{text t.setm_t} + +\item \relax{Map function} (or \relax{functorial action}): @{text t.map_t} + +\item \relax{Relator}: @{text t.rel_t} + +\item \relax{Iterator}: @{text t.fold_t} + +\item \relax{Recursor}: @{text t.rec_t} \end{itemize} \noindent The case combinator, discriminators, and selectors are collectively called \emph{destructors}. The prefix ``@{text "t."}'' is an optional component of the -name and is normally hidden. +names and is normally hidden. *} @@ -687,8 +708,9 @@ (*>*) text {* -The first subgroup of properties is concerned with the constructors. -They are listed below for @{typ "'a list"}: +The free constructor theorems are partitioned in three subgroups. The first +subgroup of properties is concerned with the constructors. 
They are listed below +for @{typ "'a list"}: \begin{indentblock} \begin{description} @@ -715,7 +737,7 @@ \begin{indentblock} \begin{description} -\item[@{text "t."}\hthm{list.distinct {\upshape[}THEN notE}@{text ", elim!"}\hthm{\upshape]}\rm:] ~ \\ +\item[@{text "t."}\hthm{distinct {\upshape[}THEN notE}@{text ", elim!"}\hthm{\upshape]}\rm:] ~ \\ @{thm list.distinct(1)[THEN notE, elim!, no_vars]} \\ @{thm list.distinct(2)[THEN notE, elim!, no_vars]} @@ -750,7 +772,7 @@ \end{indentblock} \noindent -The third and last subgroup revolves around discriminators and selectors: +The third subgroup revolves around discriminators and selectors: \begin{indentblock} \begin{description} @@ -793,11 +815,15 @@ \item[@{text "t."}\hthm{sel\_split\_asm}\rm:] ~ \\ @{thm list.sel_split_asm[no_vars]} -\item[@{text "t."}\hthm{case\_conv\_if}\rm:] ~ \\ -@{thm list.case_conv_if[no_vars]} +\item[@{text "t."}\hthm{case\_eq\_if}\rm:] ~ \\ +@{thm list.case_eq_if[no_vars]} \end{description} \end{indentblock} + +\noindent +In addition, equational versions of @{text t.disc} are registered with the @{text "[code]"} +attribute. *} @@ -805,7 +831,9 @@ \label{sssec:functorial-theorems} *} text {* -The BNF-related theorem are as follows: +The functorial theorems are partitioned in two subgroups. 
The first subgroup +consists of properties involving the constructors and either a set function, the +map function, or the relator: \begin{indentblock} \begin{description} @@ -818,16 +846,56 @@ @{thm list.map(1)[no_vars]} \\ @{thm list.map(2)[no_vars]} -\item[@{text "t."}\hthm{rel\_inject} @{text "[simp, code]"}\rm:] ~ \\ +\item[@{text "t."}\hthm{rel\_inject} @{text "[simp]"}\rm:] ~ \\ @{thm list.rel_inject(1)[no_vars]} \\ @{thm list.rel_inject(2)[no_vars]} -\item[@{text "t."}\hthm{rel\_distinct} @{text "[simp, code]"}\rm:] ~ \\ +\item[@{text "t."}\hthm{rel\_distinct} @{text "[simp]"}\rm:] ~ \\ @{thm list.rel_distinct(1)[no_vars]} \\ @{thm list.rel_distinct(2)[no_vars]} \end{description} \end{indentblock} + +\noindent +In addition, equational versions of @{text t.rel_inject} and @{text +rel_distinct} are registered with the @{text "[code]"} attribute. + +The second subgroup consists of more abstract properties of the set functions, +the map function, and the relator: + +\begin{indentblock} +\begin{description} + +\item[@{text "t."}\hthm{map\_comp}\rm:] ~ \\ +@{thm list.map_cong0[no_vars]} + +\item[@{text "t."}\hthm{map\_cong} @{text "[fundef_cong]"}\rm:] ~ \\ +@{thm list.map_cong[no_vars]} + +\item[@{text "t."}\hthm{map\_id}\rm:] ~ \\ +@{thm list.map_id[no_vars]} + +\item[@{text "t."}\hthm{rel\_compp}\rm:] ~ \\ +@{thm list.rel_compp[no_vars]} + +\item[@{text "t."}\hthm{rel\_conversep}\rm:] ~ \\ +@{thm list.rel_conversep[no_vars]} + +\item[@{text "t."}\hthm{rel\_eq}\rm:] ~ \\ +@{thm list.rel_eq[no_vars]} + +\item[@{text "t."}\hthm{rel\_flip}\rm:] ~ \\ +@{thm list.rel_flip[no_vars]} + +\item[@{text "t."}\hthm{rel\_mono}\rm:] ~ \\ +@{thm list.rel_mono[no_vars]} + +\item[@{text "t."}\hthm{set\_map}\rm:] ~ \\ +@{thm list.set_map[no_vars]} + +\end{description} +\end{indentblock} *} @@ -889,18 +957,22 @@ is recommended to use @{command datatype_new_compat} or \keyw{rep\_datatype} to register new-style datatypes as old-style datatypes. 
-\item \emph{The recursor @{text "t_rec"} has a different signature for nested -recursive datatypes.} In the old package, nested recursion was internally -reduced to mutual recursion. This reduction was visible in the type of the -recursor, used by \keyw{primrec}. In the new package, nested recursion is -handled in a more modular fashion. The old-style recursor can be generated on -demand using @{command primrec_new}, as explained in +\item \emph{The constants @{text t_case} and @{text t_rec} are now called +@{text case_t} and @{text rec_t}.} + +\item \emph{The recursor @{text rec_t} has a different signature for nested +recursive datatypes.} In the old package, nested recursion through non-functions +was internally reduced to mutual recursion. This reduction was visible in the +type of the recursor, used by \keyw{primrec}. Recursion through functions was +handled specially. In the new package, nested recursion (for functions and +non-functions) is handled in a more modular fashion. The old-style recursor can +be generated on demand using @{command primrec_new}, as explained in Section~\ref{sssec:primrec-nested-as-mutual-recursion}, if the recursion is via new-style datatypes. -\item \emph{Accordingly, the induction principle is different for nested -recursive datatypes.} Again, the old-style induction principle can be generated -on demand using @{command primrec_new}, as explained in +\item \emph{Accordingly, the induction rule is different for nested recursive +datatypes.} Again, the old-style induction rule can be generated on demand using +@{command primrec_new}, as explained in Section~\ref{sssec:primrec-nested-as-mutual-recursion}, if the recursion is via new-style datatypes. @@ -940,9 +1012,9 @@ \label{sec:defining-recursive-functions} *} text {* -Recursive functions over datatypes can be specified using @{command -primrec_new}, which supports primitive recursion, or using the more general -\keyw{fun} and \keyw{function} commands. 
Here, the focus is on @{command +Recursive functions over datatypes can be specified using the @{command +primrec_new} command, which supports primitive recursion, or using the more +general \keyw{fun} and \keyw{function} commands. Here, the focus is on @{command primrec_new}; the other two commands are described in a separate tutorial \cite{isabelle-function}. @@ -1026,9 +1098,24 @@ text {* \noindent -The next example is not primitive recursive, but it can be defined easily using -\keyw{fun}. The @{command datatype_new_compat} command is needed to register -new-style datatypes for use with \keyw{fun} and \keyw{function} +Pattern matching is only available for the argument on which the recursion takes +place. Fortunately, it is easy to generate pattern-maching equations using the +\keyw{simps\_of\_case} command provided by the theory +\verb|~~/src/HOL/Library/Simps_Case_Conv|. +*} + + simps_of_case at_simps: at.simps + +text {* +This generates the lemma collection @{thm [source] at_simps}: +% +\[@{thm at_simps(1)[no_vars]} + \qquad @{thm at_simps(2)[no_vars]}\] +% +The next example is defined using \keyw{fun} to escape the syntactic +restrictions imposed on primitive recursive functions. The +@{command datatype_new_compat} command is needed to register new-style datatypes +for use with \keyw{fun} and \keyw{function} (Section~\ref{sssec:datatype-new-compat}): *} @@ -1109,13 +1196,13 @@ \noindent The next example features recursion through the @{text option} type. 
Although @{text option} is not a new-style datatype, it is registered as a BNF with the -map function @{const option_map}: +map function @{const map_option}: *} primrec_new (*<*)(in early) (*>*)sum_btree :: "('a\{zero,plus}) btree \ 'a" where "sum_btree (BNode a lt rt) = - a + the_default 0 (option_map sum_btree lt) + - the_default 0 (option_map sum_btree rt)" + a + the_default 0 (map_option sum_btree lt) + + the_default 0 (map_option sum_btree rt)" text {* \noindent @@ -1124,28 +1211,51 @@ (@{text \}) is simply composition (@{text "op \"}): *} - primrec_new (*<*)(in early) (*>*)ftree_map :: "('a \ 'a) \ 'a ftree \ 'a ftree" where - "ftree_map f (FTLeaf x) = FTLeaf (f x)" | - "ftree_map f (FTNode g) = FTNode (ftree_map f \ g)" + primrec_new (*<*)(in early) (*>*)relabel_ft :: "('a \ 'a) \ 'a ftree \ 'a ftree" where + "relabel_ft f (FTLeaf x) = FTLeaf (f x)" | + "relabel_ft f (FTNode g) = FTNode (relabel_ft f \ g)" text {* \noindent -(No such map function is defined by the package because the type -variable @{typ 'a} is dead in @{typ "'a ftree"}.) - -Using \keyw{fun} or \keyw{function}, recursion through functions can be -expressed using $\lambda$-expressions and function application rather -than through composition. For example: +For convenience, recursion through functions can also be expressed using +$\lambda$-abstractions and function application rather than through composition. +For example: *} - datatype_new_compat ftree + primrec_new relabel_ft :: "('a \ 'a) \ 'a ftree \ 'a ftree" where + "relabel_ft f (FTLeaf x) = FTLeaf (f x)" | + "relabel_ft f (FTNode g) = FTNode (\x. relabel_ft f (g x))" text {* \blankline *} - function ftree_map :: "('a \ 'a) \ 'a ftree \ 'a ftree" where - "ftree_map f (FTLeaf x) = FTLeaf (f x)" | - "ftree_map f (FTNode g) = FTNode (\x. 
ftree_map f (g x))" - by auto (metis ftree.exhaust) + primrec_new subtree_ft :: "'a \ 'a ftree \ 'a ftree" where + "subtree_ft x (FTNode g) = g x" + +text {* +\noindent +For recursion through curried $n$-ary functions, $n$ applications of +@{term "op \"} are necessary. The examples below illustrate the case where +$n = 2$: +*} + + datatype_new 'a ftree2 = FTLeaf2 'a | FTNode2 "'a \ 'a \ 'a ftree2" + +text {* \blankline *} + + primrec_new (*<*)(in early) (*>*)relabel_ft2 :: "('a \ 'a) \ 'a ftree2 \ 'a ftree2" where + "relabel_ft2 f (FTLeaf2 x) = FTLeaf2 (f x)" | + "relabel_ft2 f (FTNode2 g) = FTNode2 (op \ (op \ (relabel_ft2 f)) g)" + +text {* \blankline *} + + primrec_new relabel_ft2 :: "('a \ 'a) \ 'a ftree2 \ 'a ftree2" where + "relabel_ft2 f (FTLeaf2 x) = FTLeaf2 (f x)" | + "relabel_ft2 f (FTNode2 g) = FTNode2 (\x y. relabel_ft2 f (g x y))" + +text {* \blankline *} + + primrec_new subtree_ft2 :: "'a \ 'a \ 'a ftree2 \ 'a ftree2" where + "subtree_ft2 x y (FTNode2 g) = g x y" subsubsection {* Nested-as-Mutual Recursion @@ -1177,12 +1287,12 @@ text {* \noindent -Appropriate induction principles are generated under the names +Appropriate induction rules are generated as @{thm [source] at\<^sub>f\<^sub>f.induct}, @{thm [source] ats\<^sub>f\<^sub>f.induct}, and -@{thm [source] at\<^sub>f\<^sub>f_ats\<^sub>f\<^sub>f.induct}. - -%%% TODO: Add recursors. +@{thm [source] at\<^sub>f\<^sub>f_ats\<^sub>f\<^sub>f.induct}. The +induction rules and the underlying recursors are generated on a per-need basis +and are kept in a cache to speed up subsequent definitions. Here is a second example: *} @@ -1340,7 +1450,7 @@ \begin{itemize} \setlength{\itemsep}{0pt} -\item \emph{Theorems sometimes have different names.} +\item \emph{Some theorems have different names.} For $m > 1$ mutually recursive functions, @{text "f\<^sub>1_\_f\<^sub>m.simps"} has been broken down into separate subcollections @{text "f\<^sub>i.simps"}. 
@@ -1415,7 +1525,7 @@ text {* \noindent Notice that the @{const cont} selector is associated with both @{const Skip} -and @{const Choice}. +and @{const Action}. *} @@ -1488,9 +1598,9 @@ \begin{itemize} \setlength{\itemsep}{0pt} -\item \relax{Coiterator}: @{text t_unfold} - -\item \relax{Corecursor}: @{text t_corec} +\item \relax{Coiterator}: @{text unfold_t} + +\item \relax{Corecursor}: @{text corec_t} \end{itemize} *} @@ -1606,10 +1716,10 @@ \label{sec:defining-corecursive-functions} *} text {* -Corecursive functions can be specified using @{command primcorec} and -@{command primcorecursive}, which support primitive corecursion, or using the -more general \keyw{partial\_function} command. Here, the focus is on -the former two. More examples can be found in the directory +Corecursive functions can be specified using the @{command primcorec} and +\keyw{prim\-corec\-ursive} commands, which support primitive corecursion, or +using the more general \keyw{partial\_function} command. Here, the focus is on +the first two. More examples can be found in the directory \verb|~~/src/HOL/BNF/Examples|. Whereas recursive functions consume datatypes one constructor at a time, @@ -1630,7 +1740,7 @@ This style is popular in the coalgebraic literature. \item The \emph{constructor view} specifies $f$ by equations of the form -\[@{text "\ \ f x\<^sub>1 \ x\<^sub>n = C \"}\] +\[@{text "\ \ f x\<^sub>1 \ x\<^sub>n = C\<^sub>j \"}\] This style is often more concise than the previous one. \item The \emph{code view} specifies $f$ by a single equation of the form @@ -1643,14 +1753,6 @@ All three styles are available as input syntax. Whichever syntax is chosen, characteristic theorems for all three styles are generated. -\begin{framed} -\noindent -\textbf{Warning:}\enskip The @{command primcorec} and @{command primcorecursive} -commands are under development. Some of the functionality described here is -vaporware. 
An alternative is to define corecursive functions directly using the -generated @{text t_unfold} or @{text t_corec} combinators. -\end{framed} - %%% TODO: partial_function? E.g. for defining tail recursive function on lazy %%% lists (cf. terminal0 in TLList.thy) *} @@ -1668,11 +1770,6 @@ present the same examples expressed using the constructor and destructor views. *} -(*<*) - locale code_view - begin -(*>*) - subsubsection {* Simple Corecursion \label{sssec:primcorec-simple-corecursion} *} @@ -1683,19 +1780,19 @@ *} primcorec literate :: "('a \ 'a) \ 'a \ 'a llist" where - "literate f x = LCons x (literate f (f x))" + "literate g x = LCons x (literate g (g x))" text {* \blankline *} primcorec siterate :: "('a \ 'a) \ 'a \ 'a stream" where - "siterate f x = SCons x (siterate f (f x))" + "siterate g x = SCons x (siterate g (g x))" text {* \noindent The constructor ensures that progress is made---i.e., the function is \emph{productive}. The above functions compute the infinite lazy list or stream -@{text "[x, f x, f (f x), \]"}. Productivity guarantees that prefixes -@{text "[x, f x, f (f x), \, (f ^^ k) x]"} of arbitrary finite length +@{text "[x, g x, g (g x), \]"}. Productivity guarantees that prefixes +@{text "[x, g x, g (g x), \, (g ^^ k) x]"} of arbitrary finite length @{text k} can be computed by unfolding the code equation a finite number of times. @@ -1714,7 +1811,7 @@ appear around constructors that guard corecursive calls: *} - primcorec_notyet lappend :: "'a llist \ 'a llist \ 'a llist" where + primcorec lappend :: "'a llist \ 'a llist \ 'a llist" where "lappend xs ys = (case xs of LNil \ ys @@ -1722,6 +1819,19 @@ text {* \noindent +Pattern matching is not supported by @{command primcorec}. Fortunately, it is +easy to generate pattern-maching equations using the \keyw{simps\_of\_case} +command provided by the theory \verb|~~/src/HOL/Library/Simps_Case_Conv|. 
+*} + + simps_of_case lappend_simps: lappend.code + +text {* +This generates the lemma collection @{thm [source] lappend_simps}: +% +\[@{thm lappend_simps(1)[no_vars]} + \qquad @{thm lappend_simps(2)[no_vars]}\] +% Corecursion is useful to specify not only functions but also infinite objects: *} @@ -1735,7 +1845,7 @@ pseudorandom seed (@{text n}): *} - primcorec_notyet + primcorec random_process :: "'a stream \ (int \ int) \ int \ 'a process" where "random_process s f n = @@ -1780,43 +1890,71 @@ The next pair of examples generalize the @{const literate} and @{const siterate} functions (Section~\ref{sssec:primcorec-nested-corecursion}) to possibly infinite trees in which subnodes are organized either as a lazy list (@{text -tree\<^sub>i\<^sub>i}) or as a finite set (@{text tree\<^sub>i\<^sub>s}): +tree\<^sub>i\<^sub>i}) or as a finite set (@{text tree\<^sub>i\<^sub>s}). They rely on the map functions of +the nesting type constructors to lift the corecursive calls: *} primcorec iterate\<^sub>i\<^sub>i :: "('a \ 'a llist) \ 'a \ 'a tree\<^sub>i\<^sub>i" where - "iterate\<^sub>i\<^sub>i f x = Node\<^sub>i\<^sub>i x (lmap (iterate\<^sub>i\<^sub>i f) (f x))" + "iterate\<^sub>i\<^sub>i g x = Node\<^sub>i\<^sub>i x (lmap (iterate\<^sub>i\<^sub>i g) (g x))" text {* \blankline *} primcorec iterate\<^sub>i\<^sub>s :: "('a \ 'a fset) \ 'a \ 'a tree\<^sub>i\<^sub>s" where - "iterate\<^sub>i\<^sub>s f x = Node\<^sub>i\<^sub>s x (fimage (iterate\<^sub>i\<^sub>s f) (f x))" + "iterate\<^sub>i\<^sub>s g x = Node\<^sub>i\<^sub>s x (fimage (iterate\<^sub>i\<^sub>s g) (g x))" text {* \noindent -Deterministic finite automata (DFAs) are traditionally defined as 5-tuples -@{text "(Q, \, \, q\<^sub>0, F)"}, where @{text Q} is a finite set of states, +Both examples follow the usual format for constructor arguments associated +with nested recursive occurrences of the datatype. Consider +@{const iterate\<^sub>i\<^sub>i}. 
The term @{term "g x"} constructs an @{typ "'a llist"} +value, which is turned into an @{typ "'a tree\<^sub>i\<^sub>i llist"} value using +@{const lmap}. + +This format may sometimes feel artificial. The following function constructs +a tree with a single, infinite branch from a stream: +*} + + primcorec tree\<^sub>i\<^sub>i_of_stream :: "'a stream \ 'a tree\<^sub>i\<^sub>i" where + "tree\<^sub>i\<^sub>i_of_stream s = + Node\<^sub>i\<^sub>i (shd s) (lmap tree\<^sub>i\<^sub>i_of_stream (LCons (stl s) LNil))" + +text {* +\noindent +Fortunately, it is easy to prove the following lemma, where the corecursive call +is moved inside the lazy list constructor, thereby eliminating the need for +@{const lmap}: +*} + + lemma tree\<^sub>i\<^sub>i_of_stream_alt: + "tree\<^sub>i\<^sub>i_of_stream s = Node\<^sub>i\<^sub>i (shd s) (LCons (tree\<^sub>i\<^sub>i_of_stream (stl s)) LNil)" + by (subst tree\<^sub>i\<^sub>i_of_stream.code) simp + +text {* +The next example illustrates corecursion through functions, which is a bit +special. Deterministic finite automata (DFAs) are traditionally defined as +5-tuples @{text "(Q, \, \, q\<^sub>0, F)"}, where @{text Q} is a finite set of states, @{text \} is a finite alphabet, @{text \} is a transition function, @{text q\<^sub>0} is an initial state, and @{text F} is a set of final states. The following function translates a DFA into a @{type state_machine}: *} - primcorec (*<*)(in early) (*>*) - sm_of_dfa :: "('q \ 'a \ 'q) \ 'q set \ 'q \ 'a state_machine" + primcorec + (*<*)(in early) (*>*)sm_of_dfa :: "('q \ 'a \ 'q) \ 'q set \ 'q \ 'a state_machine" where - "sm_of_dfa \ F q = State_Machine (q \ F) (sm_of_dfa \ F o \ q)" + "sm_of_dfa \ F q = State_Machine (q \ F) (sm_of_dfa \ F \ \ q)" text {* \noindent The map function for the function type (@{text \}) is composition -(@{text "op \"}). For convenience, corecursion through functions can be -expressed using $\lambda$-expressions and function application rather +(@{text "op \"}). 
For convenience, corecursion through functions can +also be expressed using $\lambda$-abstractions and function application rather than through composition. For example: *} primcorec sm_of_dfa :: "('q \ 'a \ 'q) \ 'q set \ 'q \ 'a state_machine" where - "sm_of_dfa \ F q = State_Machine (q \ F) (sm_of_dfa \ F o \ q)" + "sm_of_dfa \ F q = State_Machine (q \ F) (\a. sm_of_dfa \ F (\ q a))" text {* \blankline *} @@ -1833,9 +1971,32 @@ primcorec or_sm :: "'a state_machine \ 'a state_machine \ 'a state_machine" where - "or_sm M N = - State_Machine (accept M \ accept N) - (\a. or_sm (trans M a) (trans N a))" + "or_sm M N = State_Machine (accept M \ accept N) + (\a. or_sm (trans M a) (trans N a))" + +text {* +\noindent +For recursion through curried $n$-ary functions, $n$ applications of +@{term "op \"} are necessary. The examples below illustrate the case where +$n = 2$: +*} + + codatatype ('a, 'b) state_machine2 = + State_Machine2 (accept2: bool) (trans2: "'a \ 'b \ ('a, 'b) state_machine2") + +text {* \blankline *} + + primcorec + (*<*)(in early) (*>*)sm2_of_dfa :: "('q \ 'a \ 'b \ 'q) \ 'q set \ 'q \ ('a, 'b) state_machine2" + where + "sm2_of_dfa \ F q = State_Machine2 (q \ F) (op \ (op \ (sm2_of_dfa \ F)) (\ q))" + +text {* \blankline *} + + primcorec + sm2_of_dfa :: "('q \ 'a \ 'b \ 'q) \ 'q set \ 'q \ ('a, 'b) state_machine2" + where + "sm2_of_dfa \ F q = State_Machine2 (q \ F) (\a b. sm2_of_dfa \ F (\ q a b))" subsubsection {* Nested-as-Mutual Corecursion @@ -1848,15 +2009,31 @@ pretend that nested codatatypes are mutually corecursive. 
For example: *} - primcorec_notyet +(*<*) + context late + begin +(*>*) + primcorec iterate\<^sub>i\<^sub>i :: "('a \ 'a llist) \ 'a \ 'a tree\<^sub>i\<^sub>i" and iterates\<^sub>i\<^sub>i :: "('a \ 'a llist) \ 'a llist \ 'a tree\<^sub>i\<^sub>i llist" where - "iterate\<^sub>i\<^sub>i f x = Node\<^sub>i\<^sub>i x (iterates\<^sub>i\<^sub>i f (f x))" | - "iterates\<^sub>i\<^sub>i f xs = + "iterate\<^sub>i\<^sub>i g x = Node\<^sub>i\<^sub>i x (iterates\<^sub>i\<^sub>i g (g x))" | + "iterates\<^sub>i\<^sub>i g xs = (case xs of LNil \ LNil - | LCons x xs' \ LCons (iterate\<^sub>i\<^sub>i f x) (iterates\<^sub>i\<^sub>i f xs'))" + | LCons x xs' \ LCons (iterate\<^sub>i\<^sub>i g x) (iterates\<^sub>i\<^sub>i g xs'))" + +text {* +\noindent +Coinduction rules are generated as +@{thm [source] iterate\<^sub>i\<^sub>i.coinduct}, +@{thm [source] iterates\<^sub>i\<^sub>i.coinduct}, and +@{thm [source] iterate\<^sub>i\<^sub>i_iterates\<^sub>i\<^sub>i.coinduct} +and analogously for @{text strong_coinduct}. These rules and the +underlying corecursors are generated on a per-need basis and are kept in a cache +to speed up subsequent definitions. 
+*} + (*<*) end (*>*) @@ -1866,7 +2043,7 @@ \label{ssec:primrec-constructor-view} *} (*<*) - locale ctr_view = code_view + locale ctr_view begin (*>*) @@ -1937,7 +2114,7 @@ \label{ssec:primrec-destructor-view} *} (*<*) - locale dest_view + locale dtr_view begin (*>*) @@ -1951,13 +2128,13 @@ primcorec literate :: "('a \ 'a) \ 'a \ 'a llist" where "\ lnull (literate _ x)" | "lhd (literate _ x) = x" | - "ltl (literate f x) = literate f (f x)" + "ltl (literate g x) = literate g (g x)" text {* \blankline *} primcorec siterate :: "('a \ 'a) \ 'a \ 'a stream" where "shd (siterate _ x) = x" | - "stl (siterate f x) = siterate f (f x)" + "stl (siterate g x) = siterate g (g x)" text {* \blankline *} @@ -1993,6 +2170,9 @@ (*<*) end + locale dtr_view2 + begin + primcorec lappend :: "'a llist \ 'a llist \ 'a llist" where "lnull xs \ lnull ys \ lnull (lappend xs ys)" | (*>*) @@ -2000,8 +2180,6 @@ (*<*) | "lhd (lappend xs ys) = lhd (if lnull xs then ys else xs)" | "ltl (lappend xs ys) = (if xs = LNil then ltl ys else lappend (ltl xs) ys)" - - context dest_view begin (*>*) text {* @@ -2044,8 +2222,8 @@ text {* \blankline *} primcorec iterate\<^sub>i\<^sub>i :: "('a \ 'a llist) \ 'a \ 'a tree\<^sub>i\<^sub>i" where - "lbl\<^sub>i\<^sub>i (iterate\<^sub>i\<^sub>i f x) = x" | - "sub\<^sub>i\<^sub>i (iterate\<^sub>i\<^sub>i f x) = lmap (iterate\<^sub>i\<^sub>i f) (f x)" + "lbl\<^sub>i\<^sub>i (iterate\<^sub>i\<^sub>i g x) = x" | + "sub\<^sub>i\<^sub>i (iterate\<^sub>i\<^sub>i g x) = lmap (iterate\<^sub>i\<^sub>i g) (g x)" (*<*) end (*>*) @@ -2148,11 +2326,39 @@ \end{matharray} @{rail " - @@{command bnf} target? (name ':')? term \\ - term_list term term_list term? + @@{command bnf} target? (name ':')? typ \\ + 'map:' term ('sets:' (term +))? 'bd:' term \\ + ('wits:' (term +))? ('rel:' term)? 
+"} +*} + + +subsubsection {* \keyw{bnf\_decl} + \label{sssec:bnf-decl} *} + +text {* +%%% TODO: use command_def once the command is available +\begin{matharray}{rcl} + @{text "bnf_decl"} & : & @{text "local_theory \ local_theory"} +\end{matharray} + +@{rail " + @@{command bnf_decl} target? @{syntax dt_name} ; - X_list: '[' (X + ',') ']' + @{syntax_def dt_name}: @{syntax tyargs}? name @{syntax map_rel}? mixfix? + ; + @{syntax_def tyargs}: typefree | '(' (((name | '-') ':')? typefree + ',') ')' + ; + @{syntax_def map_rel}: '(' ((('map' | 'rel') ':' name) +) ')' "} + +Declares a fresh type and fresh constants (map, set, relator, cardinal bound) +and asserts the bnf properties for these constants as axioms. Additionally, +type arguments may be marked as dead (by using @{syntax "-"} instead of a name for the +set function)---this is the only difference of @{syntax dt_name} compared to +the syntax used by the @{command datatype_new}/@{command codatatype} commands. + +The axioms are sound, since one there exists a bnf of any given arity. *} @@ -2185,8 +2391,10 @@ % old \keyw{datatype} % % * @{command wrap_free_constructors} -% * @{text "no_discs_sels"}, @{text "rep_compat"} +% * @{text "no_discs_sels"}, @{text "no_code"}, @{text "rep_compat"} % * hack to have both co and nonco view via locale (cf. ext nats) +% * code generator +% * eq, refl, simps *} @@ -2215,11 +2423,11 @@ @{syntax_def wfc_discs_sels}: name_list (name_list_list name_term_list_list? )? ; @{syntax_def name_term}: (name ':' term) + ; + X_list: '[' (X + ',') ']' "} -% options: no_discs_sels rep_compat - -% X_list is as for BNF +% options: no_discs_sels no_code rep_compat \noindent Section~\ref{ssec:datatype-generated-theorems} lists the generated theorems. @@ -2307,8 +2515,9 @@ suggested major simplifications to the internal constructions, much of which has yet to be implemented. Florian Haftmann and Christian Urban provided general advice on Isabelle and package writing. 
Stefan Milius and Lutz Schr\"oder -found an elegant proof to eliminate one of the BNF assumptions. Christian -Sternagel suggested many textual improvements to this tutorial. +found an elegant proof to eliminate one of the BNF assumptions. Andreas +Lochbihler and Christian Sternagel suggested many textual improvements to this +tutorial. *} end diff -r cfb21e03fe2a -r d64a4ef26edb src/Doc/Datatypes/document/root.tex --- a/src/Doc/Datatypes/document/root.tex Thu Dec 05 17:52:12 2013 +0100 +++ b/src/Doc/Datatypes/document/root.tex Thu Dec 05 17:58:03 2013 +0100 @@ -58,10 +58,10 @@ \begin{abstract} \noindent -This tutorial describes how to use the new package for defining datatypes and -codatatypes in Isabelle/HOL. The package provides five main commands: +This tutorial describes the new package for defining datatypes and codatatypes +in Isabelle/HOL. The package provides four main commands: \keyw{datatype\_new}, \keyw{codatatype}, \keyw{primrec\_new}, -\keyw{primcorecursive}, and \keyw{primcorec}. The commands suffixed by +and \keyw{primcorec}. The commands suffixed by \keyw{\_new} are intended to subsume, and eventually replace, the corresponding commands from the old datatype package. \end{abstract} diff -r cfb21e03fe2a -r d64a4ef26edb src/Doc/Functions/Functions.thy --- a/src/Doc/Functions/Functions.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/Doc/Functions/Functions.thy Thu Dec 05 17:58:03 2013 +0100 @@ -1003,13 +1003,13 @@ recursive calls. In general, there is one introduction rule for each recursive call. - The predicate @{term "accp findzero_rel"} is the accessible part of + The predicate @{term "Wellfounded.accp findzero_rel"} is the accessible part of that relation. An argument belongs to the accessible part, if it can be reached in a finite number of steps (cf.~its definition in @{text "Wellfounded.thy"}). Since the domain predicate is just an abbreviation, you can use - lemmas for @{const accp} and @{const findzero_rel} directly. 
Some + lemmas for @{const Wellfounded.accp} and @{const findzero_rel} directly. Some lemmas which are occasionally useful are @{thm [source] accpI}, @{thm [source] accp_downward}, and of course the introduction and elimination rules for the recursion relation @{thm [source] "findzero_rel.intros"} and @{thm diff -r cfb21e03fe2a -r d64a4ef26edb src/Doc/IsarImplementation/ML.thy --- a/src/Doc/IsarImplementation/ML.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/Doc/IsarImplementation/ML.thy Thu Dec 05 17:58:03 2013 +0100 @@ -1033,7 +1033,7 @@ without any message output. \begin{warn} - The actual error channel is accessed via @{ML Output.error_msg}, but + The actual error channel is accessed via @{ML Output.error_message}, but the old interaction protocol of Proof~General \emph{crashes} if that function is used in regular ML code: error output and toplevel command failure always need to coincide in classic TTY interaction. diff -r cfb21e03fe2a -r d64a4ef26edb src/Doc/JEdit/JEdit.thy --- a/src/Doc/JEdit/JEdit.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/Doc/JEdit/JEdit.thy Thu Dec 05 17:58:03 2013 +0100 @@ -1068,12 +1068,6 @@ text {* \begin{itemize} - \item \textbf{Problem:} Lack of dependency management for auxiliary files - that contribute to a theory (e.g.\ @{command ML_file}). - - \textbf{Workaround:} Re-load files manually within the prover, by - editing corresponding command in the text. - \item \textbf{Problem:} Odd behavior of some diagnostic commands with global side-effects, like writing a physical file. diff -r cfb21e03fe2a -r d64a4ef26edb src/Doc/Nitpick/document/root.tex --- a/src/Doc/Nitpick/document/root.tex Thu Dec 05 17:52:12 2013 +0100 +++ b/src/Doc/Nitpick/document/root.tex Thu Dec 05 17:58:03 2013 +0100 @@ -1965,6 +1965,8 @@ \texttt{.kki}, \texttt{.cnf}, \texttt{.out}, and \texttt{.err}; you may safely remove them after Nitpick has run. +\textbf{Warning:} This option is not thread-safe. Use at your own risks. 
+ \nopagebreak {\small See also \textit{debug} (\S\ref{output-format}).} \end{enum} @@ -2382,6 +2384,14 @@ \cite{kodkod-2009}. Unlike the standard version of MiniSat, the JNI version can be used incrementally. +\item[\labelitemi] \textbf{\textit{Riss3g}:} Riss3g is an efficient solver written in +\cpp{}. To use Riss3g, set the environment variable \texttt{RISS3G\_HOME} to the +directory that contains the \texttt{riss3g} executable.% +\footref{cygwin-paths} +The \cpp{} sources for Riss3g are available at +\url{http://tools.computational-logic.org/content/riss3g.php}. +Nitpick has been tested with the SAT Competition 2013 version. + \item[\labelitemi] \textbf{\textit{zChaff}:} zChaff is an older solver written in \cpp{}. To use zChaff, set the environment variable \texttt{ZCHAFF\_HOME} to the directory that contains the \texttt{zchaff} executable.% @@ -2794,11 +2804,12 @@ \subsection{Registering Coinductive Datatypes} \label{registering-coinductive-datatypes} +Coinductive datatypes defined using the \textbf{codatatype} command that do not +involve nested recursion through non-codatatypes are supported by Nitpick. If you have defined a custom coinductive datatype, you can tell Nitpick about -it, so that it can use an efficient Kodkod axiomatization similar to the one it -uses for lazy lists. The interface for registering and unregistering coinductive -datatypes consists of the following pair of functions defined in the -\textit{Nitpick\_HOL} structure: +it, so that it can use an efficient Kodkod axiomatization. The interface for +registering and unregistering coinductive datatypes consists of the following +pair of functions defined in the \textit{Nitpick\_HOL} structure: \prew $\textbf{val}\,~\textit{register\_codatatype\/} : {}$ \\ @@ -2886,6 +2897,12 @@ \item[\labelitemi] Nitpick produces spurious counterexamples when invoked after a \textbf{guess} command in a structured proof. +\item[\labelitemi] Datatypes defined using \textbf{datatype\_new} are not +supported. 
+ +\item[\labelitemi] Codatatypes defined using \textbf{codatatype} that +involve nested recursion through non-codatatypes are not supported. + \item[\labelitemi] The \textit{nitpick\_xxx} attributes and the \textit{Nitpick\_xxx.register\_yyy} functions can cause havoc if used improperly. diff -r cfb21e03fe2a -r d64a4ef26edb src/Doc/ProgProve/Basics.thy --- a/src/Doc/ProgProve/Basics.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/Doc/ProgProve/Basics.thy Thu Dec 05 17:58:03 2013 +0100 @@ -22,8 +22,8 @@ \item[type constructors,] in particular @{text list}, the type of lists, and @{text set}, the type of sets. Type constructors are written -postfix, e.g.\ @{typ "nat list"} is the type of lists whose elements are -natural numbers. +postfix, i.e., after their arguments. For example, +@{typ "nat list"} is the type of lists whose elements are natural numbers. \item[function types,] denoted by @{text"\"}. \item[type variables,] @@ -41,8 +41,8 @@ \begin{warn} There are many predefined infix symbols like @{text "+"} and @{text"\"}. The name of the corresponding binary function is @{term"op +"}, -not just @{text"+"}. That is, @{term"x + y"} is syntactic sugar for -\noquotes{@{term[source]"op + x y"}}. +not just @{text"+"}. That is, @{term"x + y"} is nice surface syntax +(``syntactic sugar'') for \noquotes{@{term[source]"op + x y"}}. \end{warn} HOL also supports some basic constructs from functional programming: diff -r cfb21e03fe2a -r d64a4ef26edb src/Doc/ProgProve/Bool_nat_list.thy --- a/src/Doc/ProgProve/Bool_nat_list.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/Doc/ProgProve/Bool_nat_list.thy Thu Dec 05 17:58:03 2013 +0100 @@ -99,10 +99,10 @@ For example, given the goal @{text"x + 0 = x"}, there is nothing to indicate that you are talking about natural numbers. Hence Isabelle can only infer that @{term x} is of some arbitrary type where @{text 0} and @{text"+"} - exist. As a consequence, you will be unable to prove the - goal. 
To alert you to such pitfalls, Isabelle flags numerals without a - fixed type in its output: @{prop"x+0 = x"}. In this particular example, - you need to include + exist. As a consequence, you will be unable to prove the goal. +% To alert you to such pitfalls, Isabelle flags numerals without a +% fixed type in its output: @ {prop"x+0 = x"}. + In this particular example, you need to include an explicit type constraint, for example @{text"x+0 = (x::nat)"}. If there is enough contextual information this may not be necessary: @{prop"Suc x = x"} automatically implies @{text"x::nat"} because @{term Suc} is not @@ -372,10 +372,10 @@ ys zs)"}. It appears almost mysterious because we suddenly complicate the term by appending @{text Nil} on the left. What is really going on is this: when proving some equality \mbox{@{prop"s = t"}}, both @{text s} and @{text t} are -simplified to some common term @{text u}. This heuristic for equality proofs +simplified until they ``meet in the middle''. This heuristic for equality proofs works well for a functional programming context like ours. In the base case -@{text s} is @{term"app (app Nil ys) zs"}, @{text t} is @{term"app Nil (app -ys zs)"}, and @{text u} is @{term"app ys zs"}. +both @{term"app (app Nil ys) zs"} and @{term"app Nil (app +ys zs)"} are simplified to @{term"app ys zs"}, the term in the middle. \subsection{Predefined Lists} \label{sec:predeflists} @@ -419,13 +419,19 @@ From now on lists are always the predefined lists. -\subsection{Exercises} +\subsection*{Exercises} + +\begin{exercise} +Use the \isacom{value} command to evaluate the following expressions: +@{term[source] "1 + (2::nat)"}, @{term[source] "1 + (2::int)"}, +@{term[source] "1 - (2::nat)"} and @{term[source] "1 - (2::int)"}. +\end{exercise} \begin{exercise} Start from the definition of @{const add} given above. -Prove it is associative (@{prop"add (add m n) p = add m (add n p)"}) -and commutative (@{prop"add m n = add n m"}). 
Define a recursive function -@{text double} @{text"::"} @{typ"nat \ nat"} and prove that @{prop"double m = add m m"}. +Prove that @{const add} is associative and commutative. +Define a recursive function @{text double} @{text"::"} @{typ"nat \ nat"} +and prove @{prop"double m = add m m"}. \end{exercise} \begin{exercise} @@ -436,11 +442,15 @@ \begin{exercise} Define a recursive function @{text "snoc ::"} @{typ"'a list \ 'a \ 'a list"} -that appends an element to the end of a list. Do not use the predefined append -operator @{text"@"}. With the help of @{text snoc} define a recursive function -@{text "reverse ::"} @{typ"'a list \ 'a list"} that reverses a list. Do not -use the predefined function @{const rev}. -Prove @{prop"reverse(reverse xs) = xs"}. +that appends an element to the end of a list. With the help of @{text snoc} +define a recursive function @{text "reverse ::"} @{typ"'a list \ 'a list"} +that reverses a list. Prove @{prop"reverse(reverse xs) = xs"}. +\end{exercise} + +\begin{exercise} +Define a recursive function @{text "sum ::"} @{typ"nat \ nat"} such that +\mbox{@{text"sum n"}} @{text"="} @{text"0 + ... + n"} and prove +@{prop" sum(n::nat) = n * (n+1) div 2"}. \end{exercise} *} (*<*) diff -r cfb21e03fe2a -r d64a4ef26edb src/Doc/ProgProve/Isar.thy --- a/src/Doc/ProgProve/Isar.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/Doc/ProgProve/Isar.thy Thu Dec 05 17:58:03 2013 +0100 @@ -590,15 +590,15 @@ the fact just proved, in this case the preceding block. In general, \isacom{note} introduces a new name for one or more facts. -\subsection{Exercises} +\subsection*{Exercises} \exercise Give a readable, structured proof of the following lemma: *} -lemma assumes T: "\ x y. T x y \ T y x" - and A: "\ x y. A x y \ A y x \ x = y" - and TA: "\ x y. T x y \ A x y" and "A x y" -shows "T x y" +lemma assumes T: "\x y. T x y \ T y x" + and A: "\x y. A x y \ A y x \ x = y" + and TA: "\x y. 
T x y \ A x y" and "A x y" + shows "T x y" (*<*)oops(*>*) text{* \endexercise @@ -612,10 +612,11 @@ text{* Hint: There are predefined functions @{const_typ take} and @{const_typ drop} such that @{text"take k [x\<^sub>1,\] = [x\<^sub>1,\,x\<^sub>k]"} and -@{text"drop k [x\<^sub>1,\] = [x\<^bsub>k+1\<^esub>,\]"}. Let @{text simp} and especially -sledgehammer find and apply the relevant @{const take} and @{const drop} lemmas for you. +@{text"drop k [x\<^sub>1,\] = [x\<^bsub>k+1\<^esub>,\]"}. Let sledgehammer find and apply +the relevant @{const take} and @{const drop} lemmas for you. \endexercise + \section{Case Analysis and Induction} \subsection{Datatype Case Analysis} @@ -1018,7 +1019,7 @@ \isacom{lemma} @{text[source]"I r s t \ \"} \end{isabelle} Applying the standard form of -rule induction in such a situation will lead to strange and typically unproveable goals. +rule induction in such a situation will lead to strange and typically unprovable goals. We can easily reduce this situation to the standard one by introducing new variables @{text x}, @{text y}, @{text z} and reformulating the goal like this: \begin{isabelle} @@ -1040,7 +1041,7 @@ proof(induction "Suc m" arbitrary: m rule: ev.induct) fix n assume IH: "\m. n = Suc m \ \ ev m" show "\ ev (Suc n)" - proof --"contradition" + proof --"contradiction" assume "ev(Suc n)" thus False proof cases --"rule inversion" @@ -1075,45 +1076,38 @@ @{text induct} method. \end{warn} -\subsection{Exercises} + +\subsection*{Exercises} + + +\exercise +Give a structured proof by rule inversion: +*} + +lemma assumes a: "ev(Suc(Suc n))" shows "ev n" +(*<*)oops(*>*) + +text{* +\endexercise + +\begin{exercise} +Give a structured proof of @{prop "\ ev(Suc(Suc(Suc 0)))"} +by rule inversions. If there are no cases to be proved you can close +a proof immediately with \isacom{qed}. +\end{exercise} + +\begin{exercise} +Recall predicate @{text star} from \autoref{sec:star} and @{text iter} +from Exercise~\ref{exe:iter}. 
Prove @{prop "iter r n x y \ star r x y"} +in a structured style, do not just sledgehammer each case of the +required induction. +\end{exercise} \begin{exercise} Define a recursive function @{text "elems ::"} @{typ"'a list \ 'a set"} and prove @{prop "x : elems xs \ \ys zs. xs = ys @ x # zs \ x \ elems ys"}. \end{exercise} - -\begin{exercise} -A context-free grammar can be seen as an inductive definition where each -nonterminal $A$ is an inductively defined predicate on lists of terminal -symbols: $A(w)$ mans -that $w$ is in the language generated by $A$. For example, the production $S -\to a S b$ can be viewed as the implication @{prop"S w \ S (a # w @ [b])"} -where @{text a} and @{text b} are constructors of some datatype of terminal -symbols: \isacom{datatype} @{text"tsymbs = a | b | \"} - -Define the two grammars -\[ -\begin{array}{r@ {\quad}c@ {\quad}l} -S &\to& \varepsilon \quad\mid\quad a~S~b \quad\mid\quad S~S \\ -T &\to& \varepsilon \quad\mid\quad T~a~T~b -\end{array} -\] -($\varepsilon$ is the empty word) -as two inductive predicates and prove @{prop"S w \ T w"}. -\end{exercise} - *} -(* -lemma "\ ev(Suc(Suc(Suc 0)))" -proof - assume "ev(Suc(Suc(Suc 0)))" - then show False - proof cases - case evSS - from `ev(Suc 0)` show False by cases - qed -qed -*) (*<*) end diff -r cfb21e03fe2a -r d64a4ef26edb src/Doc/ProgProve/Logic.thy --- a/src/Doc/ProgProve/Logic.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/Doc/ProgProve/Logic.thy Thu Dec 05 17:58:03 2013 +0100 @@ -141,6 +141,28 @@ See \cite{Nipkow-Main} for the wealth of further predefined functions in theory @{theory Main}. + +\subsection*{Exercises} + +\exercise +Start from the data type of binary trees defined earlier: +*} + +datatype 'a tree = Tip | Node "'a tree" 'a "'a tree" + +text{* +Define a function @{text "set ::"} @{typ "'a tree \ 'a set"} +that returns the elements in a tree and a function +@{text "ord ::"} @{typ "int tree \ bool"} +that tests if an @{typ "int tree"} is ordered. 
+ +Define a function @{text ins} that inserts an element into an ordered @{typ "int tree"} +while maintaining the order of the tree. If the element is already in the tree, the +same tree should be returned. Prove correctness of @{text ins}: +@{prop "set(ins x t) = {x} \ set t"} and @{prop "ord t \ ord(ins i t)"}. +\endexercise + + \section{Proof Automation} So far we have only seen @{text simp} and @{text auto}: Both perform @@ -459,12 +481,12 @@ text{* In this particular example we could have backchained with @{thm[source] Suc_leD}, too, but because the premise is more complicated than the conclusion this can easily lead to nontermination. -\subsection{Finding Theorems} - -Command \isacom{find{\isacharunderscorekeyword}theorems} searches for specific theorems in the current -theory. Search criteria include pattern matching on terms and on names. -For details see the Isabelle/Isar Reference Manual~\cite{IsarRef}. -\bigskip +%\subsection{Finding Theorems} +% +%Command \isacom{find{\isacharunderscorekeyword}theorems} searches for specific theorems in the current +%theory. Search criteria include pattern matching on terms and on names. +%For details see the Isabelle/Isar Reference Manual~\cite{IsarRef}. +%\bigskip \begin{warn} To ease readability we will drop the question marks @@ -708,8 +730,8 @@ apply(rename_tac u x y) defer (*>*) -txt{* The induction is over @{prop"star r x y"} and we try to prove -\mbox{@{prop"star r y z \ star r x z"}}, +txt{* The induction is over @{prop"star r x y"} (the first matching assumption) +and we try to prove \mbox{@{prop"star r y z \ star r x z"}}, which we abbreviate by @{prop"P x y"}. These are our two subgoals: @{subgoals[display,indent=0]} The first one is @{prop"P x x"}, the result of case @{thm[source]refl}, @@ -764,6 +786,95 @@ conditions}. In rule inductions, these side-conditions appear as additional assumptions. 
The \isacom{for} clause seen in the definition of the reflexive transitive closure merely simplifies the form of the induction rule. + + +\subsection*{Exercises} + +\begin{exercise} +Formalise the following definition of palindromes +\begin{itemize} +\item The empty list and a singleton list are palindromes. +\item If @{text xs} is a palindrome, so is @{term "a # xs @ [a]"}. +\end{itemize} +as an inductive predicate @{text "palindrome ::"} @{typ "'a list \ bool"} +and prove that @{prop "rev xs = xs"} if @{text xs} is a palindrome. +\end{exercise} + +\exercise +We could also have defined @{const star} as follows: +*} + +inductive star' :: "('a \ 'a \ bool) \ 'a \ 'a \ bool" for r where +refl': "star' r x x" | +step': "star' r x y \ r y z \ star' r x z" + +text{* +The single @{text r} step is performed after rather than before the @{text star'} +steps. Prove @{prop "star' r x y \ star r x y"} and +@{prop "star r x y \ star' r x y"}. You may need lemmas. +Note that rule induction fails +if the assumption about the inductive predicate is not the first assumption. +\endexercise + +\begin{exercise}\label{exe:iter} +Analogous to @{const star}, give an inductive definition of the @{text n}-fold iteration +of a relation @{text r}: @{term "iter r n x y"} should hold if there are @{text x\<^sub>0}, \dots, @{text x\<^sub>n} +such that @{prop"x = x\<^sub>0"}, @{prop"x\<^sub>n = y"} and @{text"r x\<^bsub>i\<^esub> x\<^bsub>i+1\<^esub>"} for +all @{prop"i < n"}. Correct and prove the following claim: +@{prop"star r x y \ iter r n x y"}. +\end{exercise} + +\begin{exercise} +A context-free grammar can be seen as an inductive definition where each +nonterminal $A$ is an inductively defined predicate on lists of terminal +symbols: $A(w)$ means that $w$ is in the language generated by $A$. +For example, the production $S \to a S b$ can be viewed as the implication +@{prop"S w \ S (a # w @ [b])"} where @{text a} and @{text b} are terminal symbols, +i.e., elements of some alphabet. 
The alphabet can be defined like this: +\isacom{datatype} @{text"alpha = a | b | \"} + +Define the two grammars (where $\varepsilon$ is the empty word) +\[ +\begin{array}{r@ {\quad}c@ {\quad}l} +S &\to& \varepsilon \quad\mid\quad aSb \quad\mid\quad SS \\ +T &\to& \varepsilon \quad\mid\quad TaTb +\end{array} +\] +as two inductive predicates. +If you think of @{text a} and @{text b} as ``@{text "("}'' and ``@{text ")"}'', +the grammars define strings of balanced parentheses. +Prove @{prop"T w \ S w"} and @{prop "S w \ T w"} separately and conclude +@{prop "S w = T w"}. +\end{exercise} + +\ifsem +\begin{exercise} +In \autoref{sec:AExp} we defined a recursive evaluation function +@{text "aval :: aexp \ state \ val"}. +Define an inductive evaluation predicate +@{text "aval_rel :: aexp \ state \ val \ bool"} +and prove that it agrees with the recursive function: +@{prop "aval_rel a s v \ aval a s = v"}, +@{prop "aval a s = v \ aval_rel a s v"} and thus +\noquotes{@{prop [source] "aval_rel a s v \ aval a s = v"}}. +\end{exercise} + +\begin{exercise} +Consider the stack machine from Chapter~3 +and recall the concept of \concept{stack underflow} +from Exercise~\ref{exe:stack-underflow}. +Define an inductive predicate +@{text "ok :: nat \ instr list \ nat \ bool"} +such that @{text "ok n is n'"} means that with any initial stack of length +@{text n} the instructions @{text "is"} can be executed +without stack underflow and that the final stack has length @{text n'}. +Prove that @{text ok} correctly computes the final stack size +@{prop[display] "\ok n is n'; length stk = n\ \ length (exec is s stk) = n'"} +and that instruction sequences generated by @{text comp} +cannot cause stack underflow: \ @{text "ok n (comp a) ?"} \ for +some suitable value of @{text "?"}. 
+\end{exercise} +\fi *} (*<*) end diff -r cfb21e03fe2a -r d64a4ef26edb src/Doc/ProgProve/Types_and_funs.thy --- a/src/Doc/ProgProve/Types_and_funs.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/Doc/ProgProve/Types_and_funs.thy Thu Dec 05 17:58:03 2013 +0100 @@ -156,7 +156,7 @@ fun div2 :: "nat \ nat" where "div2 0 = 0" | -"div2 (Suc 0) = Suc 0" | +"div2 (Suc 0) = 0" | "div2 (Suc(Suc n)) = Suc(div2 n)" text{* does not just define @{const div2} but also proves a @@ -171,16 +171,25 @@ This customized induction rule can simplify inductive proofs. For example, *} -lemma "div2(n+n) = n" +lemma "div2(n) = n div 2" apply(induction n rule: div2.induct) -txt{* yields the 3 subgoals +txt{* (where the infix @{text div} is the predefined division operation) +yields the 3 subgoals @{subgoals[display,margin=65]} An application of @{text auto} finishes the proof. Had we used ordinary structural induction on @{text n}, the proof would have needed an additional case analysis in the induction step. +This example leads to the following induction heuristic: +\begin{quote} +\emph{Let @{text f} be a recursive function. +If the definition of @{text f} is more complicated +than having one equation for each constructor of some datatype, +then properties of @{text f} are best proved via @{text "f.induct"}.} +\end{quote} + The general case is often called \concept{computation induction}, because the induction follows the (terminating!) computation. For every defining equation @@ -200,6 +209,35 @@ But note that the induction rule does not mention @{text f} at all, except in its name, and is applicable independently of @{text f}. + +\subsection*{Exercises} + +\begin{exercise} +Starting from the type @{text "'a tree"} defined in the text, define +a function @{text "contents ::"} @{typ "'a tree \ 'a list"} +that collects all values in a tree in a list, in any order, +without removing duplicates. 
+Then define a function @{text "treesum ::"} @{typ "nat tree \ nat"} +that sums up all values in a tree of natural numbers +and prove @{prop "treesum t = listsum(contents t)"}. +\end{exercise} + +\begin{exercise} +Define a new type @{text "'a tree2"} of binary trees where values are also +stored in the leaves of the tree. Also reformulate the +@{const mirror} function accordingly. Define two functions +@{text "pre_order"} and @{text "post_order"} of type @{text "'a tree2 \ 'a list"} +that traverse a tree and collect all stored values in the respective order in +a list. Prove @{prop "pre_order (mirror t) = rev (post_order t)"}. +\end{exercise} + +\begin{exercise} +Define a function @{text "intersperse ::"} @{typ "'a \ 'a list \ 'a list"} +such that @{text "intersperse a [x\<^sub>1, ..., x\<^sub>n] = [x\<^sub>1, a, x\<^sub>2, a, ..., a, x\<^sub>n]"}. +Now prove that @{prop "map f (intersperse a xs) = intersperse (f a) (map f xs)"}. +\end{exercise} + + \section{Induction Heuristics} We have already noted that theorems about recursive functions are proved by @@ -307,6 +345,18 @@ matters in some cases. The variables that need to be quantified are typically those that change in recursive calls. + +\subsection*{Exercises} + +\begin{exercise} +Write a tail-recursive variant of the @{text add} function on @{typ nat}: +@{term "itadd :: nat \ nat \ nat"}. +Tail-recursive means that in the recursive case, @{text itadd} needs to call +itself directly: \mbox{@{term"itadd (Suc m) n"}} @{text"= itadd \"}. +Prove @{prop "itadd m n = add m n"}. +\end{exercise} + + \section{Simplification} So far we have talked a lot about simplifying terms without explaining the concept. \concept{Simplification} means @@ -481,9 +531,37 @@ splits all case-expressions over natural numbers. For an arbitrary datatype @{text t} it is @{text "t.split"} instead of @{thm[source] nat.split}. Method @{text auto} can be modified in exactly the same way. 
+The modifier @{text "split:"} can be followed by multiple names. +Splitting if or case-expressions in the assumptions requires +@{text "split: if_splits"} or @{text "split: t.splits"}. -\subsection{Exercises} +\subsection*{Exercises} + +\exercise\label{exe:tree0} +Define a datatype @{text tree0} of binary tree skeletons which do not store +any information, neither in the inner nodes nor in the leaves. +Define a function @{text "nodes :: tree0 \ nat"} that counts the number of +all nodes (inner nodes and leaves) in such a tree. +Consider the following recursive function: +*} +(*<*) +datatype tree0 = Tip | Node tree0 tree0 +(*>*) +fun explode :: "nat \ tree0 \ tree0" where +"explode 0 t = t" | +"explode (Suc n) t = explode n (Node t t)" + +text {* +Find an equation expressing the size of a tree after exploding it +(\noquotes{@{term [source] "nodes (explode n t)"}}) as a function +of @{term "nodes t"} and @{text n}. Prove your equation. +You may use the usual arithmetic operators including the exponentiation +operator ``@{text"^"}''. For example, \noquotes{@{prop [source] "2 ^ 2 = 4"}}. + +Hint: simplifying with the list of theorems @{thm[source] algebra_simps} +takes care of common algebraic properties of the arithmetic operators. +\endexercise \exercise Define arithmetic expressions in one variable over integers (type @{typ int}) @@ -506,8 +584,7 @@ that transforms an expression into a polynomial. This may require auxiliary functions. Prove that @{text coeffs} preserves the value of the expression: \mbox{@{prop"evalp (coeffs e) x = eval e x"}.} -Hint: simplifying with @{thm[source] algebra_simps} takes care of -common algebraic properties of @{text "+"} and @{text "*"}. +Hint: consider the hint in Exercise~\ref{exe:tree0}. 
\endexercise *} (*<*) diff -r cfb21e03fe2a -r d64a4ef26edb src/Doc/ProgProve/document/intro-isabelle.tex --- a/src/Doc/ProgProve/document/intro-isabelle.tex Thu Dec 05 17:52:12 2013 +0100 +++ b/src/Doc/ProgProve/document/intro-isabelle.tex Thu Dec 05 17:58:03 2013 +0100 @@ -16,7 +16,7 @@ of recursive functions. \ifsem \autoref{sec:CaseStudyExp} contains a -little case study: arithmetic and boolean expressions, their evaluation, +small case study: arithmetic and boolean expressions, their evaluation, optimization and compilation. \fi \autoref{ch:Logic} introduces the rest of HOL: the @@ -35,8 +35,8 @@ % in the intersection of computation and logic. This introduction to the core of Isabelle is intentionally concrete and -example-based: we concentrate on examples that illustrate the typical cases; -we do not explain the general case if it can be inferred from the examples. +example-based: we concentrate on examples that illustrate the typical cases +without explaining the general case if it can be inferred from the examples. We cover the essentials (from a functional programming point of view) as quickly and compactly as possible. \ifsem @@ -46,7 +46,7 @@ For a comprehensive treatment of all things Isabelle we recommend the \emph{Isabelle/Isar Reference Manual}~\cite{IsarRef}, which comes with the Isabelle distribution. -The tutorial by Nipkow, Paulson and Wenzel~\cite{LNCS2283} (in its updated version that comes with the Isabelle distribution) is still recommended for the wealth of examples and material, but its proof style is outdated. In particular it fails to cover the structured proof language Isar. +The tutorial by Nipkow, Paulson and Wenzel~\cite{LNCS2283} (in its updated version that comes with the Isabelle distribution) is still recommended for the wealth of examples and material, but its proof style is outdated. In particular it does not cover the structured proof language Isar. 
%This introduction to Isabelle has grown out of many years of teaching %Isabelle courses. @@ -88,7 +88,7 @@ \ifsem\else \paragraph{Acknowledgements} -I wish to thank the following people for their comments -on this document: -Florian Haftmann, Ren\'{e} Thiemann and Christian Sternagel. +I wish to thank the following people for their comments on this document: +Florian Haftmann, Ren\'{e} Thiemann, Sean Seefried, Christian Sternagel +and Carl Witty. \fi \ No newline at end of file diff -r cfb21e03fe2a -r d64a4ef26edb src/Doc/Sledgehammer/document/root.tex --- a/src/Doc/Sledgehammer/document/root.tex Thu Dec 05 17:52:12 2013 +0100 +++ b/src/Doc/Sledgehammer/document/root.tex Thu Dec 05 17:58:03 2013 +0100 @@ -121,8 +121,8 @@ For Isabelle/jEdit users, Sledgehammer provides an automatic mode that can be enabled via the ``Auto Sledgehammer'' option under ``Plugins > Plugin Options > -Isabelle > General.'' In this mode, Sledgehammer is run on every newly entered -theorem. +Isabelle > General.'' In this mode, a reduced version of Sledgehammer is run on +every newly entered theorem for a few seconds. \newbox\boxA \setbox\boxA=\hbox{\texttt{NOSPAM}} @@ -719,12 +719,16 @@ If you use Isabelle/jEdit, Sledgehammer also provides an automatic mode that can be enabled via the ``Auto Sledgehammer'' option under ``Plugins > Plugin Options > Isabelle > General.'' For automatic runs, only the first prover set using -\textit{provers} (\S\ref{mode-of-operation}) is considered, fewer facts are -passed to the prover, \textit{slice} (\S\ref{mode-of-operation}) is disabled, -\textit{strict} (\S\ref{problem-encoding}) is enabled, \textit{verbose} -(\S\ref{output-format}) and \textit{debug} (\S\ref{output-format}) are disabled, -and \textit{timeout} (\S\ref{timeouts}) is superseded by the ``Auto Time Limit'' -option in jEdit. Sledgehammer's output is also more concise. 
+\textit{provers} (\S\ref{mode-of-operation}) is considered (typically E), +\textit{slice} (\S\ref{mode-of-operation}) is disabled, +\textit{minimize} (\S\ref{mode-of-operation}) is disabled, fewer facts are +passed to the prover, \textit{fact\_filter} (\S\ref{relevance-filter}) is set to +\textit{mepo}, \textit{strict} (\S\ref{problem-encoding}) is enabled, +\textit{verbose} (\S\ref{output-format}) and \textit{debug} +(\S\ref{output-format}) are disabled, \textit{preplay\_timeout} +(\S\ref{timeouts}) is set to 0, and \textit{timeout} (\S\ref{timeouts}) is +superseded by the ``Auto Time Limit'' option in jEdit. Sledgehammer's output is +also more concise. \subsection{Metis} @@ -999,8 +1003,7 @@ number of facts. For SMT solvers, several slices are tried with the same options each time but fewer and fewer facts. According to benchmarks with a timeout of 30 seconds, slicing is a valuable optimization, and you should probably leave it -enabled unless you are conducting experiments. This option is implicitly -disabled for (short) automatic runs. +enabled unless you are conducting experiments. \nopagebreak {\small See also \textit{verbose} (\S\ref{output-format}).} @@ -1035,6 +1038,8 @@ simultaneously. The files are identified by the prefixes \texttt{prob\_} and \texttt{mash\_}; you may safely remove them after Sledgehammer has run. +\textbf{Warning:} This option is not thread-safe. Use at your own risk. + \nopagebreak {\small See also \textit{debug} (\S\ref{output-format}).} \end{enum} @@ -1282,14 +1287,12 @@ \opfalse{verbose}{quiet} Specifies whether the \textbf{sledgehammer} command should explain what it does. -This option is implicitly disabled for automatic runs. \opfalse{debug}{no\_debug} Specifies whether Sledgehammer should display additional debugging information beyond what \textit{verbose} already displays. Enabling \textit{debug} also enables \textit{verbose} and \textit{blocking} (\S\ref{mode-of-operation}) -behind the scenes. 
The \textit{debug} option is implicitly disabled for -automatic runs. +behind the scenes. \nopagebreak {\small See also \textit{spy} (\S\ref{mode-of-operation}) and @@ -1349,8 +1352,6 @@ \opdefault{timeout}{float\_or\_none}{\upshape 30} Specifies the maximum number of seconds that the automatic provers should spend searching for a proof. This excludes problem preparation and is a soft limit. -For automatic runs, the ``Auto Time Limit'' option under ``Plugins > Plugin -Options > Isabelle > General'' is used instead. \opdefault{preplay\_timeout}{float\_or\_none}{\upshape 3} Specifies the maximum number of seconds that \textit{metis} or \textit{smt} diff -r cfb21e03fe2a -r d64a4ef26edb src/Doc/System/Sessions.thy --- a/src/Doc/System/Sessions.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/Doc/System/Sessions.thy Thu Dec 05 17:58:03 2013 +0100 @@ -399,7 +399,7 @@ \smallskip Build some session images with cleanup of their descendants, while retaining their ancestry: \begin{ttbox} -isabelle build -b -c HOL-Boogie HOL-SPARK +isabelle build -b -c HOL-Algebra HOL-Word \end{ttbox} \smallskip Clean all sessions without building anything: diff -r cfb21e03fe2a -r d64a4ef26edb src/Doc/Tutorial/document/rules.tex --- a/src/Doc/Tutorial/document/rules.tex Thu Dec 05 17:52:12 2013 +0100 +++ b/src/Doc/Tutorial/document/rules.tex Thu Dec 05 17:58:03 2013 +0100 @@ -1,4 +1,4 @@ -%!TEX root = ../tutorial.tex +%!TEX root = root.tex \chapter{The Rules of the Game} \label{chap:rules} @@ -33,6 +33,8 @@ one symbol only. For predicate logic this can be done, but when users define their own concepts they typically have to refer to other symbols as well. It is best not to be dogmatic. +Our system is not based on pure natural deduction, but includes elements from the sequent calculus +and free-variable tableaux. Natural deduction generally deserves its name. It is easy to use. Each proof step consists of identifying the outermost symbol of a formula and @@ -240,13 +242,14 @@ of a conjunction. 
Rules of this sort (where the conclusion is a subformula of a premise) are called \textbf{destruction} rules because they take apart and destroy a premise.% -\footnote{This Isabelle terminology has no counterpart in standard logic texts, +\footnote{This Isabelle terminology is not used in standard logic texts, although the distinction between the two forms of elimination rule is well known. Girard \cite[page 74]{girard89},\index{Girard, Jean-Yves|fnote} for example, writes ``The elimination rules [for $\disj$ and $\exists$] are very bad. What is catastrophic about them is the parasitic presence of a formula [$R$] -which has no structural link with the formula which is eliminated.''} +which has no structural link with the formula which is eliminated.'' +These Isabelle rules are inspired by the sequent calculus.} The first proof step applies conjunction introduction, leaving two subgoals: diff -r cfb21e03fe2a -r d64a4ef26edb src/Doc/Tutorial/document/sets.tex --- a/src/Doc/Tutorial/document/sets.tex Thu Dec 05 17:52:12 2013 +0100 +++ b/src/Doc/Tutorial/document/sets.tex Thu Dec 05 17:58:03 2013 +0100 @@ -660,8 +660,8 @@ \textbf{Composition} of relations (the infix \sdx{O}) is also available: \begin{isabelle} -r\ O\ s\ \isasymequiv\ \isacharbraceleft(x,z).\ \isasymexists y.\ (x,y)\ \isasymin\ s\ \isasymand\ (y,z)\ \isasymin\ r\isacharbraceright -\rulenamedx{rel_comp_def} +r\ O\ s\ = \isacharbraceleft(x,z).\ \isasymexists y.\ (x,y)\ \isasymin\ s\ \isasymand\ (y,z)\ \isasymin\ r\isacharbraceright +\rulenamedx{relcomp_unfold} \end{isabelle} % This is one of the many lemmas proved about these concepts: @@ -677,7 +677,7 @@ \isasymlbrakk r\isacharprime\ \isasymsubseteq\ r;\ s\isacharprime\ \isasymsubseteq\ s\isasymrbrakk\ \isasymLongrightarrow\ r\isacharprime\ O\ s\isacharprime\ \isasymsubseteq\ r\ O\ s% -\rulename{rel_comp_mono} +\rulename{relcomp_mono} \end{isabelle} \indexbold{converse!of a relation}% @@ -695,7 +695,7 @@ Here is a typical law proved about converse and 
composition: \begin{isabelle} (r\ O\ s)\isasyminverse\ =\ s\isasyminverse\ O\ r\isasyminverse -\rulename{converse_rel_comp} +\rulename{converse_relcomp} \end{isabelle} \indexbold{image!under a relation}% diff -r cfb21e03fe2a -r d64a4ef26edb src/Doc/manual.bib --- a/src/Doc/manual.bib Thu Dec 05 17:52:12 2013 +0100 +++ b/src/Doc/manual.bib Thu Dec 05 17:58:03 2013 +0100 @@ -194,7 +194,7 @@ @incollection{basin91, author = {David Basin and Matt Kaufmann}, title = {The {Boyer-Moore} Prover and {Nuprl}: An Experimental - Comparison}, + Comparison}, crossref = {huet-plotkin91}, pages = {89-119}} @@ -472,7 +472,7 @@ @book{constable86, author = {R. L. Constable and others}, title = {Implementing Mathematics with the Nuprl Proof - Development System}, + Development System}, publisher = Prentice, year = 1986} @@ -505,7 +505,7 @@ @incollection{dybjer91, author = {Peter Dybjer}, title = {Inductive Sets and Families in {Martin-L{\"o}f's} Type - Theory and Their Set-Theoretic Semantics}, + Theory and Their Set-Theoretic Semantics}, crossref = {huet-plotkin91}, pages = {280-306}} @@ -533,7 +533,7 @@ @InProceedings{felty91a, Author = {Amy Felty}, Title = {A Logic Program for Transforming Sequent Proofs to Natural - Deduction Proofs}, + Deduction Proofs}, crossref = {extensions91}, pages = {157-178}} @@ -566,9 +566,9 @@ @inproceedings{OBJ, author = {K. Futatsugi and J.A. Goguen and Jean-Pierre Jouannaud - and J. Meseguer}, + and J. Meseguer}, title = {Principles of {OBJ2}}, - booktitle = POPL, + booktitle = POPL, year = 1985, pages = {52-66}} @@ -576,7 +576,7 @@ @book{gallier86, author = {J. H. 
Gallier}, - title = {Logic for Computer Science: + title = {Logic for Computer Science: Foundations of Automatic Theorem Proving}, year = 1986, publisher = {Harper \& Row}} @@ -605,8 +605,8 @@ author = {Jean-Yves Girard}, title = {Proofs and Types}, year = 1989, - publisher = CUP, - note = {Translated by Yves LaFont and Paul Taylor}} + publisher = CUP, + note = {Translated by Yves Lafont and Paul Taylor}} @Book{mgordon-hol, editor = {M. J. C. Gordon and T. F. Melham}, @@ -777,21 +777,21 @@ @article{huet78, author = {G. P. Huet and B. Lang}, - title = {Proving and Applying Program Transformations Expressed with + title = {Proving and Applying Program Transformations Expressed with Second-Order Patterns}, journal = acta, volume = 11, - year = 1978, + year = 1978, pages = {31-55}} @inproceedings{huet88, author = {G{\'e}rard Huet}, title = {Induction Principles Formalized in the {Calculus of - Constructions}}, + Constructions}}, booktitle = {Programming of Future Generation Computers}, editor = {K. Fuchi and M. Nivat}, year = 1988, - pages = {205-216}, + pages = {205-216}, publisher = {Elsevier}} @inproceedings{Huffman-Kuncar:2013:lifting_transfer, @@ -843,7 +843,7 @@ %K @InProceedings{kammueller-locales, - author = {Florian Kamm{\"u}ller and Markus Wenzel and + author = {Florian Kamm{\"u}ller and Markus Wenzel and Lawrence C. Paulson}, title = {Locales: A Sectioning Concept for {Isabelle}}, crossref = {tphols99}} @@ -926,7 +926,7 @@ note = "\url{https://github.com/frelindb/agsyHOL}"} @incollection{lochbihler-2010, - title = "Coinduction", + title = "Coinductive", author = "Andreas Lochbihler", booktitle = "The Archive of Formal Proofs", editor = "Gerwin Klein and Tobias Nipkow and Lawrence C. 
Paulson", @@ -944,7 +944,7 @@ author = {Gavin Lowe}, title = {Breaking and Fixing the {Needham}-{Schroeder} Public-Key Protocol using {CSP} and {FDR}}, - booktitle = {Tools and Algorithms for the Construction and Analysis + booktitle = {Tools and Algorithms for the Construction and Analysis of Systems: second international workshop, TACAS '96}, editor = {T. Margaria and B. Steffen}, series = {LNCS 1055}, @@ -978,7 +978,7 @@ @incollection{melham89, author = {Thomas F. Melham}, title = {Automating Recursive Type Definitions in Higher Order - Logic}, + Logic}, pages = {341-386}, crossref = {birtwistle89}} @@ -1057,7 +1057,7 @@ @InProceedings{NaraschewskiW-TPHOLs98, author = {Wolfgang Naraschewski and Markus Wenzel}, - title = + title = {Object-Oriented Verification based on Record Subtyping in Higher-Order Logic}, crossref = {tphols98}} @@ -1190,8 +1190,8 @@ @book{nordstrom90, author = {Bengt {Nordstr{\"o}m} and Kent Petersson and Jan Smith}, title = {Programming in {Martin-L{\"o}f}'s Type Theory. An - Introduction}, - publisher = {Oxford University Press}, + Introduction}, + publisher = {Oxford University Press}, year = 1990} %O @@ -1251,7 +1251,7 @@ @InProceedings{paulson-COLOG, author = {Lawrence C. Paulson}, title = {A Formulation of the Simple Theory of Types (for - {Isabelle})}, + {Isabelle})}, pages = {246-274}, crossref = {colog88}, url = {\url{http://www.cl.cam.ac.uk/Research/Reports/TR175-lcp-simple.dvi.gz}}} @@ -1304,7 +1304,7 @@ %replaces paulson-final @Article{paulson-mscs, author = {Lawrence C. 
Paulson}, - title = {Final Coalgebras as Greatest Fixed Points + title = {Final Coalgebras as Greatest Fixed Points in {ZF} Set Theory}, journal = {Mathematical Structures in Computer Science}, year = 1999, @@ -1337,9 +1337,9 @@ crossref = {milner-fest}} @book{milner-fest, - title = {Proof, Language, and Interaction: + title = {Proof, Language, and Interaction: Essays in Honor of {Robin Milner}}, - booktitle = {Proof, Language, and Interaction: + booktitle = {Proof, Language, and Interaction: Essays in Honor of {Robin Milner}}, publisher = MIT, year = 2000, @@ -1427,7 +1427,7 @@ @book{paulson87, author = {Lawrence C. Paulson}, title = {Logic and Computation: Interactive proof with Cambridge - LCF}, + LCF}, year = 1987, publisher = CUP} @@ -1470,7 +1470,7 @@ @article{pelletier86, author = {F. J. Pelletier}, title = {Seventy-five Problems for Testing Automatic Theorem - Provers}, + Provers}, journal = JAR, volume = 2, pages = {191-216}, @@ -1486,13 +1486,13 @@ publisher = CUP, year = 1993} -@Article{pitts94, +@Article{pitts94, author = {Andrew M. Pitts}, title = {A Co-induction Principle for Recursively Defined Domains}, journal = TCS, - volume = 124, + volume = 124, pages = {195-219}, - year = 1994} + year = 1994} @Article{plaisted90, author = {David A. Plaisted}, @@ -1561,7 +1561,7 @@ @inproceedings{saaltink-fme, author = {Mark Saaltink and Sentot Kromodimoeljo and Bill Pase and Dan Craigen and Irwin Meisels}, - title = {An {EVES} Data Abstraction Example}, + title = {An {EVES} Data Abstraction Example}, pages = {578-596}, crossref = {fme93}} @@ -1897,7 +1897,7 @@ author = {A. N. Whitehead and B. Russell}, title = {Principia Mathematica}, year = 1962, - publisher = CUP, + publisher = CUP, note = {Paperback edition to *56, abridged from the 2nd edition (1927)}} @@ -1982,9 +1982,9 @@ @book{birtwistle89, editor = {Graham Birtwistle and P. A. 
Subrahmanyam}, title = {Current Trends in Hardware Verification and Automated - Theorem Proving}, + Theorem Proving}, booktitle = {Current Trends in Hardware Verification and Automated - Theorem Proving}, + Theorem Proving}, publisher = {Springer}, year = 1989} @@ -1997,9 +1997,9 @@ @Proceedings{cade12, editor = {Alan Bundy}, - title = {Automated Deduction --- {CADE}-12 + title = {Automated Deduction --- {CADE}-12 International Conference}, - booktitle = {Automated Deduction --- {CADE}-12 + booktitle = {Automated Deduction --- {CADE}-12 International Conference}, year = 1994, series = {LNAI 814}, @@ -2059,7 +2059,7 @@ title = {Extensions of Logic Programming}, booktitle = {Extensions of Logic Programming}, year = 1991, - series = {LNAI 475}, + series = {LNAI 475}, publisher = {Springer}} @proceedings{cade10, @@ -2078,9 +2078,9 @@ year = 1993} @book{wos-fest, - title = {Automated Reasoning and its Applications: + title = {Automated Reasoning and its Applications: Essays in Honor of {Larry Wos}}, - booktitle = {Automated Reasoning and its Applications: + booktitle = {Automated Reasoning and its Applications: Essays in Honor of {Larry Wos}}, publisher = MIT, year = 1997, diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/ATP.thy --- a/src/HOL/ATP.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/ATP.thy Thu Dec 05 17:58:03 2013 +0100 @@ -18,34 +18,34 @@ subsection {* Higher-order reasoning helpers *} -definition fFalse :: bool where [no_atp]: +definition fFalse :: bool where "fFalse \ False" -definition fTrue :: bool where [no_atp]: +definition fTrue :: bool where "fTrue \ True" -definition fNot :: "bool \ bool" where [no_atp]: +definition fNot :: "bool \ bool" where "fNot P \ \ P" -definition fComp :: "('a \ bool) \ 'a \ bool" where [no_atp]: +definition fComp :: "('a \ bool) \ 'a \ bool" where "fComp P = (\x. 
\ P x)" -definition fconj :: "bool \ bool \ bool" where [no_atp]: +definition fconj :: "bool \ bool \ bool" where "fconj P Q \ P \ Q" -definition fdisj :: "bool \ bool \ bool" where [no_atp]: +definition fdisj :: "bool \ bool \ bool" where "fdisj P Q \ P \ Q" -definition fimplies :: "bool \ bool \ bool" where [no_atp]: +definition fimplies :: "bool \ bool \ bool" where "fimplies P Q \ (P \ Q)" -definition fequal :: "'a \ 'a \ bool" where [no_atp]: +definition fequal :: "'a \ 'a \ bool" where "fequal x y \ (x = y)" -definition fAll :: "('a \ bool) \ bool" where [no_atp]: +definition fAll :: "('a \ bool) \ bool" where "fAll P \ All P" -definition fEx :: "('a \ bool) \ bool" where [no_atp]: +definition fEx :: "('a \ bool) \ bool" where "fEx P \ Ex P" lemma fTrue_ne_fFalse: "fFalse \ fTrue" diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/Archimedean_Field.thy --- a/src/HOL/Archimedean_Field.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/Archimedean_Field.thy Thu Dec 05 17:58:03 2013 +0100 @@ -129,12 +129,8 @@ fix y z assume "of_int y \ x \ x < of_int (y + 1)" "of_int z \ x \ x < of_int (z + 1)" - then have - "of_int y \ x" "x < of_int (y + 1)" - "of_int z \ x" "x < of_int (z + 1)" - by simp_all - from le_less_trans [OF `of_int y \ x` `x < of_int (z + 1)`] - le_less_trans [OF `of_int z \ x` `x < of_int (y + 1)`] + with le_less_trans [of "of_int y" "x" "of_int (z + 1)"] + le_less_trans [of "of_int z" "x" "of_int (y + 1)"] show "y = z" by (simp del: of_int_add) qed @@ -208,8 +204,8 @@ lemma floor_numeral [simp]: "floor (numeral v) = numeral v" using floor_of_int [of "numeral v"] by simp -lemma floor_neg_numeral [simp]: "floor (neg_numeral v) = neg_numeral v" - using floor_of_int [of "neg_numeral v"] by simp +lemma floor_neg_numeral [simp]: "floor (- numeral v) = - numeral v" + using floor_of_int [of "- numeral v"] by simp lemma zero_le_floor [simp]: "0 \ floor x \ 0 \ x" by (simp add: le_floor_iff) @@ -222,7 +218,7 @@ by (simp add: le_floor_iff) lemma neg_numeral_le_floor 
[simp]: - "neg_numeral v \ floor x \ neg_numeral v \ x" + "- numeral v \ floor x \ - numeral v \ x" by (simp add: le_floor_iff) lemma zero_less_floor [simp]: "0 < floor x \ 1 \ x" @@ -236,7 +232,7 @@ by (simp add: less_floor_iff) lemma neg_numeral_less_floor [simp]: - "neg_numeral v < floor x \ neg_numeral v + 1 \ x" + "- numeral v < floor x \ - numeral v + 1 \ x" by (simp add: less_floor_iff) lemma floor_le_zero [simp]: "floor x \ 0 \ x < 1" @@ -250,7 +246,7 @@ by (simp add: floor_le_iff) lemma floor_le_neg_numeral [simp]: - "floor x \ neg_numeral v \ x < neg_numeral v + 1" + "floor x \ - numeral v \ x < - numeral v + 1" by (simp add: floor_le_iff) lemma floor_less_zero [simp]: "floor x < 0 \ x < 0" @@ -264,7 +260,7 @@ by (simp add: floor_less_iff) lemma floor_less_neg_numeral [simp]: - "floor x < neg_numeral v \ x < neg_numeral v" + "floor x < - numeral v \ x < - numeral v" by (simp add: floor_less_iff) text {* Addition and subtraction of integers *} @@ -276,10 +272,6 @@ "floor (x + numeral v) = floor x + numeral v" using floor_add_of_int [of x "numeral v"] by simp -lemma floor_add_neg_numeral [simp]: - "floor (x + neg_numeral v) = floor x + neg_numeral v" - using floor_add_of_int [of x "neg_numeral v"] by simp - lemma floor_add_one [simp]: "floor (x + 1) = floor x + 1" using floor_add_of_int [of x 1] by simp @@ -290,10 +282,6 @@ "floor (x - numeral v) = floor x - numeral v" using floor_diff_of_int [of x "numeral v"] by simp -lemma floor_diff_neg_numeral [simp]: - "floor (x - neg_numeral v) = floor x - neg_numeral v" - using floor_diff_of_int [of x "neg_numeral v"] by simp - lemma floor_diff_one [simp]: "floor (x - 1) = floor x - 1" using floor_diff_of_int [of x 1] by simp @@ -357,8 +345,8 @@ lemma ceiling_numeral [simp]: "ceiling (numeral v) = numeral v" using ceiling_of_int [of "numeral v"] by simp -lemma ceiling_neg_numeral [simp]: "ceiling (neg_numeral v) = neg_numeral v" - using ceiling_of_int [of "neg_numeral v"] by simp +lemma ceiling_neg_numeral [simp]: 
"ceiling (- numeral v) = - numeral v" + using ceiling_of_int [of "- numeral v"] by simp lemma ceiling_le_zero [simp]: "ceiling x \ 0 \ x \ 0" by (simp add: ceiling_le_iff) @@ -371,7 +359,7 @@ by (simp add: ceiling_le_iff) lemma ceiling_le_neg_numeral [simp]: - "ceiling x \ neg_numeral v \ x \ neg_numeral v" + "ceiling x \ - numeral v \ x \ - numeral v" by (simp add: ceiling_le_iff) lemma ceiling_less_zero [simp]: "ceiling x < 0 \ x \ -1" @@ -385,7 +373,7 @@ by (simp add: ceiling_less_iff) lemma ceiling_less_neg_numeral [simp]: - "ceiling x < neg_numeral v \ x \ neg_numeral v - 1" + "ceiling x < - numeral v \ x \ - numeral v - 1" by (simp add: ceiling_less_iff) lemma zero_le_ceiling [simp]: "0 \ ceiling x \ -1 < x" @@ -399,7 +387,7 @@ by (simp add: le_ceiling_iff) lemma neg_numeral_le_ceiling [simp]: - "neg_numeral v \ ceiling x \ neg_numeral v - 1 < x" + "- numeral v \ ceiling x \ - numeral v - 1 < x" by (simp add: le_ceiling_iff) lemma zero_less_ceiling [simp]: "0 < ceiling x \ 0 < x" @@ -413,7 +401,7 @@ by (simp add: less_ceiling_iff) lemma neg_numeral_less_ceiling [simp]: - "neg_numeral v < ceiling x \ neg_numeral v < x" + "- numeral v < ceiling x \ - numeral v < x" by (simp add: less_ceiling_iff) text {* Addition and subtraction of integers *} @@ -425,10 +413,6 @@ "ceiling (x + numeral v) = ceiling x + numeral v" using ceiling_add_of_int [of x "numeral v"] by simp -lemma ceiling_add_neg_numeral [simp]: - "ceiling (x + neg_numeral v) = ceiling x + neg_numeral v" - using ceiling_add_of_int [of x "neg_numeral v"] by simp - lemma ceiling_add_one [simp]: "ceiling (x + 1) = ceiling x + 1" using ceiling_add_of_int [of x 1] by simp @@ -439,10 +423,6 @@ "ceiling (x - numeral v) = ceiling x - numeral v" using ceiling_diff_of_int [of x "numeral v"] by simp -lemma ceiling_diff_neg_numeral [simp]: - "ceiling (x - neg_numeral v) = ceiling x - neg_numeral v" - using ceiling_diff_of_int [of x "neg_numeral v"] by simp - lemma ceiling_diff_one [simp]: "ceiling (x - 1) = ceiling 
x - 1" using ceiling_diff_of_int [of x 1] by simp diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/BNF.thy --- a/src/HOL/BNF/BNF.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/BNF/BNF.thy Thu Dec 05 17:58:03 2013 +0100 @@ -10,7 +10,7 @@ header {* Bounded Natural Functors for (Co)datatypes *} theory BNF -imports More_BNFs BNF_LFP BNF_GFP Coinduction +imports Countable_Set_Type BNF_LFP BNF_GFP BNF_Decl begin hide_const (open) image2 image2p vimage2p Gr Grp collect fsts snds setl setr diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/BNF_Comp.thy --- a/src/HOL/BNF/BNF_Comp.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/BNF/BNF_Comp.thy Thu Dec 05 17:58:03 2013 +0100 @@ -11,6 +11,9 @@ imports Basic_BNFs begin +lemma wpull_id: "wpull UNIV B1 B2 id id id id" +unfolding wpull_def by simp + lemma empty_natural: "(\_. {}) o f = image g o (\_. {})" by (rule ext) simp diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/BNF_Decl.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/BNF/BNF_Decl.thy Thu Dec 05 17:58:03 2013 +0100 @@ -0,0 +1,18 @@ +(* Title: HOL/BNF/BNF_Decl.thy + Author: Dmitriy Traytel, TU Muenchen + Copyright 2013 + +Axiomatic declaration of bounded natural functors. 
+*) + +header {* Axiomatic declaration of Bounded Natural Functors *} + +theory BNF_Decl +imports BNF_Def +keywords + "bnf_decl" :: thy_decl +begin + +ML_file "Tools/bnf_decl.ML" + +end diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/BNF_Def.thy --- a/src/HOL/BNF/BNF_Def.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/BNF/BNF_Def.thy Thu Dec 05 17:58:03 2013 +0100 @@ -9,6 +9,8 @@ theory BNF_Def imports BNF_Util + (*FIXME: register fundef_cong attribute in an interpretation to remove this dependency*) + FunDef keywords "print_bnfs" :: diag and "bnf" :: thy_goal @@ -190,17 +192,17 @@ lemma vimage2pI: "R (f x) (g y) \ vimage2p f g R x y" unfolding vimage2p_def by - -lemma vimage2pD: "vimage2p f g R x y \ R (f x) (g y)" - unfolding vimage2p_def by - - lemma fun_rel_iff_leq_vimage2p: "(fun_rel R S) f g = (R \ vimage2p f g S)" unfolding fun_rel_def vimage2p_def by auto lemma convol_image_vimage2p: " ` Collect (split (vimage2p f g R)) \ Collect (split R)" unfolding vimage2p_def convol_def by auto +(*FIXME: duplicates lemma from Record.thy*) +lemma o_eq_dest_lhs: "a o b = c \ a (b v) = c v" + by clarsimp + ML_file "Tools/bnf_def_tactics.ML" ML_file "Tools/bnf_def.ML" - end diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/BNF_FP_Base.thy --- a/src/HOL/BNF/BNF_FP_Base.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/BNF/BNF_FP_Base.thy Thu Dec 05 17:58:03 2013 +0100 @@ -13,12 +13,6 @@ imports BNF_Comp Ctr_Sugar begin -lemma not_TrueE: "\ True \ P" -by (erule notE, rule TrueI) - -lemma neq_eq_eq_contradict: "\t \ u; s = t; s = u\ \ P" -by fast - lemma mp_conj: "(P \ Q) \ R \ P \ R \ Q" by auto @@ -172,7 +166,5 @@ ML_file "Tools/bnf_fp_n2m.ML" ML_file "Tools/bnf_fp_n2m_sugar.ML" ML_file "Tools/bnf_fp_rec_sugar_util.ML" -ML_file "Tools/bnf_fp_rec_sugar_tactics.ML" -ML_file "Tools/bnf_fp_rec_sugar.ML" end diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/BNF_GFP.thy --- a/src/HOL/BNF/BNF_GFP.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/BNF/BNF_GFP.thy Thu Dec 05 17:58:03 
2013 +0100 @@ -8,21 +8,29 @@ header {* Greatest Fixed Point Operation on Bounded Natural Functors *} theory BNF_GFP -imports BNF_FP_Base Equiv_Relations_More "~~/src/HOL/Library/Sublist" +imports BNF_FP_Base Equiv_Relations_More List_Prefix keywords "codatatype" :: thy_decl and "primcorecursive" :: thy_goal and "primcorec" :: thy_decl begin +lemma not_TrueE: "\ True \ P" +by (erule notE, rule TrueI) + +lemma neq_eq_eq_contradict: "\t \ u; s = t; s = u\ \ P" +by fast + lemma sum_case_expand_Inr: "f o Inl = g \ f x = sum_case g (f o Inr) x" by (auto split: sum.splits) lemma sum_case_expand_Inr': "f o Inl = g \ h = f o Inr \ sum_case g h = f" -by (metis sum_case_o_inj(1,2) surjective_sum) +apply rule + apply (rule ext, force split: sum.split) +by (rule ext, metis sum_case_o_inj(2)) lemma converse_Times: "(A \ B) ^-1 = B \ A" -by auto +by fast lemma equiv_proj: assumes e: "equiv A R" and "z \ R" @@ -37,7 +45,6 @@ (* Operators: *) definition image2 where "image2 A f g = {(f a, g a) | a. a \ A}" - lemma Id_onD: "(a, b) \ Id_on A \ a = b" unfolding Id_on_def by simp @@ -56,9 +63,6 @@ lemma Id_on_Gr: "Id_on A = Gr A id" unfolding Id_on_def Gr_def by auto -lemma Id_on_UNIV_I: "x = y \ (x, y) \ Id_on UNIV" -unfolding Id_on_def by auto - lemma image2_eqI: "\b = f x; c = g x; x \ A\ \ (b, c) \ image2 A f g" unfolding image2_def by auto @@ -77,6 +81,12 @@ lemma Gr_incl: "Gr A f \ A <*> B \ f ` A \ B" unfolding Gr_def by auto +lemma subset_Collect_iff: "B \ A \ (B \ {x \ A. P x}) = (\x \ B. P x)" +by blast + +lemma subset_CollectI: "B \ A \ (\x. x \ B \ Q x \ P x) \ ({x \ B. Q x} \ {x \ A. P x})" +by blast + lemma in_rel_Collect_split_eq: "in_rel (Collect (split X)) = X" unfolding fun_eq_iff by auto @@ -130,9 +140,6 @@ "R \ relInvImage UNIV (relImage R f) f" unfolding relInvImage_def relImage_def by auto -lemma equiv_Image: "equiv A R \ (\a b. 
(a, b) \ R \ a \ A \ b \ A \ R `` {a} = R `` {b})" -unfolding equiv_def refl_on_def Image_def by (auto intro: transD symD) - lemma relImage_proj: assumes "equiv A R" shows "relImage R (proj R) \ Id_on (A//R)" @@ -143,7 +150,7 @@ lemma relImage_relInvImage: assumes "R \ f ` A <*> f ` A" shows "relImage (relInvImage A R f) f = R" -using assms unfolding relImage_def relInvImage_def by fastforce +using assms unfolding relImage_def relInvImage_def by fast lemma subst_Pair: "P x y \ a = (x, y) \ P (fst a) (snd a)" by simp @@ -159,6 +166,8 @@ (*Extended Sublist*) +definition clists where "clists r = |lists (Field r)|" + definition prefCl where "prefCl Kl = (\ kl1 kl2. prefixeq kl1 kl2 \ kl2 \ Kl \ kl1 \ Kl)" definition PrefCl where @@ -255,13 +264,18 @@ shows "\ a. a \ A \ p1 a = b1 \ p2 a = b2" using assms unfolding wpull_def by blast -lemma pickWP: +lemma pickWP_raw: assumes "wpull A B1 B2 f1 f2 p1 p2" and "b1 \ B1" and "b2 \ B2" and "f1 b1 = f2 b2" -shows "pickWP A p1 p2 b1 b2 \ A" - "p1 (pickWP A p1 p2 b1 b2) = b1" - "p2 (pickWP A p1 p2 b1 b2) = b2" -unfolding pickWP_def using assms someI_ex[OF pickWP_pred] by fastforce+ +shows "pickWP A p1 p2 b1 b2 \ A + \ p1 (pickWP A p1 p2 b1 b2) = b1 + \ p2 (pickWP A p1 p2 b1 b2) = b2" +unfolding pickWP_def using assms someI_ex[OF pickWP_pred] by fastforce + +lemmas pickWP = + pickWP_raw[THEN conjunct1] + pickWP_raw[THEN conjunct2, THEN conjunct1] + pickWP_raw[THEN conjunct2, THEN conjunct2] lemma Inl_Field_csum: "a \ Field r \ Inl a \ Field (r +c s)" unfolding Field_card_of csum_def by auto @@ -293,21 +307,17 @@ lemma image2pI: "R x y \ (image2p f g R) (f x) (g y)" unfolding image2p_def by blast -lemma image2p_eqI: "\fx = f x; gy = g y; R x y\ \ (image2p f g R) fx gy" - unfolding image2p_def by blast - lemma image2pE: "\(image2p f g R) fx gy; (\x y. 
fx = f x \ gy = g y \ R x y \ P)\ \ P" unfolding image2p_def by blast lemma fun_rel_iff_geq_image2p: "(fun_rel R S) f g = (image2p f g R \ S)" unfolding fun_rel_def image2p_def by auto -lemma convol_image_image2p: " ` Collect (split R) \ Collect (split (image2p f g R))" - unfolding convol_def image2p_def by fastforce - lemma fun_rel_image2p: "(fun_rel R (image2p f g R)) f g" unfolding fun_rel_def image2p_def by auto +ML_file "Tools/bnf_gfp_rec_sugar_tactics.ML" +ML_file "Tools/bnf_gfp_rec_sugar.ML" ML_file "Tools/bnf_gfp_util.ML" ML_file "Tools/bnf_gfp_tactics.ML" ML_file "Tools/bnf_gfp.ML" diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/BNF_LFP.thy --- a/src/HOL/BNF/BNF_LFP.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/BNF/BNF_LFP.thy Thu Dec 05 17:58:03 2013 +0100 @@ -230,6 +230,7 @@ lemma predicate2D_vimage2p: "\R \ vimage2p f g S; R x y\ \ S (f x) (g y)" unfolding vimage2p_def by auto +ML_file "Tools/bnf_lfp_rec_sugar.ML" ML_file "Tools/bnf_lfp_util.ML" ML_file "Tools/bnf_lfp_tactics.ML" ML_file "Tools/bnf_lfp.ML" diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/BNF_Util.thy --- a/src/HOL/BNF/BNF_Util.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/BNF/BNF_Util.thy Thu Dec 05 17:58:03 2013 +0100 @@ -9,15 +9,11 @@ header {* Library for Bounded Natural Functors *} theory BNF_Util -imports Ctr_Sugar "../Cardinals/Cardinal_Arithmetic" +imports "../Cardinals/Cardinal_Arithmetic_FP" + (*FIXME: define fun_rel here, reuse in Transfer once this theory is in HOL*) + Transfer begin -lemma subset_Collect_iff: "B \ A \ (B \ {x \ A. P x}) = (\x \ B. P x)" -by blast - -lemma subset_CollectI: "B \ A \ (\x. x \ B \ Q x \ P x) \ ({x \ B. Q x} \ {x \ A. P x})" -by blast - definition collect where "collect F x = (\f \ F. f x)" @@ -32,12 +28,6 @@ (\ b1 b2. b1 \ B1 \ b2 \ B2 \ f1 b1 = f2 b2 \ (\ a \ A. 
e1 (p1 a) = e1 b1 \ e2 (p2 a) = e2 b2))" -lemma fst_snd: "\snd x = (y, z)\ \ fst (snd x) = y" -by simp - -lemma snd_snd: "\snd x = (y, z)\ \ snd (snd x) = z" -by simp - lemma fstI: "x = (y, z) \ fst x = y" by simp @@ -47,9 +37,6 @@ lemma bijI: "\\x y. (f x = f y) = (x = y); \y. \x. y = f x\ \ bij f" unfolding bij_def inj_on_def by auto blast -lemma Collect_pair_mem_eq: "{(x, y). (x, y) \ R} = R" -by simp - (* Operator: *) definition "Gr A f = {(a, f a) | a. a \ A}" diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/Basic_BNFs.thy --- a/src/HOL/BNF/Basic_BNFs.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/BNF/Basic_BNFs.thy Thu Dec 05 17:58:03 2013 +0100 @@ -11,31 +11,29 @@ theory Basic_BNFs imports BNF_Def + (*FIXME: define relators here, reuse in Lifting_* once this theory is in HOL*) + Lifting_Sum + Lifting_Product + Main begin -lemma wpull_id: "wpull UNIV B1 B2 id id id id" -unfolding wpull_def by simp - -lemmas natLeq_card_order = natLeq_Card_order[unfolded Field_natLeq] - -lemma ctwo_card_order: "card_order ctwo" -using Card_order_ctwo by (unfold ctwo_def Field_card_of) - -lemma natLeq_cinfinite: "cinfinite natLeq" -unfolding cinfinite_def Field_natLeq by (rule nat_infinite) - lemma wpull_Grp_def: "wpull A B1 B2 f1 f2 p1 p2 \ Grp B1 f1 OO (Grp B2 f2)\\ \ (Grp A p1)\\ OO Grp A p2" unfolding wpull_def Grp_def by auto -bnf ID: "id :: ('a \ 'b) \ 'a \ 'b" ["\x. {x}"] "\_:: 'a. natLeq" ["id :: 'a \ 'a"] - "id :: ('a \ 'b \ bool) \ 'a \ 'b \ bool" +bnf ID: 'a + map: "id :: ('a \ 'b) \ 'a \ 'b" + sets: "\x. {x}" + bd: natLeq + rel: "id :: ('a \ 'b \ bool) \ 'a \ 'b \ bool" apply (auto simp: Grp_def fun_eq_iff relcompp.simps natLeq_card_order natLeq_cinfinite) apply (rule ordLess_imp_ordLeq[OF finite_ordLess_infinite[OF _ natLeq_Well_order]]) apply (auto simp add: Field_card_of Field_natLeq card_of_well_order_on)[3] done -bnf DEADID: "id :: 'a \ 'a" [] "\_:: 'a. natLeq +c |UNIV :: 'a set|" ["SOME x :: 'a. 
True"] - "op = :: 'a \ 'a \ bool" +bnf DEADID: 'a + map: "id :: 'a \ 'a" + bd: "natLeq +c |UNIV :: 'a set|" + rel: "op = :: 'a \ 'a \ bool" by (auto simp add: wpull_Grp_def Grp_def card_order_csum natLeq_card_order card_of_card_order_on cinfinite_csum natLeq_cinfinite) @@ -48,15 +46,20 @@ lemmas sum_set_defs = setl_def[abs_def] setr_def[abs_def] -bnf sum_map [setl, setr] "\_::'a + 'b. natLeq" [Inl, Inr] sum_rel +bnf "'a + 'b" + map: sum_map + sets: setl setr + bd: natLeq + wits: Inl Inr + rel: sum_rel proof - show "sum_map id id = id" by (rule sum_map.id) next - fix f1 f2 g1 g2 + fix f1 :: "'o \ 's" and f2 :: "'p \ 't" and g1 :: "'s \ 'q" and g2 :: "'t \ 'r" show "sum_map (g1 o f1) (g2 o f2) = sum_map g1 g2 o sum_map f1 f2" by (rule sum_map.comp[symmetric]) next - fix x f1 f2 g1 g2 + fix x and f1 :: "'o \ 'q" and f2 :: "'p \ 'r" and g1 g2 assume a1: "\z. z \ setl x \ f1 z = g1 z" and a2: "\z. z \ setr x \ f2 z = g2 z" thus "sum_map f1 f2 x = sum_map g1 g2 x" @@ -66,11 +69,11 @@ case Inr thus ?thesis using a2 by (clarsimp simp: setr_def) qed next - fix f1 f2 + fix f1 :: "'o \ 'q" and f2 :: "'p \ 'r" show "setl o sum_map f1 f2 = image f1 o setl" by (rule ext, unfold o_apply) (simp add: setl_def split: sum.split) next - fix f1 f2 + fix f1 :: "'o \ 'q" and f2 :: "'p \ 'r" show "setr o sum_map f1 f2 = image f2 o setr" by (rule ext, unfold o_apply) (simp add: setr_def split: sum.split) next @@ -78,13 +81,13 @@ next show "cinfinite natLeq" by (rule natLeq_cinfinite) next - fix x + fix x :: "'o + 'p" show "|setl x| \o natLeq" apply (rule ordLess_imp_ordLeq) apply (rule finite_iff_ordLess_natLeq[THEN iffD1]) by (simp add: setl_def split: sum.split) next - fix x + fix x :: "'o + 'p" show "|setr x| \o natLeq" apply (rule ordLess_imp_ordLeq) apply (rule finite_iff_ordLess_natLeq[THEN iffD1]) @@ -148,7 +151,11 @@ lemmas prod_set_defs = fsts_def[abs_def] snds_def[abs_def] -bnf map_pair [fsts, snds] "\_::'a \ 'b. 
natLeq" [Pair] prod_rel +bnf "'a \ 'b" + map: map_pair + sets: fsts snds + bd: natLeq + rel: prod_rel proof (unfold prod_set_defs) show "map_pair id id = id" by (rule map_pair.id) next @@ -193,7 +200,7 @@ Grp {x. {fst x} \ Collect (split R) \ {snd x} \ Collect (split S)} (map_pair snd snd)" unfolding prod_set_defs prod_rel_def Grp_def relcompp.simps conversep.simps fun_eq_iff by auto -qed simp+ +qed (* Categorical version of pullback: *) lemma wpull_cat: @@ -215,24 +222,11 @@ thus ?thesis using that by fastforce qed -lemma card_of_bounded_range: - "|{f :: 'd \ 'a. range f \ B}| \o |Func (UNIV :: 'd set) B|" (is "|?LHS| \o |?RHS|") -proof - - let ?f = "\f. %x. if f x \ B then f x else undefined" - have "inj_on ?f ?LHS" unfolding inj_on_def - proof (unfold fun_eq_iff, safe) - fix g :: "'d \ 'a" and f :: "'d \ 'a" and x - assume "range f \ B" "range g \ B" and eq: "\x. ?f f x = ?f g x" - hence "f x \ B" "g x \ B" by auto - with eq have "Some (f x) = Some (g x)" by metis - thus "f x = g x" by simp - qed - moreover have "?f ` ?LHS \ ?RHS" unfolding Func_def by fastforce - ultimately show ?thesis using card_of_ordLeq by fast -qed - -bnf "op \" [range] "\_:: 'a \ 'b. natLeq +c |UNIV :: 'a set|" ["%c x::'b::type. c::'a::type"] - "fun_rel op =" +bnf "'a \ 'b" + map: "op \" + sets: range + bd: "natLeq +c |UNIV :: 'a set|" + rel: "fun_rel op =" proof fix f show "id \ f = id f" by simp next @@ -258,7 +252,7 @@ next fix f :: "'d => 'a" have "|range f| \o | (UNIV::'d set) |" (is "_ \o ?U") by (rule card_of_image) - also have "?U \o natLeq +c ?U" by (rule ordLeq_csum2) (rule card_of_Card_order) + also have "?U \o natLeq +c ?U" by (rule ordLeq_csum2) (rule card_of_Card_order) finally show "|range f| \o natLeq +c ?U" . next fix A B1 B2 f1 f2 p1 p2 assume p: "wpull A B1 B2 f1 f2 p1 p2" @@ -277,7 +271,7 @@ (Grp {x. range x \ Collect (split R)} (op \ fst))\\ OO Grp {x. 
range x \ Collect (split R)} (op \ snd)" unfolding fun_rel_def Grp_def fun_eq_iff relcompp.simps conversep.simps subset_iff image_iff - by auto (force, metis pair_collapse) -qed auto + by auto (force, metis (no_types) pair_collapse) +qed end diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/Coinduction.thy --- a/src/HOL/BNF/Coinduction.thy Thu Dec 05 17:52:12 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,19 +0,0 @@ -(* Title: HOL/BNF/Coinduction.thy - Author: Johannes Hölzl, TU Muenchen - Author: Dmitriy Traytel, TU Muenchen - Copyright 2013 - -Coinduction method that avoids some boilerplate compared to coinduct. -*) - -header {* Coinduction Method *} - -theory Coinduction -imports BNF_Util -begin - -ML_file "Tools/coinduction.ML" - -setup Coinduction.setup - -end diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/Countable_Set_Type.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/BNF/Countable_Set_Type.thy Thu Dec 05 17:58:03 2013 +0100 @@ -0,0 +1,212 @@ +(* Title: HOL/BNF/Countable_Set_Type.thy + Author: Andrei Popescu, TU Muenchen + Copyright 2012 + +Type of (at most) countable sets. 
+*) + +header {* Type of (at Most) Countable Sets *} + +theory Countable_Set_Type +imports + More_BNFs + "~~/src/HOL/Cardinals/Cardinals" + "~~/src/HOL/Library/Countable_Set" +begin + +subsection{* Cardinal stuff *} + +lemma countable_card_of_nat: "countable A \ |A| \o |UNIV::nat set|" + unfolding countable_def card_of_ordLeq[symmetric] by auto + +lemma countable_card_le_natLeq: "countable A \ |A| \o natLeq" + unfolding countable_card_of_nat using card_of_nat ordLeq_ordIso_trans ordIso_symmetric by blast + +lemma countable_or_card_of: +assumes "countable A" +shows "(finite A \ |A| + (infinite A \ |A| =o |UNIV::nat set| )" +proof (cases "finite A") + case True thus ?thesis by (metis finite_iff_cardOf_nat) +next + case False with assms show ?thesis + by (metis countable_card_of_nat infinite_iff_card_of_nat ordIso_iff_ordLeq) +qed + +lemma countable_cases_card_of[elim]: + assumes "countable A" + obtains (Fin) "finite A" "|A| (\ f::'a\nat. finite A \ inj_on f A) \ (\ f::'a\nat. infinite A \ bij_betw f A UNIV)" + by (elim countable_enum_cases) fastforce+ + +lemma countable_cases[elim]: + assumes "countable A" + obtains (Fin) f :: "'a\nat" where "finite A" "inj_on f A" + | (Inf) f :: "'a\nat" where "infinite A" "bij_betw f A UNIV" + using assms countable_or by metis + +lemma countable_ordLeq: +assumes "|A| \o |B|" and "countable B" +shows "countable A" +using assms unfolding countable_card_of_nat by(rule ordLeq_transitive) + +lemma countable_ordLess: +assumes AB: "|A| 'a cset \ bool" is "op \" parametric member_transfer + .. +lift_definition cempty :: "'a cset" is "{}" parametric empty_transfer + by (rule countable_empty) +lift_definition cinsert :: "'a \ 'a cset \ 'a cset" is insert parametric Lifting_Set.insert_transfer + by (rule countable_insert) +lift_definition csingle :: "'a \ 'a cset" is "\x. 
{x}" + by (rule countable_insert[OF countable_empty]) +lift_definition cUn :: "'a cset \ 'a cset \ 'a cset" is "op \" parametric union_transfer + by (rule countable_Un) +lift_definition cInt :: "'a cset \ 'a cset \ 'a cset" is "op \" parametric inter_transfer + by (rule countable_Int1) +lift_definition cDiff :: "'a cset \ 'a cset \ 'a cset" is "op -" parametric Diff_transfer + by (rule countable_Diff) +lift_definition cimage :: "('a \ 'b) \ 'a cset \ 'b cset" is "op `" parametric image_transfer + by (rule countable_image) + +subsection {* Registration as BNF *} + +lemma card_of_countable_sets_range: +fixes A :: "'a set" +shows "|{X. X \ A \ countable X \ X \ {}}| \o |{f::nat \ 'a. range f \ A}|" +apply(rule card_of_ordLeqI[of from_nat_into]) using inj_on_from_nat_into +unfolding inj_on_def by auto + +lemma card_of_countable_sets_Func: +"|{X. X \ A \ countable X \ X \ {}}| \o |A| ^c natLeq" +using card_of_countable_sets_range card_of_Func_UNIV[THEN ordIso_symmetric] +unfolding cexp_def Field_natLeq Field_card_of +by (rule ordLeq_ordIso_trans) + +lemma ordLeq_countable_subsets: +"|A| \o |{X. X \ A \ countable X}|" +apply (rule card_of_ordLeqI[of "\ a. {a}"]) unfolding inj_on_def by auto + +lemma finite_countable_subset: +"finite {X. X \ A \ countable X} \ finite A" +apply default + apply (erule contrapos_pp) + apply (rule card_of_ordLeq_infinite) + apply (rule ordLeq_countable_subsets) + apply assumption +apply (rule finite_Collect_conjI) +apply (rule disjI1) +by (erule finite_Collect_subsets) + +lemma rcset_to_rcset: "countable A \ rcset (the_inv rcset A) = A" + apply (rule f_the_inv_into_f[unfolded inj_on_def image_iff]) + apply transfer' apply simp + apply transfer' apply simp + done + +lemma Collect_Int_Times: +"{(x, y). R x y} \ A \ B = {(x, y). R x y \ x \ A \ y \ B}" +by auto + +definition cset_rel :: "('a \ 'b \ bool) \ 'a cset \ 'b cset \ bool" where +"cset_rel R a b \ + (\t \ rcset a. \u \ rcset b. R t u) \ + (\t \ rcset b. \u \ rcset a. 
R u t)" + +lemma cset_rel_aux: +"(\t \ rcset a. \u \ rcset b. R t u) \ (\t \ rcset b. \u \ rcset a. R u t) \ + ((Grp {x. rcset x \ {(a, b). R a b}} (cimage fst))\\ OO + Grp {x. rcset x \ {(a, b). R a b}} (cimage snd)) a b" (is "?L = ?R") +proof + assume ?L + def R' \ "the_inv rcset (Collect (split R) \ (rcset a \ rcset b))" + (is "the_inv rcset ?L'") + have L: "countable ?L'" by auto + hence *: "rcset R' = ?L'" unfolding R'_def using fset_to_fset by (intro rcset_to_rcset) + thus ?R unfolding Grp_def relcompp.simps conversep.simps + proof (intro CollectI prod_caseI exI[of _ a] exI[of _ b] exI[of _ R'] conjI refl) + from * `?L` show "a = cimage fst R'" by transfer (auto simp: image_def Collect_Int_Times) + next + from * `?L` show "b = cimage snd R'" by transfer (auto simp: image_def Collect_Int_Times) + qed simp_all +next + assume ?R thus ?L unfolding Grp_def relcompp.simps conversep.simps + by transfer force +qed + +bnf "'a cset" + map: cimage + sets: rcset + bd: natLeq + wits: "cempty" + rel: cset_rel +proof - + show "cimage id = id" by transfer' simp +next + fix f g show "cimage (g \ f) = cimage g \ cimage f" by transfer' fastforce +next + fix C f g assume eq: "\a. a \ rcset C \ f a = g a" + thus "cimage f C = cimage g C" by transfer force +next + fix f show "rcset \ cimage f = op ` f \ rcset" by transfer' fastforce +next + show "card_order natLeq" by (rule natLeq_card_order) +next + show "cinfinite natLeq" by (rule natLeq_cinfinite) +next + fix C show "|rcset C| \o natLeq" by transfer (unfold countable_card_le_natLeq) +next + fix A B1 B2 f1 f2 p1 p2 + assume wp: "wpull A B1 B2 f1 f2 p1 p2" + show "wpull {x. rcset x \ A} {x. rcset x \ B1} {x. 
rcset x \ B2} + (cimage f1) (cimage f2) (cimage p1) (cimage p2)" + unfolding wpull_def proof safe + fix y1 y2 + assume Y1: "rcset y1 \ B1" and Y2: "rcset y2 \ B2" + assume "cimage f1 y1 = cimage f2 y2" + hence EQ: "f1 ` (rcset y1) = f2 ` (rcset y2)" by transfer + with Y1 Y2 obtain X where X: "X \ A" + and Y1: "p1 ` X = rcset y1" and Y2: "p2 ` X = rcset y2" + using wpull_image[OF wp] unfolding wpull_def Pow_def Bex_def mem_Collect_eq + by (auto elim!: allE[of _ "rcset y1"] allE[of _ "rcset y2"]) + have "\ y1' \ rcset y1. \ x. x \ X \ y1' = p1 x" using Y1 by auto + then obtain q1 where q1: "\ y1' \ rcset y1. q1 y1' \ X \ y1' = p1 (q1 y1')" by metis + have "\ y2' \ rcset y2. \ x. x \ X \ y2' = p2 x" using Y2 by auto + then obtain q2 where q2: "\ y2' \ rcset y2. q2 y2' \ X \ y2' = p2 (q2 y2')" by metis + def X' \ "q1 ` (rcset y1) \ q2 ` (rcset y2)" + have X': "X' \ A" and Y1: "p1 ` X' = rcset y1" and Y2: "p2 ` X' = rcset y2" + using X Y1 Y2 q1 q2 unfolding X'_def by fast+ + have fX': "countable X'" unfolding X'_def by simp + then obtain x where X'eq: "X' = rcset x" by transfer blast + show "\x\{x. rcset x \ A}. cimage p1 x = y1 \ cimage p2 x = y2" + using X' Y1 Y2 unfolding X'eq by (intro bexI[of _ "x"]) (transfer, auto) + qed +next + fix R + show "cset_rel R = + (Grp {x. rcset x \ Collect (split R)} (cimage fst))\\ OO + Grp {x. rcset x \ Collect (split R)} (cimage snd)" + unfolding cset_rel_def[abs_def] cset_rel_aux by simp +qed (transfer, simp) + +end diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/Countable_Type.thy --- a/src/HOL/BNF/Countable_Type.thy Thu Dec 05 17:52:12 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,91 +0,0 @@ -(* Title: HOL/BNF/Countable_Type.thy - Author: Andrei Popescu, TU Muenchen - Copyright 2012 - -(At most) countable sets. 
-*) - -header {* (At Most) Countable Sets *} - -theory Countable_Type -imports - "~~/src/HOL/Cardinals/Cardinals" - "~~/src/HOL/Library/Countable_Set" -begin - -subsection{* Cardinal stuff *} - -lemma countable_card_of_nat: "countable A \ |A| \o |UNIV::nat set|" - unfolding countable_def card_of_ordLeq[symmetric] by auto - -lemma countable_card_le_natLeq: "countable A \ |A| \o natLeq" - unfolding countable_card_of_nat using card_of_nat ordLeq_ordIso_trans ordIso_symmetric by blast - -lemma countable_or_card_of: -assumes "countable A" -shows "(finite A \ |A| - (infinite A \ |A| =o |UNIV::nat set| )" -proof (cases "finite A") - case True thus ?thesis by (metis finite_iff_cardOf_nat) -next - case False with assms show ?thesis - by (metis countable_card_of_nat infinite_iff_card_of_nat ordIso_iff_ordLeq) -qed - -lemma countable_cases_card_of[elim]: - assumes "countable A" - obtains (Fin) "finite A" "|A| (\ f::'a\nat. finite A \ inj_on f A) \ (\ f::'a\nat. infinite A \ bij_betw f A UNIV)" - by (elim countable_enum_cases) fastforce+ - -lemma countable_cases[elim]: - assumes "countable A" - obtains (Fin) f :: "'a\nat" where "finite A" "inj_on f A" - | (Inf) f :: "'a\nat" where "infinite A" "bij_betw f A UNIV" - using assms countable_or by metis - -lemma countable_ordLeq: -assumes "|A| \o |B|" and "countable B" -shows "countable A" -using assms unfolding countable_card_of_nat by(rule ordLeq_transitive) - -lemma countable_ordLess: -assumes AB: "|A| 'a cset \ bool" is "op \" parametric member_transfer - .. -lift_definition cempty :: "'a cset" is "{}" parametric empty_transfer - by (rule countable_empty) -lift_definition cinsert :: "'a \ 'a cset \ 'a cset" is insert parametric Lifting_Set.insert_transfer - by (rule countable_insert) -lift_definition csingle :: "'a \ 'a cset" is "\x. 
{x}" - by (rule countable_insert[OF countable_empty]) -lift_definition cUn :: "'a cset \ 'a cset \ 'a cset" is "op \" parametric union_transfer - by (rule countable_Un) -lift_definition cInt :: "'a cset \ 'a cset \ 'a cset" is "op \" parametric inter_transfer - by (rule countable_Int1) -lift_definition cDiff :: "'a cset \ 'a cset \ 'a cset" is "op -" parametric Diff_transfer - by (rule countable_Diff) -lift_definition cimage :: "('a \ 'b) \ 'a cset \ 'b cset" is "op `" parametric image_transfer - by (rule countable_image) - -end diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/Ctr_Sugar.thy --- a/src/HOL/BNF/Ctr_Sugar.thy Thu Dec 05 17:52:12 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,30 +0,0 @@ -(* Title: HOL/BNF/Ctr_Sugar.thy - Author: Jasmin Blanchette, TU Muenchen - Copyright 2012 - -Wrapping existing freely generated type's constructors. -*) - -header {* Wrapping Existing Freely Generated Type's Constructors *} - -theory Ctr_Sugar -imports Main -keywords - "wrap_free_constructors" :: thy_goal and - "no_discs_sels" and - "rep_compat" -begin - -lemma iffI_np: "\x \ \ y; \ x \ y\ \ \ x \ y" -by (erule iffI) (erule contrapos_pn) - -lemma iff_contradict: -"\ P \ P \ Q \ Q \ R" -"\ Q \ P \ Q \ P \ R" -by blast+ - -ML_file "Tools/ctr_sugar_util.ML" -ML_file "Tools/ctr_sugar_tactics.ML" -ML_file "Tools/ctr_sugar.ML" - -end diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/Equiv_Relations_More.thy --- a/src/HOL/BNF/Equiv_Relations_More.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/BNF/Equiv_Relations_More.thy Thu Dec 05 17:58:03 2013 +0100 @@ -59,7 +59,7 @@ lemma in_quotient_imp_in_rel: "\equiv A r; X \ A//r; {x,y} \ X\ \ (x,y) \ r" -using quotient_eq_iff by fastforce +using quotient_eq_iff[THEN iffD1] by fastforce lemma in_quotient_imp_closed: "\equiv A r; X \ A//r; x \ X; (x,y) \ r\ \ y \ X" diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/Examples/Derivation_Trees/DTree.thy --- a/src/HOL/BNF/Examples/Derivation_Trees/DTree.thy Thu Dec 05 17:52:12 
2013 +0100 +++ b/src/HOL/BNF/Examples/Derivation_Trees/DTree.thy Thu Dec 05 17:58:03 2013 +0100 @@ -11,8 +11,6 @@ imports Prelim begin -hide_fact (open) Lifting_Product.prod_rel_def - typedecl N typedecl T @@ -22,8 +20,8 @@ definition "Node n as \ NNode n (the_inv fset as)" definition "cont \ fset o ccont" -definition "unfold rt ct \ dtree_unfold rt (the_inv fset o ct)" -definition "corec rt ct \ dtree_corec rt (the_inv fset o ct)" +definition "unfold rt ct \ unfold_dtree rt (the_inv fset o ct)" +definition "corec rt ct \ corec_dtree rt (the_inv fset o ct)" lemma finite_cont[simp]: "finite (cont tr)" unfolding cont_def o_apply by (cases tr, clarsimp) diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/Examples/Derivation_Trees/Parallel.thy --- a/src/HOL/BNF/Examples/Derivation_Trees/Parallel.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/BNF/Examples/Derivation_Trees/Parallel.thy Thu Dec 05 17:58:03 2013 +0100 @@ -12,7 +12,6 @@ begin no_notation plus_class.plus (infixl "+" 65) -no_notation Sublist.parallel (infixl "\" 50) consts Nplus :: "N \ N \ N" (infixl "+" 60) @@ -145,4 +144,4 @@ thus ?thesis by blast qed -end \ No newline at end of file +end diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/Examples/Koenig.thy --- a/src/HOL/BNF/Examples/Koenig.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/BNF/Examples/Koenig.thy Thu Dec 05 17:58:03 2013 +0100 @@ -12,44 +12,33 @@ imports TreeFI Stream begin -(* selectors for streams *) -lemma shd_def': "shd as = fst (stream_dtor as)" -apply (case_tac as) -apply (auto simp add: shd_def) -by (simp add: Stream_def stream.dtor_ctor) - -lemma stl_def': "stl as = snd (stream_dtor as)" -apply (case_tac as) -apply (auto simp add: stl_def) -by (simp add: Stream_def stream.dtor_ctor) - (* infinite trees: *) coinductive infiniteTr where -"\tr' \ listF_set (sub tr); infiniteTr tr'\ \ infiniteTr tr" +"\tr' \ set_listF (sub tr); infiniteTr tr'\ \ infiniteTr tr" lemma infiniteTr_strong_coind[consumes 1, case_names sub]: assumes *: "phi tr" 
and -**: "\ tr. phi tr \ \ tr' \ listF_set (sub tr). phi tr' \ infiniteTr tr'" +**: "\ tr. phi tr \ \ tr' \ set_listF (sub tr). phi tr' \ infiniteTr tr'" shows "infiniteTr tr" using assms by (elim infiniteTr.coinduct) blast lemma infiniteTr_coind[consumes 1, case_names sub, induct pred: infiniteTr]: assumes *: "phi tr" and -**: "\ tr. phi tr \ \ tr' \ listF_set (sub tr). phi tr'" +**: "\ tr. phi tr \ \ tr' \ set_listF (sub tr). phi tr'" shows "infiniteTr tr" using assms by (elim infiniteTr.coinduct) blast lemma infiniteTr_sub[simp]: -"infiniteTr tr \ (\ tr' \ listF_set (sub tr). infiniteTr tr')" +"infiniteTr tr \ (\ tr' \ set_listF (sub tr). infiniteTr tr')" by (erule infiniteTr.cases) blast primcorec konigPath where "shd (konigPath t) = lab t" -| "stl (konigPath t) = konigPath (SOME tr. tr \ listF_set (sub t) \ infiniteTr tr)" +| "stl (konigPath t) = konigPath (SOME tr. tr \ set_listF (sub t) \ infiniteTr tr)" (* proper paths in trees: *) coinductive properPath where -"\shd as = lab tr; tr' \ listF_set (sub tr); properPath (stl as) tr'\ \ +"\shd as = lab tr; tr' \ set_listF (sub tr); properPath (stl as) tr'\ \ properPath as tr" lemma properPath_strong_coind[consumes 1, case_names shd_lab sub]: @@ -57,7 +46,7 @@ **: "\ as tr. phi as tr \ shd as = lab tr" and ***: "\ as tr. phi as tr \ - \ tr' \ listF_set (sub tr). phi (stl as) tr' \ properPath (stl as) tr'" + \ tr' \ set_listF (sub tr). phi (stl as) tr' \ properPath (stl as) tr'" shows "properPath as tr" using assms by (elim properPath.coinduct) blast @@ -66,7 +55,7 @@ **: "\ as tr. phi as tr \ shd as = lab tr" and ***: "\ as tr. phi as tr \ - \ tr' \ listF_set (sub tr). phi (stl as) tr'" + \ tr' \ set_listF (sub tr). phi (stl as) tr'" shows "properPath as tr" using properPath_strong_coind[of phi, OF * **] *** by blast @@ -76,7 +65,7 @@ lemma properPath_sub: "properPath as tr \ - \ tr' \ listF_set (sub tr). phi (stl as) tr' \ properPath (stl as) tr'" + \ tr' \ set_listF (sub tr). 
phi (stl as) tr' \ properPath (stl as) tr'" by (erule properPath.cases) blast (* prove the following by coinduction *) @@ -88,10 +77,10 @@ assume "infiniteTr tr \ as = konigPath tr" hence "properPath as tr" proof (coinduction arbitrary: tr as rule: properPath_coind) case (sub tr as) - let ?t = "SOME t'. t' \ listF_set (sub tr) \ infiniteTr t'" - from sub have "\t' \ listF_set (sub tr). infiniteTr t'" by simp - then have "\t'. t' \ listF_set (sub tr) \ infiniteTr t'" by blast - then have "?t \ listF_set (sub tr) \ infiniteTr ?t" by (rule someI_ex) + let ?t = "SOME t'. t' \ set_listF (sub tr) \ infiniteTr t'" + from sub have "\t' \ set_listF (sub tr). infiniteTr t'" by simp + then have "\t'. t' \ set_listF (sub tr) \ infiniteTr t'" by blast + then have "?t \ set_listF (sub tr) \ infiniteTr ?t" by (rule someI_ex) moreover have "stl (konigPath tr) = konigPath ?t" by simp ultimately show ?case using sub by blast qed simp diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/Examples/ListF.thy --- a/src/HOL/BNF/Examples/ListF.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/BNF/Examples/ListF.thy Thu Dec 05 17:58:03 2013 +0100 @@ -62,7 +62,7 @@ "i < lengthh xs \ nthh (mapF f xs) i = f (nthh xs i)" by (induct rule: nthh.induct) auto -lemma nthh_listF_set[simp]: "i < lengthh xs \ nthh xs i \ listF_set xs" +lemma nthh_listF_set[simp]: "i < lengthh xs \ nthh xs i \ set_listF xs" by (induct rule: nthh.induct) auto lemma NilF_iff[iff]: "(lengthh xs = 0) = (xs = NilF)" @@ -105,7 +105,7 @@ qed simp lemma list_set_nthh[simp]: - "(x \ listF_set xs) \ (\i < lengthh xs. nthh xs i = x)" + "(x \ set_listF xs) \ (\i < lengthh xs. 
nthh xs i = x)" by (induct xs) (auto, induct rule: nthh.induct, auto) end diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/Examples/Misc_Codatatype.thy --- a/src/HOL/BNF/Examples/Misc_Codatatype.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/BNF/Examples/Misc_Codatatype.thy Thu Dec 05 17:58:03 2013 +0100 @@ -19,9 +19,9 @@ codatatype simple'' = X1'' nat int | X2'' -codatatype 'a stream = Stream 'a "'a stream" +codatatype 'a stream = Stream (shd: 'a) (stl: "'a stream") -codatatype 'a mylist = MyNil | MyCons 'a "'a mylist" +codatatype 'a mylist = MyNil | MyCons (myhd: 'a) (mytl: "'a mylist") codatatype ('b, 'c, 'd, 'e) some_passive = SP1 "('b, 'c, 'd, 'e) some_passive" | SP2 'b | SP3 'c | SP4 'd | SP5 'e diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/Examples/Misc_Datatype.thy --- a/src/HOL/BNF/Examples/Misc_Datatype.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/BNF/Examples/Misc_Datatype.thy Thu Dec 05 17:58:03 2013 +0100 @@ -19,7 +19,7 @@ datatype_new simple'' = X1'' nat int | X2'' -datatype_new 'a mylist = MyNil | MyCons 'a "'a mylist" +datatype_new 'a mylist = MyNil | MyCons (myhd: 'a) (mytl: "'a mylist") datatype_new ('b, 'c, 'd, 'e) some_passive = SP1 "('b, 'c, 'd, 'e) some_passive" | SP2 'b | SP3 'c | SP4 'd | SP5 'e diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/Examples/Misc_Primcorec.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/BNF/Examples/Misc_Primcorec.thy Thu Dec 05 17:58:03 2013 +0100 @@ -0,0 +1,112 @@ +(* Title: HOL/BNF/Examples/Misc_Primcorec.thy + Author: Jasmin Blanchette, TU Muenchen + Copyright 2013 + +Miscellaneous primitive corecursive function definitions. 
+*) + +header {* Miscellaneous Primitive Corecursive Function Definitions *} + +theory Misc_Primcorec +imports Misc_Codatatype +begin + +primcorec simple_of_bools :: "bool \ bool \ simple" where + "simple_of_bools b b' = (if b then if b' then X1 else X2 else if b' then X3 else X4)" + +primcorec simple'_of_bools :: "bool \ bool \ simple'" where + "simple'_of_bools b b' = + (if b then if b' then X1' () else X2' () else if b' then X3' () else X4' ())" + +primcorec inc_simple'' :: "nat \ simple'' \ simple''" where + "inc_simple'' k s = (case s of X1'' n i \ X1'' (n + k) (i + int k) | X2'' \ X2'')" + +primcorec sinterleave :: "'a stream \ 'a stream \ 'a stream" where + "sinterleave s s' = Stream (shd s) (sinterleave s' (stl s))" + +primcorec myapp :: "'a mylist \ 'a mylist \ 'a mylist" where + "myapp xs ys = + (if xs = MyNil then ys + else if ys = MyNil then xs + else MyCons (myhd xs) (myapp (mytl xs) ys))" + +primcorec shuffle_sp :: "('a, 'b, 'c, 'd) some_passive \ ('d, 'a, 'b, 'c) some_passive" where + "shuffle_sp sp = + (case sp of + SP1 sp' \ SP1 (shuffle_sp sp') + | SP2 a \ SP3 a + | SP3 b \ SP4 b + | SP4 c \ SP5 c + | SP5 d \ SP2 d)" + +primcorec rename_lam :: "(string \ string) \ lambda \ lambda" where + "rename_lam f l = + (case l of + Var s \ Var (f s) + | App l l' \ App (rename_lam f l) (rename_lam f l') + | Abs s l \ Abs (f s) (rename_lam f l) + | Let SL l \ Let (fimage (map_pair f (rename_lam f)) SL) (rename_lam f l))" + +primcorec + j1_sum :: "('a\{zero,one,plus}) \ 'a J1" and + j2_sum :: "'a \ 'a J2" +where + "n = 0 \ is_J11 (j1_sum n)" | + "un_J111 (j1_sum _) = 0" | + "un_J112 (j1_sum _) = j1_sum 0" | + "un_J121 (j1_sum n) = n + 1" | + "un_J122 (j1_sum n) = j2_sum (n + 1)" | + "n = 0 \ is_J21 (j2_sum n)" | + "un_J221 (j2_sum n) = j1_sum (n + 1)" | + "un_J222 (j2_sum n) = j2_sum (n + 1)" + +primcorec forest_of_mylist :: "'a tree mylist \ 'a forest" where + "forest_of_mylist ts = + (case ts of + MyNil \ FNil + | MyCons t ts \ FCons t (forest_of_mylist ts))" 
+ +primcorec mylist_of_forest :: "'a forest \ 'a tree mylist" where + "mylist_of_forest f = + (case f of + FNil \ MyNil + | FCons t ts \ MyCons t (mylist_of_forest ts))" + +primcorec semi_stream :: "'a stream \ 'a stream" where + "semi_stream s = Stream (shd s) (semi_stream (stl (stl s)))" + +primcorec + tree'_of_stream :: "'a stream \ 'a tree'" and + branch_of_stream :: "'a stream \ 'a branch" +where + "tree'_of_stream s = + TNode' (branch_of_stream (semi_stream s)) (branch_of_stream (semi_stream (stl s)))" | + "branch_of_stream s = (case s of Stream h t \ Branch h (tree'_of_stream t))" + +primcorec + freeze_exp :: "('b \ 'a) \ ('a, 'b) exp \ ('a, 'b) exp" and + freeze_trm :: "('b \ 'a) \ ('a, 'b) trm \ ('a, 'b) trm" and + freeze_factor :: "('b \ 'a) \ ('a, 'b) factor \ ('a, 'b) factor" +where + "freeze_exp g e = + (case e of + Term t \ Term (freeze_trm g t) + | Sum t e \ Sum (freeze_trm g t) (freeze_exp g e))" | + "freeze_trm g t = + (case t of + Factor f \ Factor (freeze_factor g f) + | Prod f t \ Prod (freeze_factor g f) (freeze_trm g t))" | + "freeze_factor g f = + (case f of + C a \ C a + | V b \ C (g b) + | Paren e \ Paren (freeze_exp g e))" + +primcorec poly_unity :: "'a poly_unit" where + "poly_unity = U (\_. poly_unity)" + +primcorec build_cps :: "('a \ 'a) \ ('a \ bool stream) \ 'a \ bool stream \ 'a cps" where + "shd b \ build_cps f g a b = CPS1 a" | + "_ \ build_cps f g a b = CPS2 (\a. 
build_cps f g (f a) (g a))" + +end diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/Examples/Misc_Primrec.thy --- a/src/HOL/BNF/Examples/Misc_Primrec.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/BNF/Examples/Misc_Primrec.thy Thu Dec 05 17:58:03 2013 +0100 @@ -14,7 +14,7 @@ primrec_new nat_of_simple :: "simple \ nat" where "nat_of_simple X1 = 1" | "nat_of_simple X2 = 2" | - "nat_of_simple X3 = 2" | + "nat_of_simple X3 = 3" | "nat_of_simple X4 = 4" primrec_new simple_of_simple' :: "simple' \ simple" where diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/Examples/Process.thy --- a/src/HOL/BNF/Examples/Process.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/BNF/Examples/Process.thy Thu Dec 05 17:58:03 2013 +0100 @@ -22,7 +22,7 @@ subsection {* Basic properties *} declare - pre_process_rel_def[simp] + rel_pre_process_def[simp] sum_rel_def[simp] prod_rel_def[simp] @@ -81,24 +81,17 @@ datatype x_y_ax = x | y | ax -definition "isA \ \ K. case K of x \ False |y \ True |ax \ True" -definition "pr \ \ K. case K of x \ undefined |y \ ''b'' |ax \ ''a''" -definition "co \ \ K. case K of x \ undefined |y \ x |ax \ x" -lemmas Action_defs = isA_def pr_def co_def +primcorec F :: "x_y_ax \ char list process" where + "xyax = x \ isChoice (F xyax)" +| "ch1Of (F xyax) = F ax" +| "ch2Of (F xyax) = F y" +| "prefOf (F xyax) = (if xyax = y then ''b'' else ''a'')" +| "contOf (F xyax) = F x" -definition "c1 \ \ K. case K of x \ ax |y \ undefined |ax \ undefined" -definition "c2 \ \ K. 
case K of x \ y |y \ undefined |ax \ undefined" -lemmas Choice_defs = c1_def c2_def - -definition "F \ process_unfold isA pr co c1 c2" definition "X = F x" definition "Y = F y" definition "AX = F ax" lemma X_Y_AX: "X = Choice AX Y" "Y = Action ''b'' X" "AX = Action ''a'' X" -unfolding X_def Y_def AX_def F_def -using process.unfold(2)[of isA x "pr" co c1 c2] - process.unfold(1)[of isA y "pr" co c1 c2] - process.unfold(1)[of isA ax "pr" co c1 c2] -unfolding Action_defs Choice_defs by simp_all +unfolding X_def Y_def AX_def by (subst F.code, simp)+ (* end product: *) lemma X_AX: diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/Examples/Stream.thy --- a/src/HOL/BNF/Examples/Stream.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/BNF/Examples/Stream.thy Thu Dec 05 17:58:03 2013 +0100 @@ -18,7 +18,7 @@ code_datatype Stream lemma stream_case_cert: - assumes "CASE \ stream_case c" + assumes "CASE \ case_stream c" shows "CASE (a ## s) \ c a s" using assms by simp_all @@ -87,10 +87,10 @@ by (induct xs) auto -subsection {* set of streams with elements in some fixes set *} +subsection {* set of streams with elements in some fixed set *} coinductive_set - streams :: "'a set => 'a stream set" + streams :: "'a set \ 'a stream set" for A :: "'a set" where Stream[intro!, simp, no_atp]: "\a \ A; s \ streams A\ \ a ## s \ streams A" @@ -98,6 +98,15 @@ lemma shift_streams: "\w \ lists A; s \ streams A\ \ w @- s \ streams A" by (induct w) auto +lemma streams_Stream: "x ## s \ streams A \ x \ A \ s \ streams A" + by (auto elim: streams.cases) + +lemma streams_stl: "s \ streams A \ stl s \ streams A" + by (cases s) (auto simp: streams_Stream) + +lemma streams_shd: "s \ streams A \ shd s \ A" + by (cases s) (auto simp: streams_Stream) + lemma sset_streams: assumes "sset s \ A" shows "s \ streams A" @@ -105,6 +114,28 @@ case streams then show ?case by (cases s) simp qed +lemma streams_sset: + assumes "s \ streams A" + shows "sset s \ A" +proof + fix x assume "x \ sset s" from this `s \ 
streams A` show "x \ A" + by (induct s) (auto intro: streams_shd streams_stl) +qed + +lemma streams_iff_sset: "s \ streams A \ sset s \ A" + by (metis sset_streams streams_sset) + +lemma streams_mono: "s \ streams A \ A \ B \ s \ streams B" + unfolding streams_iff_sset by auto + +lemma smap_streams: "s \ streams A \ (\x. x \ A \ f x \ B) \ smap f s \ streams B" + unfolding streams_iff_sset stream.set_map by auto + +lemma streams_empty: "streams {} = {}" + by (auto elim: streams.cases) + +lemma streams_UNIV[simp]: "streams UNIV = UNIV" + by (auto simp: streams_iff_sset) subsection {* nth, take, drop for streams *} @@ -234,6 +265,9 @@ lemma stream_all_shift[simp]: "stream_all P (xs @- s) = (list_all P xs \ stream_all P s)" unfolding stream_all_iff list_all_iff by auto +lemma stream_all_Stream: "stream_all P (x ## X) \ P x \ stream_all P X" + by simp + subsection {* recurring stream out of a list *} @@ -285,59 +319,60 @@ by (induct n arbitrary: u) (auto simp: rotate1_rotate_swap rotate1_hd_tl rotate_conv_mod[symmetric]) +subsection {* iterated application of a function *} + +primcorec siterate where + "shd (siterate f x) = x" +| "stl (siterate f x) = siterate f (f x)" + +lemma stake_Suc: "stake (Suc n) s = stake n s @ [s !! n]" + by (induct n arbitrary: s) auto + +lemma snth_siterate[simp]: "siterate f x !! n = (f^^n) x" + by (induct n arbitrary: x) (auto simp: funpow_swap1) + +lemma sdrop_siterate[simp]: "sdrop n (siterate f x) = siterate f ((f^^n) x)" + by (induct n arbitrary: x) (auto simp: funpow_swap1) + +lemma stake_siterate[simp]: "stake n (siterate f x) = map (\n. (f^^n) x) [0 ..< n]" + by (induct n arbitrary: x) (auto simp del: stake.simps(2) simp: stake_Suc) + +lemma sset_siterate: "sset (siterate f x) = {(f^^n) x | n. 
True}" + by (auto simp: sset_range) + +lemma smap_siterate: "smap f (siterate f x) = siterate f (f x)" + by (coinduction arbitrary: x) auto + + subsection {* stream repeating a single element *} -primcorec same where - "shd (same x) = x" -| "stl (same x) = same x" +abbreviation "sconst \ siterate id" -lemma snth_same[simp]: "same x !! n = x" - unfolding same_def by (induct n) auto +lemma shift_replicate_sconst[simp]: "replicate n x @- sconst x = sconst x" + by (subst (3) stake_sdrop[symmetric]) (simp add: map_replicate_trivial) -lemma stake_same[simp]: "stake n (same x) = replicate n x" - unfolding same_def by (induct n) (auto simp: upt_rec) +lemma stream_all_same[simp]: "sset (sconst x) = {x}" + by (simp add: sset_siterate) -lemma sdrop_same[simp]: "sdrop n (same x) = same x" - unfolding same_def by (induct n) auto +lemma same_cycle: "sconst x = cycle [x]" + by coinduction auto -lemma shift_replicate_same[simp]: "replicate n x @- same x = same x" - by (metis sdrop_same stake_same stake_sdrop) +lemma smap_sconst: "smap f (sconst x) = sconst (f x)" + by coinduction auto -lemma stream_all_same[simp]: "stream_all P (same x) \ P x" - unfolding stream_all_def by auto - -lemma same_cycle: "same x = cycle [x]" - by coinduction auto +lemma sconst_streams: "x \ A \ sconst x \ streams A" + by (simp add: streams_iff_sset) subsection {* stream of natural numbers *} -primcorec fromN :: "nat \ nat stream" where - "fromN n = n ## fromN (n + 1)" - -lemma snth_fromN[simp]: "fromN n !! m = n + m" - unfolding fromN_def by (induct m arbitrary: n) auto - -lemma stake_fromN[simp]: "stake m (fromN n) = [n ..< m + n]" - unfolding fromN_def by (induct m arbitrary: n) (auto simp: upt_rec) - -lemma sdrop_fromN[simp]: "sdrop m (fromN n) = fromN (n + m)" - unfolding fromN_def by (induct m arbitrary: n) auto - -lemma sset_fromN[simp]: "sset (fromN n) = {n ..}" (is "?L = ?R") -proof safe - fix m assume "m \ ?L" - moreover - { fix s assume "m \ sset s" "\n'\n. 
s = fromN n'" - hence "n \ m" by (induct arbitrary: n rule: sset_induct1) fastforce+ - } - ultimately show "n \ m" by auto -next - fix m assume "n \ m" thus "m \ ?L" by (metis le_iff_add snth_fromN snth_sset) -qed +abbreviation "fromN \ siterate Suc" abbreviation "nats \ fromN 0" +lemma sset_fromN[simp]: "sset (fromN n) = {n ..}" + by (auto simp add: sset_siterate) arith + subsection {* flatten a stream of lists *} @@ -498,26 +533,4 @@ "smap2 f s1 s2 = smap (split f) (szip s1 s2)" by (coinduction arbitrary: s1 s2) auto - -subsection {* iterated application of a function *} - -primcorec siterate where - "shd (siterate f x) = x" -| "stl (siterate f x) = siterate f (f x)" - -lemma stake_Suc: "stake (Suc n) s = stake n s @ [s !! n]" - by (induct n arbitrary: s) auto - -lemma snth_siterate[simp]: "siterate f x !! n = (f^^n) x" - by (induct n arbitrary: x) (auto simp: funpow_swap1) - -lemma sdrop_siterate[simp]: "sdrop n (siterate f x) = siterate f ((f^^n) x)" - by (induct n arbitrary: x) (auto simp: funpow_swap1) - -lemma stake_siterate[simp]: "stake n (siterate f x) = map (\n. (f^^n) x) [0 ..< n]" - by (induct n arbitrary: x) (auto simp del: stake.simps(2) simp: stake_Suc) - -lemma sset_siterate: "sset (siterate f x) = {(f^^n) x | n. True}" - by (auto simp: sset_range) - end diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/More_BNFs.thy --- a/src/HOL/BNF/More_BNFs.thy Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/BNF/More_BNFs.thy Thu Dec 05 17:58:03 2013 +0100 @@ -15,13 +15,17 @@ Basic_BNFs "~~/src/HOL/Library/FSet" "~~/src/HOL/Library/Multiset" - Countable_Type begin lemma option_rec_conv_option_case: "option_rec = option_case" by (simp add: fun_eq_iff split: option.split) -bnf Option.map [Option.set] "\_::'a option. natLeq" ["None"] option_rel +bnf "'a option" + map: Option.map + sets: Option.set + bd: natLeq + wits: None + rel: option_rel proof - show "Option.map id = id" by (simp add: fun_eq_iff Option.map_def split: option.split) next @@ -94,7 +98,12 @@ (\zs \ ?A. 
map p1 zs = as \ map p2 zs = bs)" by blast qed -bnf map [set] "\_::'a list. natLeq" ["[]"] +bnf "'a list" + map: map + sets: set + bd: natLeq + wits: Nil + rel: list_all2 proof - show "map id = id" by (rule List.map.id) next @@ -115,8 +124,16 @@ fix x show "|set x| \o natLeq" by (metis List.finite_set finite_iff_ordLess_natLeq ordLess_imp_ordLeq) +next + fix R + show "list_all2 R = + (Grp {x. set x \ {(x, y). R x y}} (map fst))\\ OO + Grp {x. set x \ {(x, y). R x y}} (map snd)" + unfolding list_all2_def[abs_def] Grp_def fun_eq_iff relcompp.simps conversep.simps + by (force simp: zip_map_fst_snd) qed (simp add: wpull_map)+ + (* Finite sets *) lemma wpull_image: @@ -189,7 +206,7 @@ by (transfer, clarsimp, metis fst_conv) qed -lemma wpull_fmap: +lemma wpull_fimage: assumes "wpull A B1 B2 f1 f2 p1 p2" shows "wpull {x. fset x \ A} {x. fset x \ B1} {x. fset x \ B2} (fimage f1) (fimage f2) (fimage p1) (fimage p2)" @@ -214,7 +231,12 @@ using X' Y1 Y2 by (auto simp: X'eq intro!: exI[of _ "x"]) (transfer, blast)+ qed -bnf fimage [fset] "\_::'a fset. natLeq" ["{||}"] fset_rel +bnf "'a fset" + map: fimage + sets: fset + bd: natLeq + wits: "{||}" + rel: fset_rel apply - apply transfer' apply simp apply transfer' apply force @@ -223,7 +245,7 @@ apply (rule natLeq_card_order) apply (rule natLeq_cinfinite) apply transfer apply (metis ordLess_imp_ordLeq finite_iff_ordLess_natLeq) - apply (erule wpull_fmap) + apply (erule wpull_fimage) apply (simp add: Grp_def relcompp.simps conversep.simps fun_eq_iff fset_rel_alt fset_rel_aux) apply transfer apply simp done @@ -235,121 +257,6 @@ lemmas [simp] = fset.map_comp fset.map_id fset.set_map -(* Countable sets *) - -lemma card_of_countable_sets_range: -fixes A :: "'a set" -shows "|{X. X \ A \ countable X \ X \ {}}| \o |{f::nat \ 'a. range f \ A}|" -apply(rule card_of_ordLeqI[of from_nat_into]) using inj_on_from_nat_into -unfolding inj_on_def by auto - -lemma card_of_countable_sets_Func: -"|{X. 
X \ A \ countable X \ X \ {}}| \o |A| ^c natLeq" -using card_of_countable_sets_range card_of_Func_UNIV[THEN ordIso_symmetric] -unfolding cexp_def Field_natLeq Field_card_of -by (rule ordLeq_ordIso_trans) - -lemma ordLeq_countable_subsets: -"|A| \o |{X. X \ A \ countable X}|" -apply (rule card_of_ordLeqI[of "\ a. {a}"]) unfolding inj_on_def by auto - -lemma finite_countable_subset: -"finite {X. X \ A \ countable X} \ finite A" -apply default - apply (erule contrapos_pp) - apply (rule card_of_ordLeq_infinite) - apply (rule ordLeq_countable_subsets) - apply assumption -apply (rule finite_Collect_conjI) -apply (rule disjI1) -by (erule finite_Collect_subsets) - -lemma rcset_to_rcset: "countable A \ rcset (the_inv rcset A) = A" - apply (rule f_the_inv_into_f[unfolded inj_on_def image_iff]) - apply transfer' apply simp - apply transfer' apply simp - done - -lemma Collect_Int_Times: -"{(x, y). R x y} \ A \ B = {(x, y). R x y \ x \ A \ y \ B}" -by auto - -definition cset_rel :: "('a \ 'b \ bool) \ 'a cset \ 'b cset \ bool" where -"cset_rel R a b \ - (\t \ rcset a. \u \ rcset b. R t u) \ - (\t \ rcset b. \u \ rcset a. R u t)" - -lemma cset_rel_aux: -"(\t \ rcset a. \u \ rcset b. R t u) \ (\t \ rcset b. \u \ rcset a. R u t) \ - ((Grp {x. rcset x \ {(a, b). R a b}} (cimage fst))\\ OO - Grp {x. rcset x \ {(a, b). 
R a b}} (cimage snd)) a b" (is "?L = ?R") -proof - assume ?L - def R' \ "the_inv rcset (Collect (split R) \ (rcset a \ rcset b))" - (is "the_inv rcset ?L'") - have L: "countable ?L'" by auto - hence *: "rcset R' = ?L'" unfolding R'_def using fset_to_fset by (intro rcset_to_rcset) - thus ?R unfolding Grp_def relcompp.simps conversep.simps - proof (intro CollectI prod_caseI exI[of _ a] exI[of _ b] exI[of _ R'] conjI refl) - from * `?L` show "a = cimage fst R'" by transfer (auto simp: image_def Collect_Int_Times) - next - from * `?L` show "b = cimage snd R'" by transfer (auto simp: image_def Collect_Int_Times) - qed simp_all -next - assume ?R thus ?L unfolding Grp_def relcompp.simps conversep.simps - by transfer force -qed - -bnf cimage [rcset] "\_::'a cset. natLeq" ["cempty"] cset_rel -proof - - show "cimage id = id" by transfer' simp -next - fix f g show "cimage (g \ f) = cimage g \ cimage f" by transfer' fastforce -next - fix C f g assume eq: "\a. a \ rcset C \ f a = g a" - thus "cimage f C = cimage g C" by transfer force -next - fix f show "rcset \ cimage f = op ` f \ rcset" by transfer' fastforce -next - show "card_order natLeq" by (rule natLeq_card_order) -next - show "cinfinite natLeq" by (rule natLeq_cinfinite) -next - fix C show "|rcset C| \o natLeq" by transfer (unfold countable_card_le_natLeq) -next - fix A B1 B2 f1 f2 p1 p2 - assume wp: "wpull A B1 B2 f1 f2 p1 p2" - show "wpull {x. rcset x \ A} {x. rcset x \ B1} {x. rcset x \ B2} - (cimage f1) (cimage f2) (cimage p1) (cimage p2)" - unfolding wpull_def proof safe - fix y1 y2 - assume Y1: "rcset y1 \ B1" and Y2: "rcset y2 \ B2" - assume "cimage f1 y1 = cimage f2 y2" - hence EQ: "f1 ` (rcset y1) = f2 ` (rcset y2)" by transfer - with Y1 Y2 obtain X where X: "X \ A" - and Y1: "p1 ` X = rcset y1" and Y2: "p2 ` X = rcset y2" - using wpull_image[OF wp] unfolding wpull_def Pow_def Bex_def mem_Collect_eq - by (auto elim!: allE[of _ "rcset y1"] allE[of _ "rcset y2"]) - have "\ y1' \ rcset y1. \ x. 
x \ X \ y1' = p1 x" using Y1 by auto - then obtain q1 where q1: "\ y1' \ rcset y1. q1 y1' \ X \ y1' = p1 (q1 y1')" by metis - have "\ y2' \ rcset y2. \ x. x \ X \ y2' = p2 x" using Y2 by auto - then obtain q2 where q2: "\ y2' \ rcset y2. q2 y2' \ X \ y2' = p2 (q2 y2')" by metis - def X' \ "q1 ` (rcset y1) \ q2 ` (rcset y2)" - have X': "X' \ A" and Y1: "p1 ` X' = rcset y1" and Y2: "p2 ` X' = rcset y2" - using X Y1 Y2 q1 q2 unfolding X'_def by fast+ - have fX': "countable X'" unfolding X'_def by simp - then obtain x where X'eq: "X' = rcset x" by transfer blast - show "\x\{x. rcset x \ A}. cimage p1 x = y1 \ cimage p2 x = y2" - using X' Y1 Y2 unfolding X'eq by (intro bexI[of _ "x"]) (transfer, auto) - qed -next - fix R - show "cset_rel R = - (Grp {x. rcset x \ Collect (split R)} (cimage fst))\\ OO - Grp {x. rcset x \ Collect (split R)} (cimage snd)" - unfolding cset_rel_def[abs_def] cset_rel_aux by simp -qed (transfer, simp) - (* Multisets *) @@ -874,22 +781,26 @@ by transfer (auto intro!: ordLess_imp_ordLeq simp: finite_iff_ordLess_natLeq[symmetric] multiset_def) -bnf mmap [set_of] "\_::'a multiset. 
natLeq" ["{#}"] +bnf "'a multiset" + map: mmap + sets: set_of + bd: natLeq + wits: "{#}" by (auto simp add: mmap_id0 mmap_comp set_of_mmap natLeq_card_order natLeq_cinfinite set_of_bd intro: mmap_cong wpull_mmap) -inductive multiset_rel' where -Zero: "multiset_rel' R {#} {#}" +inductive rel_multiset' where +Zero: "rel_multiset' R {#} {#}" | -Plus: "\R a b; multiset_rel' R M N\ \ multiset_rel' R (M + {#a#}) (N + {#b#})" +Plus: "\R a b; rel_multiset' R M N\ \ rel_multiset' R (M + {#a#}) (N + {#b#})" -lemma multiset_map_Zero_iff[simp]: "mmap f M = {#} \ M = {#}" +lemma map_multiset_Zero_iff[simp]: "mmap f M = {#} \ M = {#}" by (metis image_is_empty multiset.set_map set_of_eq_empty_iff) -lemma multiset_map_Zero[simp]: "mmap f {#} = {#}" by simp +lemma map_multiset_Zero[simp]: "mmap f {#} = {#}" by simp -lemma multiset_rel_Zero: "multiset_rel R {#} {#}" -unfolding multiset_rel_def Grp_def by auto +lemma rel_multiset_Zero: "rel_multiset R {#} {#}" +unfolding rel_multiset_def Grp_def by auto declare multiset.count[simp] declare Abs_multiset_inverse[simp] @@ -897,7 +808,7 @@ declare union_preserves_multiset[simp] -lemma multiset_map_Plus[simp]: "mmap f (M1 + M2) = mmap f M1 + mmap f M2" +lemma map_multiset_Plus[simp]: "mmap f (M1 + M2) = mmap f M1 + mmap f M2" proof (intro multiset_eqI, transfer fixing: f) fix x :: 'a and M1 M2 :: "'b \ nat" assume "M1 \ multiset" "M2 \ multiset" @@ -910,12 +821,12 @@ by (auto simp: setsum.distrib[symmetric]) qed -lemma multiset_map_singl[simp]: "mmap f {#a#} = {#f a#}" +lemma map_multiset_singl[simp]: "mmap f {#a#} = {#f a#}" by transfer auto -lemma multiset_rel_Plus: -assumes ab: "R a b" and MN: "multiset_rel R M N" -shows "multiset_rel R (M + {#a#}) (N + {#b#})" +lemma rel_multiset_Plus: +assumes ab: "R a b" and MN: "rel_multiset R M N" +shows "rel_multiset R (M + {#a#}) (N + {#b#})" proof- {fix y assume "R a b" and "set_of y \ {(x, y). R x y}" hence "\ya. 
mmap fst y + {#a#} = mmap fst ya \ @@ -925,13 +836,13 @@ } thus ?thesis using assms - unfolding multiset_rel_def Grp_def by force + unfolding rel_multiset_def Grp_def by force qed -lemma multiset_rel'_imp_multiset_rel: -"multiset_rel' R M N \ multiset_rel R M N" -apply(induct rule: multiset_rel'.induct) -using multiset_rel_Zero multiset_rel_Plus by auto +lemma rel_multiset'_imp_rel_multiset: +"rel_multiset' R M N \ rel_multiset R M N" +apply(induct rule: rel_multiset'.induct) +using rel_multiset_Zero rel_multiset_Plus by auto lemma mcard_mmap[simp]: "mcard (mmap f M) = mcard M" proof - @@ -942,8 +853,7 @@ using finite_Collect_mem . ultimately have fin: "finite {b. \a. f a = b \ a \# M}" by(rule finite_subset) have i: "inj_on A ?B" unfolding inj_on_def A_def apply clarsimp - by (metis (lifting, mono_tags) mem_Collect_eq rel_simps(54) - setsum_gt_0_iff setsum_infinite) + by (metis (lifting, full_types) mem_Collect_eq neq0_conv setsum.neutral) have 0: "\ b. 0 < setsum (count M) (A b) \ (\ a \ A b. count M a > 0)" apply safe apply (metis less_not_refl setsum_gt_0_iff setsum_infinite) @@ -964,10 +874,10 @@ then show ?thesis unfolding mcard_unfold_setsum A_def by transfer qed -lemma multiset_rel_mcard: -assumes "multiset_rel R M N" +lemma rel_multiset_mcard: +assumes "rel_multiset R M N" shows "mcard M = mcard N" -using assms unfolding multiset_rel_def Grp_def by auto +using assms unfolding rel_multiset_def Grp_def by auto lemma multiset_induct2[case_names empty addL addR]: assumes empty: "P {#} {#}" @@ -1022,68 +932,67 @@ qed lemma msed_rel_invL: -assumes "multiset_rel R (M + {#a#}) N" -shows "\ N1 b. N = N1 + {#b#} \ R a b \ multiset_rel R M N1" +assumes "rel_multiset R (M + {#a#}) N" +shows "\ N1 b. N = N1 + {#b#} \ R a b \ rel_multiset R M N1" proof- obtain K where KM: "mmap fst K = M + {#a#}" and KN: "mmap snd K = N" and sK: "set_of K \ {(a, b). 
R a b}" using assms - unfolding multiset_rel_def Grp_def by auto + unfolding rel_multiset_def Grp_def by auto obtain K1 ab where K: "K = K1 + {#ab#}" and a: "fst ab = a" and K1M: "mmap fst K1 = M" using msed_map_invR[OF KM] by auto obtain N1 where N: "N = N1 + {#snd ab#}" and K1N1: "mmap snd K1 = N1" using msed_map_invL[OF KN[unfolded K]] by auto have Rab: "R a (snd ab)" using sK a unfolding K by auto - have "multiset_rel R M N1" using sK K1M K1N1 - unfolding K multiset_rel_def Grp_def by auto + have "rel_multiset R M N1" using sK K1M K1N1 + unfolding K rel_multiset_def Grp_def by auto thus ?thesis using N Rab by auto qed lemma msed_rel_invR: -assumes "multiset_rel R M (N + {#b#})" -shows "\ M1 a. M = M1 + {#a#} \ R a b \ multiset_rel R M1 N" +assumes "rel_multiset R M (N + {#b#})" +shows "\ M1 a. M = M1 + {#a#} \ R a b \ rel_multiset R M1 N" proof- obtain K where KN: "mmap snd K = N + {#b#}" and KM: "mmap fst K = M" and sK: "set_of K \ {(a, b). R a b}" using assms - unfolding multiset_rel_def Grp_def by auto + unfolding rel_multiset_def Grp_def by auto obtain K1 ab where K: "K = K1 + {#ab#}" and b: "snd ab = b" and K1N: "mmap snd K1 = N" using msed_map_invR[OF KN] by auto obtain M1 where M: "M = M1 + {#fst ab#}" and K1M1: "mmap fst K1 = M1" using msed_map_invL[OF KM[unfolded K]] by auto have Rab: "R (fst ab) b" using sK b unfolding K by auto - have "multiset_rel R M1 N" using sK K1N K1M1 - unfolding K multiset_rel_def Grp_def by auto + have "rel_multiset R M1 N" using sK K1N K1M1 + unfolding K rel_multiset_def Grp_def by auto thus ?thesis using M Rab by auto qed -lemma multiset_rel_imp_multiset_rel': -assumes "multiset_rel R M N" -shows "multiset_rel' R M N" +lemma rel_multiset_imp_rel_multiset': +assumes "rel_multiset R M N" +shows "rel_multiset' R M N" using assms proof(induct M arbitrary: N rule: measure_induct_rule[of mcard]) case (less M) - have c: "mcard M = mcard N" using multiset_rel_mcard[OF less.prems] . 
+ have c: "mcard M = mcard N" using rel_multiset_mcard[OF less.prems] . show ?case proof(cases "M = {#}") case True hence "N = {#}" using c by simp - thus ?thesis using True multiset_rel'.Zero by auto + thus ?thesis using True rel_multiset'.Zero by auto next case False then obtain M1 a where M: "M = M1 + {#a#}" by (metis multi_nonempty_split) - obtain N1 b where N: "N = N1 + {#b#}" and R: "R a b" and ms: "multiset_rel R M1 N1" + obtain N1 b where N: "N = N1 + {#b#}" and R: "R a b" and ms: "rel_multiset R M1 N1" using msed_rel_invL[OF less.prems[unfolded M]] by auto - have "multiset_rel' R M1 N1" using less.hyps[of M1 N1] ms unfolding M by simp - thus ?thesis using multiset_rel'.Plus[of R a b, OF R] unfolding M N by simp + have "rel_multiset' R M1 N1" using less.hyps[of M1 N1] ms unfolding M by simp + thus ?thesis using rel_multiset'.Plus[of R a b, OF R] unfolding M N by simp qed qed -lemma multiset_rel_multiset_rel': -"multiset_rel R M N = multiset_rel' R M N" -using multiset_rel_imp_multiset_rel' multiset_rel'_imp_multiset_rel by auto +lemma rel_multiset_rel_multiset': +"rel_multiset R M N = rel_multiset' R M N" +using rel_multiset_imp_rel_multiset' rel_multiset'_imp_rel_multiset by auto -(* The main end product for multiset_rel: inductive characterization *) -theorems multiset_rel_induct[case_names empty add, induct pred: multiset_rel] = - multiset_rel'.induct[unfolded multiset_rel_multiset_rel'[symmetric]] - +(* The main end product for rel_multiset: inductive characterization *) +theorems rel_multiset_induct[case_names empty add, induct pred: rel_multiset] = + rel_multiset'.induct[unfolded rel_multiset_rel_multiset'[symmetric]] (* Advanced relator customization *) @@ -1153,5 +1062,4 @@ qed qed - end diff -r cfb21e03fe2a -r d64a4ef26edb src/HOL/BNF/README.html --- a/src/HOL/BNF/README.html Thu Dec 05 17:52:12 2013 +0100 +++ b/src/HOL/BNF/README.html Thu Dec 05 17:58:03 2013 +0100 @@ -20,7 +20,8 @@ possibly infinite depth. 
The framework draws heavily from category theory.

-The package is described in the following paper: +The package is described in isabelle doc datatypes and in the following +paper: