\chapter{First-Order Sequent Calculus}
\index{sequent calculus|(}

The theory~\thydx{LK} implements classical first-order logic through Gentzen's
sequent calculus (see Gallier~\cite{gallier86} or Takeuti~\cite{takeuti87}).
Resembling the method of semantic tableaux, the calculus is well suited for
backwards proof.  Assertions have the form \(\Gamma\turn \Delta\), where
\(\Gamma\) and \(\Delta\) are lists of formulae.  Associative unification,
simulated by higher-order unification, handles lists
(\S\ref{sec:assoc-unification} presents details, if you are interested).

The logic is many-sorted, using Isabelle's type classes.  The class of
first-order terms is called \cldx{term}.  No types of individuals are
provided, but extensions can define types such as {\tt nat::term} and type
constructors such as {\tt list::(term)term}.  Below, the type variable
$\alpha$ ranges over class {\tt term}; the equality symbol and quantifiers
are polymorphic (many-sorted).  The type of formulae is~\tydx{o}, which
belongs to class {\tt logic}.

LK implements a classical logic theorem prover that is nearly as powerful as
the generic classical reasoner.  The simplifier is now available too.

To work in LK, start up Isabelle specifying \texttt{Sequents} as the
object-logic.  Once in Isabelle, change the context to theory \texttt{LK.thy}:
\begin{ttbox}
isabelle Sequents
context LK.thy;
\end{ttbox}
Modal logic and linear logic are also available, but unfortunately they are
not documented.

\begin{figure} 
\begin{center}
\begin{tabular}{rrr}
  \it name      &\it meta-type          & \it description       \\
  \cdx{Trueprop}& $[sobj\To sobj, sobj\To sobj]\To prop$ & coercion to $prop$\\
  \cdx{Seqof}   & $[o,sobj]\To sobj$    & singleton sequence    \\
  \cdx{Not}     & $o\To o$              & negation ($\neg$)     \\
  \cdx{True}    & $o$                   & tautology ($\top$)    \\
  \cdx{False}   & $o$                   & absurdity ($\bot$)
\end{tabular}
\end{center}
\subcaption{Constants}

\begin{center}
\begin{tabular}{llrrr}
  \it symbol &\it name     &\it meta-type & \it priority & \it description \\
  \sdx{ALL}  & \cdx{All}  & $(\alpha\To o)\To o$ & 10 &
        universal quantifier ($\forall$) \\
  \sdx{EX}   & \cdx{Ex}   & $(\alpha\To o)\To o$ & 10 &
        existential quantifier ($\exists$) \\
  \sdx{THE} & \cdx{The}  & $(\alpha\To o)\To \alpha$ & 10 &
        definite description ($\iota$)
\end{tabular}
\end{center}
\subcaption{Binders}

\begin{center}
\index{*"= symbol}
\index{&@{\tt\&} symbol}
\index{*"| symbol}
\index{*"-"-"> symbol}
\index{*"<"-"> symbol}
\begin{tabular}{rrrr}
    \it symbol  & \it meta-type         & \it priority & \it description \\
    \tt = &     $[\alpha,\alpha]\To o$  & Left 50 & equality ($=$) \\
    \tt \& &    $[o,o]\To o$ & Right 35 & conjunction ($\conj$) \\
    \tt | &     $[o,o]\To o$ & Right 30 & disjunction ($\disj$) \\
    \tt --> &   $[o,o]\To o$ & Right 25 & implication ($\imp$) \\
    \tt <-> &   $[o,o]\To o$ & Right 25 & biconditional ($\bimp$)
\end{tabular}
\end{center}
\subcaption{Infixes}

\begin{center}
\begin{tabular}{rrr}
  \it external          & \it internal  & \it description \\
  \tt $\Gamma$ |- $\Delta$  &  \tt Trueprop($\Gamma$, $\Delta$) &
        sequent $\Gamma\turn \Delta$
\end{tabular}
\end{center}
\subcaption{Translations}
\caption{Syntax of {\tt LK}} \label{lk-syntax}
\end{figure}


\begin{figure}
\dquotes
\[\begin{array}{rcl}
    prop & = & sequence " |- " sequence
\\[2ex]
sequence & = & elem \quad (", " elem)^* \\
         & | & empty
\\[2ex]
    elem & = & "\$ " term \\
         & | & formula  \\
         & | & "<<" sequence ">>"
\\[2ex]
 formula & = & \hbox{expression of type~$o$} \\
         & | & term " = " term \\
         & | & "\ttilde\ " formula \\
         & | & formula " \& " formula \\
         & | & formula " | " formula \\
         & | & formula " --> " formula \\
         & | & formula " <-> " formula \\
         & | & "ALL~" id~id^* " . " formula \\
         & | & "EX~~" id~id^* " . " formula \\
         & | & "THE~" id~     " . " formula
  \end{array}
\]
\caption{Grammar of {\tt LK}} \label{lk-grammar}
\end{figure}


\begin{figure}
\begin{ttbox}
\tdx{basic}       $H, P, $G |- $E, P, $F

\tdx{contRS}      $H |- $E, $S, $S, $F ==> $H |- $E, $S, $F
\tdx{contLS}      $H, $S, $S, $G |- $E ==> $H, $S, $G |- $E

\tdx{thinRS}      $H |- $E, $F ==> $H |- $E, $S, $F
\tdx{thinLS}      $H, $G |- $E ==> $H, $S, $G |- $E

\tdx{cut}         [| $H |- $E, P;  $H, P |- $E |] ==> $H |- $E
\subcaption{Structural rules}

\tdx{refl}        $H |- $E, a=a, $F
\tdx{subst}       $H(a), $G(a) |- $E(a) ==> $H(b), a=b, $G(b) |- $E(b)
\subcaption{Equality rules}
\end{ttbox}

\caption{Basic Rules of {\tt LK}}  \label{lk-basic-rules}
\end{figure}

\begin{figure}
\begin{ttbox}
\tdx{True_def}    True  == False-->False
\tdx{iff_def}     P<->Q == (P-->Q) & (Q-->P)

\tdx{conjR}   [| $H|- $E, P, $F;  $H|- $E, Q, $F |] ==> $H|- $E, P&Q, $F
\tdx{conjL}   $H, P, Q, $G |- $E ==> $H, P & Q, $G |- $E

\tdx{disjR}   $H |- $E, P, Q, $F ==> $H |- $E, P|Q, $F
\tdx{disjL}   [| $H, P, $G |- $E;  $H, Q, $G |- $E |] ==> $H, P|Q, $G |- $E

\tdx{impR}    $H, P |- $E, Q, $F ==> $H |- $E, P-->Q, $F
\tdx{impL}    [| $H,$G |- $E,P;  $H, Q, $G |- $E |] ==> $H, P-->Q, $G |- $E

\tdx{notR}    $H, P |- $E, $F ==> $H |- $E, ~P, $F
\tdx{notL}    $H, $G |- $E, P ==> $H, ~P, $G |- $E

\tdx{FalseL}  $H, False, $G |- $E

\tdx{allR}    (!!x. $H|- $E, P(x), $F) ==> $H|- $E, ALL x. P(x), $F
\tdx{allL}    $H, P(x), $G, ALL x. P(x) |- $E ==> $H, ALL x. P(x), $G|- $E

\tdx{exR}     $H|- $E, P(x), $F, EX x. P(x) ==> $H|- $E, EX x. P(x), $F
\tdx{exL}     (!!x. $H, P(x), $G|- $E) ==> $H, EX x. P(x), $G|- $E

\tdx{The}     [| $H |- $E, P(a), $F;  !!x. $H, P(x) |- $E, x=a, $F |] ==>
        $H |- $E, P(THE x. P(x)), $F
\subcaption{Logical rules}
\end{ttbox}

\caption{Rules of {\tt LK}}  \label{lk-rules}
\end{figure}


\section{Syntax and rules of inference}
\index{*sobj type}

Figure~\ref{lk-syntax} gives the syntax for {\tt LK}, which is complicated
by the representation of sequents.  Type $sobj\To sobj$ represents a list
of formulae.

The \textbf{definite description} operator~$\iota x. P[x]$ stands for some~$a$
satisfying~$P[a]$, if one exists and is unique.  Since all terms in LK denote
something, a description is always meaningful, but we do not know its value
unless $P[x]$ defines it uniquely.  The Isabelle notation is \hbox{\tt THE
  $x$.\ $P[x]$}.  The corresponding rule (Fig.\ts\ref{lk-rules}) does not
entail the Axiom of Choice because it requires uniqueness.
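For instance, the description $\iota x. x=a$ denotes~$a$ itself.  Using the
derived rule \texttt{the_equality} of Fig.\ts\ref{lk-derived}, together with
\texttt{refl} and \texttt{basic}, a proof might run as follows (a hypothetical
script, not checked here):
\begin{ttbox}
Goal "|- (THE x. x = a) = a";
by (resolve_tac [the_equality] 1);
by (resolve_tac [refl] 1);
by (resolve_tac [basic] 1);
\end{ttbox}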

Conditional expressions are available with the notation
\[ \dquotes
   "if"~formula~"then"~term~"else"~term. \]

Figure~\ref{lk-grammar} presents the grammar of LK.  Traditionally,
\(\Gamma\) and \(\Delta\) are meta-variables for sequences.  In Isabelle's
notation, the prefix~\verb|$| on a term makes it range over sequences.
In a sequent, anything not prefixed by \verb|$| is taken as a formula.

The notation \texttt{<<$sequence$>>} stands for a sequence of formul\ae{}.
For example, you can declare the constant \texttt{imps} to consist of two
implications:
\begin{ttbox}
consts     P,Q,R :: o
constdefs imps :: seq'=>seq'
         "imps == <<P --> Q, Q --> R>>"
\end{ttbox}
Then you can use it in axioms and goals, for example
\begin{ttbox}
Goalw [imps_def] "P, $imps |- R";
{\out Level 0}
{\out P, $imps |- R}
{\out  1. P, P --> Q, Q --> R |- R}
by (Fast_tac 1);
{\out Level 1}
{\out P, $imps |- R}
{\out No subgoals!}
\end{ttbox}

Figures~\ref{lk-basic-rules} and~\ref{lk-rules} present the rules of theory
\thydx{LK}.  The connective $\bimp$ is defined using $\conj$ and $\imp$.  The
axiom for basic sequents is expressed in a form that provides automatic
thinning: redundant formulae are simply ignored.  The other rules are
expressed in the form most suitable for backward proof; exchange and
contraction rules are not normally required, although they are provided
anyway.
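For example, the following goal is already an instance of \texttt{basic}, even
though each side carries an extra formula; the redundant formulae are simply
ignored (a hypothetical one-step proof):
\begin{ttbox}
Goal "P, Q |- Q, R";
by (resolve_tac [basic] 1);
\end{ttbox}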


\begin{figure}
\begin{ttbox}
\tdx{thinR}        $H |- $E, $F ==> $H |- $E, P, $F
\tdx{thinL}        $H, $G |- $E ==> $H, P, $G |- $E

\tdx{contR}        $H |- $E, P, P, $F ==> $H |- $E, P, $F
\tdx{contL}        $H, P, P, $G |- $E ==> $H, P, $G |- $E

\tdx{symR}         $H |- $E, $F, a=b ==> $H |- $E, b=a, $F
\tdx{symL}         $H, $G, b=a |- $E ==> $H, a=b, $G |- $E

\tdx{transR}       [| $H|- $E, $F, a=b;  $H|- $E, $F, b=c |]
             ==> $H|- $E, a=c, $F

\tdx{TrueR}        $H |- $E, True, $F

\tdx{iffR}         [| $H, P |- $E, Q, $F;  $H, Q |- $E, P, $F |]
             ==> $H |- $E, P<->Q, $F

\tdx{iffL}         [| $H, $G |- $E, P, Q;  $H, Q, P, $G |- $E |]
             ==> $H, P<->Q, $G |- $E

\tdx{allL_thin}    $H, P(x), $G |- $E ==> $H, ALL x. P(x), $G |- $E
\tdx{exR_thin}     $H |- $E, P(x), $F ==> $H |- $E, EX x. P(x), $F

\tdx{the_equality} [| $H |- $E, P(a), $F;
                !!x. $H, P(x) |- $E, x=a, $F |]
             ==> $H |- $E, (THE x. P(x)) = a, $F
\end{ttbox}

\caption{Derived rules for {\tt LK}} \label{lk-derived}
\end{figure}

Figure~\ref{lk-derived} presents derived rules, including rules for
$\bimp$.  The weakened quantifier rules discard each quantification after a
single use; in an automatic proof procedure, they guarantee termination,
but are incomplete.  Multiple use of a quantifier can be obtained by a
contraction rule, which in backward proof duplicates a formula.  The tactic
{\tt res_inst_tac} can instantiate the variable~{\tt?P} in these rules,
specifying the formula to duplicate.
See theory {\tt Sequents/LK0} in the sources for complete listings of
the rules and derived rules.
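For example, to duplicate an existential formula on the right of subgoal~1
before instantiating it, one might write the following, where \texttt{Q} is a
hypothetical predicate standing for whatever formula is to be duplicated:
\begin{ttbox}
by (res_inst_tac [("P", "EX x. Q(x)")] contR 1);
\end{ttbox}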

To support the simplifier, hundreds of equivalences are proved for
the logical connectives and for if-then-else expressions.  See the file
\texttt{Sequents/simpdata.ML}.

\section{Automatic Proof}

LK instantiates Isabelle's simplifier.  Both equality ($=$) and the
biconditional ($\bimp$) may be used for rewriting.  The tactic
\texttt{Simp_tac} refers to the default simpset (\texttt{simpset()}).  With
sequents, the \texttt{full_} and \texttt{asm_} forms of the simplifier are not
required; all the formulae in the sequent will be simplified.  The left-hand
formulae are taken as rewrite rules.  (Thus, the behaviour is what you would
normally expect from calling \texttt{Asm_full_simp_tac}.)
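For example, a simple propositional goal such as the following should be
within reach of a single simplifier call (a hypothetical session, not checked
here; output omitted):
\begin{ttbox}
Goal "|- P & True <-> P";
by (Simp_tac 1);
\end{ttbox}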

For classical reasoning, several tactics are available:
\begin{ttbox}
Safe_tac : int -> tactic
Step_tac : int -> tactic
Fast_tac : int -> tactic
Best_tac : int -> tactic
Pc_tac   : int -> tactic
\end{ttbox}
These refer not to the standard classical reasoner but to a separate one
provided for the sequent calculus.  Two commands are available for adding new
sequent calculus rules, safe or unsafe, to the default ``theorem pack'':
\begin{ttbox}
Add_safes   : thm list -> unit
Add_unsafes : thm list -> unit
\end{ttbox}
To control the set of rules for individual invocations, lower-case versions of
all these primitives are available.  Sections~\ref{sec:thm-pack}
and~\ref{sec:sequent-provers} give full details.
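For instance, one might make the derived symmetry rules available to these
tactics as safe rules, and register thinning on the right as an unsafe last
resort (a hypothetical extension of the default pack):
\begin{ttbox}
Add_safes [symR, symL];
Add_unsafes [thinR];
\end{ttbox}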


\section{Tactics for the cut rule}

According to the cut-elimination theorem, the cut rule can be eliminated
from proofs of sequents.  But the rule is still essential.  It can be used
to structure a proof into lemmas, avoiding repeated proofs of the same
formula.  More importantly, the cut rule cannot be eliminated from
derivations of rules.  For example, there is a trivial cut-free proof of
the sequent \(P\conj Q\turn Q\conj P\).
Noting this, we might want to derive a rule for swapping the conjuncts
in a right-hand formula:
\[ \Gamma\turn \Delta, P\conj Q\over \Gamma\turn \Delta, Q\conj P \]
The cut rule must be used, for $P\conj Q$ is not a subformula of $Q\conj
P$.  Most cuts directly involve a premise of the rule being derived (a
meta-assumption).  In a few cases, the cut formula is not part of any
premise, but serves as a bridge between the premises and the conclusion.
In such proofs, the cut formula is specified by calling an appropriate
tactic.

\begin{ttbox}
cutR_tac : string -> int -> tactic
cutL_tac : string -> int -> tactic
\end{ttbox}
These tactics refine a subgoal into two by applying the cut rule.  The cut
formula is given as a string, and replaces some other formula in the sequent.
\begin{ttdescription}
\item[\ttindexbold{cutR_tac} {\it P\/} {\it i}] reads an LK formula~$P$, and
  applies the cut rule to subgoal~$i$.  It then deletes some formula from the
  right side of subgoal~$i$, replacing that formula by~$P$.

\item[\ttindexbold{cutL_tac} {\it P\/} {\it i}] reads an LK formula~$P$, and
  applies the cut rule to subgoal~$i$.  It then deletes some formula from the
  left side of the new subgoal $i+1$, replacing that formula by~$P$.
\end{ttdescription}
All the structural rules --- cut, contraction, and thinning --- can be
applied to particular formulae using {\tt res_inst_tac}.
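As an illustration, the conjunct-swapping rule above might be derived along
the following lines (a hypothetical script, not checked here; the exact form
of the subgoals produced by \texttt{cutR_tac} may differ in detail):
\begin{ttbox}
val prems = Goal "$H |- $E, P & Q ==> $H |- $E, Q & P";
by (cutR_tac "P & Q" 1);
by (resolve_tac prems 1);
by (Fast_tac 1);
qed "conj_swapR";
\end{ttbox}
The first premise of the cut should be closed by the meta-assumption, while
the remaining subgoal is propositional and falls to \texttt{Fast_tac}.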


\section{Tactics for sequents}
\begin{ttbox}
forms_of_seq       : term -> term list
could_res          : term * term -> bool
could_resolve_seq  : term * term -> bool
filseq_resolve_tac : thm list -> int -> int -> tactic
\end{ttbox}
Associative unification is not as efficient as it might be, in part because
the representation of lists defeats some of Isabelle's internal
optimisations.  The following operations implement faster rule application,
and may have other uses.
\begin{ttdescription}
\item[\ttindexbold{forms_of_seq} {\it t}]
returns the list of all formulae in the sequent~$t$, removing sequence
variables.

\item[\ttindexbold{could_res} ($t$,$u$)]
tests whether two formula lists could be resolved.  List $t$ is from a
premise or subgoal, while $u$ is from the conclusion of an object-rule.
Assuming that each formula in $u$ is surrounded by sequence variables, it
checks that each conclusion formula is unifiable (using {\tt could_unify})
with some subgoal formula.

\item[\ttindexbold{could_resolve_seq} ($t$,$u$)]
  tests whether two sequents could be resolved.  Sequent $t$ is a premise
  or subgoal, while $u$ is the conclusion of an object-rule.  It simply
  calls {\tt could_res} twice to check that both the left and the right
  sides of the sequents are compatible.

\item[\ttindexbold{filseq_resolve_tac} {\it thms} {\it maxr} {\it i}]
uses {\tt filter_thms could_resolve} to extract the {\it thms} that are
applicable to subgoal~$i$.  If more than {\it maxr\/} theorems are
applicable then the tactic fails.  Otherwise it calls {\tt resolve_tac}.
Thus, it is the sequent calculus analogue of \ttindex{filt_resolve_tac}.
\end{ttdescription}
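For example, the following call tries a fixed list of rules on subgoal~1, but
fails rather than guess if more than two of them could apply (a hypothetical
invocation):
\begin{ttbox}
by (filseq_resolve_tac [conjR, conjL, disjR, disjL] 2 1);
\end{ttbox}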


\section{A simple example of classical reasoning}
The theorem $\turn\ex{y}\all{x}P(y)\imp P(x)$ is a standard example of the
classical treatment of the existential quantifier.  Classical reasoning is
easy using~LK, as you can see by comparing this proof with the one given in
the FOL manual~\cite{isabelle-ZF}.  From a logical point of view, the proofs
are essentially the same; the key step here is to use \tdx{exR} rather than
the weaker~\tdx{exR_thin}.
\begin{ttbox}
Goal "|- EX y. ALL x. P(y)-->P(x)";
{\out Level 0}
{\out  |- EX y. ALL x. P(y) --> P(x)}
{\out  1.  |- EX y. ALL x. P(y) --> P(x)}
by (resolve_tac [exR] 1);
{\out Level 1}
{\out  |- EX y. ALL x. P(y) --> P(x)}
{\out  1.  |- ALL x. P(?x) --> P(x), EX x. ALL xa. P(x) --> P(xa)}
\end{ttbox}
There are now two formulae on the right side.  Keeping the existential one
in reserve, we break down the universal one.
\begin{ttbox}
by (resolve_tac [allR] 1);
{\out Level 2}
{\out  |- EX y. ALL x. P(y) --> P(x)}
{\out  1. !!x.  |- P(?x) --> P(x), EX x. ALL xa. P(x) --> P(xa)}
by (resolve_tac [impR] 1);
{\out Level 3}
{\out  |- EX y. ALL x. P(y) --> P(x)}
{\out  1. !!x. P(?x) |- P(x), EX x. ALL xa. P(x) --> P(xa)}
\end{ttbox}
Because LK is a sequent calculus, the formula~$P(\Var{x})$ does not become an
assumption; instead, it moves to the left side.  The resulting subgoal cannot
be instantiated to a basic sequent: the bound variable~$x$ is not unifiable
with the unknown~$\Var{x}$.
\begin{ttbox}
by (resolve_tac [basic] 1);
{\out by: tactic failed}
\end{ttbox}
We reuse the existential formula using~\tdx{exR_thin}, which discards
it; we shall not need it a third time.  We again break down the resulting
formula.
\begin{ttbox}
by (resolve_tac [exR_thin] 1);
{\out Level 4}
{\out  |- EX y. ALL x. P(y) --> P(x)}
{\out  1. !!x. P(?x) |- P(x), ALL xa. P(?x7(x)) --> P(xa)}
by (resolve_tac [allR] 1);
{\out Level 5}
{\out  |- EX y. ALL x. P(y) --> P(x)}
{\out  1. !!x xa. P(?x) |- P(x), P(?x7(x)) --> P(xa)}
by (resolve_tac [impR] 1);
{\out Level 6}
{\out  |- EX y. ALL x. P(y) --> P(x)}
{\out  1. !!x xa. P(?x), P(?x7(x)) |- P(x), P(xa)}
\end{ttbox}
Subgoal~1 seems to offer lots of possibilities.  Actually the only useful
step is instantiating~$\Var{x@7}$ to $\lambda x. x$,
transforming~$\Var{x@7}(x)$ into~$x$.
\begin{ttbox}
by (resolve_tac [basic] 1);
{\out Level 7}
{\out  |- EX y. ALL x. P(y) --> P(x)}
{\out No subgoals!}
\end{ttbox}
This theorem can be proved automatically.  Because it involves quantifier
duplication, we employ best-first search:
\begin{ttbox}
Goal "|- EX y. ALL x. P(y)-->P(x)";
{\out Level 0}
{\out  |- EX y. ALL x. P(y) --> P(x)}
{\out  1.  |- EX y. ALL x. P(y) --> P(x)}
by (best_tac LK_dup_pack 1);
{\out Level 1}
{\out  |- EX y. ALL x. P(y) --> P(x)}
{\out No subgoals!}
\end{ttbox}



\section{A more complex proof}
Many of Pelletier's test problems for theorem provers \cite{pelletier86}
can be solved automatically.  Problem~39 concerns set theory, asserting
that there is no Russell set --- a set consisting of those sets that are
not members of themselves:
\[  \turn \neg (\exists x. \forall y. y\in x \bimp y\not\in y) \]
This does not require special properties of membership; we may generalize
$x\in y$ to an arbitrary predicate~$F(x,y)$.  The theorem, which is trivial
for \texttt{Fast_tac}, has a short manual proof.  See the directory {\tt
  Sequents/LK} for many more examples.

We set the main goal and move the negated formula to the left.
\begin{ttbox}
Goal "|- ~ (EX x. ALL y. F(y,x) <-> ~F(y,y))";
{\out Level 0}
{\out  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}
{\out  1.  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}
by (resolve_tac [notR] 1);
{\out Level 1}
{\out  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}
{\out  1. EX x. ALL y. F(y,x) <-> ~ F(y,y) |-}
\end{ttbox}
The right side is empty; we strip both quantifiers from the formula on the
left.
\begin{ttbox}
by (resolve_tac [exL] 1);
{\out Level 2}
{\out  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}
{\out  1. !!x. ALL y. F(y,x) <-> ~ F(y,y) |-}
by (resolve_tac [allL_thin] 1);
{\out Level 3}
{\out  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}
{\out  1. !!x. F(?x2(x),x) <-> ~ F(?x2(x),?x2(x)) |-}
\end{ttbox}
The rule \tdx{iffL} says that if $P\bimp Q$ then $P$ and~$Q$ are either
both true or both false.  It yields two subgoals.
\begin{ttbox}
by (resolve_tac [iffL] 1);
{\out Level 4}
{\out  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}
{\out  1. !!x.  |- F(?x2(x),x), ~ F(?x2(x),?x2(x))}
{\out  2. !!x. ~ F(?x2(x),?x2(x)), F(?x2(x),x) |-}
\end{ttbox}
We must instantiate~$\Var{x@2}$, the shared unknown, to satisfy both
subgoals.  Beginning with subgoal~2, we move a negated formula to the left
and create a basic sequent.
\begin{ttbox}
by (resolve_tac [notL] 2);
{\out Level 5}
{\out  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}
{\out  1. !!x.  |- F(?x2(x),x), ~ F(?x2(x),?x2(x))}
{\out  2. !!x. F(?x2(x),x) |- F(?x2(x),?x2(x))}
by (resolve_tac [basic] 2);
{\out Level 6}
{\out  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}
{\out  1. !!x.  |- F(x,x), ~ F(x,x)}
\end{ttbox}
Thanks to the instantiation of~$\Var{x@2}$, subgoal~1 is obviously true.
\begin{ttbox}
by (resolve_tac [notR] 1);
{\out Level 7}
{\out  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}
{\out  1. !!x. F(x,x) |- F(x,x)}
by (resolve_tac [basic] 1);
{\out Level 8}
{\out  |- ~ (EX x. ALL y. F(y,x) <-> ~ F(y,y))}
{\out No subgoals!}
\end{ttbox}

\section{*Unification for lists}\label{sec:assoc-unification}

Higher-order unification includes associative unification as a special
case, by an encoding that involves function composition
\cite[page~37]{huet78}.  To represent lists, let $C$ be a new constant.
The empty list is $\lambda x. x$, while $[t@1,t@2,\ldots,t@n]$ is
represented by
\[ \lambda x. C(t@1,C(t@2,\ldots,C(t@n,x))).  \]
The unifiers of this with $\lambda x.\Var{f}(\Var{g}(x))$ give all the ways
of expressing $[t@1,t@2,\ldots,t@n]$ as the concatenation of two lists.
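For instance, when $n=2$ there are three such unifiers, one for each way of
splitting the two-element list into a front and a back part:
\[ \begin{array}{ll}
   \Var{f}\mapsto \lambda x. x,  &  \Var{g}\mapsto \lambda x. C(t@1,C(t@2,x)) \\
   \Var{f}\mapsto \lambda x. C(t@1,x),  &  \Var{g}\mapsto \lambda x. C(t@2,x) \\
   \Var{f}\mapsto \lambda x. C(t@1,C(t@2,x)),  &  \Var{g}\mapsto \lambda x. x
   \end{array}
\]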

Unlike orthodox associative unification, this technique can represent certain
infinite sets of unifiers by flex-flex equations.  But note that the term
$\lambda x. C(t,\Var{a})$ does not represent any list.  Flex-flex constraints
containing such garbage terms may accumulate during a proof.
\index{flex-flex constraints}

This technique lets Isabelle formalize sequent calculus rules,
where the comma is the associative operator:
\[ \infer[(\conj\hbox{-left})]
         {\Gamma,P\conj Q,\Delta \turn \Theta}
         {\Gamma,P,Q,\Delta \turn \Theta}  \]
Multiple unifiers occur whenever this is resolved against a goal containing
more than one conjunction on the left.

LK exploits this representation of lists.  As an alternative, the sequent
calculus can be formalized using an ordinary representation of lists, with a
logic program for removing a formula from a list.  Amy Felty has applied this
technique using the language $\lambda$Prolog~\cite{felty91a}.

Explicit formalization of sequents can be tiresome.  But it gives precise
control over contraction and weakening, and is essential to handle relevant
and linear logics.


\section{*Packaging sequent rules}\label{sec:thm-pack}

The sequent calculi come with simple proof procedures.  These are incomplete
but are reasonably powerful for interactive use.  They expect rules to be
classified as \textbf{safe} or \textbf{unsafe}.  A rule is safe if applying it to a
provable goal always yields provable subgoals.  If a rule is safe then it can
be applied automatically to a goal without destroying our chances of finding a
proof.  For instance, all the standard rules of the classical sequent calculus
{\sc lk} are safe.  An unsafe rule may render the goal unprovable; typical
examples are the weakened quantifier rules {\tt allL_thin} and {\tt exR_thin}.

Proof procedures use safe rules whenever possible, using an unsafe rule as a
last resort.  Those safe rules are preferred that generate the fewest
subgoals.  Safe rules are (by definition) deterministic, while the unsafe
rules require a search strategy, such as backtracking.

A \textbf{pack} is a pair whose first component is a list of safe rules and
whose second is a list of unsafe rules.  Packs can be extended in an obvious
way to allow reasoning with various collections of rules.  For clarity, LK
declares \mltydx{pack} as an \ML{} datatype, although it is essentially a type
synonym:
\begin{ttbox}
datatype pack = Pack of thm list * thm list;
\end{ttbox}
Pattern-matching using constructor {\tt Pack} can inspect a pack's
contents.  Packs support the following operations:
\begin{ttbox}
pack        : unit -> pack
pack_of     : theory -> pack
empty_pack  : pack
prop_pack   : pack
LK_pack     : pack
LK_dup_pack : pack
add_safes   : pack * thm list -> pack               \hfill\textbf{infix 4}
add_unsafes : pack * thm list -> pack               \hfill\textbf{infix 4}
\end{ttbox}
\begin{ttdescription}
\item[\ttindexbold{pack}] returns the pack attached to the current theory.

\item[\ttindexbold{pack_of $thy$}] returns the pack attached to theory $thy$.

\item[\ttindexbold{empty_pack}] is the empty pack.

\item[\ttindexbold{prop_pack}] contains the propositional rules, namely
those for $\conj$, $\disj$, $\neg$, $\imp$ and~$\bimp$, along with the
rules {\tt basic} and {\tt refl}.  These are all safe.

\item[\ttindexbold{LK_pack}]
extends {\tt prop_pack} with the safe rules {\tt allR}
and~{\tt exL} and the unsafe rules {\tt allL_thin} and
{\tt exR_thin}.  Search using this is incomplete since quantified
formulae are used at most once.

\item[\ttindexbold{LK_dup_pack}]
extends {\tt prop_pack} with the safe rules {\tt allR}
and~{\tt exL} and the unsafe rules \tdx{allL} and~\tdx{exR}.
Search using this is complete, since quantified formulae may be reused, but
frequently fails to terminate.  It is generally unsuitable for depth-first
search.

\item[$pack$ \ttindexbold{add_safes} $rules$]
adds some safe~$rules$ to the pack~$pack$.

\item[$pack$ \ttindexbold{add_unsafes} $rules$]
adds some unsafe~$rules$ to the pack~$pack$.
\end{ttdescription}
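For example, a pack tailored to a particular proof might be built from
\texttt{LK_pack} and passed to the tactics of \S\ref{sec:sequent-provers}
(a hypothetical snippet):
\begin{ttbox}
val my_pack = LK_pack add_safes [symR, symL] add_unsafes [thinR];
by (fast_tac my_pack 1);
\end{ttbox}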


\section{*Proof procedures}\label{sec:sequent-provers}

The LK proof procedure is similar to the classical reasoner described in
\iflabelundefined{chap:classical}{the {\em Reference Manual\/}}%
            {Chap.\ts\ref{chap:classical}}.
%
In fact it is simpler, since it works directly with sequents rather than
simulating them.  There is no need to distinguish introduction rules from
elimination rules, and of course there is no swap rule.  As always,
Isabelle's classical proof procedures are less powerful than resolution
theorem provers.  But they are more natural and flexible, working with an
open-ended set of rules.

Backtracking over the choice of a safe rule accomplishes nothing: applying
them in any order leads to essentially the same result.  Backtracking may
be necessary over basic sequents when they perform unification.  Suppose
that~0, 1, 2,~3 are constants in the subgoals
\[  \begin{array}{c}
      P(0), P(1), P(2) \turn P(\Var{a})  \\
      P(0), P(2), P(3) \turn P(\Var{a})  \\
      P(1), P(3), P(2) \turn P(\Var{a})
    \end{array}
\]
The only assignment that satisfies all three subgoals is $\Var{a}\mapsto 2$,
and this can only be discovered by search.  The tactics given below permit
backtracking only over axioms, such as {\tt basic} and {\tt refl};
otherwise they are deterministic.


\subsection{Method A}
\begin{ttbox}
reresolve_tac   : thm list -> int -> tactic
repeat_goal_tac : pack -> int -> tactic
pc_tac          : pack -> int -> tactic
\end{ttbox}
These tactics use a method developed by Philippe de Groote.  A subgoal is
refined and the resulting subgoals are attempted in reverse order.  For
some reason, this is much faster than attempting the subgoals in order.
The method is inherently depth-first.

At present, these tactics only work for rules that have no more than two
premises.  They fail --- return no next state --- if they can do nothing.
\begin{ttdescription}
\item[\ttindexbold{reresolve_tac} $thms$ $i$]
repeatedly applies the $thms$ to subgoal $i$ and the resulting subgoals.

\item[\ttindexbold{repeat_goal_tac} $pack$ $i$]
applies the safe rules in the pack to a goal and the resulting subgoals.
If no safe rule is applicable then it applies an unsafe rule and continues.

\item[\ttindexbold{pc_tac} $pack$ $i$]
applies {\tt repeat_goal_tac} using depth-first search to solve subgoal~$i$.
\end{ttdescription}
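As an illustration, \texttt{pc_tac} with the default pack suffices for goals
that need each quantifier only once (a hypothetical session, output omitted):
\begin{ttbox}
Goal "|- (EX x. P(x)) <-> ~ (ALL x. ~ P(x))";
by (pc_tac LK_pack 1);
\end{ttbox}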


\subsection{Method B}
\begin{ttbox}
safe_tac : pack -> int -> tactic
step_tac : pack -> int -> tactic
fast_tac : pack -> int -> tactic
best_tac : pack -> int -> tactic
\end{ttbox}
These tactics are analogous to those of the generic classical
reasoner.  They use `Method~A' only on safe rules.  They fail if they
can do nothing.
\begin{ttdescription}
\item[\ttindexbold{safe_tac} $pack$ $i$]
applies the safe rules in the pack to a goal and the resulting subgoals.
It ignores the unsafe rules.

\item[\ttindexbold{step_tac} $pack$ $i$]
either applies safe rules (using {\tt safe_tac}) or applies one unsafe
rule.

\item[\ttindexbold{fast_tac} $pack$ $i$]
applies {\tt step_tac} using depth-first search to solve subgoal~$i$.
Despite its name, it is frequently slower than {\tt pc_tac}.

\item[\ttindexbold{best_tac} $pack$ $i$]
applies {\tt step_tac} using best-first search to solve subgoal~$i$.  It is
particularly useful for quantifier duplication (using \ttindex{LK_dup_pack}).
\end{ttdescription}
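When exploring a proof interactively, one can also iterate \texttt{step_tac}
by hand instead of committing to a full search (a hypothetical invocation):
\begin{ttbox}
by (REPEAT (step_tac LK_pack 1));
\end{ttbox}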


\index{sequent calculus|)}