Formal statement is: lemma poly_replicate_append: "poly (monom 1 n * p) (x::'a::comm_ring_1) = x^n * poly p x" Informal statement is: For a polynomial $p$ over a commutative ring, evaluating $x^n \cdot p$ at a point $x$ gives $x^n \cdot p(x)$; that is, $\mathrm{poly}\,(x^n \cdot p)\,x = x^n \cdot \mathrm{poly}\,p\,x$. |
PROGRAM COMPTEST
PRINT*, 'SUCCESS'
STOP
END
|
(* 1. How to use auxiliary lemmas for both forward and backward reasoning *)
(* 2. How to reason about data constructors *)
(* 3. How to obtain a stronger induction hypothesis *)
Set Warnings "-notation-overridden,-parsing".
From LF Require Export Poly.
Theorem silly1 : forall (n m o p : nat),
n = m ->
[n;o] = [n;p] ->
[n;o] = [m;p].
Proof.
intros n m o p eq1 eq2.
rewrite <- eq1.
apply eq2. Qed.
Theorem silly2 : forall (n m o p : nat),
n = m ->
(forall (q r : nat), q = r -> [q;o] = [r;p]) ->
[n;o] = [m;p].
Proof.
intros n m o p eq1 eq2.
apply eq2. apply eq1. Qed.
Theorem silly2a : forall (n m : nat),
(n,n) = (m,m) ->
(forall (q r : nat), (q,q) = (r,r) -> [q] = [r]) ->
[n] = [m].
Proof.
intros n m eq1 eq2.
apply eq2. apply eq1. Qed.
(* Exercise: 2 stars, standard, optional (silly_ex) *)
Theorem silly_ex :
(forall n, evenb n = true -> oddb (S n) = true) ->
oddb 3 = true ->
evenb 4 = true.
Proof.
intros n eq1. apply eq1.
Qed.
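(* Note: [apply eq1] closes the goal because both [oddb 3 = true] and
   [evenb 4 = true] compute to [true = true], and [apply] unifies the
   hypothesis with the goal up to computation. *)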
Theorem silly3_firsttry : forall (n : nat),
true = (n =? 5) ->
(S (S n)) =? 7 = true.
Proof.
intros n H.
symmetry.
simpl.
apply H. Qed.
Theorem silly3 : forall (n : nat),
  true = (n =? 5) -> (S (S n)) =? 7 = true.
Proof.
intros n eql. simpl. rewrite -> eql. destruct n.
reflexivity. simpl. reflexivity.
Qed.
(* Exercise: 3 stars, standard (apply_exercise1) *)
Theorem rev_exercise1 : forall (l l' : list nat),
l = rev l' ->
l' = rev l.
Proof.
intros l l' H. rewrite <- rev_involutive.
rewrite H. rewrite -> rev_involutive. rewrite -> rev_involutive.
reflexivity. Qed.
Theorem rev_exercise1_apply : forall (l l' : list nat),
l = rev l' ->
l' = rev l.
Proof.
intros l l' H.
rewrite H.
symmetry.
apply rev_involutive.
Qed.
(* The [apply ... with] tactic *)
Example trans_eq_example : forall (a b c d e f : nat),
[a;b] = [c;d] ->
[c;d] = [e;f] ->
[a;b] = [e;f].
Proof.
intros a b c d e f eq1 eq2.
rewrite -> eq1. rewrite -> eq2. reflexivity. Qed.
Theorem trans_eq : forall (X:Type) (n m o : X),
n = m -> m = o -> n = o.
Proof.
intros X n m o eq1 eq2. rewrite -> eq1. rewrite -> eq2.
reflexivity. Qed.
Example trans_eq_example' : forall (a b c d e f : nat),
[a;b] = [c;d] ->
[c;d] = [e;f] ->
[a;b] = [e;f].
Proof.
intros a b c d e f eq1 eq2.
apply trans_eq with (m:=[c;d]).
apply eq1. apply eq2.
Qed.
(* Exercise: 3 stars, standard, optional (apply_with_exercise) *)
Example trans_eq_exercise : forall (n m o p : nat),
m = (minustwo o) ->
(n + p) = m ->
(n + p) = (minustwo o).
Proof.
intros n m' o p eq1 eq2.
apply trans_eq with (m :=m').
apply eq2. apply eq1.
Qed.
(* The injection and discriminate tactics *)
(* By writing [injection H] at this point, we ask Coq to use the
   injectivity of constructors to generate all the equations it can
   infer from H. Each such equation is added as a premise to the goal.
   In this example, the premise n = m is added. *)
Theorem S_injective : forall (n m : nat),
S n = S m ->
n = m.
Proof.
intros n m H1.
assert (H2: n = pred (S n)). { reflexivity. }
rewrite H2. rewrite H1. reflexivity.
Qed.
Theorem S_injective' : forall (n m : nat),
S n = S m ->
n = m.
Proof.
intros n m H.
injection H. intros Hnm. apply Hnm.
Qed.
Theorem injection_ex1 : forall (n m o : nat),
[n; m] = [o; o] ->
[n] = [m].
Proof.
intros n m o H.
injection H. intros H1 H2.
rewrite H1. rewrite H2. reflexivity.
Qed.
Theorem injection_ex2 : forall (n m:nat),
[n] = [m] -> n = m.
Proof.
intros n m H.
injection H as Hnm. apply Hnm. Qed.
(* Exercise: 1 star, standard (injection_ex3) *)
Example injection_ex3 : forall (X : Type) (x y z : X) (l j : list X),
x :: y :: l = z :: j ->
y :: l = x :: j -> x = y.
Proof.
intros X x y z l j.
intros H HH. injection HH.
intros HH1 HH2. symmetry. apply HH2. Qed.
Theorem eqb_0_l : forall n,
0 =? n = true -> n = 0.
Proof.
intros n. intros H. destruct n. reflexivity. discriminate H. Qed.
Theorem discriminate_ex1 : forall (n : nat),
S n = O ->
2 + 2 = 5.
Proof.
intros n contra. discriminate contra. Qed.
Theorem discriminate_ex2 : forall (n m : nat),
false = true ->
[n] = [m].
Proof.
intros n m contra. discriminate contra. Qed.
(* Exercise: 1 star, standard (discriminate_ex3) *)
Example discriminate_ex3 :
forall (X : Type) (x y z : X) (l j : list X),
x :: y :: l = [] ->
x = z.
Proof.
intros X x y z l j.
intros contra. discriminate contra. Qed.
Theorem f_equal : forall (A B : Type) (f: A -> B) (x y: A),
x = y -> f x = f y.
Proof. intros A B f x y eq. rewrite eq. reflexivity. Qed.
(* Using Tactics on Hypotheses *)
Theorem S_inj : forall (n m : nat) (b : bool),
(S n) =? (S m) = b ->
n =? m = b.
Proof.
intros n m b H. simpl in H. apply H. Qed.
Theorem silly3' : forall(n : nat),
(n =? 5 = true -> (S (S n)) =? 7 = true) ->
true = (n =? 5) ->
true = ((S (S n)) =? 7).
Proof.
intros n eq H.
symmetry in H. apply eq in H. symmetry in H.
apply H. Qed.
Theorem plus_n_n_injective_1 : forall n m,
n + n = m + m ->
n = m.
Proof.
intros n. induction n as [| n'].
- simpl. intros m H. destruct m as [|m'].
+ reflexivity.
+ inversion H.
- simpl. intros m H. destruct m as [|m'].
+ inversion H.
+ apply f_equal.
apply IHn'. inversion H.
rewrite <- plus_n_Sm in H1.
symmetry in H1. rewrite <- plus_n_Sm in H1.
inversion H1. reflexivity.
Qed.
(* Varying the Induction Hypothesis *)
(* Sometimes it matters exactly which hypothesis H we end up with:
   performing [intros] at different moments yields different
   hypotheses, as the previous exercise clearly shows. *)
(* Exercise: 2 stars, standard (eqb_true)*)
Theorem eqb_true: forall n m,
n =? m = true -> n=m.
Proof.
intros n. induction n as [|n'].
- intros m H. destruct m. reflexivity. inversion H.
- intros m H. destruct m. inversion H. simpl in H.
apply IHn' in H. rewrite H. reflexivity.
Qed.
Theorem double_injective_take2_FAILED : forall n m,
double n = double m ->
n = m.
Proof.
intros n m. induction m as [| m'].
- (* m = O *) simpl. intros eq. destruct n as [| n'] eqn:E.
+ (* n = O *) reflexivity.
+ (* n = S n' *) discriminate eq.
- (* m = S m' *) intros eq. destruct n as [| n'] eqn:E.
+ (* n = O *) discriminate eq.
+ (* n = S n' *) apply f_equal.
(* Stuck again here, just like before. *)
Abort.
Theorem double_injective_take2 : forall n m,
double n = double m ->
n = m.
Proof.
intros n m.
(* n and m are both in the context *)
generalize dependent n.
(* Now n is back in the goal and we can do induction on
m and get a sufficiently general IH. *)
induction m as [| m'].
- (* m = O *) simpl. intros n eq. destruct n as [| n'] eqn:E.
+ (* n = O *) reflexivity.
+ (* n = S n' *) discriminate eq.
- (* m = S m' *) intros n eq. destruct n as [| n'] eqn:E.
+ (* n = O *) discriminate eq.
+ (* n = S n' *) apply f_equal.
apply IHm'. injection eq as goal. apply goal. Qed.
Theorem eqb_id_true : forall x y,
eqb_id x y = true -> x = y.
Proof.
intros [m] [n]. simpl. intros H.
assert (H' : m = n). { apply eqb_true. apply H. }
rewrite H'. reflexivity.
Qed.
(* Exercise: 3 stars, standard, recommended (gen_dep_practice) *)
(* Prove this by induction on l. *)
(* nth_error: if the list is shorter than n, nth_error returns None;
   otherwise it returns the n-th element counting from the left. *)
(* To prove: if length l = n, then nth_error l n returns None. *)
Theorem nth_error_after_last: forall (n : nat) (X : Type) (l : list X),
length l = n -> nth_error l n = None.
Proof.
intros n X l.
generalize dependent n.
induction l as [| l']. simpl. reflexivity.
intros n H. rewrite <- H. apply IHl. reflexivity.
Qed.
(* Unfolding Definitions *)
Theorem mult_comm: forall n m:nat, n*m=m*n.
Proof.
intros n m.
generalize dependent m.
induction n as [| n IHn].
- induction m as [| m IHm]. reflexivity.
simpl. apply IHm.
- induction m as [| m IHm]. simpl.
apply IHn.
(* assert(S n * m + S n = m * S n + S n). *)
(* rewrite IHm. reflexivity. *)
assert(n * S m= n * m + n).
{
rewrite IHn with (m:=S m).
simpl. rewrite plus_comm. symmetry.
rewrite IHn with (m:=m). reflexivity.
}
simpl in IHm. symmetry in IHm.
simpl.
assert(m + n * S m = n + m * S n).
rewrite H. rewrite IHm.
rewrite plus_assoc.
symmetry. rewrite plus_assoc.
rewrite plus_comm with (n:=m + n*m).
rewrite plus_assoc.
reflexivity.
rewrite H0. reflexivity.
Qed.
Theorem subthe1: forall n m p o:nat, n+m+(p+o)=n+p+(m+o).
Proof.
intros n m p o. rewrite plus_assoc.
symmetry. rewrite plus_assoc.
rewrite plus_comm with (m:=p).
rewrite plus_comm with (n:=n+m).
rewrite plus_assoc.
reflexivity.
Qed.
Theorem subthe2: forall n m p:nat, n*(m+p)=n*m+n*p.
Proof.
intros n m p. induction n as [| n' IHn']. simpl. reflexivity.
simpl. rewrite IHn'. apply subthe1.
Qed.
Theorem mult_assoc: forall n m p:nat,n*(m*p)=(n*m)*p.
Proof.
intros n m p. induction n as [| n' IHn].
- simpl. reflexivity.
- simpl. rewrite IHn. rewrite mult_comm with (n:=m+n'*m).
rewrite subthe2 with(n:=p).
rewrite mult_comm with(n:=p).
rewrite mult_comm with(m:=n'*m).
reflexivity.
Qed.
Definition square n := n * n.
Lemma square_mult: forall n m, square (n*m) = square n * square m.
Proof.
intros n m. unfold square.
rewrite mult_assoc.
assert (H : n * m * n = n * n * m).
{ rewrite mult_comm. apply mult_assoc. }
rewrite H. rewrite mult_assoc. reflexivity.
Qed.
Definition foo (x:nat) := 5.
Fact silly_fact_1: forall m, foo m+1 = foo (m + 1) + 1.
Proof.
intros m.
simpl.
reflexivity.
Qed.
Definition bar x:=
match x with
| 0 => 5
| S _ => 5
end.
Fact silly_fact_2_FAILED : forall m, bar m + 1 = bar (m + 1) + 1.
Proof.
intros m.
simpl. (* Does nothing! *)
Abort.
Fact silly_fact_2 : forall m, bar m + 1 = bar (m + 1) + 1.
Proof.
intros m.
destruct m eqn:E.
- simpl. reflexivity.
- simpl. reflexivity.
Qed.
Fact silly_fact_2': forall m, bar m+1 = bar (m+1)+1.
Proof.
intros m.
unfold bar.
destruct m eqn:E.
- reflexivity.
- reflexivity.
Qed.
Definition sillyfun (n : nat) : bool :=
if n =? 3 then false
else if n =? 5 then false
else false.
Theorem sillyfun_false : forall (n : nat),
sillyfun n = false.
Proof.
intros n. unfold sillyfun.
destruct (n =? 3) eqn:E1.
- (* n =? 3 = true *) reflexivity.
- (* n =? 3 = false *) destruct (n =? 5) eqn:E2.
+ (* n =? 5 = true *) reflexivity.
+ (* n =? 5 = false *) reflexivity. Qed.
(* Here is an implementation of the split function mentioned in chapter Poly: *)
Fixpoint split {X Y : Type} (l : list (X*Y))
: (list X) * (list Y) :=
match l with
| [] => ([], [])
| (x, y) :: t =>
match split t with
| (lx, ly) => (x :: lx, y :: ly)
end
end.
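(* A quick sanity check of [split] on a small list of pairs; the name
   [test_split_example] is illustrative. *)
Example test_split_example :
  split [(1,false);(2,true)] = ([1;2],[false;true]).
Proof. reflexivity. Qed.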
(* Prove that split and combine are inverses in the following sense: *)
Theorem combine_split : forall X Y (l : list (X * Y)) l1 l2,
split l = (l1, l2) ->
combine l1 l2 = l.
Proof.
intros X Y l.
induction l.
- intros l1 l2 H. simpl in H.
injection H as H. rewrite <- H. rewrite <- H0. simpl. reflexivity.
- destruct x as (x,y).
destruct l1 as [| x'].
+ intros l2 H. simpl in H.
destruct (split l) in H. discriminate H.
+ destruct l2 as [| y'].
* intros H. simpl in H. destruct (split l). discriminate H.
* intros H. simpl.
assert(G: split l = (l1, l2)). {
simpl in H. destruct (split l).
injection H as H. rewrite <- H0. rewrite <- H2. reflexivity.
}
apply IHl in G. rewrite <- G.
simpl in H. destruct (split l) in H. injection H as H.
rewrite <- H. rewrite <- H1. reflexivity.
Qed.
(* ↑ adapted from https://github.com/marshall-lee/software_foundations/blob/master/lf/Tactics.v *)
Definition sillyfun1 (n : nat) : bool :=
if n =? 3 then true
else if n =? 5 then true
else false.
Theorem sillyfun1_odd_FAILED : forall (n : nat),
sillyfun1 n = true ->
oddb n = true.
Proof.
intros n eq. unfold sillyfun1 in eq.
destruct (n =? 3).
(* stuck... *)
Abort.
Theorem sillyfun1_odd : forall (n : nat),
sillyfun1 n = true ->
oddb n = true.
Proof.
intros n eq. unfold sillyfun1 in eq.
destruct (n =? 3) eqn:Heqe3.
(* Now we have the same state as at the point where we got
stuck above, except that the context contains an extra
equality assumption, which is exactly what we need to
make progress. *)
- (* e3 = true *) apply eqb_true in Heqe3.
rewrite -> Heqe3. reflexivity.
- (* e3 = false *)
(* When we come to the second equality test in the body
of the function we are reasoning about, we can use
eqn: again in the same way, allowing us to finish the
proof. *)
destruct (n =? 5) eqn:Heqe5.
+ (* e5 = true *)
apply eqb_true in Heqe5.
rewrite -> Heqe5. reflexivity.
+ (* e5 = false *) discriminate eq. Qed.
(** **** Exercise: 2 stars, standard (destruct_eqn_practice) *)
Theorem bool_fn_applied_thrice :
forall (f : bool -> bool) (b : bool),
f (f (f b)) = f b.
Proof.
intros f b. destruct b eqn:H0.
- destruct (f true) eqn:H1.
+ destruct (f true)eqn:H2.
* apply H2.
* discriminate H1.
+ destruct (f false)eqn:H2.
* apply H1.
* apply H2.
- destruct (f false) eqn:H1.
+ destruct (f true)eqn:H2.
* apply H2.
* apply H1.
+ rewrite H1. apply H1.
Qed.
(** [] *)
(** We've now seen many of Coq's most fundamental tactics. We'll
introduce a few more in the coming chapters, and later on we'll
see some more powerful _automation_ tactics that make Coq help us
with low-level details. But basically we've got what we need to
get work done.
Here are the ones we've seen:
- [intros]: move hypotheses/variables from goal to context
- [reflexivity]: finish the proof (when the goal looks like [e =
e])
- [apply]: prove goal using a hypothesis, lemma, or constructor
- [apply... in H]: apply a hypothesis, lemma, or constructor to
a hypothesis in the context (forward reasoning)
- [apply... with...]: explicitly specify values for variables
that cannot be determined by pattern matching
- [simpl]: simplify computations in the goal
- [simpl in H]: ... or a hypothesis
- [rewrite]: use an equality hypothesis (or lemma) to rewrite
the goal
- [rewrite ... in H]: ... or a hypothesis
- [symmetry]: changes a goal of the form [t=u] into [u=t]
- [symmetry in H]: changes a hypothesis of the form [t=u] into
[u=t]
- [unfold]: replace a defined constant by its right-hand side in
the goal
- [unfold... in H]: ... or a hypothesis
- [destruct... as...]: case analysis on values of inductively
defined types
- [destruct... eqn:...]: specify the name of an equation to be
added to the context, recording the result of the case
analysis
- [induction... as...]: induction on values of inductively
defined types
- [injection]: reason by injectivity on equalities
between values of inductively defined types
- [discriminate]: reason by disjointness of constructors on
equalities between values of inductively defined types
- [assert (H: e)] (or [assert (e) as H]): introduce a "local
lemma" [e] and call it [H]
- [generalize dependent x]: move the variable [x] (and anything
else that depends on it) from the context back to an explicit
hypothesis in the goal formula *)
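(* A minimal example combining several of the tactics summarized above
   ([intros], [apply ... in], [rewrite], [reflexivity]); the name
   [tactics_summary_demo] is illustrative, and the proof reuses
   [eqb_true] from earlier in this file. *)
Example tactics_summary_demo : forall (n : nat),
  (n =? 2) = true -> (2 =? n) = true.
Proof.
  intros n H.           (* move [n] and the hypothesis into context *)
  apply eqb_true in H.  (* forward reasoning: [H : n = 2] *)
  rewrite H.            (* goal becomes [(2 =? 2) = true] *)
  reflexivity.          (* both sides compute to [true] *)
Qed.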
(* ################################################################# *)
(** * Additional Exercises *)
(** **** Exercise: 3 stars, standard (eqb_sym) *)
Theorem eqb_sym : forall (n m : nat),
(n =? m) = (m =? n).
Proof.
intros n m.
generalize dependent m.
induction n as [| n' IHn].
- induction m as [| m' IHm].
+ reflexivity.
+ simpl. reflexivity.
- induction m as [| m' IHm].
+ simpl. reflexivity.
+ simpl. rewrite IHn with (m:=m'). reflexivity.
Qed.
(** **** Exercise: 3 stars, standard, optional (eqb_trans) *)
Theorem eqb_trans : forall n m p,
n =? m = true ->
m =? p = true ->
n =? p = true.
Proof.
intros n m p.
generalize dependent m.
generalize dependent p.
induction n as [| n' IHn].
- induction m as [| m' IHm].
+ induction p as [| p' IHp].
* simpl. reflexivity.
* discriminate.
+ discriminate.
- induction m as [| m' IHm].
+ induction p as [| p' IHp].
* simpl. discriminate.
* simpl. discriminate.
+ induction p as [| p' IHp].
* simpl. discriminate.
* simpl. apply IHn.
Qed.
(** **** Exercise: 3 stars, advanced (split_combine)
We proved, in an exercise above, that for all lists of pairs,
[combine] is the inverse of [split]. How would you formalize the
statement that [split] is the inverse of [combine]? When is this
property true?
Complete the definition of [split_combine_statement] below with a
property that states that [split] is the inverse of
[combine]. Then, prove that the property holds. (Be sure to leave
your induction hypothesis general by not doing [intros] on more
things than necessary. Hint: what property do you need of [l1]
and [l2] for [split (combine l1 l2) = (l1,l2)] to be true?) *)
Definition split_combine_statement : Prop
(* ("[: Prop]" means that we are giving a name to a
logical proposition here.) *)
:= forall X Y (l1 : list X) (l2 : list Y), length l1 = length l2 -> split (combine l1 l2) = (l1, l2).
Theorem split_combine : split_combine_statement.
Proof.
intros X Y. induction l1 as [| x].
- simpl. intros l2 H. destruct l2 as [| y].
+ reflexivity.
+ discriminate.
- destruct l2 as [| y].
+ discriminate.
+ intros H. injection H as H.
apply IHl1 in H. simpl.
rewrite H. reflexivity.
Qed.
(** **** Exercise: 3 stars, advanced (filter_exercise)
This one is a bit challenging. Pay attention to the form of your
induction hypothesis. *)
Theorem filter_exercise : forall (X : Type) (test : X -> bool)
(x : X) (l lf : list X),
filter test l = x :: lf ->
test x = true.
Proof.
intros X test x l lf.
induction l as [| x'].
- simpl. discriminate.
- simpl. destruct (test x') eqn:H.
+ intros H1. injection H1 as H1. rewrite H1 in H. apply H.
+ apply IHl.
Qed.
(** **** Exercise: 4 stars, advanced, recommended (forall_exists_challenge)
Define two recursive [Fixpoints], [forallb] and [existsb]. The
first checks whether every element in a list satisfies a given
predicate:
forallb oddb [1;3;5;7;9] = true
forallb negb [false;false] = true
forallb evenb [0;2;4;5] = false
forallb (eqb 5) [] = true
The second checks whether there exists an element in the list that
satisfies a given predicate:
existsb (eqb 5) [0;2;3;6] = false
existsb (andb true) [true;true;false] = true
existsb oddb [1;0;0;0;0;3] = true
existsb evenb [] = false
Next, define a _nonrecursive_ version of [existsb] -- call it
[existsb'] -- using [forallb] and [negb].
Finally, prove a theorem [existsb_existsb'] stating that
[existsb'] and [existsb] have the same behavior. *)
Fixpoint forallb {X : Type} (test : X -> bool) (l : list X) : bool
(* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Example test_forallb_1 : forallb oddb [1;3;5;7;9] = true.
Proof. (* FILL IN HERE *) Admitted.
Example test_forallb_2 : forallb negb [false;false] = true.
Proof. (* FILL IN HERE *) Admitted.
Example test_forallb_3 : forallb evenb [0;2;4;5] = false.
Proof. (* FILL IN HERE *) Admitted.
Example test_forallb_4 : forallb (eqb 5) [] = true.
Proof. (* FILL IN HERE *) Admitted.
Fixpoint existsb {X : Type} (test : X -> bool) (l : list X) : bool
(* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Example test_existsb_1 : existsb (eqb 5) [0;2;3;6] = false.
Proof. (* FILL IN HERE *) Admitted.
Example test_existsb_2 : existsb (andb true) [true;true;false] = true.
Proof. (* FILL IN HERE *) Admitted.
Example test_existsb_3 : existsb oddb [1;0;0;0;0;3] = true.
Proof. (* FILL IN HERE *) Admitted.
Example test_existsb_4 : existsb evenb [] = false.
Proof. (* FILL IN HERE *) Admitted.
Definition existsb' {X : Type} (test : X -> bool) (l : list X) : bool
(* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Theorem existsb_existsb' : forall (X : Type) (test : X -> bool) (l : list X),
existsb test l = existsb' test l.
Proof. (* FILL IN HERE *) Admitted.
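(* One possible way to fill in the stubs above (a sketch, kept in a
   comment so it does not clash with the [Admitted] definitions):
     Fixpoint forallb {X : Type} (test : X -> bool) (l : list X) : bool :=
       match l with
       | [] => true
       | x :: t => andb (test x) (forallb test t)
       end.
     Fixpoint existsb {X : Type} (test : X -> bool) (l : list X) : bool :=
       match l with
       | [] => false
       | x :: t => orb (test x) (existsb test t)
       end.
     Definition existsb' {X : Type} (test : X -> bool) (l : list X) : bool :=
       negb (forallb (fun x => negb (test x)) l).
   The equivalence [existsb_existsb'] then follows by induction on [l],
   unfolding [existsb'] and doing case analysis on [test x]. *)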
(** [] *)
|
State Before: V : Type u_2
α : Type u_1
β : Type ?u.66478
G : SimpleGraph V
inst✝² : DecidableRel G.Adj
inst✝¹ : Fintype V
inst✝ : Semiring α
d : ℕ
a : α
hd : IsRegularOfDegree G d
v : V
⊢ mulVec (adjMatrix α G) (Function.const V a) v = ↑d * a State After: no goals Tactic: simp [hd v] |
(*
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
*
* SPDX-License-Identifier: GPL-2.0-only
*)
(*
Properties of machine operations.
*)
theory Machine_AI
imports Bits_AI
begin
definition
"no_irq f \<equiv> \<forall>P. \<lbrace>\<lambda>s. P (irq_masks s)\<rbrace> f \<lbrace>\<lambda>_ s. P (irq_masks s)\<rbrace>"
lemma wpc_helper_no_irq:
"no_irq f \<Longrightarrow> wpc_helper (P, P') (Q, Q') (no_irq f)"
by (simp add: wpc_helper_def)
wpc_setup "\<lambda>m. no_irq m" wpc_helper_no_irq
ML \<open>
structure CrunchNoIrqInstance : CrunchInstance =
struct
val name = "no_irq";
val prefix_name_scheme = true;
type extra = unit;
val eq_extra = op =;
fun parse_extra ctxt extra
= case extra of
"" => (Syntax.parse_term ctxt "%_. True", ())
| _ => error "no_irq does not need a precondition";
val has_preconds = false;
fun mk_term _ body _ =
(Syntax.parse_term @{context} "no_irq") $ body;
fun dest_term (Const (@{const_name no_irq}, _) $ body)
= SOME (Term.dummy, body, ())
| dest_term _ = NONE;
fun put_precond _ _ = error "crunch no_irq should not be calling put_precond";
val pre_thms = [];
val wpc_tactic = wp_cases_tactic_weak;
fun wps_tactic _ _ _ = no_tac;
val magic = Syntax.parse_term @{context}
"\<lambda>mapp_lambda_ignore. no_irq mapp_lambda_ignore";
val get_monad_state_type = get_nondet_monad_state_type;
end;
structure CrunchNoIrq : CRUNCH = Crunch(CrunchNoIrqInstance);
\<close>
setup \<open>
add_crunch_instance "no_irq" (CrunchNoIrq.crunch_x, CrunchNoIrq.crunch_ignore_add_dels)
\<close>
crunch_ignore (no_irq) (add:
NonDetMonad.bind return "when" get gets fail
assert put modify unless select
alternative assert_opt gets_the
returnOk throwError lift bindE
liftE whenE unlessE throw_opt
assertE liftM liftME sequence_x
zipWithM_x mapM_x sequence mapM sequenceE_x
mapME_x catch select_f
handleE' handleE handle_elseE forM forM_x
zipWithM ignore_failure)
context Arch begin
lemma det_getRegister: "det (getRegister x)"
by (simp add: getRegister_def)
lemma det_setRegister: "det (setRegister x w)"
by (simp add: setRegister_def det_def modify_def get_def put_def bind_def)
lemma det_getRestartPC: "det getRestartPC"
by (simp add: getRestartPC_def det_getRegister)
lemma det_setNextPC: "det (setNextPC p)"
by (simp add: setNextPC_def det_setRegister)
(* FIXME empty_fail: make all empty_fail [intro!, wp], and non-conditional ones [simp] *)
lemma ef_loadWord: "empty_fail (loadWord x)"
by (fastforce simp: loadWord_def)
lemma ef_storeWord: "empty_fail (storeWord x y)"
by (fastforce simp: storeWord_def)
lemma no_fail_getRestartPC: "no_fail \<top> getRestartPC"
by (simp add: getRestartPC_def getRegister_def)
lemma no_fail_loadWord [wp]: "no_fail (\<lambda>_. is_aligned p 3) (loadWord p)"
apply (simp add: loadWord_def is_aligned_mask [symmetric])
apply (rule no_fail_pre)
apply wp
apply simp
done
lemma no_fail_storeWord: "no_fail (\<lambda>_. is_aligned p 3) (storeWord p w)"
apply (simp add: storeWord_def is_aligned_mask [symmetric])
apply (rule no_fail_pre)
apply (wp)
apply simp
done
lemma no_fail_machine_op_lift [simp]:
"no_fail \<top> (machine_op_lift f)"
by (simp add: machine_op_lift_def)
lemma ef_machine_op_lift [simp]:
"empty_fail (machine_op_lift f)"
by (simp add: machine_op_lift_def)
lemma no_fail_setNextPC: "no_fail \<top> (setNextPC pc)"
by (simp add: setNextPC_def setRegister_def)
lemma no_fail_initL2Cache: "no_fail \<top> initL2Cache"
by (simp add: initL2Cache_def)
lemma no_fail_resetTimer[wp]: "no_fail \<top> resetTimer"
by (simp add: resetTimer_def)
lemma loadWord_inv: "\<lbrace>P\<rbrace> loadWord x \<lbrace>\<lambda>x. P\<rbrace>"
apply (simp add: loadWord_def)
apply wp
apply simp
done
lemma getRestartPC_inv: "\<lbrace>P\<rbrace> getRestartPC \<lbrace>\<lambda>rv. P\<rbrace>"
by (simp add: getRestartPC_def getRegister_def)
lemma no_fail_clearMemory[simp, wp]:
"no_fail (\<lambda>_. is_aligned p 3) (clearMemory p b)"
apply (simp add: clearMemory_def mapM_x_mapM)
apply (rule no_fail_pre)
apply (wp no_fail_mapM' no_fail_storeWord )
apply (clarsimp simp: upto_enum_step_def)
apply (erule aligned_add_aligned)
apply (simp add: word_size_def)
apply (rule is_aligned_mult_triv2 [where n = 3, simplified])
apply simp
done
lemma no_fail_freeMemory[simp, wp]:
"no_fail (\<lambda>_. is_aligned p 3) (freeMemory p b)"
apply (simp add: freeMemory_def mapM_x_mapM)
apply (rule no_fail_pre)
apply (wp no_fail_mapM' no_fail_storeWord)
apply (clarsimp simp: upto_enum_step_def)
apply (erule aligned_add_aligned)
apply (simp add: word_size_def)
apply (rule is_aligned_mult_triv2 [where n = 3, simplified])
apply simp
done
lemma no_fail_getActiveIRQ[wp]:
"no_fail \<top> (getActiveIRQ in_kernel)"
apply (simp add: getActiveIRQ_def)
apply (rule no_fail_pre)
apply (wp no_fail_select)
apply simp
done
definition "irq_state_independent P \<equiv> \<forall>f s. P s \<longrightarrow> P (irq_state_update f s)"
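(* Informally: irq_state_independent P says that P cannot observe the
   irq_state field, so it is preserved by any irq_state update; this is
   the side condition used by getActiveIRQ_inv below. *)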
lemma getActiveIRQ_inv [wp]:
"\<lbrakk>irq_state_independent P\<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> getActiveIRQ in_kernel \<lbrace>\<lambda>rv. P\<rbrace>"
apply (simp add: getActiveIRQ_def)
apply (wp alternative_wp select_wp)
apply (simp add: irq_state_independent_def)
done
lemma no_fail_ackInterrupt[wp]: "no_fail \<top> (ackInterrupt irq)"
by (simp add: ackInterrupt_def)
lemma no_fail_maskInterrupt[wp]: "no_fail \<top> (maskInterrupt irq bool)"
by (simp add: maskInterrupt_def)
lemma no_irq_use:
"\<lbrakk> no_irq f; (rv,s') \<in> fst (f s) \<rbrakk> \<Longrightarrow> irq_masks s' = irq_masks s"
apply (simp add: no_irq_def valid_def)
apply (erule_tac x="\<lambda>x. x = irq_masks s" in allE)
apply fastforce
done
lemma no_irq_machine_rest_lift:
"no_irq (machine_rest_lift f)"
apply (clarsimp simp: no_irq_def machine_rest_lift_def split_def)
apply wp
apply simp
done
crunch (no_irq) no_irq[wp, simp]: machine_op_lift
lemma no_irq:
"no_irq f \<Longrightarrow> \<lbrace>\<lambda>s. P (irq_masks s)\<rbrace> f \<lbrace>\<lambda>_ s. P (irq_masks s)\<rbrace>"
by (simp add: no_irq_def)
lemma no_irq_initL2Cache: "no_irq initL2Cache"
by (simp add: initL2Cache_def)
lemma no_irq_gets [simp]:
"no_irq (gets f)"
by (simp add: no_irq_def)
lemma no_irq_resetTimer: "no_irq resetTimer"
by (simp add: resetTimer_def)
lemma no_irq_debugPrint: "no_irq (debugPrint $ xs)"
by (simp add: no_irq_def)
context notes no_irq[wp] begin
lemma no_irq_ackInterrupt: "no_irq (ackInterrupt irq)"
by (wp | clarsimp simp: no_irq_def ackInterrupt_def)+
lemma no_irq_setIRQTrigger: "no_irq (setIRQTrigger irq bool)"
by (wp | clarsimp simp: no_irq_def setIRQTrigger_def)+
lemma no_irq_loadWord: "no_irq (loadWord x)"
apply (clarsimp simp: no_irq_def)
apply (rule loadWord_inv)
done
lemma no_irq_getActiveIRQ: "no_irq (getActiveIRQ in_kernel)"
apply (clarsimp simp: no_irq_def)
apply (rule getActiveIRQ_inv)
apply (simp add: irq_state_independent_def)
done
lemma no_irq_mapM:
"(\<And>x. x \<in> set xs \<Longrightarrow> no_irq (f x)) \<Longrightarrow> no_irq (mapM f xs)"
apply (subst no_irq_def)
apply clarify
apply (rule mapM_wp)
prefer 2
apply (rule order_refl)
apply (wp; simp)
done
lemma no_irq_mapM_x:
"(\<And>x. x \<in> set xs \<Longrightarrow> no_irq (f x)) \<Longrightarrow> no_irq (mapM_x f xs)"
apply (subst no_irq_def)
apply clarify
apply (rule mapM_x_wp)
prefer 2
apply (rule order_refl)
apply (wp; simp)
done
lemma no_irq_swp:
"no_irq (f y x) \<Longrightarrow> no_irq (swp f x y)"
by (simp add: swp_def)
lemma no_irq_seq [wp]:
"\<lbrakk> no_irq f; \<And>x. no_irq (g x) \<rbrakk> \<Longrightarrow> no_irq (f >>= g)"
apply (subst no_irq_def)
apply clarsimp
apply (rule hoare_seq_ext)
apply (wp|simp)+
done
lemma no_irq_return [simp, wp]: "no_irq (return v)"
unfolding no_irq_def return_def
by (rule allI, simp add: valid_def)
lemma no_irq_fail [simp, wp]: "no_irq fail"
unfolding no_irq_def fail_def
by (rule allI, simp add: valid_def)
lemma no_irq_assert [simp, wp]: "no_irq (assert P)"
unfolding assert_def by simp
lemma no_irq_modify:
"(\<And>s. irq_masks (f s) = irq_masks s) \<Longrightarrow> no_irq (modify f)"
unfolding modify_def no_irq_def
apply (rule allI, simp add: valid_def put_def get_def)
apply (clarsimp simp: in_monad)
done
lemma no_irq_storeWord: "no_irq (storeWord w p)"
apply (simp add: storeWord_def)
apply (wp no_irq_modify)
apply simp
done
lemma no_irq_when:
"\<lbrakk>P \<Longrightarrow> no_irq f\<rbrakk> \<Longrightarrow> no_irq (when P f)"
by (simp add: when_def)
lemma no_irq_clearMemory: "no_irq (clearMemory a b)"
apply (simp add: clearMemory_def)
apply (wp no_irq_mapM_x no_irq_storeWord)
done
lemma getActiveIRQ_le_maxIRQ':
"\<lbrace>\<lambda>s. \<forall>irq > maxIRQ. irq_masks s irq\<rbrace>
getActiveIRQ in_kernel
\<lbrace>\<lambda>rv s. \<forall>x. rv = Some x \<longrightarrow> x \<le> maxIRQ\<rbrace>"
apply (simp add: getActiveIRQ_def)
apply (wp alternative_wp select_wp)
apply clarsimp
apply (rule ccontr)
apply (simp add: linorder_not_le)
done
lemma getActiveIRQ_neq_non_kernel:
"\<lbrace>\<top>\<rbrace> getActiveIRQ True \<lbrace>\<lambda>rv s. rv \<notin> Some ` non_kernel_IRQs \<rbrace>"
apply (simp add: getActiveIRQ_def)
apply (wp alternative_wp select_wp)
apply auto
done
lemma dmo_getActiveIRQ_non_kernel[wp]:
"\<lbrace>\<top>\<rbrace> do_machine_op (getActiveIRQ True)
\<lbrace>\<lambda>rv s. \<forall>irq. rv = Some irq \<longrightarrow> irq \<in> non_kernel_IRQs \<longrightarrow> P irq s\<rbrace>"
unfolding do_machine_op_def
apply wpsimp
apply (drule use_valid, rule getActiveIRQ_neq_non_kernel, rule TrueI)
apply clarsimp
done
lemma empty_fail_initL2Cache: "empty_fail initL2Cache"
by (simp add: initL2Cache_def)
lemma empty_fail_clearMemory [simp, intro!]:
"\<And>a b. empty_fail (clearMemory a b)"
by (fastforce simp: clearMemory_def mapM_x_mapM ef_storeWord)
lemma no_irq_setVSpaceRoot:
"no_irq (setVSpaceRoot r a)"
unfolding setVSpaceRoot_def by wpsimp
lemma no_irq_hwASIDFlush:
"no_irq (hwASIDFlush r)"
unfolding hwASIDFlush_def by wpsimp
end
end
context begin interpretation Arch .
requalify_facts
det_getRegister
det_setRegister
det_getRestartPC
det_setNextPC
end
end
|
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
x y : α × α
⊢ Rel α x y → Rel α y x
[PROOFSTEP]
aesop (rule_sets [Sym2])
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
x y z : α × α
a : Rel α x y
b : Rel α y z
⊢ Rel α x z
[PROOFSTEP]
aesop (rule_sets [Sym2])
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
x y z w : α
⊢ Rel α (x, y) (z, w) ↔ x = z ∧ y = w ∨ x = w ∧ y = z
[PROOFSTEP]
aesop (rule_sets [Sym2])
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f : Sym2 α → Sym2 β → Prop
i : Sym2 α
j : Sym2 β
hf : ∀ (a₁ a₂ : α) (b₁ b₂ : β), f (Quotient.mk (Rel.setoid α) (a₁, a₂)) (Quotient.mk (Rel.setoid β) (b₁, b₂))
⊢ ∀ (a : α × α) (b : β × β), f (Quotient.mk (Rel.setoid α) a) (Quotient.mk (Rel.setoid β) b)
[PROOFSTEP]
intro ⟨a₁, a₂⟩ ⟨b₁, b₂⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f : Sym2 α → Sym2 β → Prop
i : Sym2 α
j : Sym2 β
hf : ∀ (a₁ a₂ : α) (b₁ b₂ : β), f (Quotient.mk (Rel.setoid α) (a₁, a₂)) (Quotient.mk (Rel.setoid β) (b₁, b₂))
a₁ a₂ : α
b₁ b₂ : β
⊢ f (Quotient.mk (Rel.setoid α) (a₁, a₂)) (Quotient.mk (Rel.setoid β) (b₁, b₂))
[PROOFSTEP]
exact hf _ _ _ _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a b : α
⊢ Quotient.mk (Rel.setoid α) (a, b) = Quotient.mk (Rel.setoid α) (b, a)
[PROOFSTEP]
rw [Quotient.eq]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a b : α
⊢ (a, b) ≈ (b, a)
[PROOFSTEP]
apply Rel.swap
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
p : α × α
⊢ Quotient.mk (Rel.setoid α) (Prod.swap p) = Quotient.mk (Rel.setoid α) p
[PROOFSTEP]
cases p
[GOAL]
case mk
α : Type u_1
β : Type u_2
γ : Type u_3
fst✝ snd✝ : α
⊢ Quotient.mk (Rel.setoid α) (Prod.swap (fst✝, snd✝)) = Quotient.mk (Rel.setoid α) (fst✝, snd✝)
[PROOFSTEP]
exact eq_swap
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a b c : α
⊢ Quotient.mk (Rel.setoid α) (a, b) = Quotient.mk (Rel.setoid α) (a, c) ↔ b = c
[PROOFSTEP]
constructor
[GOAL]
case mp
α : Type u_1
β : Type u_2
γ : Type u_3
a b c : α
⊢ Quotient.mk (Rel.setoid α) (a, b) = Quotient.mk (Rel.setoid α) (a, c) → b = c
[PROOFSTEP]
intro h
[GOAL]
case mpr
α : Type u_1
β : Type u_2
γ : Type u_3
a b c : α
⊢ b = c → Quotient.mk (Rel.setoid α) (a, b) = Quotient.mk (Rel.setoid α) (a, c)
[PROOFSTEP]
intro h
[GOAL]
case mp
α : Type u_1
β : Type u_2
γ : Type u_3
a b c : α
h : Quotient.mk (Rel.setoid α) (a, b) = Quotient.mk (Rel.setoid α) (a, c)
⊢ b = c
[PROOFSTEP]
rw [Quotient.eq] at h
[GOAL]
case mp
α : Type u_1
β : Type u_2
γ : Type u_3
a b c : α
h : (a, b) ≈ (a, c)
⊢ b = c
[PROOFSTEP]
cases h
[GOAL]
case mp.refl
α : Type u_1
β : Type u_2
γ : Type u_3
a b : α
⊢ b = b
[PROOFSTEP]
rfl
[GOAL]
case mp.swap
α : Type u_1
β : Type u_2
γ : Type u_3
a : α
⊢ a = a
[PROOFSTEP]
rfl
[GOAL]
case mpr
α : Type u_1
β : Type u_2
γ : Type u_3
a b c : α
h : b = c
⊢ Quotient.mk (Rel.setoid α) (a, b) = Quotient.mk (Rel.setoid α) (a, c)
[PROOFSTEP]
rw [h]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a b c : α
⊢ Quotient.mk (Rel.setoid α) (b, a) = Quotient.mk (Rel.setoid α) (c, a) ↔ b = c
[PROOFSTEP]
constructor
[GOAL]
case mp
α : Type u_1
β : Type u_2
γ : Type u_3
a b c : α
⊢ Quotient.mk (Rel.setoid α) (b, a) = Quotient.mk (Rel.setoid α) (c, a) → b = c
[PROOFSTEP]
intro h
[GOAL]
case mpr
α : Type u_1
β : Type u_2
γ : Type u_3
a b c : α
⊢ b = c → Quotient.mk (Rel.setoid α) (b, a) = Quotient.mk (Rel.setoid α) (c, a)
[PROOFSTEP]
intro h
[GOAL]
case mp
α : Type u_1
β : Type u_2
γ : Type u_3
a b c : α
h : Quotient.mk (Rel.setoid α) (b, a) = Quotient.mk (Rel.setoid α) (c, a)
⊢ b = c
[PROOFSTEP]
rw [Quotient.eq] at h
[GOAL]
case mp
α : Type u_1
β : Type u_2
γ : Type u_3
a b c : α
h : (b, a) ≈ (c, a)
⊢ b = c
[PROOFSTEP]
cases h
[GOAL]
case mp.refl
α : Type u_1
β : Type u_2
γ : Type u_3
a b : α
⊢ b = b
[PROOFSTEP]
rfl
[GOAL]
case mp.swap
α : Type u_1
β : Type u_2
γ : Type u_3
a : α
⊢ a = a
[PROOFSTEP]
rfl
[GOAL]
case mpr
α : Type u_1
β : Type u_2
γ : Type u_3
a b c : α
h : b = c
⊢ Quotient.mk (Rel.setoid α) (b, a) = Quotient.mk (Rel.setoid α) (c, a)
[PROOFSTEP]
rw [h]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
x y z w : α
⊢ Quotient.mk (Rel.setoid α) (x, y) = Quotient.mk (Rel.setoid α) (z, w) ↔ x = z ∧ y = w ∨ x = w ∧ y = z
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
p q : α × α
⊢ Quotient.mk (Rel.setoid α) p = Quotient.mk (Rel.setoid α) q ↔ p = q ∨ p = Prod.swap q
[PROOFSTEP]
cases p
[GOAL]
case mk
α : Type u_1
β : Type u_2
γ : Type u_3
q : α × α
fst✝ snd✝ : α
⊢ Quotient.mk (Rel.setoid α) (fst✝, snd✝) = Quotient.mk (Rel.setoid α) q ↔ (fst✝, snd✝) = q ∨ (fst✝, snd✝) = Prod.swap q
[PROOFSTEP]
cases q
[GOAL]
case mk.mk
α : Type u_1
β : Type u_2
γ : Type u_3
fst✝¹ snd✝¹ fst✝ snd✝ : α
⊢ Quotient.mk (Rel.setoid α) (fst✝¹, snd✝¹) = Quotient.mk (Rel.setoid α) (fst✝, snd✝) ↔
(fst✝¹, snd✝¹) = (fst✝, snd✝) ∨ (fst✝¹, snd✝¹) = Prod.swap (fst✝, snd✝)
[PROOFSTEP]
simp only [eq_iff, Prod.mk.inj_iff, Prod.swap_prod_mk]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f : { f // ∀ (a₁ a₂ : α), f a₁ a₂ = f a₂ a₁ }
⊢ ∀ (a b : α × α), a ≈ b → uncurry (↑f) a = uncurry (↑f) b
[PROOFSTEP]
rintro _ _ ⟨⟩
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
f : { f // ∀ (a₁ a₂ : α), f a₁ a₂ = f a₂ a₁ }
x✝ y✝ : α
⊢ uncurry ↑f (x✝, y✝) = uncurry ↑f (x✝, y✝)
case swap
α : Type u_1
β : Type u_2
γ : Type u_3
f : { f // ∀ (a₁ a₂ : α), f a₁ a₂ = f a₂ a₁ }
x✝ y✝ : α
⊢ uncurry ↑f (x✝, y✝) = uncurry ↑f (y✝, x✝)
[PROOFSTEP]
exacts [rfl, f.prop _ _]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f : { f // ∀ (a₁ a₂ : α) (b₁ b₂ : β), f a₁ a₂ b₁ b₂ = f a₂ a₁ b₁ b₂ ∧ f a₁ a₂ b₁ b₂ = f a₁ a₂ b₂ b₁ }
⊢ ∀ (a₁ : α × α) (b₁ : β × β) (a₂ : α × α) (b₂ : β × β),
a₁ ≈ a₂ → b₁ ≈ b₂ → (fun a b => ↑f a.fst a.snd b.fst b.snd) a₁ b₁ = (fun a b => ↑f a.fst a.snd b.fst b.snd) a₂ b₂
[PROOFSTEP]
rintro _ _ _ _ ⟨⟩ ⟨⟩
[GOAL]
case refl.refl
α : Type u_1
β : Type u_2
γ : Type u_3
f : { f // ∀ (a₁ a₂ : α) (b₁ b₂ : β), f a₁ a₂ b₁ b₂ = f a₂ a₁ b₁ b₂ ∧ f a₁ a₂ b₁ b₂ = f a₁ a₂ b₂ b₁ }
x✝¹ y✝¹ : α
x✝ y✝ : β
⊢ (fun a b => ↑f a.fst a.snd b.fst b.snd) (x✝¹, y✝¹) (x✝, y✝) =
(fun a b => ↑f a.fst a.snd b.fst b.snd) (x✝¹, y✝¹) (x✝, y✝)
case refl.swap
α : Type u_1
β : Type u_2
γ : Type u_3
f : { f // ∀ (a₁ a₂ : α) (b₁ b₂ : β), f a₁ a₂ b₁ b₂ = f a₂ a₁ b₁ b₂ ∧ f a₁ a₂ b₁ b₂ = f a₁ a₂ b₂ b₁ }
x✝¹ y✝¹ : α
x✝ y✝ : β
⊢ (fun a b => ↑f a.fst a.snd b.fst b.snd) (x✝¹, y✝¹) (x✝, y✝) =
(fun a b => ↑f a.fst a.snd b.fst b.snd) (x✝¹, y✝¹) (y✝, x✝)
case swap.refl
α : Type u_1
β : Type u_2
γ : Type u_3
f : { f // ∀ (a₁ a₂ : α) (b₁ b₂ : β), f a₁ a₂ b₁ b₂ = f a₂ a₁ b₁ b₂ ∧ f a₁ a₂ b₁ b₂ = f a₁ a₂ b₂ b₁ }
x✝¹ y✝¹ : α
x✝ y✝ : β
⊢ (fun a b => ↑f a.fst a.snd b.fst b.snd) (x✝¹, y✝¹) (x✝, y✝) =
(fun a b => ↑f a.fst a.snd b.fst b.snd) (y✝¹, x✝¹) (x✝, y✝)
case swap.swap
α : Type u_1
β : Type u_2
γ : Type u_3
f : { f // ∀ (a₁ a₂ : α) (b₁ b₂ : β), f a₁ a₂ b₁ b₂ = f a₂ a₁ b₁ b₂ ∧ f a₁ a₂ b₁ b₂ = f a₁ a₂ b₂ b₁ }
x✝¹ y✝¹ : α
x✝ y✝ : β
⊢ (fun a b => ↑f a.fst a.snd b.fst b.snd) (x✝¹, y✝¹) (x✝, y✝) =
(fun a b => ↑f a.fst a.snd b.fst b.snd) (y✝¹, x✝¹) (y✝, x✝)
[PROOFSTEP]
exacts [rfl, (f.2 _ _ _ _).2, (f.2 _ _ _ _).1, (f.2 _ _ _ _).1.trans (f.2 _ _ _ _).2]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
F : Sym2 α → Sym2 β → γ
a₁ a₂ : α
b₁ b₂ : β
⊢ (fun a₁ a₂ b₁ b₂ => F (Quotient.mk (Rel.setoid α) (a₁, a₂)) (Quotient.mk (Rel.setoid β) (b₁, b₂))) a₁ a₂ b₁ b₂ =
(fun a₁ a₂ b₁ b₂ => F (Quotient.mk (Rel.setoid α) (a₁, a₂)) (Quotient.mk (Rel.setoid β) (b₁, b₂))) a₂ a₁ b₁ b₂ ∧
(fun a₁ a₂ b₁ b₂ => F (Quotient.mk (Rel.setoid α) (a₁, a₂)) (Quotient.mk (Rel.setoid β) (b₁, b₂))) a₁ a₂ b₁ b₂ =
(fun a₁ a₂ b₁ b₂ => F (Quotient.mk (Rel.setoid α) (a₁, a₂)) (Quotient.mk (Rel.setoid β) (b₁, b₂))) a₁ a₂ b₂ b₁
[PROOFSTEP]
constructor
[GOAL]
case left
α : Type u_1
β : Type u_2
γ : Type u_3
F : Sym2 α → Sym2 β → γ
a₁ a₂ : α
b₁ b₂ : β
⊢ (fun a₁ a₂ b₁ b₂ => F (Quotient.mk (Rel.setoid α) (a₁, a₂)) (Quotient.mk (Rel.setoid β) (b₁, b₂))) a₁ a₂ b₁ b₂ =
(fun a₁ a₂ b₁ b₂ => F (Quotient.mk (Rel.setoid α) (a₁, a₂)) (Quotient.mk (Rel.setoid β) (b₁, b₂))) a₂ a₁ b₁ b₂
case right
α : Type u_1
β : Type u_2
γ : Type u_3
F : Sym2 α → Sym2 β → γ
a₁ a₂ : α
b₁ b₂ : β
⊢ (fun a₁ a₂ b₁ b₂ => F (Quotient.mk (Rel.setoid α) (a₁, a₂)) (Quotient.mk (Rel.setoid β) (b₁, b₂))) a₁ a₂ b₁ b₂ =
(fun a₁ a₂ b₁ b₂ => F (Quotient.mk (Rel.setoid α) (a₁, a₂)) (Quotient.mk (Rel.setoid β) (b₁, b₂))) a₁ a₂ b₂ b₁
[PROOFSTEP]
exacts [congr_arg₂ F eq_swap rfl, congr_arg₂ F rfl eq_swap]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
⊢ ((fun x x_1 => x ≈ x_1) ⇒ fun x x_1 => x ≈ x_1) (Prod.map f f) (Prod.map f f)
[PROOFSTEP]
intro _ _ h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
a✝ b✝ : α × α
h : a✝ ≈ b✝
⊢ Prod.map f f a✝ ≈ Prod.map f f b✝
[PROOFSTEP]
cases h
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
x✝ y✝ : α
⊢ Prod.map f f (x✝, y✝) ≈ Prod.map f f (x✝, y✝)
[PROOFSTEP]
constructor
[GOAL]
case swap
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
x✝ y✝ : α
⊢ Prod.map f f (x✝, y✝) ≈ Prod.map f f (y✝, x✝)
[PROOFSTEP]
apply Rel.swap
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
⊢ map id = id
[PROOFSTEP]
ext ⟨⟨x, y⟩⟩
[GOAL]
case h.mk.mk
α : Type u_1
β : Type u_2
γ : Type u_3
x✝ : Sym2 α
x y : α
⊢ map id (Quot.mk Setoid.r (x, y)) = id (Quot.mk Setoid.r (x, y))
[PROOFSTEP]
rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
g : β → γ
f : α → β
⊢ map (g ∘ f) = map g ∘ map f
[PROOFSTEP]
ext ⟨⟨x, y⟩⟩
[GOAL]
case h.mk.mk
α : Type u_1
β : Type u_2
γ : Type u_3
g : β → γ
f : α → β
x✝ : Sym2 α
x y : α
⊢ map (g ∘ f) (Quot.mk Setoid.r (x, y)) = (map g ∘ map f) (Quot.mk Setoid.r (x, y))
[PROOFSTEP]
rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
g : β → γ
f : α → β
x : Sym2 α
⊢ map g (map f x) = map (g ∘ f) x
[PROOFSTEP]
revert x
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
g : β → γ
f : α → β
⊢ ∀ (x : Sym2 α), map g (map f x) = map (g ∘ f) x
[PROOFSTEP]
apply Sym2.ind
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
g : β → γ
f : α → β
⊢ ∀ (x y : α), map g (map f (Quotient.mk (Rel.setoid α) (x, y))) = map (g ∘ f) (Quotient.mk (Rel.setoid α) (x, y))
[PROOFSTEP]
aesop
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
hinj : Injective f
⊢ Injective (map f)
[PROOFSTEP]
intro z z'
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
hinj : Injective f
z z' : Sym2 α
⊢ map f z = map f z' → z = z'
[PROOFSTEP]
refine' Quotient.ind₂ (fun z z' => _) z z'
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
hinj : Injective f
z✝ z'✝ : Sym2 α
z z' : α × α
⊢ map f (Quotient.mk (Rel.setoid α) z) = map f (Quotient.mk (Rel.setoid α) z') →
Quotient.mk (Rel.setoid α) z = Quotient.mk (Rel.setoid α) z'
[PROOFSTEP]
cases' z with x y
[GOAL]
case mk
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
hinj : Injective f
z z'✝ : Sym2 α
z' : α × α
x y : α
⊢ map f (Quotient.mk (Rel.setoid α) (x, y)) = map f (Quotient.mk (Rel.setoid α) z') →
Quotient.mk (Rel.setoid α) (x, y) = Quotient.mk (Rel.setoid α) z'
[PROOFSTEP]
cases' z' with x' y'
[GOAL]
case mk.mk
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
hinj : Injective f
z z' : Sym2 α
x y x' y' : α
⊢ map f (Quotient.mk (Rel.setoid α) (x, y)) = map f (Quotient.mk (Rel.setoid α) (x', y')) →
Quotient.mk (Rel.setoid α) (x, y) = Quotient.mk (Rel.setoid α) (x', y')
[PROOFSTEP]
repeat' rw [map_pair_eq, eq_iff]
[GOAL]
case mk.mk
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
hinj : Injective f
z z' : Sym2 α
x y x' y' : α
⊢ map f (Quotient.mk (Rel.setoid α) (x, y)) = map f (Quotient.mk (Rel.setoid α) (x', y')) →
Quotient.mk (Rel.setoid α) (x, y) = Quotient.mk (Rel.setoid α) (x', y')
[PROOFSTEP]
rw [map_pair_eq, eq_iff]
[GOAL]
case mk.mk
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
hinj : Injective f
z z' : Sym2 α
x y x' y' : α
⊢ Quotient.mk (Rel.setoid β) (f x, f y) = map f (Quotient.mk (Rel.setoid α) (x', y')) →
x = x' ∧ y = y' ∨ x = y' ∧ y = x'
[PROOFSTEP]
rw [map_pair_eq, eq_iff]
[GOAL]
case mk.mk
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
hinj : Injective f
z z' : Sym2 α
x y x' y' : α
⊢ f x = f x' ∧ f y = f y' ∨ f x = f y' ∧ f y = f x' → x = x' ∧ y = y' ∨ x = y' ∧ y = x'
[PROOFSTEP]
rw [map_pair_eq, eq_iff]
[GOAL]
case mk.mk
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
hinj : Injective f
z z' : Sym2 α
x y x' y' : α
⊢ f x = f x' ∧ f y = f y' ∨ f x = f y' ∧ f y = f x' → x = x' ∧ y = y' ∨ x = y' ∧ y = x'
[PROOFSTEP]
rintro (h | h)
[GOAL]
case mk.mk.inl
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
hinj : Injective f
z z' : Sym2 α
x y x' y' : α
h : f x = f x' ∧ f y = f y'
⊢ x = x' ∧ y = y' ∨ x = y' ∧ y = x'
[PROOFSTEP]
simp [hinj h.1, hinj h.2]
[GOAL]
case mk.mk.inr
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
hinj : Injective f
z z' : Sym2 α
x y x' y' : α
h : f x = f y' ∧ f y = f x'
⊢ x = x' ∧ y = y' ∨ x = y' ∧ y = x'
[PROOFSTEP]
simp [hinj h.1, hinj h.2]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a b c : α
⊢ Sym2.Mem a (Quotient.mk (Rel.setoid α) (b, c)) → a = b ∨ a = c
[PROOFSTEP]
rintro ⟨_, h⟩
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
a b c w✝ : α
h : Quotient.mk (Rel.setoid α) (b, c) = Quotient.mk (Rel.setoid α) (a, w✝)
⊢ a = b ∨ a = c
[PROOFSTEP]
rw [eq_iff] at h
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
a b c w✝ : α
h : b = a ∧ c = w✝ ∨ b = w✝ ∧ c = a
⊢ a = b ∨ a = c
[PROOFSTEP]
aesop
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a b c : α
⊢ a = b ∨ a = c → Sym2.Mem a (Quotient.mk (Rel.setoid α) (b, c))
[PROOFSTEP]
rintro (rfl | rfl)
[GOAL]
case inl
α : Type u_1
β : Type u_2
γ : Type u_3
a c : α
⊢ Sym2.Mem a (Quotient.mk (Rel.setoid α) (a, c))
[PROOFSTEP]
exact ⟨_, rfl⟩
[GOAL]
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
a b : α
⊢ Sym2.Mem a (Quotient.mk (Rel.setoid α) (b, a))
[PROOFSTEP]
rw [eq_swap]
[GOAL]
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
a b : α
⊢ Sym2.Mem a (Quotient.mk (Rel.setoid α) (a, b))
[PROOFSTEP]
exact ⟨_, rfl⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
z z' : Sym2 α
h : (fun z => {x | Sym2.Mem x z}) z = (fun z => {x | Sym2.Mem x z}) z'
⊢ z = z'
[PROOFSTEP]
simp only [Set.ext_iff, Set.mem_setOf_eq] at h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
z z' : Sym2 α
h : ∀ (x : α), Sym2.Mem x z ↔ Sym2.Mem x z'
⊢ z = z'
[PROOFSTEP]
induction' z using Sym2.ind with x y
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
z z' : Sym2 α
h✝ : ∀ (x : α), Sym2.Mem x z ↔ Sym2.Mem x z'
x y : α
h : ∀ (x_1 : α), Sym2.Mem x_1 (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem x_1 z'
⊢ Quotient.mk (Rel.setoid α) (x, y) = z'
[PROOFSTEP]
induction' z' using Sym2.ind with x' y'
[GOAL]
case h.h
α : Type u_1
β : Type u_2
γ : Type u_3
z z' : Sym2 α
h✝² : ∀ (x : α), Sym2.Mem x z ↔ Sym2.Mem x z'
x y : α
h✝¹ : ∀ (x_1 : α), Sym2.Mem x_1 (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem x_1 z'
x' y' : α
h✝ : ∀ (x : α), Sym2.Mem x z ↔ Sym2.Mem x (Quotient.mk (Rel.setoid α) (x', y'))
h : ∀ (x_1 : α), Sym2.Mem x_1 (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem x_1 (Quotient.mk (Rel.setoid α) (x', y'))
⊢ Quotient.mk (Rel.setoid α) (x, y) = Quotient.mk (Rel.setoid α) (x', y')
[PROOFSTEP]
have hx := h x
[GOAL]
case h.h
α : Type u_1
β : Type u_2
γ : Type u_3
z z' : Sym2 α
h✝² : ∀ (x : α), Sym2.Mem x z ↔ Sym2.Mem x z'
x y : α
h✝¹ : ∀ (x_1 : α), Sym2.Mem x_1 (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem x_1 z'
x' y' : α
h✝ : ∀ (x : α), Sym2.Mem x z ↔ Sym2.Mem x (Quotient.mk (Rel.setoid α) (x', y'))
h : ∀ (x_1 : α), Sym2.Mem x_1 (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem x_1 (Quotient.mk (Rel.setoid α) (x', y'))
hx : Sym2.Mem x (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem x (Quotient.mk (Rel.setoid α) (x', y'))
⊢ Quotient.mk (Rel.setoid α) (x, y) = Quotient.mk (Rel.setoid α) (x', y')
[PROOFSTEP]
have hy := h y
[GOAL]
case h.h
α : Type u_1
β : Type u_2
γ : Type u_3
z z' : Sym2 α
h✝² : ∀ (x : α), Sym2.Mem x z ↔ Sym2.Mem x z'
x y : α
h✝¹ : ∀ (x_1 : α), Sym2.Mem x_1 (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem x_1 z'
x' y' : α
h✝ : ∀ (x : α), Sym2.Mem x z ↔ Sym2.Mem x (Quotient.mk (Rel.setoid α) (x', y'))
h : ∀ (x_1 : α), Sym2.Mem x_1 (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem x_1 (Quotient.mk (Rel.setoid α) (x', y'))
hx : Sym2.Mem x (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem x (Quotient.mk (Rel.setoid α) (x', y'))
hy : Sym2.Mem y (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem y (Quotient.mk (Rel.setoid α) (x', y'))
⊢ Quotient.mk (Rel.setoid α) (x, y) = Quotient.mk (Rel.setoid α) (x', y')
[PROOFSTEP]
have hx' := h x'
[GOAL]
case h.h
α : Type u_1
β : Type u_2
γ : Type u_3
z z' : Sym2 α
h✝² : ∀ (x : α), Sym2.Mem x z ↔ Sym2.Mem x z'
x y : α
h✝¹ : ∀ (x_1 : α), Sym2.Mem x_1 (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem x_1 z'
x' y' : α
h✝ : ∀ (x : α), Sym2.Mem x z ↔ Sym2.Mem x (Quotient.mk (Rel.setoid α) (x', y'))
h : ∀ (x_1 : α), Sym2.Mem x_1 (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem x_1 (Quotient.mk (Rel.setoid α) (x', y'))
hx : Sym2.Mem x (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem x (Quotient.mk (Rel.setoid α) (x', y'))
hy : Sym2.Mem y (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem y (Quotient.mk (Rel.setoid α) (x', y'))
hx' : Sym2.Mem x' (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem x' (Quotient.mk (Rel.setoid α) (x', y'))
⊢ Quotient.mk (Rel.setoid α) (x, y) = Quotient.mk (Rel.setoid α) (x', y')
[PROOFSTEP]
have hy' := h y'
[GOAL]
case h.h
α : Type u_1
β : Type u_2
γ : Type u_3
z z' : Sym2 α
h✝² : ∀ (x : α), Sym2.Mem x z ↔ Sym2.Mem x z'
x y : α
h✝¹ : ∀ (x_1 : α), Sym2.Mem x_1 (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem x_1 z'
x' y' : α
h✝ : ∀ (x : α), Sym2.Mem x z ↔ Sym2.Mem x (Quotient.mk (Rel.setoid α) (x', y'))
h : ∀ (x_1 : α), Sym2.Mem x_1 (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem x_1 (Quotient.mk (Rel.setoid α) (x', y'))
hx : Sym2.Mem x (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem x (Quotient.mk (Rel.setoid α) (x', y'))
hy : Sym2.Mem y (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem y (Quotient.mk (Rel.setoid α) (x', y'))
hx' : Sym2.Mem x' (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem x' (Quotient.mk (Rel.setoid α) (x', y'))
hy' : Sym2.Mem y' (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem y' (Quotient.mk (Rel.setoid α) (x', y'))
⊢ Quotient.mk (Rel.setoid α) (x, y) = Quotient.mk (Rel.setoid α) (x', y')
[PROOFSTEP]
simp only [mem_iff', eq_self_iff_true, or_true_iff, iff_true_iff, true_or_iff, true_iff_iff] at hx hy hx' hy'
[GOAL]
case h.h
α : Type u_1
β : Type u_2
γ : Type u_3
z z' : Sym2 α
h✝² : ∀ (x : α), Sym2.Mem x z ↔ Sym2.Mem x z'
x y : α
h✝¹ : ∀ (x_1 : α), Sym2.Mem x_1 (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem x_1 z'
x' y' : α
h✝ : ∀ (x : α), Sym2.Mem x z ↔ Sym2.Mem x (Quotient.mk (Rel.setoid α) (x', y'))
h : ∀ (x_1 : α), Sym2.Mem x_1 (Quotient.mk (Rel.setoid α) (x, y)) ↔ Sym2.Mem x_1 (Quotient.mk (Rel.setoid α) (x', y'))
hx : x = x' ∨ x = y'
hy : y = x' ∨ y = y'
hx' : x' = x ∨ x' = y
hy' : y' = x ∨ y' = y
⊢ Quotient.mk (Rel.setoid α) (x, y) = Quotient.mk (Rel.setoid α) (x', y')
[PROOFSTEP]
aesop
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
e : Sym2 α
⊢ e = Quotient.mk (Rel.setoid α) ((Quotient.out e).fst, (Quotient.out e).snd)
[PROOFSTEP]
rw [Prod.mk.eta, e.out_eq]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
e : Sym2 α
⊢ e = Quotient.mk (Rel.setoid α) ((Quotient.out e).snd, (Quotient.out e).fst)
[PROOFSTEP]
rw [eq_swap, Prod.mk.eta, e.out_eq]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
p : α → Prop
a b : α
⊢ (∀ (c : α), c ∈ Quotient.mk (Rel.setoid α) (a, b) → p c) ↔ p a ∧ p b
[PROOFSTEP]
refine' ⟨fun h => ⟨h _ <| mem_mk''_left _ _, h _ <| mem_mk''_right _ _⟩, fun h c hc => _⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
p : α → Prop
a b : α
h : p a ∧ p b
c : α
hc : c ∈ Quotient.mk (Rel.setoid α) (a, b)
⊢ p c
[PROOFSTEP]
obtain rfl | rfl := Sym2.mem_iff.1 hc
[GOAL]
case inl
α : Type u_1
β : Type u_2
γ : Type u_3
p : α → Prop
b c : α
h : p c ∧ p b
hc : c ∈ Quotient.mk (Rel.setoid α) (c, b)
⊢ p c
[PROOFSTEP]
exact h.1
[GOAL]
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
p : α → Prop
a c : α
h : p a ∧ p c
hc : c ∈ Quotient.mk (Rel.setoid α) (a, c)
⊢ p c
[PROOFSTEP]
exact h.2
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a : α
z : Sym2 α
h : a ∈ z
⊢ Quotient.mk (Rel.setoid α) (a, Mem.other h) = z
[PROOFSTEP]
erw [← Classical.choose_spec h]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a : α
z : Sym2 α
h : a ∈ z
⊢ Mem.other h ∈ z
[PROOFSTEP]
convert mem_mk''_right a <| Mem.other h
[GOAL]
case h.e'_5
α : Type u_1
β : Type u_2
γ : Type u_3
a : α
z : Sym2 α
h : a ∈ z
⊢ z = Quotient.mk (Rel.setoid α) (a, Mem.other h)
[PROOFSTEP]
rw [other_spec h]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
x y : α
z : Sym2 α
hne : x ≠ y
⊢ x ∈ z ∧ y ∈ z ↔ z = Quotient.mk (Rel.setoid α) (x, y)
[PROOFSTEP]
constructor
[GOAL]
case mp
α : Type u_1
β : Type u_2
γ : Type u_3
x y : α
z : Sym2 α
hne : x ≠ y
⊢ x ∈ z ∧ y ∈ z → z = Quotient.mk (Rel.setoid α) (x, y)
[PROOFSTEP]
induction' z using Sym2.ind with x' y'
[GOAL]
case mp.h
α : Type u_1
β : Type u_2
γ : Type u_3
x y : α
hne : x ≠ y
x' y' : α
⊢ x ∈ Quotient.mk (Rel.setoid α) (x', y') ∧ y ∈ Quotient.mk (Rel.setoid α) (x', y') →
Quotient.mk (Rel.setoid α) (x', y') = Quotient.mk (Rel.setoid α) (x, y)
[PROOFSTEP]
rw [mem_iff, mem_iff]
[GOAL]
case mp.h
α : Type u_1
β : Type u_2
γ : Type u_3
x y : α
hne : x ≠ y
x' y' : α
⊢ (x = x' ∨ x = y') ∧ (y = x' ∨ y = y') → Quotient.mk (Rel.setoid α) (x', y') = Quotient.mk (Rel.setoid α) (x, y)
[PROOFSTEP]
aesop
[GOAL]
case mpr
α : Type u_1
β : Type u_2
γ : Type u_3
x y : α
z : Sym2 α
hne : x ≠ y
⊢ z = Quotient.mk (Rel.setoid α) (x, y) → x ∈ z ∧ y ∈ z
[PROOFSTEP]
rintro rfl
[GOAL]
case mpr
α : Type u_1
β : Type u_2
γ : Type u_3
x y : α
hne : x ≠ y
⊢ x ∈ Quotient.mk (Rel.setoid α) (x, y) ∧ y ∈ Quotient.mk (Rel.setoid α) (x, y)
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
b : β
z : Sym2 α
⊢ b ∈ map f z ↔ ∃ a, a ∈ z ∧ f a = b
[PROOFSTEP]
induction' z using Sym2.ind with x y
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
b : β
x y : α
⊢ b ∈ map f (Quotient.mk (Rel.setoid α) (x, y)) ↔ ∃ a, a ∈ Quotient.mk (Rel.setoid α) (x, y) ∧ f a = b
[PROOFSTEP]
simp only [map, Quotient.map_mk, Prod.map_mk, mem_iff]
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
b : β
x y : α
⊢ b = f x ∨ b = f y ↔ ∃ a, (a = x ∨ a = y) ∧ f a = b
[PROOFSTEP]
constructor
[GOAL]
case h.mp
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
b : β
x y : α
⊢ b = f x ∨ b = f y → ∃ a, (a = x ∨ a = y) ∧ f a = b
[PROOFSTEP]
rintro (rfl | rfl)
[GOAL]
case h.mp.inl
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
x y : α
⊢ ∃ a, (a = x ∨ a = y) ∧ f a = f x
[PROOFSTEP]
exact ⟨x, by simp⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
x y : α
⊢ (x = x ∨ x = y) ∧ f x = f x
[PROOFSTEP]
simp
[GOAL]
case h.mp.inr
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
x y : α
⊢ ∃ a, (a = x ∨ a = y) ∧ f a = f y
[PROOFSTEP]
exact ⟨y, by simp⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
x y : α
⊢ (y = x ∨ y = y) ∧ f y = f y
[PROOFSTEP]
simp
[GOAL]
case h.mpr
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
b : β
x y : α
⊢ (∃ a, (a = x ∨ a = y) ∧ f a = b) → b = f x ∨ b = f y
[PROOFSTEP]
rintro ⟨w, rfl | rfl, rfl⟩
[GOAL]
case h.mpr.intro.intro.inl
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
y w : α
⊢ f w = f w ∨ f w = f y
[PROOFSTEP]
simp
[GOAL]
case h.mpr.intro.intro.inr
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β
x w : α
⊢ f w = f x ∨ f w = f w
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f g : α → β
s : Sym2 α
h : ∀ (x : α), x ∈ s → f x = g x
⊢ map f s = map g s
[PROOFSTEP]
ext y
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
f g : α → β
s : Sym2 α
h : ∀ (x : α), x ∈ s → f x = g x
y : β
⊢ y ∈ map f s ↔ y ∈ map g s
[PROOFSTEP]
simp only [mem_map]
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
f g : α → β
s : Sym2 α
h : ∀ (x : α), x ∈ s → f x = g x
y : β
⊢ (∃ a, a ∈ s ∧ f a = y) ↔ ∃ a, a ∈ s ∧ g a = y
[PROOFSTEP]
constructor
[GOAL]
case h.mp
α : Type u_1
β : Type u_2
γ : Type u_3
f g : α → β
s : Sym2 α
h : ∀ (x : α), x ∈ s → f x = g x
y : β
⊢ (∃ a, a ∈ s ∧ f a = y) → ∃ a, a ∈ s ∧ g a = y
[PROOFSTEP]
rintro ⟨w, hw, rfl⟩
[GOAL]
case h.mp.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
f g : α → β
s : Sym2 α
h : ∀ (x : α), x ∈ s → f x = g x
w : α
hw : w ∈ s
⊢ ∃ a, a ∈ s ∧ g a = f w
[PROOFSTEP]
exact ⟨w, hw, by simp [hw, h]⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f g : α → β
s : Sym2 α
h : ∀ (x : α), x ∈ s → f x = g x
w : α
hw : w ∈ s
⊢ g w = f w
[PROOFSTEP]
simp [hw, h]
[GOAL]
case h.mpr
α : Type u_1
β : Type u_2
γ : Type u_3
f g : α → β
s : Sym2 α
h : ∀ (x : α), x ∈ s → f x = g x
y : β
⊢ (∃ a, a ∈ s ∧ g a = y) → ∃ a, a ∈ s ∧ f a = y
[PROOFSTEP]
rintro ⟨w, hw, rfl⟩
[GOAL]
case h.mpr.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
f g : α → β
s : Sym2 α
h : ∀ (x : α), x ∈ s → f x = g x
w : α
hw : w ∈ s
⊢ ∃ a, a ∈ s ∧ f a = g w
[PROOFSTEP]
exact ⟨w, hw, by simp [hw, h]⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f g : α → β
s : Sym2 α
h : ∀ (x : α), x ∈ s → f x = g x
w : α
hw : w ∈ s
⊢ f w = g w
[PROOFSTEP]
simp [hw, h]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
x y : α
h : diag x = diag y
⊢ x = y
[PROOFSTEP]
cases Quotient.exact h
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
x : α
h : diag x = diag x
⊢ x = x
[PROOFSTEP]
rfl
[GOAL]
case swap
α : Type u_1
β : Type u_2
γ : Type u_3
x : α
h : diag x = diag x
⊢ x = x
[PROOFSTEP]
rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
z : Sym2 α
⊢ IsDiag z → z ∈ Set.range diag
[PROOFSTEP]
induction' z using Sym2.ind with x y
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
x y : α
⊢ IsDiag (Quotient.mk (Rel.setoid α) (x, y)) → Quotient.mk (Rel.setoid α) (x, y) ∈ Set.range diag
[PROOFSTEP]
rintro (rfl : x = y)
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
x : α
⊢ Quotient.mk (Rel.setoid α) (x, x) ∈ Set.range diag
[PROOFSTEP]
exact ⟨_, rfl⟩
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
α : Type u
inst✝ : DecidableEq α
⊢ DecidablePred IsDiag
[PROOFSTEP]
refine' fun z => Quotient.recOnSubsingleton z fun a => _
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
α : Type u
inst✝ : DecidableEq α
z : Sym2 α
a : α × α
⊢ Decidable (IsDiag (Quotient.mk (Rel.setoid α) a))
[PROOFSTEP]
erw [isDiag_iff_proj_eq]
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
α : Type u
inst✝ : DecidableEq α
z : Sym2 α
a : α × α
⊢ Decidable (a.fst = a.snd)
[PROOFSTEP]
infer_instance
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a : α
z : Sym2 α
hd : ¬IsDiag z
h : a ∈ z
⊢ Mem.other h ≠ a
[PROOFSTEP]
contrapose! hd
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a : α
z : Sym2 α
h : a ∈ z
hd : Mem.other h = a
⊢ IsDiag z
[PROOFSTEP]
have h' := Sym2.other_spec h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a : α
z : Sym2 α
h : a ∈ z
hd : Mem.other h = a
h' : Quotient.mk (Rel.setoid α) (a, Mem.other h) = z
⊢ IsDiag z
[PROOFSTEP]
rw [hd] at h'
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a : α
z : Sym2 α
h : a ∈ z
hd : Mem.other h = a
h' : Quotient.mk (Rel.setoid α) (a, a) = z
⊢ IsDiag z
[PROOFSTEP]
rw [← h']
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a : α
z : Sym2 α
h : a ∈ z
hd : Mem.other h = a
h' : Quotient.mk (Rel.setoid α) (a, a) = z
⊢ IsDiag (Quotient.mk (Rel.setoid α) (a, a))
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
r : α → α → Prop
⊢ fromRel (_ : ∀ (x y : α), ⊥ x y → ⊥ x y) = ∅
[PROOFSTEP]
apply Set.eq_empty_of_forall_not_mem fun e => _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
r : α → α → Prop
⊢ ∀ (e : Sym2 α), ¬e ∈ fromRel (_ : ∀ (x y : α), ⊥ x y → ⊥ x y)
[PROOFSTEP]
apply Sym2.ind
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
r : α → α → Prop
⊢ ∀ (x y : α), ¬Quotient.mk (Rel.setoid α) (x, y) ∈ fromRel (_ : ∀ (x y : α), ⊥ x y → ⊥ x y)
[PROOFSTEP]
simp [-Set.bot_eq_empty, Prop.bot_eq_false]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
r : α → α → Prop
⊢ fromRel (_ : ∀ (x y : α), ⊤ x y → ⊤ x y) = Set.univ
[PROOFSTEP]
apply Set.eq_univ_of_forall fun e => _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
r : α → α → Prop
⊢ ∀ (e : Sym2 α), e ∈ fromRel (_ : ∀ (x y : α), ⊤ x y → ⊤ x y)
[PROOFSTEP]
apply Sym2.ind
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
r : α → α → Prop
⊢ ∀ (x y : α), Quotient.mk (Rel.setoid α) (x, y) ∈ fromRel (_ : ∀ (x y : α), ⊤ x y → ⊤ x y)
[PROOFSTEP]
simp [-Set.top_eq_univ, Prop.top_eq_true]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
r : α → α → Prop
sym : Symmetric r
⊢ Irreflexive r → ∀ {z : Sym2 α}, z ∈ fromRel sym → ¬IsDiag z
[PROOFSTEP]
intro h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
r : α → α → Prop
sym : Symmetric r
h : Irreflexive r
⊢ ∀ {z : Sym2 α}, z ∈ fromRel sym → ¬IsDiag z
[PROOFSTEP]
apply Sym2.ind
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
r : α → α → Prop
sym : Symmetric r
h : Irreflexive r
⊢ ∀ (x y : α), Quotient.mk (Rel.setoid α) (x, y) ∈ fromRel sym → ¬IsDiag (Quotient.mk (Rel.setoid α) (x, y))
[PROOFSTEP]
aesop
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
r : α → α → Prop
s : Set (Sym2 α)
x y : α
⊢ ToRel s x y → ToRel s y x
[PROOFSTEP]
simp [eq_swap]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a₁ b₁ a₂ b₂ : α
⊢ [a₁, b₁] ~ [a₂, b₂] → a₁ = a₂ ∧ b₁ = b₂ ∨ a₁ = b₂ ∧ b₁ = a₂
[PROOFSTEP]
simp [← Multiset.coe_eq_coe, ← Multiset.cons_coe, Multiset.cons_eq_cons]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a₁ b₁ a₂ b₂ : α
⊢ a₁ = a₂ ∧ b₁ = b₂ ∨ ¬a₁ = a₂ ∧ b₁ = a₂ ∧ b₂ = a₁ → a₁ = a₂ ∧ b₁ = b₂ ∨ a₁ = b₂ ∧ b₁ = a₂
[PROOFSTEP]
aesop
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a₁ b₁ a₂ b₂ : α
h₁ : a₁ = a₂
h₂ : b₁ = b₂
⊢ [a₁, b₁] ~ [a₂, b₂]
[PROOFSTEP]
rw [h₁, h₂]
[GOAL]
[PROOFSTEP]
first
| done
| apply List.Perm.swap'; rfl
[GOAL]
[PROOFSTEP]
done
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a₁ b₁ a₂ b₂ : α
h₁ : a₁ = b₂
h₂ : b₁ = a₂
⊢ [a₁, b₁] ~ [a₂, b₂]
[PROOFSTEP]
rw [h₁, h₂]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a₁ b₁ a₂ b₂ : α
h₁ : a₁ = b₂
h₂ : b₁ = a₂
⊢ [b₂, a₂] ~ [a₂, b₂]
[PROOFSTEP]
first
| done
| apply List.Perm.swap'; rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a₁ b₁ a₂ b₂ : α
h₁ : a₁ = b₂
h₂ : b₁ = a₂
⊢ [b₂, a₂] ~ [a₂, b₂]
[PROOFSTEP]
done
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a₁ b₁ a₂ b₂ : α
h₁ : a₁ = b₂
h₂ : b₁ = a₂
⊢ [b₂, a₂] ~ [a₂, b₂]
[PROOFSTEP]
apply List.Perm.swap'
[GOAL]
case p
α : Type u_1
β : Type u_2
γ : Type u_3
a₁ b₁ a₂ b₂ : α
h₁ : a₁ = b₂
h₂ : b₁ = a₂
⊢ [] ~ []
[PROOFSTEP]
rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
⊢ ((fun x x_1 => x ≈ x_1) ⇒ fun x x_1 => x ≈ x_1)
(fun x => { val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
fun x => { val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) }
[PROOFSTEP]
rintro _ _ ⟨_⟩
[GOAL]
case refl
α : Type u_1
β : Type u_2
γ : Type u_3
x✝ y✝ : α
⊢ (fun x => { val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
(x✝, y✝) ≈
(fun x => { val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
(x✝, y✝)
[PROOFSTEP]
constructor
[GOAL]
case refl.a
α : Type u_1
β : Type u_2
γ : Type u_3
x✝ y✝ : α
⊢ [(x✝, y✝).snd] ~ [(x✝, y✝).snd]
[PROOFSTEP]
apply List.Perm.refl
[GOAL]
case swap
α : Type u_1
β : Type u_2
γ : Type u_3
x✝ y✝ : α
⊢ (fun x => { val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
(x✝, y✝) ≈
(fun x => { val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
(y✝, x✝)
[PROOFSTEP]
apply List.Perm.swap'
[GOAL]
case swap.p
α : Type u_1
β : Type u_2
γ : Type u_3
x✝ y✝ : α
⊢ [] ~ []
[PROOFSTEP]
rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
⊢ ((fun x x_1 => x ≈ x_1) ⇒ fun x x_1 => x ≈ x_1) Sym2.fromVector Sym2.fromVector
[PROOFSTEP]
rintro ⟨x, hx⟩ ⟨y, hy⟩ h
[GOAL]
case mk.mk
α : Type u_1
β : Type u_2
γ : Type u_3
x : List α
hx : List.length x = 2
y : List α
hy : List.length y = 2
h : { val := x, property := hx } ≈ { val := y, property := hy }
⊢ Sym2.fromVector { val := x, property := hx } ≈ Sym2.fromVector { val := y, property := hy }
[PROOFSTEP]
cases' x with _ x
[GOAL]
case mk.mk.nil
α : Type u_1
β : Type u_2
γ : Type u_3
y : List α
hy : List.length y = 2
hx : List.length [] = 2
h : { val := [], property := hx } ≈ { val := y, property := hy }
⊢ Sym2.fromVector { val := [], property := hx } ≈ Sym2.fromVector { val := y, property := hy }
[PROOFSTEP]
simp at hx
[GOAL]
case mk.mk.cons
α : Type u_1
β : Type u_2
γ : Type u_3
y : List α
hy : List.length y = 2
head✝ : α
x : List α
hx : List.length (head✝ :: x) = 2
h : { val := head✝ :: x, property := hx } ≈ { val := y, property := hy }
⊢ Sym2.fromVector { val := head✝ :: x, property := hx } ≈ Sym2.fromVector { val := y, property := hy }
[PROOFSTEP]
cases' x with _ x
[GOAL]
case mk.mk.cons.nil
α : Type u_1
β : Type u_2
γ : Type u_3
y : List α
hy : List.length y = 2
head✝ : α
hx : List.length [head✝] = 2
h : { val := [head✝], property := hx } ≈ { val := y, property := hy }
⊢ Sym2.fromVector { val := [head✝], property := hx } ≈ Sym2.fromVector { val := y, property := hy }
[PROOFSTEP]
simp at hx
[GOAL]
case mk.mk.cons.cons
α : Type u_1
β : Type u_2
γ : Type u_3
y : List α
hy : List.length y = 2
head✝¹ head✝ : α
x : List α
hx : List.length (head✝¹ :: head✝ :: x) = 2
h : { val := head✝¹ :: head✝ :: x, property := hx } ≈ { val := y, property := hy }
⊢ Sym2.fromVector { val := head✝¹ :: head✝ :: x, property := hx } ≈ Sym2.fromVector { val := y, property := hy }
[PROOFSTEP]
cases' x with _ x
[GOAL]
case mk.mk.cons.cons.nil
α : Type u_1
β : Type u_2
γ : Type u_3
y : List α
hy : List.length y = 2
head✝¹ head✝ : α
hx : List.length [head✝¹, head✝] = 2
h : { val := [head✝¹, head✝], property := hx } ≈ { val := y, property := hy }
⊢ Sym2.fromVector { val := [head✝¹, head✝], property := hx } ≈ Sym2.fromVector { val := y, property := hy }
case mk.mk.cons.cons.cons
α : Type u_1
β : Type u_2
γ : Type u_3
y : List α
hy : List.length y = 2
head✝² head✝¹ head✝ : α
x : List α
hx : List.length (head✝² :: head✝¹ :: head✝ :: x) = 2
h : { val := head✝² :: head✝¹ :: head✝ :: x, property := hx } ≈ { val := y, property := hy }
⊢ Sym2.fromVector { val := head✝² :: head✝¹ :: head✝ :: x, property := hx } ≈
Sym2.fromVector { val := y, property := hy }
[PROOFSTEP]
swap
[GOAL]
case mk.mk.cons.cons.cons
α : Type u_1
β : Type u_2
γ : Type u_3
y : List α
hy : List.length y = 2
head✝² head✝¹ head✝ : α
x : List α
hx : List.length (head✝² :: head✝¹ :: head✝ :: x) = 2
h : { val := head✝² :: head✝¹ :: head✝ :: x, property := hx } ≈ { val := y, property := hy }
⊢ Sym2.fromVector { val := head✝² :: head✝¹ :: head✝ :: x, property := hx } ≈
Sym2.fromVector { val := y, property := hy }
[PROOFSTEP]
exfalso
[GOAL]
case mk.mk.cons.cons.cons.h
α : Type u_1
β : Type u_2
γ : Type u_3
y : List α
hy : List.length y = 2
head✝² head✝¹ head✝ : α
x : List α
hx : List.length (head✝² :: head✝¹ :: head✝ :: x) = 2
h : { val := head✝² :: head✝¹ :: head✝ :: x, property := hx } ≈ { val := y, property := hy }
⊢ False
[PROOFSTEP]
simp at hx
[GOAL]
case mk.mk.cons.cons.nil
α : Type u_1
β : Type u_2
γ : Type u_3
y : List α
hy : List.length y = 2
head✝¹ head✝ : α
hx : List.length [head✝¹, head✝] = 2
h : { val := [head✝¹, head✝], property := hx } ≈ { val := y, property := hy }
⊢ Sym2.fromVector { val := [head✝¹, head✝], property := hx } ≈ Sym2.fromVector { val := y, property := hy }
[PROOFSTEP]
cases' y with _ y
[GOAL]
case mk.mk.cons.cons.nil.nil
α : Type u_1
β : Type u_2
γ : Type u_3
head✝¹ head✝ : α
hx : List.length [head✝¹, head✝] = 2
hy : List.length [] = 2
h : { val := [head✝¹, head✝], property := hx } ≈ { val := [], property := hy }
⊢ Sym2.fromVector { val := [head✝¹, head✝], property := hx } ≈ Sym2.fromVector { val := [], property := hy }
[PROOFSTEP]
simp at hy
[GOAL]
case mk.mk.cons.cons.nil.cons
α : Type u_1
β : Type u_2
γ : Type u_3
head✝² head✝¹ : α
hx : List.length [head✝², head✝¹] = 2
head✝ : α
y : List α
hy : List.length (head✝ :: y) = 2
h : { val := [head✝², head✝¹], property := hx } ≈ { val := head✝ :: y, property := hy }
⊢ Sym2.fromVector { val := [head✝², head✝¹], property := hx } ≈ Sym2.fromVector { val := head✝ :: y, property := hy }
[PROOFSTEP]
cases' y with _ y
[GOAL]
case mk.mk.cons.cons.nil.cons.nil
α : Type u_1
β : Type u_2
γ : Type u_3
head✝² head✝¹ : α
hx : List.length [head✝², head✝¹] = 2
head✝ : α
hy : List.length [head✝] = 2
h : { val := [head✝², head✝¹], property := hx } ≈ { val := [head✝], property := hy }
⊢ Sym2.fromVector { val := [head✝², head✝¹], property := hx } ≈ Sym2.fromVector { val := [head✝], property := hy }
[PROOFSTEP]
simp at hy
[GOAL]
case mk.mk.cons.cons.nil.cons.cons
α : Type u_1
β : Type u_2
γ : Type u_3
head✝³ head✝² : α
hx : List.length [head✝³, head✝²] = 2
head✝¹ head✝ : α
y : List α
hy : List.length (head✝¹ :: head✝ :: y) = 2
h : { val := [head✝³, head✝²], property := hx } ≈ { val := head✝¹ :: head✝ :: y, property := hy }
⊢ Sym2.fromVector { val := [head✝³, head✝²], property := hx } ≈
Sym2.fromVector { val := head✝¹ :: head✝ :: y, property := hy }
[PROOFSTEP]
cases' y with _ y
[GOAL]
case mk.mk.cons.cons.nil.cons.cons.nil
α : Type u_1
β : Type u_2
γ : Type u_3
head✝³ head✝² : α
hx : List.length [head✝³, head✝²] = 2
head✝¹ head✝ : α
hy : List.length [head✝¹, head✝] = 2
h : { val := [head✝³, head✝²], property := hx } ≈ { val := [head✝¹, head✝], property := hy }
⊢ Sym2.fromVector { val := [head✝³, head✝²], property := hx } ≈
Sym2.fromVector { val := [head✝¹, head✝], property := hy }
case mk.mk.cons.cons.nil.cons.cons.cons
α : Type u_1
β : Type u_2
γ : Type u_3
head✝⁴ head✝³ : α
hx : List.length [head✝⁴, head✝³] = 2
head✝² head✝¹ head✝ : α
y : List α
hy : List.length (head✝² :: head✝¹ :: head✝ :: y) = 2
h : { val := [head✝⁴, head✝³], property := hx } ≈ { val := head✝² :: head✝¹ :: head✝ :: y, property := hy }
⊢ Sym2.fromVector { val := [head✝⁴, head✝³], property := hx } ≈
Sym2.fromVector { val := head✝² :: head✝¹ :: head✝ :: y, property := hy }
[PROOFSTEP]
swap
[GOAL]
case mk.mk.cons.cons.nil.cons.cons.cons
α : Type u_1
β : Type u_2
γ : Type u_3
head✝⁴ head✝³ : α
hx : List.length [head✝⁴, head✝³] = 2
head✝² head✝¹ head✝ : α
y : List α
hy : List.length (head✝² :: head✝¹ :: head✝ :: y) = 2
h : { val := [head✝⁴, head✝³], property := hx } ≈ { val := head✝² :: head✝¹ :: head✝ :: y, property := hy }
⊢ Sym2.fromVector { val := [head✝⁴, head✝³], property := hx } ≈
Sym2.fromVector { val := head✝² :: head✝¹ :: head✝ :: y, property := hy }
[PROOFSTEP]
exfalso
[GOAL]
case mk.mk.cons.cons.nil.cons.cons.cons.h
α : Type u_1
β : Type u_2
γ : Type u_3
head✝⁴ head✝³ : α
hx : List.length [head✝⁴, head✝³] = 2
head✝² head✝¹ head✝ : α
y : List α
hy : List.length (head✝² :: head✝¹ :: head✝ :: y) = 2
h : { val := [head✝⁴, head✝³], property := hx } ≈ { val := head✝² :: head✝¹ :: head✝ :: y, property := hy }
⊢ False
[PROOFSTEP]
simp at hy
[GOAL]
case mk.mk.cons.cons.nil.cons.cons.nil
α : Type u_1
β : Type u_2
γ : Type u_3
head✝³ head✝² : α
hx : List.length [head✝³, head✝²] = 2
head✝¹ head✝ : α
hy : List.length [head✝¹, head✝] = 2
h : { val := [head✝³, head✝²], property := hx } ≈ { val := [head✝¹, head✝], property := hy }
⊢ Sym2.fromVector { val := [head✝³, head✝²], property := hx } ≈
Sym2.fromVector { val := [head✝¹, head✝], property := hy }
[PROOFSTEP]
rcases perm_card_two_iff.mp h with (⟨rfl, rfl⟩ | ⟨rfl, rfl⟩)
[GOAL]
case mk.mk.cons.cons.nil.cons.cons.nil.inl.intro
α : Type u_1
β : Type u_2
γ : Type u_3
head✝¹ head✝ : α
hx hy : List.length [head✝¹, head✝] = 2
h : { val := [head✝¹, head✝], property := hx } ≈ { val := [head✝¹, head✝], property := hy }
⊢ Sym2.fromVector { val := [head✝¹, head✝], property := hx } ≈
Sym2.fromVector { val := [head✝¹, head✝], property := hy }
[PROOFSTEP]
constructor
[GOAL]
case mk.mk.cons.cons.nil.cons.cons.nil.inr.intro
α : Type u_1
β : Type u_2
γ : Type u_3
head✝¹ head✝ : α
hx : List.length [head✝¹, head✝] = 2
hy : List.length [head✝, head✝¹] = 2
h : { val := [head✝¹, head✝], property := hx } ≈ { val := [head✝, head✝¹], property := hy }
⊢ Sym2.fromVector { val := [head✝¹, head✝], property := hx } ≈
Sym2.fromVector { val := [head✝, head✝¹], property := hy }
[PROOFSTEP]
apply Sym2.Rel.swap
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
⊢ LeftInverse
(Quotient.map Sym2.fromVector
(_ :
∀ ⦃a b : Vector α 2⦄,
(fun x x_1 => x ≈ x_1) a b → (fun x x_1 => x ≈ x_1) (Sym2.fromVector a) (Sym2.fromVector b)))
(Quotient.map
(fun x => { val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
(_ :
∀ ⦃a b : α × α⦄,
a ≈ b →
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
a ≈
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
b))
[PROOFSTEP]
apply Sym2.ind
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
⊢ ∀ (x y : α),
Quotient.map Sym2.fromVector
(_ :
∀ ⦃a b : Vector α 2⦄,
(fun x x_1 => x ≈ x_1) a b → (fun x x_1 => x ≈ x_1) (Sym2.fromVector a) (Sym2.fromVector b))
(Quotient.map
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
(_ :
∀ ⦃a b : α × α⦄,
a ≈ b →
(fun x =>
{ val := [x.fst, x.snd],
property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
a ≈
(fun x =>
{ val := [x.fst, x.snd],
property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
b)
(Quotient.mk (Rel.setoid α) (x, y))) =
Quotient.mk (Rel.setoid α) (x, y)
[PROOFSTEP]
aesop (add norm unfold [Sym2.fromVector])
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
x : Sym' α 2
⊢ Quotient.map
(fun x => { val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
(_ :
∀ ⦃a b : α × α⦄,
a ≈ b →
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
a ≈
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
b)
(Quotient.map Sym2.fromVector
(_ :
∀ ⦃a b : Vector α 2⦄,
(fun x x_1 => x ≈ x_1) a b → (fun x x_1 => x ≈ x_1) (Sym2.fromVector a) (Sym2.fromVector b))
x) =
x
[PROOFSTEP]
refine' Quotient.recOnSubsingleton x fun x => _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
x✝ : Sym' α 2
x : Vector α 2
⊢ Quotient.map
(fun x => { val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
(_ :
∀ ⦃a b : α × α⦄,
a ≈ b →
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
a ≈
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
b)
(Quotient.map Sym2.fromVector
(_ :
∀ ⦃a b : Vector α 2⦄,
(fun x x_1 => x ≈ x_1) a b → (fun x x_1 => x ≈ x_1) (Sym2.fromVector a) (Sym2.fromVector b))
(Quotient.mk (Vector.Perm.isSetoid α 2) x)) =
Quotient.mk (Vector.Perm.isSetoid α 2) x
[PROOFSTEP]
cases' x with x hx
[GOAL]
case mk
α : Type u_1
β : Type u_2
γ : Type u_3
x✝ : Sym' α 2
x : List α
hx : List.length x = 2
⊢ Quotient.map
(fun x => { val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
(_ :
∀ ⦃a b : α × α⦄,
a ≈ b →
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
a ≈
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
b)
(Quotient.map Sym2.fromVector
(_ :
∀ ⦃a b : Vector α 2⦄,
(fun x x_1 => x ≈ x_1) a b → (fun x x_1 => x ≈ x_1) (Sym2.fromVector a) (Sym2.fromVector b))
(Quotient.mk (Vector.Perm.isSetoid α 2) { val := x, property := hx })) =
Quotient.mk (Vector.Perm.isSetoid α 2) { val := x, property := hx }
[PROOFSTEP]
cases' x with _ x
[GOAL]
case mk.nil
α : Type u_1
β : Type u_2
γ : Type u_3
x : Sym' α 2
hx : List.length [] = 2
⊢ Quotient.map
(fun x => { val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
(_ :
∀ ⦃a b : α × α⦄,
a ≈ b →
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
a ≈
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
b)
(Quotient.map Sym2.fromVector
(_ :
∀ ⦃a b : Vector α 2⦄,
(fun x x_1 => x ≈ x_1) a b → (fun x x_1 => x ≈ x_1) (Sym2.fromVector a) (Sym2.fromVector b))
(Quotient.mk (Vector.Perm.isSetoid α 2) { val := [], property := hx })) =
Quotient.mk (Vector.Perm.isSetoid α 2) { val := [], property := hx }
[PROOFSTEP]
simp at hx
[GOAL]
case mk.cons
α : Type u_1
β : Type u_2
γ : Type u_3
x✝ : Sym' α 2
head✝ : α
x : List α
hx : List.length (head✝ :: x) = 2
⊢ Quotient.map
(fun x => { val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
(_ :
∀ ⦃a b : α × α⦄,
a ≈ b →
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
a ≈
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
b)
(Quotient.map Sym2.fromVector
(_ :
∀ ⦃a b : Vector α 2⦄,
(fun x x_1 => x ≈ x_1) a b → (fun x x_1 => x ≈ x_1) (Sym2.fromVector a) (Sym2.fromVector b))
(Quotient.mk (Vector.Perm.isSetoid α 2) { val := head✝ :: x, property := hx })) =
Quotient.mk (Vector.Perm.isSetoid α 2) { val := head✝ :: x, property := hx }
[PROOFSTEP]
cases' x with _ x
[GOAL]
case mk.cons.nil
α : Type u_1
β : Type u_2
γ : Type u_3
x : Sym' α 2
head✝ : α
hx : List.length [head✝] = 2
⊢ Quotient.map
(fun x => { val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
(_ :
∀ ⦃a b : α × α⦄,
a ≈ b →
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
a ≈
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
b)
(Quotient.map Sym2.fromVector
(_ :
∀ ⦃a b : Vector α 2⦄,
(fun x x_1 => x ≈ x_1) a b → (fun x x_1 => x ≈ x_1) (Sym2.fromVector a) (Sym2.fromVector b))
(Quotient.mk (Vector.Perm.isSetoid α 2) { val := [head✝], property := hx })) =
Quotient.mk (Vector.Perm.isSetoid α 2) { val := [head✝], property := hx }
[PROOFSTEP]
simp at hx
[GOAL]
case mk.cons.cons
α : Type u_1
β : Type u_2
γ : Type u_3
x✝ : Sym' α 2
head✝¹ head✝ : α
x : List α
hx : List.length (head✝¹ :: head✝ :: x) = 2
⊢ Quotient.map
(fun x => { val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
(_ :
∀ ⦃a b : α × α⦄,
a ≈ b →
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
a ≈
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
b)
(Quotient.map Sym2.fromVector
(_ :
∀ ⦃a b : Vector α 2⦄,
(fun x x_1 => x ≈ x_1) a b → (fun x x_1 => x ≈ x_1) (Sym2.fromVector a) (Sym2.fromVector b))
(Quotient.mk (Vector.Perm.isSetoid α 2) { val := head✝¹ :: head✝ :: x, property := hx })) =
Quotient.mk (Vector.Perm.isSetoid α 2) { val := head✝¹ :: head✝ :: x, property := hx }
[PROOFSTEP]
cases' x with _ x
[GOAL]
case mk.cons.cons.nil
α : Type u_1
β : Type u_2
γ : Type u_3
x : Sym' α 2
head✝¹ head✝ : α
hx : List.length [head✝¹, head✝] = 2
⊢ Quotient.map
(fun x => { val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
(_ :
∀ ⦃a b : α × α⦄,
a ≈ b →
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
a ≈
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
b)
(Quotient.map Sym2.fromVector
(_ :
∀ ⦃a b : Vector α 2⦄,
(fun x x_1 => x ≈ x_1) a b → (fun x x_1 => x ≈ x_1) (Sym2.fromVector a) (Sym2.fromVector b))
(Quotient.mk (Vector.Perm.isSetoid α 2) { val := [head✝¹, head✝], property := hx })) =
Quotient.mk (Vector.Perm.isSetoid α 2) { val := [head✝¹, head✝], property := hx }
case mk.cons.cons.cons
α : Type u_1
β : Type u_2
γ : Type u_3
x✝ : Sym' α 2
head✝² head✝¹ head✝ : α
x : List α
hx : List.length (head✝² :: head✝¹ :: head✝ :: x) = 2
⊢ Quotient.map
(fun x => { val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
(_ :
∀ ⦃a b : α × α⦄,
a ≈ b →
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
a ≈
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
b)
(Quotient.map Sym2.fromVector
(_ :
∀ ⦃a b : Vector α 2⦄,
(fun x x_1 => x ≈ x_1) a b → (fun x x_1 => x ≈ x_1) (Sym2.fromVector a) (Sym2.fromVector b))
(Quotient.mk (Vector.Perm.isSetoid α 2) { val := head✝² :: head✝¹ :: head✝ :: x, property := hx })) =
Quotient.mk (Vector.Perm.isSetoid α 2) { val := head✝² :: head✝¹ :: head✝ :: x, property := hx }
[PROOFSTEP]
swap
[GOAL]
case mk.cons.cons.cons
α : Type u_1
β : Type u_2
γ : Type u_3
x✝ : Sym' α 2
head✝² head✝¹ head✝ : α
x : List α
hx : List.length (head✝² :: head✝¹ :: head✝ :: x) = 2
⊢ Quotient.map
(fun x => { val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
(_ :
∀ ⦃a b : α × α⦄,
a ≈ b →
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
a ≈
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
b)
(Quotient.map Sym2.fromVector
(_ :
∀ ⦃a b : Vector α 2⦄,
(fun x x_1 => x ≈ x_1) a b → (fun x x_1 => x ≈ x_1) (Sym2.fromVector a) (Sym2.fromVector b))
(Quotient.mk (Vector.Perm.isSetoid α 2) { val := head✝² :: head✝¹ :: head✝ :: x, property := hx })) =
Quotient.mk (Vector.Perm.isSetoid α 2) { val := head✝² :: head✝¹ :: head✝ :: x, property := hx }
[PROOFSTEP]
exfalso
[GOAL]
case mk.cons.cons.cons.h
α : Type u_1
β : Type u_2
γ : Type u_3
x✝ : Sym' α 2
head✝² head✝¹ head✝ : α
x : List α
hx : List.length (head✝² :: head✝¹ :: head✝ :: x) = 2
⊢ False
[PROOFSTEP]
simp at hx
[GOAL]
case mk.cons.cons.nil
α : Type u_1
β : Type u_2
γ : Type u_3
x : Sym' α 2
head✝¹ head✝ : α
hx : List.length [head✝¹, head✝] = 2
⊢ Quotient.map
(fun x => { val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
(_ :
∀ ⦃a b : α × α⦄,
a ≈ b →
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
a ≈
(fun x =>
{ val := [x.fst, x.snd], property := (_ : List.length [x.fst, x.snd] = List.length [x.fst, x.snd]) })
b)
(Quotient.map Sym2.fromVector
(_ :
∀ ⦃a b : Vector α 2⦄,
(fun x x_1 => x ≈ x_1) a b → (fun x x_1 => x ≈ x_1) (Sym2.fromVector a) (Sym2.fromVector b))
(Quotient.mk (Vector.Perm.isSetoid α 2) { val := [head✝¹, head✝], property := hx })) =
Quotient.mk (Vector.Perm.isSetoid α 2) { val := [head✝¹, head✝], property := hx }
[PROOFSTEP]
rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
x y : α × α
⊢ relBool x y = true ↔ Rel α x y
[PROOFSTEP]
cases' x with x₁ x₂
[GOAL]
case mk
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
y : α × α
x₁ x₂ : α
⊢ relBool (x₁, x₂) y = true ↔ Rel α (x₁, x₂) y
[PROOFSTEP]
cases' y with y₁ y₂
[GOAL]
case mk.mk
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
x₁ x₂ y₁ y₂ : α
⊢ relBool (x₁, x₂) (y₁, y₂) = true ↔ Rel α (x₁, x₂) (y₁, y₂)
[PROOFSTEP]
aesop (rule_sets [Sym2]) (add norm unfold [relBool])
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
⊢ ∀ (a₁ a₂ b₁ b₂ : α),
(fun x₁ x₂ y₁ y₂ => relBool (x₁, x₂) (y₁, y₂)) a₁ a₂ b₁ b₂ =
(fun x₁ x₂ y₁ y₂ => relBool (x₁, x₂) (y₁, y₂)) a₂ a₁ b₁ b₂ ∧
(fun x₁ x₂ y₁ y₂ => relBool (x₁, x₂) (y₁, y₂)) a₁ a₂ b₁ b₂ =
(fun x₁ x₂ y₁ y₂ => relBool (x₁, x₂) (y₁, y₂)) a₁ a₂ b₂ b₁
[PROOFSTEP]
aesop (add norm unfold [relBool])
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a b : Sym2 α
⊢ ∀ (a₁ a₂ b₁ b₂ : α),
eqBool (Quotient.mk (Rel.setoid α) (a₁, a₂)) (Quotient.mk (Rel.setoid α) (b₁, b₂)) = true ↔
Quotient.mk (Rel.setoid α) (a₁, a₂) = Quotient.mk (Rel.setoid α) (b₁, b₂)
[PROOFSTEP]
aesop (rule_sets [Sym2])
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a : α
z : Sym2 α
h : a ∈ z
⊢ ∀ (a_1 b : α × α) (p : a_1 ≈ b),
Eq.ndrec (motive := fun x => a ∈ x → α) (fun x => Sym2.pairOther a a_1)
(_ : Quotient.mk (Rel.setoid α) a_1 = Quotient.mk (Rel.setoid α) b) =
fun x => Sym2.pairOther a b
[PROOFSTEP]
clear h z
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a : α
⊢ ∀ (a_1 b : α × α) (p : a_1 ≈ b),
Eq.ndrec (motive := fun x => a ∈ x → α) (fun x => Sym2.pairOther a a_1)
(_ : Quotient.mk (Rel.setoid α) a_1 = Quotient.mk (Rel.setoid α) b) =
fun x => Sym2.pairOther a b
[PROOFSTEP]
intro x y h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a : α
x y : α × α
h : x ≈ y
⊢ Eq.ndrec (motive := fun x => a ∈ x → α) (fun x_1 => Sym2.pairOther a x)
(_ : Quotient.mk (Rel.setoid α) x = Quotient.mk (Rel.setoid α) y) =
fun x => Sym2.pairOther a y
[PROOFSTEP]
ext hy
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a : α
x y : α × α
h : x ≈ y
hy : a ∈ Quotient.mk (Rel.setoid α) y
⊢ Eq.ndrec (motive := fun x => a ∈ x → α) (fun x_1 => Sym2.pairOther a x)
(_ : Quotient.mk (Rel.setoid α) x = Quotient.mk (Rel.setoid α) y) hy =
Sym2.pairOther a y
[PROOFSTEP]
convert_to Sym2.pairOther a x = _
[GOAL]
case h.e'_2
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a : α
x y : α × α
h : x ≈ y
hy : a ∈ Quotient.mk (Rel.setoid α) y
⊢ Eq.ndrec (motive := fun x => a ∈ x → α) (fun x_1 => Sym2.pairOther a x)
(_ : Quotient.mk (Rel.setoid α) x = Quotient.mk (Rel.setoid α) y) hy =
Sym2.pairOther a x
[PROOFSTEP]
have :
∀ {c e h},
@Eq.ndrec (Quotient (Rel.setoid α)) (Quotient.mk (Rel.setoid α) x) (fun x => a ∈ x → α)
(fun _ => Sym2.pairOther a x) c e h =
Sym2.pairOther a x :=
by intro _ e _; subst e; rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a : α
x y : α × α
h : x ≈ y
hy : a ∈ Quotient.mk (Rel.setoid α) y
⊢ ∀ {c : Quotient (Rel.setoid α)} {e : Quotient.mk (Rel.setoid α) x = c} {h : a ∈ c},
Eq.ndrec (motive := fun x => a ∈ x → α) (fun x_1 => Sym2.pairOther a x) e h = Sym2.pairOther a x
[PROOFSTEP]
intro _ e _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a : α
x y : α × α
h : x ≈ y
hy : a ∈ Quotient.mk (Rel.setoid α) y
c✝ : Quotient (Rel.setoid α)
e : Quotient.mk (Rel.setoid α) x = c✝
h✝ : a ∈ c✝
⊢ Eq.ndrec (motive := fun x => a ∈ x → α) (fun x_1 => Sym2.pairOther a x) e h✝ = Sym2.pairOther a x
[PROOFSTEP]
subst e
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a : α
x y : α × α
h : x ≈ y
hy : a ∈ Quotient.mk (Rel.setoid α) y
h✝ : a ∈ Quotient.mk (Rel.setoid α) x
⊢ Eq.ndrec (motive := fun x => a ∈ x → α) (fun x_1 => Sym2.pairOther a x)
(_ : Quotient.mk (Rel.setoid α) x = Quotient.mk (Rel.setoid α) x) h✝ =
Sym2.pairOther a x
[PROOFSTEP]
rfl
[GOAL]
case h.e'_2
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a : α
x y : α × α
h : x ≈ y
hy : a ∈ Quotient.mk (Rel.setoid α) y
this :
∀ {c : Quotient (Rel.setoid α)} {e : Quotient.mk (Rel.setoid α) x = c} {h : a ∈ c},
Eq.ndrec (motive := fun x => a ∈ x → α) (fun x_1 => Sym2.pairOther a x) e h = Sym2.pairOther a x
⊢ Eq.ndrec (motive := fun x => a ∈ x → α) (fun x_1 => Sym2.pairOther a x)
(_ : Quotient.mk (Rel.setoid α) x = Quotient.mk (Rel.setoid α) y) hy =
Sym2.pairOther a x
[PROOFSTEP]
apply this
[GOAL]
case h.convert_2
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a : α
x y : α × α
h : x ≈ y
hy : a ∈ Quotient.mk (Rel.setoid α) y
⊢ Sym2.pairOther a x = Sym2.pairOther a y
[PROOFSTEP]
rw [mem_iff] at hy
[GOAL]
case h.convert_2
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a : α
x y : α × α
h : x ≈ y
hy : a = y.fst ∨ a = y.snd
⊢ Sym2.pairOther a x = Sym2.pairOther a y
[PROOFSTEP]
have : relBool x y := (relBool_spec x y).mpr h
[GOAL]
case h.convert_2
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a : α
x y : α × α
h : x ≈ y
hy : a = y.fst ∨ a = y.snd
this : relBool x y = true
⊢ Sym2.pairOther a x = Sym2.pairOther a y
[PROOFSTEP]
aesop (add norm unfold [pairOther, relBool])
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a : α
z : Sym2 α
h : a ∈ z
⊢ Quotient.mk (Rel.setoid α) (a, Mem.other' h) = z
[PROOFSTEP]
induction z using Sym2.ind
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a x✝ y✝ : α
h : a ∈ Quotient.mk (Rel.setoid α) (x✝, y✝)
⊢ Quotient.mk (Rel.setoid α) (a, Mem.other' h) = Quotient.mk (Rel.setoid α) (x✝, y✝)
[PROOFSTEP]
have h' := mem_iff.mp h
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a x✝ y✝ : α
h : a ∈ Quotient.mk (Rel.setoid α) (x✝, y✝)
h' : a = x✝ ∨ a = y✝
⊢ Quotient.mk (Rel.setoid α) (a, Mem.other' h) = Quotient.mk (Rel.setoid α) (x✝, y✝)
[PROOFSTEP]
aesop (add norm unfold [Quotient.rec, Quot.rec]) (rule_sets [Sym2])
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a : α
z : Sym2 α
h : a ∈ z
⊢ Mem.other h = Mem.other' h
[PROOFSTEP]
rw [← congr_right, other_spec' h, other_spec]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a : α
z : Sym2 α
h : a ∈ z
⊢ Mem.other' h ∈ z
[PROOFSTEP]
rw [← other_eq_other']
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a : α
z : Sym2 α
h : a ∈ z
⊢ Mem.other h ∈ z
[PROOFSTEP]
exact other_mem h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a : α
z : Sym2 α
ha : a ∈ z
hb : Mem.other' ha ∈ z
⊢ Mem.other' hb = a
[PROOFSTEP]
induction z using Sym2.ind
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
a x✝ y✝ : α
ha : a ∈ Quotient.mk (Rel.setoid α) (x✝, y✝)
hb : Mem.other' ha ∈ Quotient.mk (Rel.setoid α) (x✝, y✝)
⊢ Mem.other' hb = a
[PROOFSTEP]
aesop (rule_sets [Sym2]) (add norm unfold [Quotient.rec, Quot.rec])
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a : α
z : Sym2 α
ha : a ∈ z
hb : Mem.other ha ∈ z
⊢ Mem.other hb = a
[PROOFSTEP]
classical
rw [other_eq_other'] at hb ⊢
convert other_invol' ha hb using 2
apply other_eq_other'
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a : α
z : Sym2 α
ha : a ∈ z
hb : Mem.other ha ∈ z
⊢ Mem.other hb = a
[PROOFSTEP]
rw [other_eq_other'] at hb ⊢
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a : α
z : Sym2 α
ha : a ∈ z
hb✝ : Mem.other ha ∈ z
hb : Mem.other' ha ∈ z
⊢ Mem.other' hb✝ = a
[PROOFSTEP]
convert other_invol' ha hb using 2
[GOAL]
case h.e'_2.h.e'_3
α : Type u_1
β : Type u_2
γ : Type u_3
a : α
z : Sym2 α
ha : a ∈ z
hb✝ : Mem.other ha ∈ z
hb : Mem.other' ha ∈ z
⊢ Mem.other ha = Mem.other' ha
[PROOFSTEP]
apply other_eq_other'
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
⊢ filter IsDiag (image Quotient.mk'' (s ×ˢ s)) = image Quotient.mk'' (Finset.diag s)
[PROOFSTEP]
ext z
[GOAL]
case a
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
z : Sym2 α
⊢ z ∈ filter IsDiag (image Quotient.mk'' (s ×ˢ s)) ↔ z ∈ image Quotient.mk'' (Finset.diag s)
[PROOFSTEP]
induction' z using Sym2.inductionOn
[GOAL]
case a.hf
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
x✝ y✝ : α
⊢ Quotient.mk (Rel.setoid α) (x✝, y✝) ∈ filter IsDiag (image Quotient.mk'' (s ×ˢ s)) ↔
Quotient.mk (Rel.setoid α) (x✝, y✝) ∈ image Quotient.mk'' (Finset.diag s)
[PROOFSTEP]
simp only [mem_image, mem_diag, exists_prop, mem_filter, Prod.exists, mem_product]
[GOAL]
case a.hf
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
x✝ y✝ : α
⊢ (∃ a b, (a ∈ s ∧ b ∈ s) ∧ Quotient.mk'' (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)) ∧
IsDiag (Quotient.mk (Rel.setoid α) (x✝, y✝)) ↔
∃ a b, (a ∈ s ∧ a = b) ∧ Quotient.mk'' (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)
[PROOFSTEP]
constructor
[GOAL]
case a.hf.mp
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
x✝ y✝ : α
⊢ (∃ a b, (a ∈ s ∧ b ∈ s) ∧ Quotient.mk'' (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)) ∧
IsDiag (Quotient.mk (Rel.setoid α) (x✝, y✝)) →
∃ a b, (a ∈ s ∧ a = b) ∧ Quotient.mk'' (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)
[PROOFSTEP]
rintro ⟨⟨a, b, ⟨ha, hb⟩, (h : Quotient.mk _ _ = _)⟩, hab⟩
[GOAL]
case a.hf.mp.intro.intro.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
x✝ y✝ : α
hab : IsDiag (Quotient.mk (Rel.setoid α) (x✝, y✝))
a b : α
h : Quotient.mk (Rel.setoid α) (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)
ha : a ∈ s
hb : b ∈ s
⊢ ∃ a b, (a ∈ s ∧ a = b) ∧ Quotient.mk'' (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)
[PROOFSTEP]
rw [← h, Sym2.mk''_isDiag_iff] at hab
[GOAL]
case a.hf.mp.intro.intro.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
x✝ y✝ a b : α
hab : a = b
h : Quotient.mk (Rel.setoid α) (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)
ha : a ∈ s
hb : b ∈ s
⊢ ∃ a b, (a ∈ s ∧ a = b) ∧ Quotient.mk'' (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)
[PROOFSTEP]
exact ⟨a, b, ⟨ha, hab⟩, h⟩
[GOAL]
case a.hf.mpr
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
x✝ y✝ : α
⊢ (∃ a b, (a ∈ s ∧ a = b) ∧ Quotient.mk'' (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)) →
(∃ a b, (a ∈ s ∧ b ∈ s) ∧ Quotient.mk'' (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)) ∧
IsDiag (Quotient.mk (Rel.setoid α) (x✝, y✝))
[PROOFSTEP]
rintro ⟨a, b, ⟨ha, rfl⟩, h⟩
[GOAL]
case a.hf.mpr.intro.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
x✝ y✝ a : α
ha : a ∈ s
h : Quotient.mk'' (a, a) = Quotient.mk (Rel.setoid α) (x✝, y✝)
⊢ (∃ a b, (a ∈ s ∧ b ∈ s) ∧ Quotient.mk'' (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)) ∧
IsDiag (Quotient.mk (Rel.setoid α) (x✝, y✝))
[PROOFSTEP]
rw [← h]
[GOAL]
case a.hf.mpr.intro.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
x✝ y✝ a : α
ha : a ∈ s
h : Quotient.mk'' (a, a) = Quotient.mk (Rel.setoid α) (x✝, y✝)
⊢ (∃ a_1 b, (a_1 ∈ s ∧ b ∈ s) ∧ Quotient.mk'' (a_1, b) = Quotient.mk'' (a, a)) ∧ IsDiag (Quotient.mk'' (a, a))
[PROOFSTEP]
exact ⟨⟨a, a, ⟨ha, ha⟩, rfl⟩, rfl⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
⊢ filter (fun a => ¬IsDiag a) (image Quotient.mk'' (s ×ˢ s)) = image Quotient.mk'' (offDiag s)
[PROOFSTEP]
ext z
[GOAL]
case a
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
z : Sym2 α
⊢ z ∈ filter (fun a => ¬IsDiag a) (image Quotient.mk'' (s ×ˢ s)) ↔ z ∈ image Quotient.mk'' (offDiag s)
[PROOFSTEP]
induction z using Sym2.inductionOn
[GOAL]
case a.hf
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
x✝ y✝ : α
⊢ Quotient.mk (Rel.setoid α) (x✝, y✝) ∈ filter (fun a => ¬IsDiag a) (image Quotient.mk'' (s ×ˢ s)) ↔
Quotient.mk (Rel.setoid α) (x✝, y✝) ∈ image Quotient.mk'' (offDiag s)
[PROOFSTEP]
simp only [mem_image, mem_offDiag, mem_filter, Prod.exists, mem_product]
[GOAL]
case a.hf
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
x✝ y✝ : α
⊢ (∃ a b, (a ∈ s ∧ b ∈ s) ∧ Quotient.mk'' (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)) ∧
¬IsDiag (Quotient.mk (Rel.setoid α) (x✝, y✝)) ↔
∃ a b, (a ∈ s ∧ b ∈ s ∧ a ≠ b) ∧ Quotient.mk'' (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)
[PROOFSTEP]
constructor
[GOAL]
case a.hf.mp
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
x✝ y✝ : α
⊢ (∃ a b, (a ∈ s ∧ b ∈ s) ∧ Quotient.mk'' (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)) ∧
¬IsDiag (Quotient.mk (Rel.setoid α) (x✝, y✝)) →
∃ a b, (a ∈ s ∧ b ∈ s ∧ a ≠ b) ∧ Quotient.mk'' (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)
[PROOFSTEP]
rintro ⟨⟨a, b, ⟨ha, hb⟩, (h : Quotient.mk _ _ = _)⟩, hab⟩
[GOAL]
case a.hf.mp.intro.intro.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
x✝ y✝ : α
hab : ¬IsDiag (Quotient.mk (Rel.setoid α) (x✝, y✝))
a b : α
h : Quotient.mk (Rel.setoid α) (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)
ha : a ∈ s
hb : b ∈ s
⊢ ∃ a b, (a ∈ s ∧ b ∈ s ∧ a ≠ b) ∧ Quotient.mk'' (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)
[PROOFSTEP]
rw [← h, Sym2.mk''_isDiag_iff] at hab
[GOAL]
case a.hf.mp.intro.intro.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
x✝ y✝ a b : α
hab : ¬a = b
h : Quotient.mk (Rel.setoid α) (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)
ha : a ∈ s
hb : b ∈ s
⊢ ∃ a b, (a ∈ s ∧ b ∈ s ∧ a ≠ b) ∧ Quotient.mk'' (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)
[PROOFSTEP]
exact ⟨a, b, ⟨ha, hb, hab⟩, h⟩
[GOAL]
case a.hf.mpr
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
x✝ y✝ : α
⊢ (∃ a b, (a ∈ s ∧ b ∈ s ∧ a ≠ b) ∧ Quotient.mk'' (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)) →
(∃ a b, (a ∈ s ∧ b ∈ s) ∧ Quotient.mk'' (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)) ∧
¬IsDiag (Quotient.mk (Rel.setoid α) (x✝, y✝))
[PROOFSTEP]
rintro ⟨a, b, ⟨ha, hb, hab⟩, (h : Quotient.mk _ _ = _)⟩
[GOAL]
case a.hf.mpr.intro.intro.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
x✝ y✝ a b : α
h : Quotient.mk (Rel.setoid α) (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)
ha : a ∈ s
hb : b ∈ s
hab : a ≠ b
⊢ (∃ a b, (a ∈ s ∧ b ∈ s) ∧ Quotient.mk'' (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)) ∧
¬IsDiag (Quotient.mk (Rel.setoid α) (x✝, y✝))
[PROOFSTEP]
rw [Ne.def, ← Sym2.mk''_isDiag_iff, h] at hab
[GOAL]
case a.hf.mpr.intro.intro.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : DecidableEq α
s : Finset α
x✝ y✝ a b : α
h : Quotient.mk (Rel.setoid α) (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)
ha : a ∈ s
hb : b ∈ s
hab : ¬IsDiag (Quotient.mk (Rel.setoid α) (x✝, y✝))
⊢ (∃ a b, (a ∈ s ∧ b ∈ s) ∧ Quotient.mk'' (a, b) = Quotient.mk (Rel.setoid α) (x✝, y✝)) ∧
¬IsDiag (Quotient.mk (Rel.setoid α) (x✝, y✝))
[PROOFSTEP]
exact ⟨⟨a, b, ⟨ha, hb⟩, h⟩, hab⟩
|
If $f$ is a continuous function from a set $S$ to a set $T$, and $f$ maps open sets in $S$ to open sets in $T$, and the preimage of any point in $T$ is connected, then the preimage of any connected set in $T$ is connected. |
module Test.Transducers.Core
import Transducers
import Test.Transducers.Utils
--------------------------------------------------------------------------------
-- Unit tests
--------------------------------------------------------------------------------
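-- Each test below checks that a transducer pipeline computes the same
-- result as the equivalent plain list operation, following the pattern:
--
--   transduce (mapping (*2)) (+) 0 input == foldl (+) 0 (map (*2) input)
--
-- (assertEq and runTestSuite are assumed to be provided by
-- Test.Transducers.Utils.)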
should_map : List Int -> Test
should_map input =
assertEq
(foldl (+) 0 (map (*2) input))
(transduce (mapping (*2)) (+) 0 input)
should_follow_map_law : Test
should_follow_map_law =
assertEq
(transduce (mapping length . mapping (*2)) (+) 0 ["abc", "", "cdef", "g"])
(transduce (mapping ((*2) . length)) (+) 0 ["abc", "", "cdef", "g"])
should_filter : List Int -> Test
should_filter input =
assertEq
(foldl (+) 0 (filter odd input))
(transduce (filtering odd) (+) 0 input)
should_concat_map : List Int -> Test
should_concat_map input =
assertEq
(foldl (+) 0 (concatMap twice input))
(transduce (catMapping twice) (+) 0 input)
should_reduce_terminal : List Int -> Test
should_reduce_terminal input =
assertEq
(foldl (+) 0 (filter odd (map (*2) input)))
(reduce (mapping (*2) . filtering odd $ terminal (+)) 0 input)
should_take : List Int -> Test
should_take input =
assertEq
(foldl (+) 0 (take 10 input))
(transduce (taking 10) (+) 0 input)
should_take_while : Test
should_take_while =
assertEq 55 $ transduce (takingWhile (<= 10)) (+) 0 [1..100]
should_drop : List Int -> Test
should_drop input =
assertEq
(foldl (+) 0 (drop 10 input))
(transduce (dropping 10) (+) 0 input)
should_pipe_from_left_to_right : List Int -> Test
should_pipe_from_left_to_right input =
assertEq
(foldl (+) 0 (map (+1) (concatMap twice (filter odd input))))
(transduce (filtering odd . catMapping twice . mapping (+1)) (+) 0 input)
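-- Note the design choice verified above: a composed transducer applies its
-- stages left to right (filtering odd runs first, mapping (+1) last), the
-- opposite of ordinary function composition with (.).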
should_allow_pure_xf_composition : Test
should_allow_pure_xf_composition =
let xf = taking 10 . filtering odd . mapping (*2)
in do
assertEq 50 (transduce xf (+) 0 [1..100])
assertEq 30240 (transduce xf (*) 1 [1..100])
assertEq 0 (transduce (mapping length . mapping fromNat . xf) (+) 0 (replicate 100 "ab"))
should_index : Test
should_index = do
let xf = indexing . mapping (\(idx, s) => show idx ++ ": " ++ s) . interspersing ", "
assertEq "0: Zero, 1: One, 2: Two" (transduce xf (++) "" ["Zero", "One", "Two"])
should_chunk_of : Test
should_chunk_of = do
let xf = chunksOf 4 . mapping pack . mapping (++ " ")
assertEq "abcd efgh ijkl " (transduce xf (++) "" ['a'..'l'])
assertEq "abcd efgh ij " (transduce xf (++) "" ['a'..'j'])
should_intersperse : Test
should_intersperse = do
let cs = ["a", "list", "of", "words"]
assertEq "a, list, of, words" (transduce (interspersing ", ") (++) "" cs)
should_deduplicate : Test
should_deduplicate = do
assertEq "abcdcbad" $ transduce (mapping singleton . deduplicate) (++) "" (unpack "abbcddccbaad")
should_group_by : Test
should_group_by =
assertEq ["aa", "b", "ccc", "b"] $
reverse $ into [] (groupingBy (==) . mapping pack) (unpack "aabcccb")
should_group_by_with_custom_predicate : Test
should_group_by_with_custom_predicate =
assertEq 3 $ longestIncreasingSeq [1, 2, 1, 3, 4, 1, 5, 3, 4]
where
longestIncreasingSeq = transduce (groupingBy (<) . mapping length . mapping fromNat) max 0
should_support_isomorphisms : Test
should_support_isomorphisms =
assertEq "ei" $
into "" (under (MkIso ord chr) (mapping (+1)) . filtering vowel) (unpack "abcdefgh")
export
run_tests : IO ()
run_tests =
runTestSuite [
should_map [1..100],
should_follow_map_law,
should_filter [1..100],
should_concat_map [1..100],
should_reduce_terminal [1..100],
should_take [1..100],
should_take_while,
should_drop [1..100],
should_pipe_from_left_to_right [1..100],
should_allow_pure_xf_composition,
should_index,
should_chunk_of,
should_intersperse,
should_deduplicate,
should_group_by,
should_group_by_with_custom_predicate,
should_support_isomorphisms]
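-- A minimal entry point for running this suite might look like the sketch
-- below (hypothetical; the actual runner may live elsewhere in the repo):
--
--   main : IO ()
--   main = run_tests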
|
(** Algebra1
author Hidetsune Kobayashi
Department of Mathematics
Nihon University
[email protected]
May 3, 2004.
April 6, 2007 (revised)
chapter 0. Preliminaries
section 1. Natural numbers and Integers
section 2. Sets
section 3. Functions
section 4. Nsets, set of natural numbers
section 4'. Lower bounded set of integers
section 5. Augmented integer: integer and \<infinity> -\<infinity>
section 6. amin, amax
section 7. cardinality of sets
Note. Some lemmas in this chapter are already formalized by L. Paulson
and others.
chapter 1. Ordered Set
section 1. Basic Concepts of Ordered Sets
**)
theory Algebra1
imports Main "HOL-Library.FuncSet"
begin
chapter "Preliminaries"
text\<open>Some of the lemmas in this section are already proved in
src/HOL/Integ of Isabelle version 2003.\<close>
section "Lemmas for logical manipulation"
lemma True_then:"True \<longrightarrow> P \<Longrightarrow> P"
by simp
lemma ex_conjI:"\<lbrakk>P c; Q c\<rbrakk> \<Longrightarrow> \<exists>c. P c \<and> Q c"
by blast
lemma forall_spec:"\<lbrakk> \<forall>b. P b \<longrightarrow> Q b; P a\<rbrakk> \<Longrightarrow> Q a"
by simp
lemma a_b_exchange:"\<lbrakk>a; a = b\<rbrakk> \<Longrightarrow> b"
by simp
lemma eq_prop:"\<lbrakk> P; P = Q\<rbrakk> \<Longrightarrow> Q"
by simp
lemma forball_contra:"\<lbrakk>\<forall>y\<in>A. P x y \<longrightarrow> \<not> Q y; \<forall>y\<in>A. Q y \<or> R y\<rbrakk> \<Longrightarrow>
\<forall>y\<in>A. (\<not> P x y) \<or> R y"
by blast
lemma forball_contra1:"\<lbrakk>\<forall>y\<in>A. P x y \<longrightarrow> Q y; \<forall>y\<in>A. \<not> Q y\<rbrakk> \<Longrightarrow> \<forall>y\<in>A. \<not> P x y"
by blast
section "Natural numbers and Integers"
text\<open>Elementary properties of natural numbers and integers\<close>
lemma nat_nonzero_pos:"(a::nat) \<noteq> 0 \<Longrightarrow> 0 < a"
by simp
lemma add_both:"(a::nat) = b \<Longrightarrow> a + c = b + c"
by simp
lemma add_bothl:"a = b \<Longrightarrow> c + a = c + b"
by simp
lemma diff_Suc:"(n::nat) \<le> m \<Longrightarrow> m - n + Suc 0 = Suc m - n"
by arith
lemma le_convert:"\<lbrakk>a = b; a \<le> c\<rbrakk> \<Longrightarrow> b \<le> c"
by simp
lemma ge_convert:"\<lbrakk>a = b; c \<le> a\<rbrakk> \<Longrightarrow> c \<le> b"
by simp
lemma less_convert:"\<lbrakk> a = b; c < b \<rbrakk> \<Longrightarrow> c < a"
by auto
lemma ineq_conv1:"\<lbrakk>a = b; a < c\<rbrakk> \<Longrightarrow> b < c"
by simp
lemma diff_Suc_pos:"0 < a - Suc 0 \<Longrightarrow> 0 < a"
by simp
lemma minus_SucSuc:"a - Suc (Suc 0) = a - Suc 0 - Suc 0"
by simp
lemma Suc_Suc_Tr:"Suc (Suc 0) \<le> n \<Longrightarrow> Suc (n - Suc (Suc 0)) = n - Suc 0"
by arith
lemma Suc_Suc_less:"Suc 0 < a \<Longrightarrow> Suc (a - Suc (Suc 0)) < a"
by arith
lemma diff_zero_eq:"n = (0::nat) \<Longrightarrow> m = m - n"
by simp
lemma less_le_diff:"x < n \<Longrightarrow> x \<le> n - Suc 0"
by arith
lemma le_pre_le:"x \<le> n - Suc 0 \<Longrightarrow> x \<le> n"
by arith
lemma nat_not_less:"\<not> (m::nat) < n \<Longrightarrow> n \<le> m"
by (rule contrapos_pp, simp+)
lemma less_neq:"n < (m::nat) \<Longrightarrow> n \<noteq> m"
by (simp add:nat_neq_iff[THEN sym, of "n" "m"])
lemma less_le_diff1:"n \<noteq> 0 \<Longrightarrow> ((m::nat) < n) = (m \<le> (n - Suc 0))"
by arith
lemma nat_not_less1:"n \<noteq> 0 \<Longrightarrow> (\<not> (m::nat) < n) = (\<not> m \<le> (n - Suc 0))"
by arith
lemma nat_eq_le:"m = (n::nat) \<Longrightarrow> m \<le> n"
by simp
subsection "Integers"
lemma non_zero_int:" (n::int) \<noteq> 0 \<Longrightarrow> 0 < n \<or> n < 0"
by arith
lemma zgt_0_zge_1:"(0::int) < z \<Longrightarrow> 1 \<le> z"
by arith
lemma not_zle:"(\<not> (n::int) \<le> m) = (m < n)"
by auto
lemma not_zless:"(\<not> (n::int) < m) = (m \<le> n)"
by auto
lemma zle_imp_zless_or_eq:"(n::int) \<le> m \<Longrightarrow> n < m \<or> n = m"
by arith
lemma zminus_zadd_cancel:" - z + (z + w) = (w::int)"
by simp
lemma int_neq_iff:"((w::int) \<noteq> z) = (w < z) \<or> (z < w)"
by auto
lemma zless_imp_zle:"(z::int) < z' \<Longrightarrow> z \<le> z'"
by simp
lemma zdiff:"z - (w::int) = z + (- w)"
by simp
lemma zle_zless_trans:"\<lbrakk> (i::int) \<le> j; j < k\<rbrakk> \<Longrightarrow> i < k"
by arith
lemma zless_zle_trans:"\<lbrakk> (i::int) < j; j \<le> k\<rbrakk> \<Longrightarrow> i < k"
by arith
lemma zless_neq:"(i::int) < j \<Longrightarrow> i \<noteq> j"
by simp
lemma int_mult_mono:"\<lbrakk> i < j; (0::int) < k \<rbrakk> \<Longrightarrow> k * i < k * j"
apply (frule zmult_zless_mono2_lemma [of "i" "j" "nat k"])
apply simp apply simp
done
lemma int_mult_le:"\<lbrakk>i \<le> j; (0::int) \<le> k\<rbrakk> \<Longrightarrow> k * i \<le> k * j"
apply (simp add:order_le_less)
apply (case_tac "i < j")
apply (case_tac "0 < k")
apply simp
apply simp
apply simp
done
lemma int_mult_le1:"\<lbrakk>i \<le> j; (0::int) \<le> k\<rbrakk> \<Longrightarrow> i * k \<le> j * k"
apply (simp add:mult.commute[of _ "k"])
apply (simp add:int_mult_le)
done
lemma zmult_zminus_right:"(w::int) * (- z) = - (w * z)"
apply (insert distrib_left[of "w" "z" "-z"])
apply simp
done
lemma zmult_zle_mono1_neg:"\<lbrakk>(i::int) \<le> j; k \<le> 0\<rbrakk> \<Longrightarrow> j * k \<le> i * k"
apply (subgoal_tac "0 \<le> - k") prefer 2 apply simp
apply (frule int_mult_le [of "i" "j" "- k"], assumption+)
apply (simp add:mult.commute)
done
lemma zmult_zless_mono_neg:"\<lbrakk>(i::int) < j; k < 0\<rbrakk> \<Longrightarrow> j * k < i * k"
apply (subgoal_tac "0 < -k",
frule int_mult_mono[of "i" "j" "-k"], assumption+,
simp add:mult.commute, simp)
done
lemma zmult_neg_neg:"\<lbrakk>i < (0::int); j < 0 \<rbrakk> \<Longrightarrow> 0 < i * j"
apply (frule zmult_zless_mono_neg[of "i" "0" "j"], assumption)
apply simp
done
lemma zmult_pos_pos:"\<lbrakk>(0::int) < i; 0 < j\<rbrakk> \<Longrightarrow> 0 < i * j"
apply (frule int_mult_mono[of "0" "i" "j"], assumption+)
apply (simp add:mult.commute)
done
lemma zmult_pos_neg:"\<lbrakk>(0::int) < i; j < 0\<rbrakk> \<Longrightarrow> i * j < 0"
apply (frule zmult_zless_mono_neg[of "0" "i" "j"], assumption+, simp)
done
lemma zmult_neg_pos:"\<lbrakk>i < (0::int); 0 < j\<rbrakk> \<Longrightarrow> i * j < 0"
apply (frule int_mult_mono[of "i" "0" "j"], assumption+,
simp add:mult.commute)
done
lemma zle:"((z::int) \<le> w) = (\<not> (w < z))"
by auto
lemma times_1_both:"\<lbrakk>(0::int) < z; z * z' = 1\<rbrakk> \<Longrightarrow> z = 1 \<and> z' = 1"
apply (subgoal_tac "0 < z'")
apply (frule zgt_0_zge_1[of "z'"],
subgoal_tac "z' = 1", simp,
subgoal_tac "1 < z' \<or> 1 = z'", thin_tac "1 \<le> z'", thin_tac "0 < z'")
apply (rule contrapos_pp, simp+,
frule int_mult_mono[of "1" "z'" "z"], assumption+, simp, arith)
apply (rule contrapos_pp, simp+, simp add:zle[THEN sym],
frule zless_imp_zle[of "0" "z"], frule int_mult_le[of "z'" "0" "z"],
assumption+, simp)
done
lemma zminus_minus:"i - - (j::int) = i + j"
by simp
lemma zminus_minus_pos:"(n::int) < 0 \<Longrightarrow> 0 < - n"
by simp
lemma zadd_zle_mono:"\<lbrakk>w' \<le> w; z' \<le> (z::int)\<rbrakk> \<Longrightarrow> w' + z' \<le> w + z"
by simp
lemma zmult_zle_mono:"\<lbrakk>i \<le> (j::int); 0 < k\<rbrakk> \<Longrightarrow> k * i \<le> k * j"
apply (case_tac "i = j") apply simp
apply (frule zle_imp_zless_or_eq[of "i" "j"])
apply (thin_tac "i \<le> j") apply simp
done
lemma zmult_zle_mono_r:"\<lbrakk>i \<le> (j::int); 0 < k\<rbrakk> \<Longrightarrow> i * k \<le> j * k"
apply (frule zmult_zle_mono[of "i" "j" "k"], assumption)
apply (simp add:mult.commute)
done
lemma pos_zmult_pos:"\<lbrakk> 0 \<le> (a::int); 0 < (b::int)\<rbrakk> \<Longrightarrow> a \<le> a * b"
apply (case_tac "a = 0") apply simp
apply (frule zle_imp_zless_or_eq[of "0" "a"]) apply (thin_tac "0 \<le> a")
apply simp
done
lemma pos_mult_l_gt:"\<lbrakk>(0::int) < w; i \<le> j; 0 \<le> i\<rbrakk> \<Longrightarrow> i \<le> w * j"
by (metis not_zless pos_zmult_pos order_trans mult.commute)
lemma pos_mult_r_gt:"\<lbrakk>(0::int) < w; i \<le> j; 0 \<le> i\<rbrakk> \<Longrightarrow> i \<le> j * w"
apply (frule pos_mult_l_gt[of "w" "i" "j"], assumption+)
apply (simp add:mult.commute[of "w" "j"])
done
lemma mult_pos_iff:"\<lbrakk>(0::int) < i; 0 \<le> i * j \<rbrakk> \<Longrightarrow> 0 \<le> j"
apply (rule contrapos_pp, simp+)
apply (cut_tac linorder_linear[of "0" "j"]) apply simp
apply (simp add:not_zle)
apply (frule int_mult_mono[of "j" "0" "i"], assumption+) apply simp
done
lemma zmult_eq:"\<lbrakk>(0::int) < w; z = z'\<rbrakk> \<Longrightarrow> w * z = w * z'"
by simp
lemma zmult_eq_r:"\<lbrakk>(0::int) < w; z = z'\<rbrakk> \<Longrightarrow> z * w = z' * w"
by simp
lemma zdiv_eq_l:"\<lbrakk>(0::int) < w; z * w = z' * w \<rbrakk> \<Longrightarrow> z = z'"
by simp
lemma zdiv_eq_r:"\<lbrakk>(0::int) < w; w * z = w * z' \<rbrakk> \<Longrightarrow> z = z'"
by simp
lemma int_nat_minus:"0 < (n::int) \<Longrightarrow> nat (n - 1) = (nat n) - 1"
by arith
lemma int_nat_add:"\<lbrakk>0 < (n::int); 0 < (m::int)\<rbrakk> \<Longrightarrow> (nat (n - 1)) + (nat (m - 1)) + (Suc 0) = nat (n + m - 1)"
by arith
lemma int_equation:"(x::int) = y + z \<Longrightarrow> x - y = z"
by simp
lemma int_pos_mult_monor:"\<lbrakk> 0 < (n::int); 0 \<le> n * m \<rbrakk> \<Longrightarrow> 0 \<le> m"
by (rule mult_pos_iff, assumption+)
lemma int_pos_mult_monol:"\<lbrakk> 0 < (m::int); 0 \<le> n * m \<rbrakk> \<Longrightarrow> 0 \<le> n"
apply (rule int_pos_mult_monor, assumption+)
apply (simp add:mult.commute)
done
lemma zdiv_positive:"\<lbrakk>(0::int) \<le> a; 0 < b\<rbrakk> \<Longrightarrow> 0 \<le> a div b"
apply (frule_tac a = 0 and a' = a and b = b in zdiv_mono1, assumption+)
apply simp
done
lemma zdiv_pos_mono_r:"\<lbrakk> (0::int) < w; w * z \<le> w * z'\<rbrakk> \<Longrightarrow> z \<le> z'"
apply (rule contrapos_pp, simp+)
done (** zmult_div_mono to rename **)
lemma zdiv_pos_mono_l:"\<lbrakk> (0::int) < w; z * w \<le> z' * w\<rbrakk> \<Longrightarrow> z \<le> z'"
apply (simp add:mult.commute)
done
lemma zdiv_pos_pos_l:"\<lbrakk> (0::int) < w; 0 \<le> z * w\<rbrakk> \<Longrightarrow> 0 \<le> z"
by (simp add:mult.commute, frule zdiv_pos_mono_r[of "w" "0" "z"], simp,
assumption)
section "Sets"
(* Preliminary properties of sets are proved here. Some of them have
 already been proved by L. Paulson and others. *)
subsection "A short notes for proof steps"
subsection "Sets"
lemma inEx:"x \<in> A \<Longrightarrow> \<exists>y\<in>A. y = x"
by simp
lemma inEx_rev:" \<exists>y\<in>A. y = x \<Longrightarrow> x \<in> A"
by blast
lemma nonempty_ex:"A \<noteq> {} \<Longrightarrow> \<exists>x. x \<in> A"
by blast
lemma ex_nonempty:"\<exists>x. x \<in> A \<Longrightarrow> A \<noteq> {}"
by blast
lemma not_eq_outside:"a \<notin> A \<Longrightarrow> \<forall>b\<in>A. b \<noteq> a"
by blast
lemma ex_nonempty_set:"\<exists>a. P a \<Longrightarrow> {x. P x} \<noteq> {}"
by blast
lemma nonempty: "x \<in> A \<Longrightarrow> A \<noteq> {}"
by blast
lemma conditional_subset:"{x\<in>A. P x} \<subseteq> A"
by blast
lemma bsubsetTr:"{x. x \<in> A \<and> P x} \<subseteq> A"
by blast
lemma sets_not_eq:"\<lbrakk>A \<noteq> B; B \<subseteq> A\<rbrakk> \<Longrightarrow> \<exists>a\<in>A. a \<notin> B"
by blast
lemma diff_nonempty:"\<lbrakk>A \<noteq> B; B \<subseteq> A\<rbrakk> \<Longrightarrow> A - B \<noteq> {}"
by blast
lemma sub_which1:"\<lbrakk>A \<subseteq> B \<or> B \<subseteq> A; x \<in> A; x \<notin> B\<rbrakk> \<Longrightarrow> B \<subseteq> A"
by blast
lemma sub_which2:"\<lbrakk>A \<subseteq> B \<or> B \<subseteq> A; x \<notin> A; x \<in> B\<rbrakk> \<Longrightarrow> A \<subseteq> B"
by blast
lemma nonempty_int: "A \<inter> B \<noteq> {} \<Longrightarrow> \<exists>x. x \<in> A \<inter> B "
by blast
lemma no_meet1:"A \<inter> B = {}\<Longrightarrow> \<forall>a \<in> A. a \<notin> B"
by blast
lemma no_meet2:"A \<inter> B = {}\<Longrightarrow> \<forall>a \<in> B. a \<notin> A"
by blast
lemma elem_some:"x \<in> A \<Longrightarrow> \<exists>y\<in>A. x = y"
by blast
lemma singleton_sub:"a \<in> A \<Longrightarrow> {a} \<subseteq> A"
by blast
lemma eq_elem_in: "\<lbrakk> a \<in> A; a = b \<rbrakk> \<Longrightarrow> b \<in> A"
by simp
lemma eq_set_inc: "\<lbrakk> a \<in> A; A = B \<rbrakk> \<Longrightarrow> a \<in> B"
by simp
lemma eq_set_not_inc:"\<lbrakk>a \<notin> A; A = B \<rbrakk> \<Longrightarrow> a \<notin> B"
by simp
lemma int_subsets: "\<lbrakk> A1 \<subseteq> A; B1 \<subseteq> B \<rbrakk> \<Longrightarrow> A1 \<inter> B1 \<subseteq> A \<inter> B"
by blast
lemma inter_mono:"A \<subseteq> B \<Longrightarrow> A \<inter> C \<subseteq> B \<inter> C"
by blast
lemma sub_Un1:"B \<subseteq> B \<union> C"
by blast
lemma sub_Un2:"C \<subseteq> B \<union> C"
by blast
lemma subset_contr:"\<lbrakk> A \<subset> B; B \<subseteq> A \<rbrakk> \<Longrightarrow> False"
by blast
lemma psubset_contr:"\<lbrakk> A \<subset> B; B \<subset> A \<rbrakk> \<Longrightarrow> False"
by blast
lemma eqsets_sub:"A = B \<Longrightarrow> A \<subseteq> B"
by simp
lemma not_subseteq:" \<not> A \<subseteq> B \<Longrightarrow> \<exists>a \<in> A. a \<notin> B"
by blast
lemma in_un1:"\<lbrakk> x \<in> A \<union> B; x \<notin> B \<rbrakk> \<Longrightarrow> x \<in> A"
by blast
lemma proper_subset:"\<lbrakk>A \<subseteq> B; x \<notin> A; x \<in> B\<rbrakk> \<Longrightarrow> A \<noteq> B"
by blast
lemma in_un2:"\<lbrakk> x \<in> A \<union> B; x \<notin> A \<rbrakk> \<Longrightarrow> x \<in> B"
by simp
lemma diff_disj:"x \<notin> A \<Longrightarrow> A - {x} = A"
by auto
lemma in_diff:"\<lbrakk>x \<noteq> a; x \<in> A\<rbrakk> \<Longrightarrow> x \<in> A - {a}"
by simp
lemma in_diff1:"x \<in> A - {a} \<Longrightarrow> x \<noteq> a"
by simp
lemma sub_inserted1:"\<lbrakk>Y \<subseteq> insert a X; \<not> Y \<subseteq> X\<rbrakk> \<Longrightarrow> a \<notin> X \<and> a \<in> Y"
by blast
lemma sub_inserted2:"\<lbrakk>Y \<subseteq> insert a X; \<not> Y \<subseteq> X\<rbrakk> \<Longrightarrow> Y = (Y - {a}) \<union> {a}"
by blast
lemma insert_sub:"\<lbrakk> A \<subseteq> B; a \<in> B\<rbrakk> \<Longrightarrow> (insert a A) \<subseteq> B"
by blast
lemma insert_diff:"A \<subseteq> (insert b B) \<Longrightarrow> A - {b} \<subseteq> B"
by blast
lemma insert_inc1:"A \<subseteq> insert a A"
by blast
lemma insert_inc2:"a \<in> insert a A"
by simp
lemma nonempty_some:"A \<noteq> {} \<Longrightarrow> (SOME x. x \<in> A) \<in> A"
apply (frule nonempty_ex[of "A"])
apply (rule someI2_ex) apply simp+
done
lemma mem_family_sub_Un:"A \<in> C \<Longrightarrow> A \<subseteq> \<Union> C"
by blast
lemma sub_Union:"\<exists>X\<in>C. A \<subseteq> X \<Longrightarrow> A \<subseteq> \<Union> C"
by blast
lemma family_subset_Un_sub:"\<forall>A\<in>C. A \<subseteq> B \<Longrightarrow> \<Union> C \<subseteq> B"
by blast
lemma in_set_with_P:"P x \<Longrightarrow> x \<in> {y. P y}"
by blast
lemma sub_single:"\<lbrakk>A \<noteq> {}; A \<subseteq> {a}\<rbrakk> \<Longrightarrow> A = {a}"
by blast
lemma not_sub_single:"\<lbrakk>A \<noteq> {}; A \<noteq> {a}\<rbrakk> \<Longrightarrow> \<not> A \<subseteq> {a}"
by blast
lemma not_sub:"\<not> A \<subseteq> B \<Longrightarrow> \<exists>a. a\<in>A \<and> a \<notin> B"
by blast
section "Functions"
definition
cmp :: "['b \<Rightarrow> 'c, 'a \<Rightarrow> 'b] \<Rightarrow> ('a \<Rightarrow> 'c)" where
"cmp g f = (\<lambda>x. g (f x))"
definition
idmap :: "'a set \<Rightarrow> ('a \<Rightarrow> 'a)" where
"idmap A = (\<lambda>x\<in>A. x)"
definition
constmap :: "['a set, 'b set] \<Rightarrow> ('a \<Rightarrow>'b)" where
"constmap A B = (\<lambda>x\<in>A. SOME y. y \<in> B)"
definition
invfun :: "['a set, 'b set, 'a \<Rightarrow> 'b] \<Rightarrow> ('b \<Rightarrow> 'a)" where
"invfun A B (f :: 'a \<Rightarrow> 'b) = (\<lambda>y\<in>B.(SOME x. (x \<in> A \<and> f x = y)))"
abbreviation
INVFUN :: "['a \<Rightarrow> 'b, 'b set, 'a set] \<Rightarrow> ('b \<Rightarrow> 'a)" ("(3_\<inverse>\<^bsub>_,_\<^esub>)" [82,82,83]82) where
"f\<inverse>\<^bsub>B,A\<^esub> == invfun A B f"
lemma eq_fun:"\<lbrakk> f \<in> A \<rightarrow> B; f = g \<rbrakk> \<Longrightarrow> g \<in> A \<rightarrow> B"
by simp
lemma eq_fun_eq_val:" f = g \<Longrightarrow> f x = g x"
by simp
lemma eq_elems_eq_val:"x = y \<Longrightarrow> f x = f y"
by simp
lemma cmp_fun:"\<lbrakk>f \<in> A \<rightarrow> B; g \<in> B \<rightarrow> C \<rbrakk> \<Longrightarrow> cmp g f \<in> A \<rightarrow> C"
by (auto simp add:cmp_def)
lemma cmp_fun_image:"\<lbrakk>f \<in> A \<rightarrow> B; g \<in> B \<rightarrow> C \<rbrakk> \<Longrightarrow>
(cmp g f) ` A = g ` (f ` A)"
apply (rule equalityI)
apply (rule subsetI, simp add:image_def)
apply (erule bexE, simp add:cmp_def, blast)
apply (rule subsetI, simp add:image_def[of g])
apply (erule bexE, simp)
apply (simp add:image_def cmp_def)
apply blast
done
lemma cmp_fun_sub_image:"\<lbrakk>f \<in> A \<rightarrow> B; g \<in> B \<rightarrow> C; A1 \<subseteq> A\<rbrakk> \<Longrightarrow>
(cmp g f) ` A1 = g ` (f ` A1)"
apply (rule equalityI)
apply (rule subsetI, simp add:image_def)
apply (erule bexE, simp add:cmp_def, blast)
apply (rule subsetI, simp add:image_def[of g])
apply (erule bexE, simp)
apply (simp add:image_def cmp_def)
apply blast
done
lemma restrict_fun_eq:"\<forall>x\<in>A. f x = g x \<Longrightarrow> (\<lambda>x\<in>A. f x) = (\<lambda>x\<in>A. g x)"
apply (simp add:fun_eq_iff)
done
lemma funcset_mem: "\<lbrakk>f \<in> A \<rightarrow> B; x \<in> A\<rbrakk> \<Longrightarrow> f x \<in> B"
apply (simp add: Pi_def)
done
lemma img_subset:"f \<in> A \<rightarrow> B \<Longrightarrow> f ` A \<subseteq> B"
apply (rule subsetI)
apply (simp add:image_def, erule bexE, simp)
apply (simp add:funcset_mem)
done
lemma funcset_mem1:"\<lbrakk>\<forall>l\<in>A. f l \<in> B; x \<in> A\<rbrakk> \<Longrightarrow> f x \<in> B"
apply simp
done
lemma func_to_img:"f \<in> A \<rightarrow> B \<Longrightarrow> f \<in> A \<rightarrow> f ` A"
by (simp add:Pi_def)
lemma funcset_eq:"\<lbrakk> f \<in> extensional A; g \<in> extensional A; \<forall>x\<in>A. f x = g x \<rbrakk> \<Longrightarrow> f = g"
apply (simp add:extensionalityI)
done
lemma eq_funcs:"\<lbrakk>f \<in> A \<rightarrow> B; g \<in> A \<rightarrow> B; f = g; x \<in> A\<rbrakk> \<Longrightarrow> f x = g x"
by simp
lemma restriction_of_domain:"\<lbrakk> f \<in> A \<rightarrow> B; A1 \<subseteq> A \<rbrakk> \<Longrightarrow>
restrict f A1 \<in> A1 \<rightarrow> B"
by blast
lemma restrict_restrict:"\<lbrakk> restrict f A \<in> A \<rightarrow> B; A1 \<subseteq> A \<rbrakk> \<Longrightarrow>
restrict (restrict f A) A1 = restrict f A1"
apply (rule funcset_eq[of _ "A1"])
apply (simp add:restrict_def extensional_def)
apply (simp add:restrict_def extensional_def)
apply (rule ballI) apply simp
apply (simp add:subsetD)
done
lemma restr_restr_eq:"\<lbrakk> restrict f A \<in> A \<rightarrow> B; restrict f A = restrict g A;
A1 \<subseteq> A \<rbrakk> \<Longrightarrow> restrict f A1 = restrict g A1"
apply (subst restrict_restrict[THEN sym, of "f" "A" "B" "A1"], assumption+)
apply (simp add:restrict_restrict[THEN sym, of "g" "A" "B" "A1"])
done
lemma funcTr:"\<lbrakk> f \<in> A \<rightarrow> B; g \<in> A \<rightarrow> B; f = g; a \<in> A\<rbrakk> \<Longrightarrow> f a = g a"
apply simp
done
lemma funcTr1:"\<lbrakk>f = g; a \<in> A\<rbrakk> \<Longrightarrow> f a = g a"
apply simp
done
lemma restrictfun_im:"\<lbrakk> (restrict f A) \<in> A \<rightarrow> B; A1 \<subseteq> A \<rbrakk> \<Longrightarrow>
(restrict f A) ` A1 = f ` A1"
apply (subgoal_tac "\<forall>x\<in>A1. x \<in> A")
apply (simp add:image_def)
apply (rule ballI) apply (simp add:subsetD)
done
lemma mem_in_image:"\<lbrakk> f \<in> A \<rightarrow> B; a \<in> A\<rbrakk> \<Longrightarrow> f a \<in> f ` A "
apply (simp add:image_def)
apply blast
done
lemma mem_in_image1:"\<lbrakk> \<forall>l\<in>A. f l \<in> B; a \<in> A\<rbrakk> \<Longrightarrow> f a \<in> f ` A "
apply simp
done
lemma mem_in_image2:"a \<in> A \<Longrightarrow> f a \<in> f ` A"
apply simp
done
lemma mem_in_image3:"b \<in> f ` A \<Longrightarrow> \<exists>a \<in> A. b = f a"
by (simp add:image_def)
lemma elem_in_image2: "\<lbrakk> f \<in> A \<rightarrow> B; A1 \<subseteq> A; x \<in> A1\<rbrakk> \<Longrightarrow> f x \<in> f` A1"
apply (simp add:image_def)
apply blast
done
lemma funcs_nonempty:"\<lbrakk> A \<noteq> {}; B \<noteq> {} \<rbrakk> \<Longrightarrow> (A \<rightarrow> B) \<noteq> {}"
apply (subgoal_tac "constmap A B \<in> A \<rightarrow> B") apply (simp add:nonempty)
apply (simp add:Pi_def)
apply (rule allI) apply (rule impI)
apply (simp add:constmap_def)
apply (frule nonempty_ex[of "B"])
apply (rule someI2_ex) apply assumption+
done
lemma idmap_funcs: "idmap A \<in> A \<rightarrow> A"
apply (simp add:Pi_def restrict_def idmap_def)
done
lemma l_idmap_comp: "\<lbrakk>f \<in> extensional A; f \<in> A \<rightarrow> B\<rbrakk> \<Longrightarrow>
compose A (idmap B) f = f"
apply (rule funcset_eq[of _ "A"])
apply (simp add:compose_def)
apply assumption
apply (rule ballI)
apply (simp add:funcset_mem[of "f" "A" "B"] compose_def idmap_def)
done
lemma r_idmap_comp:"\<lbrakk>f \<in> extensional A; f \<in> A \<rightarrow> B\<rbrakk> \<Longrightarrow>
compose A f (idmap A) = f"
apply (rule funcset_eq[of _ "A"])
apply (simp add:compose_def)
apply assumption
apply (rule ballI)
apply (simp add:funcset_mem[of "f" "A" "B"] compose_def idmap_def)
done
lemma extend_fun: "\<lbrakk> f \<in> A \<rightarrow> B; B \<subseteq> B1 \<rbrakk> \<Longrightarrow> f \<in> A \<rightarrow> B1"
apply (simp add:Pi_def restrict_def)
apply (rule allI) apply (rule impI)
apply (simp add:subsetD)
done
lemma restrict_fun: "\<lbrakk> f \<in> A \<rightarrow> B; A1 \<subseteq> A \<rbrakk> \<Longrightarrow> restrict f A1 \<in> A1 \<rightarrow> B"
apply (simp add:Pi_def restrict_def)
apply (rule allI) apply (rule impI)
apply (simp add:subsetD)
done
lemma set_of_hom: "\<forall>x \<in> A. f x \<in> B \<Longrightarrow> restrict f A \<in> A \<rightarrow> B"
apply (simp add:Pi_def restrict_def)
done
lemma composition : "\<lbrakk> f \<in> A \<rightarrow> B; g \<in> B \<rightarrow> C\<rbrakk> \<Longrightarrow> (compose A g f) \<in> A \<rightarrow> C"
apply (simp add:Pi_def restrict_def compose_def)
done
lemma comp_assoc:"\<lbrakk>f \<in> A \<rightarrow> B; g \<in> B \<rightarrow> C; h \<in> C \<rightarrow> D \<rbrakk> \<Longrightarrow>
compose A h (compose A g f) = compose A (compose B h g) f"
apply (rule funcset_eq[of _ "A"])
apply (simp add:compose_def)
apply (simp add:compose_def)
apply (rule ballI)
apply (simp add:funcset_mem[of "f" "A" "B"] compose_def)
done
lemma restrictfun_inj: "\<lbrakk> inj_on f A; A1 \<subseteq> A \<rbrakk> \<Longrightarrow> inj_on (restrict f A1) A1"
apply (simp add:inj_on_def)
apply (simp add:subsetD)
done
lemma restrict_inj:"\<lbrakk>inj_on f A; A1 \<subseteq> A\<rbrakk> \<Longrightarrow> inj_on f A1"
apply (simp add:inj_on_def)
apply ((rule ballI)+, rule impI)
apply (frule_tac c = x in subsetD[of "A1" "A"], assumption+,
frule_tac c = y in subsetD[of "A1" "A"], assumption+)
apply simp
done
lemma injective:"\<lbrakk> inj_on f A; x \<in> A; y \<in> A; x \<noteq> y \<rbrakk> \<Longrightarrow> f x \<noteq> f y"
apply (rule contrapos_pp, simp+)
apply (simp add:inj_on_def)
done
lemma injective_iff:"\<lbrakk> inj_on f A; x \<in> A; y \<in> A\<rbrakk> \<Longrightarrow>
(x = y) = (f x = f y)"
apply (rule iffI, simp)
apply (rule contrapos_pp, simp+)
apply (frule injective[of "f" "A" "x" "y"], assumption+)
apply simp
done
lemma injfun_elim_image:"\<lbrakk>f \<in> A \<rightarrow> B; inj_on f A; x \<in> A\<rbrakk> \<Longrightarrow>
f ` (A - {x}) = (f ` A) - {f x}"
apply (rule equalityI)
apply (rule subsetI, simp add:image_def, erule bexE)
apply (simp, (erule conjE)+)
apply (rule contrapos_pp, simp+)
apply (erule disjE, simp add:inj_on_def, blast)
apply (frule_tac x = xaa and y = x in injective[of f A ], assumption+,
blast)
apply (rule subsetI, simp add:image_def)
apply (rule contrapos_pp, simp+, erule conjE, erule bexE)
apply (frule_tac x = xaa in bspec)
apply (simp, rule contrapos_pp, simp+)
done
lemma cmp_inj:"\<lbrakk>f \<in> A \<rightarrow> B; g \<in> B \<rightarrow> C; inj_on f A; inj_on g B \<rbrakk> \<Longrightarrow>
inj_on (cmp g f) A"
apply (simp add:inj_on_def [of "cmp g f"])
apply (rule ballI)+
apply (simp add:cmp_def) apply (rule impI)
apply (subgoal_tac "f x = f y")
apply (simp add:inj_on_def [of "f"])
apply (frule_tac x = x in funcset_mem [of "f" "A" "B"], assumption+)
apply (frule_tac x = y in funcset_mem [of "f" "A" "B"], assumption+)
apply (simp add:inj_on_def [of "g"])
done
lemma cmp_assoc:"\<lbrakk>f \<in> A \<rightarrow> B; g \<in> B \<rightarrow> C; h \<in> C \<rightarrow> D; x \<in> A\<rbrakk> \<Longrightarrow>
(cmp h (cmp g f)) x = (cmp (cmp h g) f) x"
apply (simp add:cmp_def)
done
lemma bivar_fun: "\<lbrakk> f \<in> A \<rightarrow> (B \<rightarrow> C); a \<in> A \<rbrakk> \<Longrightarrow> f a \<in> B \<rightarrow> C"
by (simp add:Pi_def)
lemma bivar_fun_mem: "\<lbrakk> f \<in> A \<rightarrow> (B \<rightarrow> C); a \<in> A; b \<in> B \<rbrakk> \<Longrightarrow> f a b \<in> C"
apply (frule funcset_mem[of "f" "A" "B \<rightarrow> C"], assumption+)
apply (rule funcset_mem[of "f a" "B" "C"], assumption+)
done
lemma bivar_func_eq:"\<lbrakk>\<forall>a\<in>A. \<forall>b\<in>B. f a b = g a b \<rbrakk> \<Longrightarrow>
(\<lambda>x\<in>A. \<lambda>y\<in>B. f x y) = (\<lambda>x\<in>A. \<lambda>y\<in>B. g x y)"
apply (subgoal_tac "\<forall>x\<in>A. (\<lambda>y\<in>B. f x y) = (\<lambda>y\<in>B. g x y)")
apply (rule funcset_eq [of _ "A"])
apply (simp add:extensional_def restrict_def)
apply (simp add:extensional_def restrict_def)
apply (rule ballI)
apply simp
apply (rule ballI)
apply (rule funcset_eq [of _ "B"])
apply (simp add:restrict_def extensional_def)
apply (simp add:restrict_def extensional_def)
apply (rule ballI) apply simp
done
lemma set_image: "\<lbrakk> f \<in> A \<rightarrow> B; A1 \<subseteq> A; A2 \<subseteq> A \<rbrakk> \<Longrightarrow>
f`(A1 \<inter> A2) \<subseteq> (f` A1) \<inter> (f` A2)"
apply (simp add: image_def)
apply auto
done
lemma image_sub: "\<lbrakk>f \<in> A \<rightarrow> B; A1 \<subseteq> A \<rbrakk> \<Longrightarrow> (f`A1) \<subseteq> B"
by (auto simp add:image_def)
lemma image_sub0: "f \<in> A \<rightarrow> B \<Longrightarrow> (f`A) \<subseteq> B"
by (simp add:image_sub[of "f" "A" "B" "A"])
lemma image_nonempty:"\<lbrakk>f \<in> A \<rightarrow> B; A1 \<subseteq> A; A1 \<noteq> {} \<rbrakk> \<Longrightarrow> f ` A1 \<noteq> {}"
by (frule nonempty_some[of "A1"],
frule elem_in_image2[of "f" "A" "B" "A1" "SOME x. x \<in> A1"],
assumption+, simp add:nonempty)
lemma im_set_mono: "\<lbrakk>f \<in>A \<rightarrow> B; A1 \<subseteq> A2; A2 \<subseteq> A \<rbrakk> \<Longrightarrow> (f ` A1) \<subseteq> (f ` A2)"
apply (simp add:image_def)
apply auto
done
lemma im_set_un:"\<lbrakk> f\<in>A \<rightarrow> B; A1 \<subseteq> A; A2 \<subseteq> A \<rbrakk> \<Longrightarrow>
f`(A1 \<union> A2) = (f`A1) \<union> (f`A2)"
apply (simp add:image_def)
apply auto
done
lemma im_set_un1:"\<lbrakk>\<forall>l\<in>A. f l \<in> B; A = A1 \<union> A2\<rbrakk> \<Longrightarrow>
f `(A1 \<union> A2) = f `(A1) \<union> f `(A2)"
apply (rule equalityI,
rule subsetI, simp add:image_def, erule bexE)
apply blast
apply (rule subsetI,
simp add:image_def, erule disjE, erule bexE, blast)
apply (erule bexE) apply blast
done
lemma im_set_un2:"A = A1 \<union> A2 \<Longrightarrow> f `A = f `(A1) \<union> f `(A2)"
apply (rule equalityI,
rule subsetI, simp add:image_def, erule bexE, blast)
apply (rule subsetI,
simp add:image_def, erule disjE, erule bexE, blast, erule bexE, blast)
done
definition
invim::"['a \<Rightarrow> 'b, 'a set, 'b set] \<Rightarrow> 'a set" where
"invim f A B = {x. x\<in>A \<and> f x \<in> B}"
lemma invim: "\<lbrakk> f:A \<rightarrow> B; B1 \<subseteq> B \<rbrakk> \<Longrightarrow> invim f A B1 \<subseteq> A"
by (auto simp add:invim_def)
lemma setim_cmpfn: "\<lbrakk> f:A \<rightarrow> B; g:B \<rightarrow> C; A1 \<subseteq> A \<rbrakk> \<Longrightarrow>
(compose A g f)` A1 = g`(f` A1)"
apply (simp add:image_def compose_def)
apply auto
done
definition
surj_to :: "['a \<Rightarrow> 'b, 'a set, 'b set] \<Rightarrow> bool" where
"surj_to f A B \<longleftrightarrow> f`A = B"
lemma surj_to_test:"\<lbrakk> f \<in> A \<rightarrow> B; \<forall>b\<in>B. \<exists>a\<in>A. f a = b \<rbrakk> \<Longrightarrow>
surj_to f A B"
by (auto simp add:surj_to_def image_def)
lemma surj_to_image:"f \<in> A \<rightarrow> B \<Longrightarrow> surj_to f A (f ` A)"
apply (rule surj_to_test[of "f" "A" "f ` A"])
apply (rule func_to_img[of "f" "A" "B"], assumption)
apply (rule ballI, simp add:image_def, erule bexE, simp)
apply blast
done
lemma surj_to_el:"\<lbrakk> f \<in> A \<rightarrow> B; surj_to f A B \<rbrakk> \<Longrightarrow> \<forall>b\<in>B. \<exists>a\<in>A. f a = b"
apply (simp add:surj_to_def image_def)
apply auto
done
lemma surj_to_el1:"\<lbrakk> f \<in> A \<rightarrow> B; surj_to f A B; b\<in>B\<rbrakk> \<Longrightarrow> \<exists>a\<in>A. f a = b"
apply (simp add:surj_to_el)
done
lemma surj_to_el2:"\<lbrakk>surj_to f A B; b \<in> B\<rbrakk> \<Longrightarrow> \<exists>a\<in>A. f a = b"
apply (simp add:surj_to_def image_def)
apply (frule sym, thin_tac "{y. \<exists>x\<in>A. y = f x} = B", simp)
apply (erule bexE, simp, blast)
done
lemma compose_surj: "\<lbrakk>f:A \<rightarrow> B; surj_to f A B; g : B \<rightarrow> C; surj_to g B C \<rbrakk>
\<Longrightarrow> surj_to (compose A g f) A C "
apply (simp add:surj_to_def compose_def image_def)
apply auto
done
lemma cmp_surj: "\<lbrakk>f:A \<rightarrow> B; surj_to f A B; g : B \<rightarrow> C; surj_to g B C \<rbrakk>
\<Longrightarrow> surj_to (cmp g f) A C "
apply (rule surj_to_test, simp add:cmp_fun)
apply (rule ballI, simp add:surj_to_def [of "g"], frule sym,
thin_tac "g ` B = C", simp, simp add:image_def,
simp add:cmp_def)
apply auto
apply (simp add:surj_to_def, frule sym,
thin_tac " f ` A = B", simp add:image_def)
apply auto
done
lemma inj_onTr0:"\<lbrakk> f \<in> A \<rightarrow> B; x \<in> A; y \<in> A; inj_on f A; f x = f y\<rbrakk> \<Longrightarrow> x = y"
apply (simp add:inj_on_def)
done
lemma inj_onTr1:"\<lbrakk>inj_on f A; x \<in> A; y \<in> A; f x = f y\<rbrakk> \<Longrightarrow> x = y"
apply (simp add:inj_on_def)
done
lemma inj_onTr2:"\<lbrakk>inj_on f A; x \<in> A; y \<in> A; f x \<noteq> f y\<rbrakk> \<Longrightarrow> x \<noteq> y"
apply (rule contrapos_pp, simp+)
done (* the premise inj_on could be weakened to any condition ensuring
        that f is a function *)
lemma comp_inj: "\<lbrakk> f \<in> A \<rightarrow> B; inj_on f A; g \<in> B \<rightarrow> C; inj_on g B \<rbrakk>
\<Longrightarrow> inj_on (compose A g f) A "
apply (simp add:inj_on_def [of "compose A g f"])
apply (rule ballI)+ apply (rule impI)
apply (rule inj_onTr0 [of "f" "A" "B"], assumption+)
apply (frule funcset_mem [of "f" "A" "B" _], assumption+)
apply (rotate_tac -3)
apply (frule funcset_mem [of "f" "A" "B" _], assumption+)
apply (rule inj_onTr0 [of "g" "B" "C" _], assumption+)
apply (simp add:compose_def)
done
lemma cmp_inj_1: "\<lbrakk> f \<in> A \<rightarrow> B; inj_on f A; g \<in> B \<rightarrow> C; inj_on g B \<rbrakk>
\<Longrightarrow> inj_on (cmp g f) A "
apply (simp add:inj_on_def [of "cmp g f"])
apply (rule ballI)+ apply (rule impI)
apply (simp add:cmp_def)
apply (frule_tac x = x in funcset_mem [of "f" "A" "B"], assumption+)
apply (frule_tac x = y in funcset_mem [of "f" "A" "B"], assumption+)
apply (frule_tac x = "f x" and y = "f y" in inj_onTr1 [of "g" "B"],
assumption+)
apply (rule_tac x = x and y = y in inj_onTr1 [of "f" "A"], assumption+)
done
lemma cmp_inj_2: "\<lbrakk>\<forall>l\<in>A. f l \<in> B; inj_on f A; \<forall>k\<in>B. g k \<in> C; inj_on g B \<rbrakk>
\<Longrightarrow> inj_on (cmp g f) A "
apply (simp add:inj_on_def [of "cmp g f"])
apply (rule ballI)+ apply (rule impI)
apply (simp add:cmp_def)
apply (frule_tac x = x in funcset_mem1 [of "A" "f" "B"], assumption+)
apply (frule_tac x = y in funcset_mem1 [of "A" "f" "B"], assumption+)
apply (frule_tac x = "f x" and y = "f y" in inj_onTr1 [of "g" "B"],
assumption+)
apply (rule_tac x = x and y = y in inj_onTr1 [of "f" "A"], assumption+)
done
lemma invfun_mem:"\<lbrakk> f \<in> A \<rightarrow> B; inj_on f A; surj_to f A B; b \<in> B \<rbrakk>
\<Longrightarrow> (invfun A B f) b \<in> A"
apply (simp add:invfun_def)
apply (simp add:surj_to_def image_def) apply (frule sym)
apply (thin_tac "{y. \<exists>x\<in>A. y = f x} = B") apply simp
apply (thin_tac "B = {y. \<exists>x\<in>A. y = f x}") apply auto
apply (rule someI2_ex)
apply blast apply simp
done
lemma inv_func:"\<lbrakk> f \<in> A \<rightarrow> B; inj_on f A; surj_to f A B\<rbrakk>
\<Longrightarrow> (invfun A B f) \<in> B \<rightarrow> A"
apply (simp add:Pi_def)
apply (rule allI) apply (rule impI)
apply (rule invfun_mem) apply (rule funcsetI)
apply simp+
done
lemma invfun_r:"\<lbrakk> f\<in>A \<rightarrow> B; inj_on f A; surj_to f A B; b \<in> B \<rbrakk>
\<Longrightarrow> f ((invfun A B f) b) = b"
apply (simp add:invfun_def)
apply (rule someI2_ex)
apply (simp add:surj_to_def image_def)
apply auto
done
lemma invfun_l:"\<lbrakk>f \<in> A \<rightarrow> B; inj_on f A; surj_to f A B; a \<in> A\<rbrakk>
\<Longrightarrow> (invfun A B f) (f a) = a"
apply (simp add:invfun_def Pi_def restrict_def)
apply (rule someI2_ex) apply auto
apply (simp add:inj_on_def)
done
lemma invfun_inj:"\<lbrakk>f \<in> A \<rightarrow> B; inj_on f A; surj_to f A B\<rbrakk>
\<Longrightarrow> inj_on (invfun A B f) B"
apply (simp add:inj_on_def [of "invfun A B f" "B"] )
apply auto
apply (frule_tac b = y in invfun_r [of "f" "A" "B"], assumption+)
apply (frule_tac b = x in invfun_r [of "f" "A" "B"], assumption+)
apply simp
done
lemma invfun_surj:"\<lbrakk>f \<in> A \<rightarrow> B; inj_on f A; surj_to f A B\<rbrakk>
\<Longrightarrow> surj_to (invfun A B f) B A "
apply (simp add:surj_to_def [of "invfun A B f" "B" "A"] image_def)
apply (rule equalityI)
apply (rule subsetI) apply (simp add:CollectI)
apply auto
apply (simp add:invfun_mem)
apply (frule funcset_mem [of "f" "A" "B"], assumption+)
apply (frule_tac t = x in invfun_l [of "f" "A" "B", THEN sym], assumption+)
apply auto
done
definition
bij_to :: "['a \<Rightarrow> 'b, 'a set, 'b set] \<Rightarrow> bool" where
"bij_to f A B \<longleftrightarrow> surj_to f A B \<and> inj_on f A"
lemma idmap_bij:"bij_to (idmap A) A A"
apply (simp add:bij_to_def)
apply (rule conjI)
apply (simp add:surj_to_def, simp add:image_def, simp add:idmap_def)
apply (simp add:inj_on_def, simp add:idmap_def)
done
lemma bij_invfun:"\<lbrakk>f \<in> A \<rightarrow> B; bij_to f A B\<rbrakk> \<Longrightarrow>
bij_to (invfun A B f) B A"
apply (simp add:bij_to_def)
apply (simp add:invfun_inj invfun_surj)
done
lemma l_inv_invfun:"\<lbrakk> f \<in> A \<rightarrow> B; inj_on f A; surj_to f A B\<rbrakk>
\<Longrightarrow> compose A (invfun A B f) f = idmap A"
apply (rule ext)
apply (simp add:compose_def idmap_def)
apply (rule impI)
apply (simp add:invfun_l)
done
lemma invfun_mem1:"\<lbrakk>f \<in> A \<rightarrow> B; bij_to f A B; b \<in> B\<rbrakk> \<Longrightarrow>
(invfun A B f) b \<in> A"
apply (simp add:bij_to_def, erule conjE)
apply (simp add:invfun_mem)
done
lemma invfun_r1:"\<lbrakk> f\<in>A \<rightarrow> B; bij_to f A B; b \<in> B \<rbrakk>
\<Longrightarrow> f ((invfun A B f) b) = b"
apply (simp add:bij_to_def, erule conjE)
apply (rule invfun_r, assumption+)
done
lemma invfun_l1:"\<lbrakk>f \<in> A \<rightarrow> B; bij_to f A B; a \<in> A\<rbrakk>
\<Longrightarrow> (invfun A B f) (f a) = a"
apply (simp add:bij_to_def, erule conjE)
apply (rule invfun_l, assumption+)
done
lemma compos_invfun_r:"\<lbrakk>f \<in> A \<rightarrow> B; bij_to f A B; g \<in> A \<rightarrow> C; h \<in> B \<rightarrow> C;
g \<in> extensional A; compose B g (invfun A B f) = h\<rbrakk> \<Longrightarrow>
g = compose A h f"
apply (rule funcset_eq[of g A "compose A h f"], assumption)
apply (simp add:compose_def extensional_def)
apply (rule ballI)
apply (frule sym, thin_tac "compose B g (invfun A B f) = h", simp)
apply (simp add:compose_def Pi_def invfun_l1)
done
lemma compos_invfun_l:"\<lbrakk>f \<in> A \<rightarrow> B; bij_to f A B; g \<in> C \<rightarrow> B; h \<in> C \<rightarrow> A;
compose C (invfun A B f) g = h; g \<in> extensional C \<rbrakk> \<Longrightarrow>
g = compose C f h"
apply (rule funcset_eq[of g C "compose C f h"], assumption)
apply (simp add:compose_def extensional_def)
apply (rule ballI)
apply (frule sym, thin_tac "compose C (invfun A B f) g = h",
simp add:compose_def)
apply (frule_tac x = x in funcset_mem[of g C B], assumption)
apply (simp add:invfun_r1)
done
lemma invfun_set:"\<lbrakk>f \<in> A \<rightarrow> B; bij_to f A B; C \<subseteq> B\<rbrakk> \<Longrightarrow>
f ` ((invfun A B f)` C) = C"
apply (rule equalityI)
apply (rule subsetI)
apply (simp add:image_def, erule exE,
erule conjE, erule bexE, simp,
frule_tac c = xb in subsetD[of "C" "B"], assumption+)
apply (simp add:bij_to_def, erule conjE,
simp add:invfun_r)
apply (rule subsetI, simp add:image_def)
apply (frule_tac c = x in subsetD[of "C" "B"], assumption+,
simp add:bij_to_def, erule conjE,
frule_tac b = x in invfun_r[of "f" "A" "B"], assumption+)
apply (frule sym, thin_tac "f (invfun A B f x) = x")
apply blast
done
lemma compos_bij:"\<lbrakk>f \<in> A \<rightarrow> B; bij_to f A B; g \<in> B \<rightarrow> C; bij_to g B C\<rbrakk> \<Longrightarrow>
bij_to (compose A g f) A C"
apply (simp add:bij_to_def, (erule conjE)+)
apply (simp add:comp_inj[of "f" "A" "B" "g" "C"])
apply (simp add:compose_surj)
done
section "Nsets"
(* "nset i j" is the set of natural numbers from i through j.  The set of
   natural numbers from 0 through n appears below as {i. i \<le> n}; the
   lemma names still carry the older prefix "Nset". *)
definition
nset :: "[nat, nat] \<Rightarrow> (nat) set" where
"nset i j = {k. i \<le> k \<and> k \<le> j}"
definition
slide :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
"slide i j == i + j"
definition
sliden :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
"sliden i j == j - i"
definition
jointfun :: "[nat, nat \<Rightarrow> 'a, nat, nat \<Rightarrow> 'a] \<Rightarrow> (nat \<Rightarrow> 'a)" where
"(jointfun n f m g) = (\<lambda>i. if i \<le> n then f i else g ((sliden (Suc n)) i))"
definition
skip :: "nat \<Rightarrow> (nat \<Rightarrow> nat)" where
"skip i = (\<lambda>x. (if i = 0 then Suc x else
(if x \<in> {j. j \<le> (i - Suc 0)} then x else Suc x)))"
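(* Added examples: skip i enumerates the naturals while omitting the
   value i (cf. skip_il_neq_i below). *)
lemma skip_ex1:"skip 0 l = Suc l"
by (simp add:skip_def)

lemma skip_ex2:"skip (Suc 0) 0 = 0"
by (simp add:skip_def)

lemma skip_ex3:"skip (Suc 0) (Suc l) = Suc (Suc l)"
by (simp add:skip_def)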
lemma nat_pos:"0 \<le> (l::nat)"
apply simp
done
lemma Suc_pos:"Suc k \<le> r \<Longrightarrow> 0 < r"
apply simp
done
lemma nat_pos2:"(k::nat) < r \<Longrightarrow> 0 < r"
apply simp
done
lemma eq_le_not:"\<lbrakk>(a::nat) \<le> b; \<not> a < b \<rbrakk> \<Longrightarrow> a = b"
apply auto
done
lemma im_of_constmap:"(constmap {0} {a}) ` {0} = {a}"
apply (simp add:constmap_def)
done
lemma noteq_le_less:"\<lbrakk> m \<le> (n::nat); m \<noteq> n \<rbrakk> \<Longrightarrow> m < n"
apply auto
done
lemma nat_not_le_less:"(\<not> (n::nat) \<le> m) = (m < n)"
by (simp add: not_le)
lemma self_le:"(n::nat) \<le> n"
apply simp
done
lemma n_less_Suc:"(n::nat) < Suc n"
apply simp
done
lemma less_diff_Suc:"i < (n::nat) \<Longrightarrow> n - (Suc i) = (n - i) - (Suc 0)"
apply auto
done
lemma less_pre_n:"0 < n \<Longrightarrow> n - Suc 0 < n"
apply simp
done
lemma Nset_inc_0:"(0::nat) \<in> {i. i \<le> n}"
apply simp
done
lemma Nset_1:"{i. i \<le> Suc 0} = {0, Suc 0}"
apply auto
done
lemma Nset_1_1:"(k \<le> Suc 0) = (k = 0 \<or> k = Suc 0)"
apply (rule iffI)
apply (case_tac "k = 0", simp+)
apply (erule disjE, simp+)
done
lemma Nset_2:"{i, j} = {j, i}"
apply auto
done
lemma Nset_nonempty:"{i. i \<le> (n::nat)} \<noteq> {}"
apply (subgoal_tac "0 \<in> {i. i \<le> n}")
apply (rule nonempty[of 0], assumption)
apply simp
done
lemma Nset_le:"x \<in> {i. i \<le> n} \<Longrightarrow> x \<le> n"
apply simp
done
lemma Nset_pre:"\<lbrakk> (x::nat) \<in> {i. i \<le> (Suc n)}; x \<noteq> Suc n \<rbrakk> \<Longrightarrow> x \<in> {i. i \<le> n}"
apply simp
done
lemma Nset_pre1:"{i. i \<le> (Suc n)} - {Suc n} = {i. i \<le> n}"
apply (rule equalityI)
apply (rule subsetI, simp)+
done
lemma le_Suc_mem_Nsetn:"x \<le> Suc n \<Longrightarrow> x - Suc 0 \<in> {i. i \<le> n}"
apply (frule diff_le_mono[of x "Suc n" "Suc 0"],
thin_tac "x \<le> Suc n", simp)
done
lemma le_Suc_diff_le:"x \<le> Suc n \<Longrightarrow> x - Suc 0 \<le> n"
apply (frule diff_le_mono[of x "Suc n" "Suc 0"],
thin_tac "x \<le> Suc n", simp)
done
lemma Nset_not_pre:"\<lbrakk> x \<notin> {i. i \<le> n}; x \<in> {i. i \<le> (Suc n)}\<rbrakk> \<Longrightarrow> x = Suc n"
by simp
lemma mem_of_Nset:"x \<le> (n::nat) \<Longrightarrow> x \<in> {i. i \<le> n}"
apply simp
done
lemma less_mem_of_Nset:"x < (n::nat) \<Longrightarrow> x \<in> {i. i \<le> n}"
apply (frule less_imp_le [of "x" "n"])
apply simp
done
lemma Nset_nset:"{i. i \<le> (Suc (n + m))} = {i. i \<le> n} \<union>
nset (Suc n) (Suc (n + m))"
apply (rule equalityI)
apply (rule subsetI)
apply (simp add:nset_def)
apply (auto simp add: nset_def)
done
lemma Nset_nset_1:"\<lbrakk>0 < n; i < n\<rbrakk> \<Longrightarrow> {j. j \<le> n} = {j. j \<le> i} \<union>
nset (Suc i) n"
apply auto
apply (simp add:nset_def)
apply (simp add:nset_def)
done
lemma Nset_img0:"\<lbrakk>f \<in> {j. j \<le> Suc n} \<rightarrow> B; (f (Suc n)) \<in> f ` {j. j \<le> n}\<rbrakk> \<Longrightarrow>
f ` {j. j \<le> Suc n} = f ` {j. j \<le> n}"
by (auto simp add: le_Suc_eq)
lemma Nset_img:"f \<in> {j. j \<le> Suc n} \<rightarrow> B \<Longrightarrow>
insert (f (Suc n)) (f ` {j. j \<le> n}) = f ` {j. j \<le> Suc n}"
by (auto elim: le_SucE)
primrec nasc_seq :: "[nat set, nat, nat] \<Rightarrow> nat"
where
dec_seq_0: "nasc_seq A a 0 = a"
| dec_seq_Suc: "nasc_seq A a (Suc n) =
(SOME b. ((b \<in> A) \<and> (nasc_seq A a n) < b))"
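(* nasc_seq A a picks, by choice, a sequence inside A; if A has no
   greatest element it is strictly increasing (nasc_seqn below), which
   yields the contradiction in ubs_ex_n_max. *)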
lemma nasc_seq_mem:"\<lbrakk>(a::nat) \<in> A; \<not> (\<exists>m. m\<in>A \<and> (\<forall>x\<in>A. x \<le> m))\<rbrakk> \<Longrightarrow>
(nasc_seq A a n) \<in> A"
apply (induct n)
apply (simp_all add: not_le)
apply (subgoal_tac "\<exists>x\<in>A. (nasc_seq A a n) < x") prefer 2 apply blast
apply (thin_tac "\<forall>m. m \<in> A \<longrightarrow> (\<exists>x\<in>A. m < x)",
rule someI2_ex, blast, simp)
done
lemma nasc_seqn:"\<lbrakk>(a::nat) \<in> A; \<not> (\<exists>m. m\<in>A \<and> (\<forall>x\<in>A. x \<le> m))\<rbrakk> \<Longrightarrow>
(nasc_seq A a n) < (nasc_seq A a (Suc n))"
apply (simp,
frule nasc_seq_mem [of "a" "A" "n"], simp)
apply (simp add: not_le,
subgoal_tac "\<exists>x\<in>A. (nasc_seq A a n) < x") prefer 2 apply simp
apply (thin_tac "\<forall>m. m \<in> A \<longrightarrow> (\<exists>x\<in>A. m < x)",
rule someI2_ex, blast, simp)
done
lemma nasc_seqn1:"\<lbrakk>(a::nat) \<in> A; \<not> (\<exists>m. m\<in>A \<and> (\<forall>x\<in>A. x \<le> m))\<rbrakk> \<Longrightarrow>
Suc (nasc_seq A a n) \<le> (nasc_seq A a (Suc n))"
apply (frule nasc_seqn [of "a" "A" "n"], assumption+)
apply simp
done
lemma ubs_ex_n_maxTr:"\<lbrakk>(a::nat) \<in> A; \<not> (\<exists>m. m\<in>A \<and> (\<forall>x\<in>A. x \<le> m))\<rbrakk>
\<Longrightarrow> (a + n) \<le> (nasc_seq A a n)"
apply (induct_tac n)
apply simp
apply (frule_tac n = n in nasc_seqn1[of "a" "A"], assumption+,
subgoal_tac "Suc (a + n) \<le> Suc (nasc_seq A a n)",
frule_tac i = "Suc (a + n)" and j = "Suc (nasc_seq A a n)" and
k = "nasc_seq A a (Suc n)" in le_trans, assumption+,
simp, thin_tac "Suc (nasc_seq A a n) \<le> nasc_seq A a (Suc n)",
subst Suc_le_mono, assumption+)
done
lemma ubs_ex_n_max:"\<lbrakk>A \<noteq> {}; A \<subseteq> {i. i \<le> (n::nat)}\<rbrakk> \<Longrightarrow>
\<exists>!m. m\<in>A \<and> (\<forall>x\<in>A. x \<le> m)"
apply (frule nonempty_ex[of "A"])
apply (thin_tac "A \<noteq> {}")
apply (erule exE)
apply (rename_tac a)
apply (rule ex_ex1I)
prefer 2
apply (erule conjE)+
apply (frule_tac x = y in bspec, assumption+,
thin_tac "\<forall>x\<in>A. x \<le> m",
frule_tac x = m in bspec, assumption+,
thin_tac "\<forall>x\<in>A. x \<le> y", simp)
apply (rule contrapos_pp, simp+)
apply (frule_tac a = a and A = A and n = "n + 1" in ubs_ex_n_maxTr, simp)
apply (frule_tac a = a in nasc_seq_mem[of _ "A" "n + 1"], simp)
apply (frule_tac c = "nasc_seq A a (n + 1)" in subsetD[of "A" "{i. i \<le> n}"],
assumption+, simp)
done
definition
n_max :: "nat set \<Rightarrow> nat" where
"n_max A = (THE m. m \<in> A \<and> (\<forall>x\<in>A. x \<le> m))"
lemma n_max:"\<lbrakk>A \<subseteq> {i. i \<le> (n::nat)}; A \<noteq> {}\<rbrakk> \<Longrightarrow>
(n_max A) \<in> A \<and> (\<forall>x\<in>A. x \<le> (n_max A))"
apply (simp add:n_max_def)
apply (frule ubs_ex_n_max[of "A" "n"], assumption)
apply (rule theI')
apply assumption
done
lemma n_max_eq_sets:"\<lbrakk>A = B; A \<noteq> {}; \<exists>n. A \<subseteq> {j. j \<le> n}\<rbrakk> \<Longrightarrow>
n_max A = n_max B"
by simp
(* n_max A is meaningful only under the conditions A \<noteq> {} and \<exists>n. A \<subseteq> {j. j \<le> n}. *)
lemma skip_mem:"l \<in> {i. i \<le> n} \<Longrightarrow> (skip i l) \<in> {i. i \<le> (Suc n)}"
apply (case_tac "i = 0")
apply (simp add:skip_def)
apply (simp)+
apply (simp add:skip_def)
done
lemma skip_fun:"(skip i) \<in> {i. i \<le> n} \<rightarrow> {i. i \<le> (Suc n)}"
apply (rule Pi_I)
apply (rule skip_mem, assumption)
done
lemma skip_im_Tr0:"x \<in> {i. i \<le> n} \<Longrightarrow> skip 0 x = Suc x"
apply (simp add:skip_def)
done
lemma skip_im_Tr0_1:"0 < y \<Longrightarrow> skip 0 (y - Suc 0) = y"
apply (simp add:skip_def)
done
lemma skip_im_Tr1:"\<lbrakk> i \<in> {i. i \<le> (Suc n)}; 0 < i; x \<le> i - Suc 0 \<rbrakk> \<Longrightarrow>
skip i x = x"
by (simp add:skip_def)
lemma skip_im_Tr1_1:"\<lbrakk> i \<in> {i. i \<le> (Suc n)}; 0 < i; x < i\<rbrakk> \<Longrightarrow>
skip i x = x"
apply (frule less_le_diff[of x i])
apply (simp add:skip_def)
done
lemma skip_im_Tr1_2:"\<lbrakk> i \<le> (Suc n); x < i\<rbrakk> \<Longrightarrow> skip i x = x"
apply (rule skip_im_Tr1_1[of i n x], simp+)
done
lemma skip_im_Tr2:"\<lbrakk> 0 < i; i \<in> {i. i \<le> (Suc n)}; i \<le> x\<rbrakk> \<Longrightarrow>
skip i x = Suc x"
by (simp add:skip_def)
lemma skip_im_Tr2_1:"\<lbrakk>i \<in> {i. i \<le> (Suc n)}; i \<le> x\<rbrakk> \<Longrightarrow>
skip i x = Suc x"
apply (case_tac "i = 0")
apply (simp add:skip_def)
apply (simp, rule skip_im_Tr2, assumption+, simp+)
done
lemma skip_im_Tr3:"x \<in> {i. i \<le> n} \<Longrightarrow> skip (Suc n) x = x"
apply (simp add:skip_def)
done
lemma skip_im_Tr4:"\<lbrakk>x \<le> Suc n; 0 < x\<rbrakk> \<Longrightarrow> x - Suc 0 \<le> n"
apply (simp add:Suc_le_mono [of "x - Suc 0" "n", THEN sym])
done
lemma skip_fun_im:"i \<in> {j. j \<le> (Suc n)} \<Longrightarrow>
(skip i) ` {j. j \<le> n} = ({j. j \<le> (Suc n)} - {i})"
apply (rule equalityI)
apply (rule subsetI)
apply (case_tac "i = 0", simp)
apply (simp add:image_def, erule exE, erule conjE)
apply (cut_tac x = xa in skip_im_Tr0[of _ n], simp, simp)
apply (simp add:image_def, erule exE, erule conjE, simp)
apply (case_tac "xa < i")
apply (frule_tac x = xa in skip_im_Tr1_2[of i n], simp+)
apply (cut_tac m1 = xa and n1 = i in nat_not_le_less[THEN sym], simp)
apply (cut_tac x = xa and n = n in skip_im_Tr2_1[of i], simp+)
apply (rule subsetI, simp, erule conjE)
apply (cut_tac x = x and y = i in less_linear, simp)
apply (erule disjE)
apply (simp add:image_def)
apply (frule_tac x = x in skip_im_Tr1_2[of i n], assumption,
frule_tac x = x and y = i and z = "Suc n" in less_le_trans,
assumption+,
frule_tac m = x and n = "Suc n" in Suc_leI,
simp only:Suc_le_mono,
frule sym, thin_tac "skip i x = x", blast)
apply (cut_tac x = "x - Suc 0" in skip_im_Tr2_1[of i n],
simp, simp add:less_le_diff)
apply (cut_tac x = i and n = x in less_le_diff, assumption,
simp add:image_def)
apply (frule_tac m = x and n = "Suc n" and l = "Suc 0" in diff_le_mono,
simp)
apply (frule sym, thin_tac "skip i (x - Suc 0) = x", blast)
done
lemma skip_fun_im1:"\<lbrakk>i \<in> {j. j \<le> (Suc n)}; x \<in> {j. j \<le> n}\<rbrakk> \<Longrightarrow>
(skip i) x \<in> ({j. j \<le> (Suc n)} - {i})"
by (subst skip_fun_im[THEN sym], assumption,
simp add:image_def, blast)
lemma skip_id:"l < i \<Longrightarrow> skip i l = l"
apply (simp add:skip_def )
done
lemma Suc_neq:"\<lbrakk>0 < i; i - Suc 0 < l\<rbrakk> \<Longrightarrow> i \<noteq> Suc l"
by (rule contrapos_pp, simp+)
lemma skip_il_neq_i:"skip i l \<noteq> i"
apply (auto simp add:skip_def)
done
lemma skip_inj:"\<lbrakk>i \<in> {k. k \<le> n}; j \<in> {k. k \<le> n}; i \<noteq> j\<rbrakk> \<Longrightarrow>
skip k i \<noteq> skip k j"
apply (simp add:skip_def)
done
lemma le_imp_add_int:" i \<le> (j::nat) \<Longrightarrow> \<exists>k. j = i + k"
apply (case_tac "i = j")
apply simp
apply (frule le_imp_less_or_eq) apply (thin_tac "i \<le> j")
apply simp
apply (insert less_imp_add_positive [of "i" "j"])
apply simp
apply blast
done
lemma jointfun_hom0:"\<lbrakk> f \<in> {j. j \<le> n} \<rightarrow> A; g \<in> {k. k \<le> m} \<rightarrow> B \<rbrakk> \<Longrightarrow>
(jointfun n f m g) \<in> {l. l \<le> (Suc (n + m))} \<rightarrow> (A \<union> B)"
by (simp add:jointfun_def sliden_def Pi_def)
lemma jointfun_mem:"\<lbrakk>\<forall>j \<le> (n::nat). f j \<in> A; \<forall>j \<le> m. g j \<in> B;
l \<le> (Suc (n + m))\<rbrakk> \<Longrightarrow> (jointfun n f m g) l \<in> (A \<union> B)"
apply (rule funcset_mem[of "jointfun n f m g" "{j. j \<le> Suc (n + m)}" "A \<union> B"
l])
apply (rule jointfun_hom0)
apply simp+
done
lemma jointfun_inj:"\<lbrakk>f \<in> {j. j \<le> n} \<rightarrow> B; inj_on f {j. j \<le> n};
b \<notin> f ` {j. j \<le> n}\<rbrakk> \<Longrightarrow>
inj_on (jointfun n f 0 (\<lambda>k\<in>{0::nat}. b)) {j. j \<le> Suc n}"
apply (simp add:inj_on_def, (rule allI, rule impI)+, rule impI)
apply (case_tac "x = Suc n", simp)
apply (case_tac "y = Suc n", simp)
apply (frule_tac m = y and n = "Suc n" in noteq_le_less, assumption)
apply (
frule_tac x = y and n = "Suc n" in less_le_diff,
thin_tac "y < Suc n", thin_tac "y \<le> Suc n",
simp add:jointfun_def sliden_def)
apply (case_tac "y = Suc n", simp,
frule_tac m = x and n = "Suc n" in noteq_le_less, assumption,
frule_tac x = x and n = "Suc n" in less_le_diff,
thin_tac "x < Suc n", thin_tac "x \<le> Suc n",
simp add:jointfun_def sliden_def)
apply (rotate_tac -3, frule sym, thin_tac " f x = b", simp)
apply (frule_tac m = x and n = "Suc n" in noteq_le_less, assumption,
frule_tac x = x and n = "Suc n" in less_le_diff,
thin_tac "x < Suc n", thin_tac "x \<le> Suc n", simp,
frule_tac m = y and n = "Suc n" in noteq_le_less, assumption,
frule_tac x = y and n = "Suc n" in less_le_diff,
thin_tac "y < Suc n", thin_tac "y \<le> Suc n", simp,
simp add:jointfun_def)
done
lemma slide_hom:"i \<le> j \<Longrightarrow> (slide i) \<in> {l. l \<le> (j - i)} \<rightarrow> nset i j"
apply (simp add:Pi_def restrict_def)
apply (rule allI) apply (rule impI)
apply (simp add:slide_def)
apply (simp add:nset_def)
done
lemma slide_mem:"\<lbrakk> i \<le> j; l \<in> {k. k \<le> (j - i)}\<rbrakk> \<Longrightarrow> slide i l \<in> nset i j"
apply (frule slide_hom)
apply (rule funcset_mem, assumption+)
done
lemma slide_iM:"(slide i) ` {l. 0 \<le> l} = {k. i \<le> k}"
apply (simp add:image_def slide_def)
apply (rule equalityI)
apply (rule subsetI)
apply simp
apply auto
apply (rule le_imp_add_int)
apply assumption
done
lemma jointfun_hom:"\<lbrakk> f \<in> {i. i \<le> n} \<rightarrow> A; g \<in> {j. j \<le> m} \<rightarrow> B \<rbrakk> \<Longrightarrow>
(jointfun n f m g) \<in> {j. j \<le> (Suc (n + m))} \<rightarrow> A \<union> B"
by (simp add:sliden_def Pi_def jointfun_def)
lemma im_jointfunTr1:"(jointfun n f m g) ` {i. i \<le> n} = f ` {i. i \<le> n}"
apply auto
apply (simp add:jointfun_def)
apply (simp add:jointfun_def)
done
lemma im_jointfunTr2:"(jointfun n f m g) ` (nset (Suc n) (Suc (n + m))) =
g ` ({j. j \<le> m})"
apply auto
apply (simp add:nset_def) apply auto
apply (frule_tac m = xa and n = "Suc (n + m)" and l = "Suc n" in diff_le_mono)
apply simp
apply (simp add:jointfun_def sliden_def)
apply (simp add:image_def)
apply (cut_tac le_add1[of "n" "m"],
simp only:Suc_le_mono[THEN sym, of "n" "n+m"])
apply (frule_tac l = xa in slide_mem[of "Suc n" "Suc (n + m)"])
apply simp
apply (subst jointfun_def)
apply (subgoal_tac "\<forall>i\<in>nset (Suc n) (Suc (n+m)). \<not> (i \<le> n) ")
apply simp
apply (thin_tac "\<forall>i\<in>nset (Suc n) (Suc (n + m)). \<not> i \<le> n")
apply (subgoal_tac "g xa = g (sliden (Suc n) (slide (Suc n) xa))")
apply blast
apply (simp add:slide_def sliden_def)
apply (auto simp add: nset_def)
done
lemma im_jointfun:"\<lbrakk>f \<in> {j. j \<le> n} \<rightarrow> A; g \<in> {j. j \<le> m} \<rightarrow> B\<rbrakk> \<Longrightarrow>
(jointfun n f m g) `({j. j \<le> (Suc (n + m))}) =
f `{j. j \<le> n} \<union> g `{j. j \<le> m}"
apply (cut_tac im_set_un1 [of "{j. j \<le> (Suc (n + m))}" "jointfun n f m g"
"A \<union> B" "{i. i \<le> n}" "nset (Suc n) (Suc (n + m))"])
apply (simp add:Nset_nset[THEN sym, of n m],
simp add:im_jointfunTr1[of n f m g],
simp add:im_jointfunTr2[of n f m g])
apply (rule ballI)
apply (simp add:jointfun_def,
case_tac "l \<le> n", simp add:Pi_def,
simp add:sliden_def,
simp add:nat_not_le_less,
frule_tac m = n and n = l in Suc_leI,
frule_tac m = l and n = "Suc (n + m)" and l = "Suc n" in diff_le_mono,
thin_tac "l \<le> Suc (n + m)", simp,
simp add:Pi_def)
apply (simp add:Nset_nset[of n m])
done
lemma im_jointfun1:"(jointfun n f m g) `({j. j \<le> (Suc (n + m))}) =
f `{j. j \<le> n} \<union> g ` {j. j \<le> m}"
apply (cut_tac Nset_nset[of "n" "m"])
apply (subst im_set_un2[of "{j. j \<le> (Suc (n + m))}" "{j. j \<le> n}"
"nset (Suc n) (Suc (n + m))" "jointfun n f m g"], assumption)
apply (simp add:im_jointfunTr1 im_jointfunTr2)
done
lemma jointfun_surj:"\<lbrakk>f \<in> {j. j \<le> n} \<rightarrow> A; surj_to f {j. j \<le> (n::nat)} A;
g \<in> {j. j \<le> (m::nat)} \<rightarrow> B; surj_to g {j. j \<le> m} B\<rbrakk> \<Longrightarrow>
surj_to (jointfun n f m g) {j. j \<le> Suc (n + m)} (A \<union> B)"
apply (simp add:surj_to_def [of "jointfun n f m g"])
apply (simp add:im_jointfun)
apply (simp add:surj_to_def)
done
lemma Nset_un:"{j. j \<le> (Suc n)} = {j. j \<le> n} \<union> {Suc n}"
apply (rule equalityI)
apply (rule subsetI)
apply simp
apply auto
done
lemma Nsetn_sub: "{j. j \<le> n} \<subseteq> {j. j \<le> (Suc n)}"
apply (rule subsetI)
apply simp
done
lemma Nset_pre_sub:"(0::nat) < k \<Longrightarrow> {j. j \<le> (k - Suc 0)} \<subseteq> {j. j \<le> k}"
apply (rule subsetI)
apply simp
done
lemma Nset_pre_un:"(0::nat) < k \<Longrightarrow> {j. j \<le> k} = {j. j \<le> (k - Suc 0)} \<union> {k}"
apply (insert Nset_un [of "k - Suc 0"])
apply simp
done
lemma Nsetn_sub_mem:" l \<in> {j. j \<le> n} \<Longrightarrow> l \<in> {j. j \<le> (Suc n)}"
apply simp
done
lemma Nsetn_sub_mem1:"\<forall>j. j \<in> {j. j \<le> n} \<longrightarrow> j \<in> {j. j \<le> (Suc n)}"
by (simp add:Nsetn_sub_mem)
lemma Nset_Suc:"{j. j \<le> (Suc n)} = insert (Suc n) {j. j \<le> n}"
apply (rule equalityI)
apply (rule subsetI)
apply simp
apply auto
done
lemma nsetnm_sub_mem:"\<forall>j. j \<in>nset n (n + m) \<longrightarrow> j \<in> nset n (Suc (n + m))"
by (rule allI, simp add:nset_def)
lemma Nset_0:"{j. j \<le> (0::nat)} = {0}"
by simp
lemma Nset_Suc0:"{i. i \<le> (Suc 0)} = {0, Suc 0}"
apply (rule equalityI)
apply (rule subsetI, simp)
apply (case_tac "x = 0", simp)
apply simp+
done
lemma Nset_Suc_Suc:"Suc (Suc 0) \<le> n \<Longrightarrow>
{j. j \<le> (n - Suc (Suc 0))} = {j. j \<le> n} - {n - Suc 0, n}"
apply (insert Nset_un [of "n - (Suc 0)"])
apply (insert Nset_un [of "n - Suc (Suc 0)"])
apply (subgoal_tac "{j. j \<le> (Suc (n - Suc (Suc 0)))} = {j. j \<le> (n - Suc 0)}")
apply (simp,
thin_tac "{j. j \<le> n} =
insert n (insert (Suc (n - Suc (Suc 0))) {j. j \<le> n - Suc (Suc 0)})",
thin_tac " {j. j \<le> n - Suc 0} =
insert (Suc (n - Suc (Suc 0))) {j. j \<le> n - Suc (Suc 0)}",
thin_tac "{j. j \<le> Suc (n - Suc (Suc 0))} =
insert (Suc (n - Suc (Suc 0))) {j. j \<le> n - Suc (Suc 0)}")
apply (simp add:Suc_Suc_Tr)
apply auto
done
lemma image_Nset_Suc:"f ` ({j. j \<le> (Suc n)}) =
insert (f (Suc n)) (f ` {j. j \<le> n})"
apply (cut_tac Nset_un[of "n"])
apply (frule im_set_un2[of "{j. j \<le> (Suc n)}" "{j. j \<le> n}" "{Suc n}" "f"])
apply (simp add:Un_commute)
done
definition
Nleast :: "nat set \<Rightarrow> nat" where
"Nleast A = (THE a. (a \<in> A \<and> (\<forall>x\<in>A. a \<le> x)))"
definition
Nlb :: "[nat set, nat] \<Rightarrow> bool" where
"Nlb A n \<longleftrightarrow> (\<forall>a\<in>A. n \<le> a)"
primrec ndec_seq :: "[nat set, nat, nat] \<Rightarrow> nat" where
ndec_seq_0 :"ndec_seq A a 0 = a"
| ndec_seq_Suc:"ndec_seq A a (Suc n) =
(SOME b. ((b \<in> A) \<and> b < (ndec_seq A a n)))"
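(* ndec_seq is the decreasing counterpart of nasc_seq: if A has no least
   element, it descends strictly inside A (ndec_seqn below). *)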
lemma ndec_seq_mem:"\<lbrakk>a \<in> (A::nat set); \<not> (\<exists>m. m\<in>A \<and> (\<forall>x\<in>A. m \<le> x))\<rbrakk> \<Longrightarrow>
(ndec_seq A a n) \<in> A"
apply (induct_tac n)
apply simp apply simp
apply (simp add: not_less [symmetric])
apply (subgoal_tac "\<exists>x\<in>A. x < (ndec_seq A a n)") prefer 2 apply blast
apply (thin_tac "\<forall>m. m \<in> A \<longrightarrow> (\<exists>x\<in>A. x < m)")
apply (rule someI2_ex) apply blast
apply simp
done
lemma ndec_seqn:"\<lbrakk>a \<in> (A::nat set);\<not> (\<exists>m. m\<in>A \<and> (\<forall>x\<in>A. m \<le> x))\<rbrakk> \<Longrightarrow>
(ndec_seq A a (Suc n)) < (ndec_seq A a n)"
apply (frule ndec_seq_mem [of "a" "A" "n"], assumption+)
apply simp
apply (simp add: not_less [symmetric])
apply (subgoal_tac "\<exists>x\<in>A. x < (ndec_seq A a n)") prefer 2 apply simp
apply (thin_tac "\<forall>m. m \<in> A \<longrightarrow> (\<exists>x\<in>A. x < m)")
apply (rule someI2_ex) apply blast
apply simp
done
lemma ndec_seqn1:"\<lbrakk>a \<in> (A::nat set); \<not> (\<exists>m. m\<in>A \<and> (\<forall>x\<in>A. m \<le> x))\<rbrakk> \<Longrightarrow>
(ndec_seq A a (Suc n)) \<le> (ndec_seq A a n) - 1"
apply (frule ndec_seqn [of "a" "A" "n"], assumption+,
thin_tac "\<not> (\<exists>m. m \<in> A \<and> (\<forall>x\<in>A. m \<le> x))")
apply (simp del:ndec_seq_Suc)
done
lemma ex_NleastTr:"\<lbrakk>a \<in> (A::nat set); \<not> (\<exists>m. m\<in>A \<and> (\<forall>x\<in>A. m \<le> x))\<rbrakk> \<Longrightarrow>
(ndec_seq A a n) \<le> (a - n)"
apply (induct_tac n)
apply simp
apply (frule_tac n = n in ndec_seqn1[of "a" "A"], assumption+)
apply (subgoal_tac "ndec_seq A a n - 1 \<le> (a - n) - 1") prefer 2
apply arith
apply arith
done
lemma nat_le:"((a::nat) - (a + 1)) \<le> 0"
apply arith
done
lemma ex_Nleast:"(A::nat set) \<noteq> {} \<Longrightarrow> \<exists>!m. m\<in>A \<and> (\<forall>x\<in>A. m \<le> x)"
apply (frule nonempty_ex[of "A"], thin_tac "A \<noteq> {}",
erule exE, rename_tac a)
apply (case_tac "0 \<in> A")
apply (rule ex_ex1I, subgoal_tac "\<forall>x\<in>A. 0 \<le> a", blast,
rule ballI, simp)
apply ((erule conjE)+,
subgoal_tac "m \<le> 0", thin_tac "\<forall>x\<in>A. m \<le> x",
subgoal_tac "y \<le> 0", thin_tac "\<forall>x\<in>A. y \<le> x",
simp, blast, blast)
apply (rule ex_ex1I)
prefer 2 apply (erule conjE)+
apply (subgoal_tac "m \<le> y", thin_tac "\<forall>x\<in>A. m \<le> x",
subgoal_tac "y \<le> m", thin_tac "\<forall>x\<in>A. y \<le> x",
simp, blast, blast)
apply (rule contrapos_pp, simp,
frule_tac a = a and A = A and n = "a + 1" in ex_NleastTr, assumption+)
apply (subgoal_tac "(a - (a + 1)) \<le> 0")
prefer 2 apply (rule nat_le)
apply (frule_tac i = "ndec_seq A a (a + 1)" and j = "a - (a + 1)" and k = 0 in le_trans, assumption+,
frule_tac a = a and n = "a + 1" in ndec_seq_mem [of _ "A"],
assumption+)
apply (thin_tac "\<not> (\<exists>m. m \<in> A \<and> (\<forall>x\<in>A. m \<le> x))",
thin_tac "ndec_seq A a (a + 1) \<le> a - (a + 1)",
thin_tac "a - (a + 1) \<le> 0")
apply simp
done
lemma Nleast:"(A::nat set) \<noteq> {} \<Longrightarrow> Nleast A \<in> A \<and> (\<forall>x\<in>A. (Nleast A) \<le> x)"
apply (frule ex_Nleast [of "A"])
apply (simp add:Nleast_def)
apply (rule theI')
apply simp
done
subsection "Lemmas for existence of reduced chain."
(* Later some of these lemmas should be removed. *)
lemma jointgd_tool1:" 0 < i \<Longrightarrow> 0 \<le> i - Suc 0"
by arith
lemma jointgd_tool2:" 0 < i \<Longrightarrow> i = Suc (i - Suc 0)"
by arith
lemma jointgd_tool3:"\<lbrakk>0 < i; i \<le> m\<rbrakk> \<Longrightarrow> i - Suc 0 \<le> (m - Suc 0)"
by arith
lemma jointgd_tool4:"n < i \<Longrightarrow> i - n = Suc( i - Suc n)"
by arith
lemma pos_prec_less:"0 < i \<Longrightarrow> i - Suc 0 < i"
by arith
lemma Un_less_Un:"\<lbrakk>f \<in> {j. j \<le> (Suc n)} \<rightarrow> (X::'a set set);
A \<subseteq> \<Union>(f ` {j. j \<le> (Suc n)});
i \<in> {j. j \<le> (Suc n)}; j \<in> {l. l \<le> (Suc n)}; i \<noteq> j \<and> f i \<subseteq> f j\<rbrakk>
\<Longrightarrow> A \<subseteq> \<Union>(compose {j. j \<le> n} f (skip i) ` {j. j \<le> n})"
apply (simp add:compose_def)
apply (rule subsetI, simp)
apply (frule_tac c = x and A = A and B = "\<Union>x\<in>{j. j \<le> Suc n}. f x" in
subsetD, assumption+, simp)
apply (erule exE, (erule conjE)+)
apply (case_tac "xa = i", simp,
frule_tac c = x in subsetD[of "f i" "f j"], assumption+)
apply (cut_tac less_linear[of i j], simp, erule disjE,
frule less_le_diff[of i j],
cut_tac skip_im_Tr2_1[of i n "j - Suc 0"],
simp,
frule eq_elems_eq_val[THEN sym, of "skip i (j - Suc 0)" j f],
cut_tac a = x in eq_set_inc[of _ "f j" "f (skip i (j - Suc 0))"],
assumption+,
frule le_Suc_diff_le[of j n], blast, simp, assumption, simp)
apply (frule skip_im_Tr1_2[of i n j], assumption,
frule eq_elems_eq_val[THEN sym, of "skip i j" j f])
apply (cut_tac a = x in eq_set_inc[of _ "f j" "f (skip i j)"],
assumption+)
apply (frule_tac x = j and y = i and z = "Suc n" in less_le_trans,
assumption+,
frule Suc_less_le[of j n], blast)
apply (cut_tac x = xa and y = i in less_linear, simp,
erule disjE,
frule_tac x = xa in skip_im_Tr1_2[of i n], assumption)
apply (frule_tac x1 = "skip i xa" and y1 = xa and f1 = f in
eq_elems_eq_val[THEN sym],
frule_tac a = x and A = "f xa" and B = "f (skip i xa)" in eq_set_inc,
assumption,
frule_tac x = xa and y = i and z = "Suc n" in less_le_trans,
assumption+,
frule_tac x = xa and n = n in Suc_less_le, blast)
apply (frule_tac x = i and n = xa in less_le_diff,
cut_tac x = "xa - Suc 0" and n = n in skip_im_Tr2_1 [of i],
simp, assumption,
simp,
frule_tac x1 = "skip i (xa - Suc 0)" and y1 = xa and f1 = f in
eq_elems_eq_val[THEN sym],
frule_tac a = x and A = "f xa" and B = "f (skip i (xa - Suc 0))" in
eq_set_inc, assumption,
frule_tac x = xa and n = n in le_Suc_diff_le)
apply blast
done
section "Lower bounded set of integers"
(* In this section we prove that a lower-bounded set of integers
   has a least element. *)
definition "Zset = {x. \<exists>(n::int). x = n}"
definition
Zleast :: "int set \<Rightarrow> int" where
"Zleast A = (THE a. (a \<in> A \<and> (\<forall>x\<in>A. a \<le> x)))"
definition
LB :: "[int set, int] \<Rightarrow> bool" where
"LB A n = (\<forall>a\<in>A. n \<le> a)"
lemma linorder_linear1:"(m::int) < n \<or> n \<le> m"
apply (subgoal_tac "m < n \<or> n = m \<or> n < m")
apply (case_tac "m < n") apply simp apply simp
apply (subgoal_tac "m < n \<or> m = n \<or> n < m")
apply blast
apply (simp add:less_linear)
done
primrec dec_seq :: "[int set, int, nat] \<Rightarrow> int"
where
dec_seq_0: "dec_seq A a 0 = a"
| dec_seq_Suc: "dec_seq A a (Suc n) = (SOME b. ((b \<in> A) \<and> b < (dec_seq A a n)))"
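(* dec_seq A a descends strictly inside A whenever A has no least element
   (dec_seqn below); since every element of A stays above the lower bound
   n, this drives the contradiction in lbs_ex_Zleast. *)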
lemma dec_seq_mem:"\<lbrakk>a \<in> A; A \<subseteq> Zset;\<not> (\<exists>m. m\<in>A \<and> (\<forall>x\<in>A. m \<le> x))\<rbrakk> \<Longrightarrow>
(dec_seq A a n) \<in> A"
apply (induct_tac n)
apply simp apply simp apply (simp add:not_zle)
apply (subgoal_tac "\<exists>x\<in>A. x < (dec_seq A a n)") prefer 2 apply blast
apply (thin_tac "\<forall>m. m \<in> A \<longrightarrow> (\<exists>x\<in>A. x < m)")
apply (rule someI2_ex) apply blast
apply simp
done
lemma dec_seqn:"\<lbrakk>a \<in> A; A \<subseteq> Zset;\<not> (\<exists>m. m\<in>A \<and> (\<forall>x\<in>A. m \<le> x))\<rbrakk> \<Longrightarrow>
(dec_seq A a (Suc n)) < (dec_seq A a n)"
apply simp
apply (frule dec_seq_mem [of "a" "A" "n"], assumption+)
apply simp
apply (simp add:not_zle)
apply (subgoal_tac "\<exists>x\<in>A. x < (dec_seq A a n)") prefer 2 apply simp
apply (thin_tac "\<forall>m. m \<in> A \<longrightarrow> (\<exists>x\<in>A. x < m)")
apply (rule someI2_ex) apply blast
apply simp
done
lemma dec_seqn1:"\<lbrakk>a \<in> A; A \<subseteq> Zset;\<not> (\<exists>m. m\<in>A \<and> (\<forall>x\<in>A. m \<le> x))\<rbrakk> \<Longrightarrow>
(dec_seq A a (Suc n)) \<le> (dec_seq A a n) - 1"
apply (frule dec_seqn [of "a" "A" "n"], assumption+)
apply simp
done
lemma lbs_ex_ZleastTr:"\<lbrakk>a \<in> A; A \<subseteq> Zset;\<not> (\<exists>m. m\<in>A \<and> (\<forall>x\<in>A. m \<le> x))\<rbrakk> \<Longrightarrow>
(dec_seq A a n) \<le> (a - int(n))"
apply (induct_tac n)
apply simp
apply (frule_tac n = n in dec_seqn1[of "a" "A"], assumption+)
apply (subgoal_tac "dec_seq A a n - 1 \<le> a - (int n) - 1") prefer 2
apply simp apply (thin_tac "dec_seq A a n \<le> a - int n")
apply (frule_tac x = "dec_seq A a (Suc n)" and y = "dec_seq A a n - 1" and
z = "a - int n - 1" in order_trans, assumption+)
apply (thin_tac "\<not> (\<exists>m. m \<in> A \<and> (\<forall>x\<in>A. m \<le> x))")
apply (thin_tac "dec_seq A a (Suc n) \<le> dec_seq A a n - 1")
apply (thin_tac "dec_seq A a n - 1 \<le> a - int n - 1")
apply (subgoal_tac "a - int n - 1 = a - int (Suc n)") apply simp
apply (thin_tac "dec_seq A a (Suc n) \<le> a - int n - 1")
apply simp
done
lemma big_int_less:"a - int(nat(abs(a) + abs(N) + 1)) < N"
apply (simp add:zabs_def)
done
lemma lbs_ex_Zleast:"\<lbrakk>A \<noteq> {}; A \<subseteq> Zset; LB A n\<rbrakk> \<Longrightarrow> \<exists>!m. m\<in>A \<and> (\<forall>x\<in>A. m \<le> x)"
apply (frule nonempty_ex[of "A"])
apply (thin_tac "A \<noteq> {}")
apply (erule exE)
apply (rename_tac a)
apply (rule ex_ex1I)
prefer 2
apply (thin_tac "LB A n") apply (erule conjE)+
apply (subgoal_tac "m \<le> y") prefer 2 apply simp
apply (subgoal_tac "y \<le> m") prefer 2 apply simp
apply (thin_tac "\<forall>x\<in>A. m \<le> x") apply (thin_tac "\<forall>x\<in>A. y \<le> x")
apply simp
apply (rule contrapos_pp) apply simp
apply (frule_tac a = a and A = A and n = "nat(abs(a) + abs(n) + 1)" in lbs_ex_ZleastTr, assumption+)
apply (subgoal_tac "a - int(nat(abs(a) + abs(n) + 1)) < n")
prefer 2 apply (rule big_int_less)
apply (frule_tac x = "dec_seq A a (nat (\<bar>a\<bar> + \<bar>n\<bar> + 1))" and y = "a - int (nat (\<bar>a\<bar> + \<bar>n\<bar> + 1))" and z = n in order_le_less_trans, assumption+)
apply (frule_tac a = a and n = "nat (\<bar>a\<bar> + \<bar>n\<bar> + 1)" in dec_seq_mem [of _ "A"], assumption+)
apply (thin_tac "\<not> (\<exists>m. m \<in> A \<and> (\<forall>x\<in>A. m \<le> x))")
apply (thin_tac "dec_seq A a (nat (\<bar>a\<bar> + \<bar>n\<bar> + 1))
\<le> a - int (nat (\<bar>a\<bar> + \<bar>n\<bar> + 1))")
apply (thin_tac "a - int (nat (\<bar>a\<bar> + \<bar>n\<bar> + 1)) < n")
apply (simp add:LB_def)
apply (subgoal_tac "n \<le> dec_seq A a (nat (\<bar>a\<bar> + \<bar>n\<bar> + 1))")
apply (thin_tac "\<forall>a\<in>A. n \<le> a") apply (simp add:not_zle)
apply blast
done
lemma Zleast:"\<lbrakk>A \<noteq> {}; A \<subseteq> Zset; LB A n\<rbrakk> \<Longrightarrow> Zleast A \<in> A \<and>
(\<forall>x\<in>A. (Zleast A) \<le> x)"
apply (frule lbs_ex_Zleast [of "A" "n"], assumption+)
apply (simp add:Zleast_def)
apply (rule theI')
apply simp
done
lemma less_convert1:"\<lbrakk> a = c; a < b \<rbrakk> \<Longrightarrow> c < b"
apply auto
done
lemma less_convert2:"\<lbrakk>a = b; b < c\<rbrakk> \<Longrightarrow> a < c"
apply auto
done
section \<open>Augmented integers: the integers together with \<open>\<infinity>\<close> and \<open>-\<infinity>\<close>\<close>
definition
zag :: "(int * int) set" where
"zag = {(x,y) | x y. x * y = (0::int) \<and> (y = -1 \<or> y = 0 \<or> y = 1)}"
definition
zag_pl::"[(int * int), (int * int)] \<Rightarrow> (int * int)" where
"zag_pl x y == if (snd x + snd y) = 2 then (0, 1)
else if (snd x + snd y) = 1 then (0, 1)
else if (snd x + snd y) = 0 then (fst x + fst y, 0)
else if (snd x + snd y) = -1 then (0, -1)
else if (snd x + snd y) = -2 then (0, -1) else undefined"
definition
zag_t :: "[(int * int), (int * int)] \<Rightarrow> (int * int)" where
"zag_t x y = (if (snd x)*(snd y) = 0 then
(if 0 < (fst x)*(snd y) + (snd x)*(fst y) then (0,1)
else (if (fst x)*(snd y) + (snd x)*(fst y) = 0
then ((fst x)*(fst y), 0) else (0, -1)))
else (if 0 < (snd x)*(snd y) then (0, 1) else (0, -1)))"
definition "Ainteg = zag"
typedef ant = Ainteg
morphisms Rep_Ainteg Abs_Ainteg
unfolding Ainteg_def
proof
show "(1, 0) \<in> zag" unfolding zag_def by auto
qed
definition
ant :: "int \<Rightarrow> ant" where
"ant m = Abs_Ainteg( (m, 0))"
definition
tna :: "ant \<Rightarrow> int" where
"tna z = (if Rep_Ainteg(z) \<noteq> (0,1) \<and> Rep_Ainteg(z) \<noteq> (0,-1) then
fst (Rep_Ainteg(z)) else undefined)"
instantiation ant :: "{zero, one, plus, uminus, minus, times, ord}"
begin
definition
Zero_ant_def : "0 == ant 0"
definition
One_ant_def : "1 == ant 1"
definition
add_ant_def:
"z + w ==
Abs_Ainteg (zag_pl (Rep_Ainteg z) (Rep_Ainteg w))"
definition
minus_ant_def : "- z ==
Abs_Ainteg((- (fst (Rep_Ainteg z)), - (snd (Rep_Ainteg z))))"
definition
diff_ant_def: "z - (w::ant) == z + (-w)"
definition
mult_ant_def:
"z * w ==
Abs_Ainteg (zag_t (Rep_Ainteg z) (Rep_Ainteg w))"
definition
le_ant_def:
"(z::ant) \<le> w == if (snd (Rep_Ainteg w)) = 1 then True
else (if (snd (Rep_Ainteg w)) = 0 then (if (snd (Rep_Ainteg z)) = 1
then False else (if (snd (Rep_Ainteg z)) = 0 then
(fst (Rep_Ainteg z)) \<le> (fst (Rep_Ainteg w)) else True))
else (if snd (Rep_Ainteg z) = -1 then True else False))"
definition
less_ant_def: "((z::ant) < (w::ant)) == (z \<le> w \<and> z \<noteq> w)"
instance ..
end
definition
inf_ant :: ant ("\<infinity>") where
"\<infinity> = Abs_Ainteg((0,1))"
definition
an :: "nat \<Rightarrow> ant" where
"an m = ant (int m)"
definition
na :: "ant \<Rightarrow> nat" where
"na x = (if (x < 0) then 0 else
if x \<noteq> \<infinity> then (nat (tna x)) else undefined)"
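(* Reading guide (added): ant embeds the integers into ant and an embeds
   the naturals via int; tna is the partial inverse of ant, undefined at
   \<infinity> and -\<infinity>, while na truncates negative values to 0 and is
   undefined at \<infinity>. *)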
definition
UBset :: "ant \<Rightarrow> ant set" where
"UBset z = {(x::ant). x \<le> z}"
definition
LBset :: "ant \<Rightarrow> ant set" where
"LBset z = {(x::ant). z \<le> x}"
lemma ant_z_in_Ainteg:"(z::int, 0) \<in> Ainteg"
apply (simp add:Ainteg_def zag_def)
done
lemma ant_inf_in_Ainteg:"((0::int), 1) \<in> Ainteg"
apply (simp add:Ainteg_def zag_def)
done
lemma ant_minf_in_Ainteg:"((0::int), -1) \<in> Ainteg"
apply (simp add:Ainteg_def zag_def)
done
lemma ant_0_in_Ainteg:"((0::int), 0) \<in> Ainteg"
apply (simp add:Ainteg_def zag_def)
done
lemma an_0[simp]:"an 0 = 0"
by (simp add:an_def Zero_ant_def)
lemma an_1[simp]:"an 1 = 1"
by (simp add:an_def One_ant_def)
lemma mem_ant:"(a::ant) = -\<infinity> \<or> (\<exists>(z::int). a = ant z) \<or> a = \<infinity>"
apply (case_tac "a = -\<infinity> \<or> a = \<infinity>")
apply blast
apply (simp, simp add:ant_def,
cut_tac Rep_Ainteg[of "a"],
simp add:Ainteg_def zag_def,
erule conjE, simp add:inf_ant_def,
simp add:minus_ant_def,
cut_tac ant_inf_in_Ainteg,
simp add:Abs_Ainteg_inverse)
apply auto
apply (cut_tac Rep_Ainteg[of "a"],
subgoal_tac "Abs_Ainteg (Rep_Ainteg a) = Abs_Ainteg ((0,-1))",
thin_tac "Rep_Ainteg a = (0, -1)",
simp add:Rep_Ainteg_inverse, simp)
apply (cut_tac Rep_Ainteg[of "a"],
subgoal_tac "Abs_Ainteg (Rep_Ainteg a) = Abs_Ainteg ((0,0))",
thin_tac "Rep_Ainteg a = (0, 0)",
simp add:Rep_Ainteg_inverse, blast, simp)
apply (cut_tac Rep_Ainteg[of "a"],
subgoal_tac "Abs_Ainteg (Rep_Ainteg a) = Abs_Ainteg ((0,1))",
thin_tac "Rep_Ainteg a = (0, 1)",
simp add:Rep_Ainteg_inverse, simp)
apply (cut_tac Rep_Ainteg[of "a"],
subgoal_tac "Abs_Ainteg (Rep_Ainteg a) = Abs_Ainteg ((x,0))",
thin_tac "Rep_Ainteg a = (x, 0)",
simp add:Rep_Ainteg_inverse, blast, simp)
done
lemma minf:"-\<infinity> = Abs_Ainteg((0,-1))"
apply (simp add:inf_ant_def minus_ant_def,
cut_tac ant_inf_in_Ainteg,
simp add:Abs_Ainteg_inverse)
done
lemma z_neq_inf[simp]:"(ant z) \<noteq> \<infinity> "
apply (rule contrapos_pp, simp+)
apply (simp add:ant_def inf_ant_def)
apply (subgoal_tac "Rep_Ainteg (Abs_Ainteg (z,0)) =
Rep_Ainteg (Abs_Ainteg (0,1))",
thin_tac "Abs_Ainteg (z, 0) = Abs_Ainteg (0, 1)",
cut_tac ant_z_in_Ainteg[of "z"],
cut_tac ant_inf_in_Ainteg,
simp add:Abs_Ainteg_inverse)
apply simp
done
lemma z_neq_minf[simp]:"(ant z) \<noteq> -\<infinity>"
apply (rule contrapos_pp, simp+)
apply (subgoal_tac "ant (-z) = \<infinity>")
apply (cut_tac z_neq_inf[of "- z"], simp)
apply (simp add:ant_def inf_ant_def minus_ant_def)
apply (cut_tac ant_inf_in_Ainteg,
simp add:Abs_Ainteg_inverse)
apply (subgoal_tac "- Abs_Ainteg (z, 0) = - Abs_Ainteg (0, -1)",
thin_tac "Abs_Ainteg (z, 0) = Abs_Ainteg (0, -1)",
simp add:minus_ant_def,
cut_tac ant_z_in_Ainteg[of "z"],
cut_tac ant_inf_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
simp add:Abs_Ainteg_inverse)
apply simp
done
lemma minf_neq_inf[simp]:"-\<infinity> \<noteq> \<infinity>"
apply (cut_tac ant_inf_in_Ainteg,
simp add:inf_ant_def minus_ant_def Abs_Ainteg_inverse)
apply (rule contrapos_pp, simp+,
subgoal_tac "Rep_Ainteg (Abs_Ainteg (0,-1)) =
Rep_Ainteg (Abs_Ainteg (0,1))",
thin_tac "Abs_Ainteg (0, -1) = Abs_Ainteg (0, 1)",
cut_tac ant_minf_in_Ainteg,
simp add:Abs_Ainteg_inverse)
apply simp
done
lemma a_ipi[simp]:"\<infinity> + \<infinity> = \<infinity>"
apply (simp add:add_ant_def inf_ant_def,
cut_tac ant_inf_in_Ainteg,
simp add:Abs_Ainteg_inverse,
simp add:zag_pl_def)
done
lemma a_zpi[simp]:"(ant z) + \<infinity> = \<infinity>"
apply (simp add:add_ant_def inf_ant_def ant_def,
cut_tac ant_z_in_Ainteg[of "z"],
cut_tac ant_inf_in_Ainteg,
simp add:Abs_Ainteg_inverse,
simp add:zag_pl_def)
done
lemma a_ipz[simp]:" \<infinity> + (ant z) = \<infinity>"
apply (simp add:add_ant_def inf_ant_def ant_def,
cut_tac ant_z_in_Ainteg[of "z"],
cut_tac ant_inf_in_Ainteg,
simp add:Abs_Ainteg_inverse,
simp add:zag_pl_def)
done
lemma a_zpz:"(ant m) + (ant n) = ant (m + n)"
apply (simp add:add_ant_def inf_ant_def ant_def,
cut_tac ant_z_in_Ainteg[of "m"],
cut_tac ant_z_in_Ainteg[of "n"],
simp add:Abs_Ainteg_inverse,
simp add:zag_pl_def)
done
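(* Sanity check of a_zpz on concrete numerals (illustrative). *)
lemma "ant 2 + ant 3 = ant 5"
apply (simp add:a_zpz)
done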
lemma a_mpi[simp]:"-\<infinity> + \<infinity> = 0"
apply (simp add:add_ant_def inf_ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
simp add:minus_ant_def,
simp add:Abs_Ainteg_inverse,
simp add:Zero_ant_def ant_def zag_pl_def)
done
lemma a_ipm[simp]:"\<infinity> + (-\<infinity>) = 0"
apply (simp add:add_ant_def inf_ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
simp add:minus_ant_def,
simp add:Abs_Ainteg_inverse,
simp add:Zero_ant_def ant_def zag_pl_def)
done
lemma a_mpm[simp]:"-\<infinity> + (-\<infinity>) = -\<infinity>"
apply (simp add:add_ant_def inf_ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
simp add:minus_ant_def,
simp add:Abs_Ainteg_inverse,
simp add:Zero_ant_def ant_def zag_pl_def)
done
lemma a_mpz[simp]:"-\<infinity> + (ant m) = -\<infinity>"
apply (simp add:add_ant_def minus_ant_def inf_ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
simp add:Abs_Ainteg_inverse,
simp add:ant_def,
cut_tac ant_z_in_Ainteg[of "m"],
simp add:Abs_Ainteg_inverse)
apply (simp add:zag_pl_def)
done
lemma a_zpm[simp]:"(ant m) + (-\<infinity>) = -\<infinity>"
apply (simp add:add_ant_def minus_ant_def inf_ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
simp add:Abs_Ainteg_inverse,
simp add:ant_def,
cut_tac ant_z_in_Ainteg[of "m"],
simp add:Abs_Ainteg_inverse)
apply (simp add:zag_pl_def)
done
lemma a_mdi[simp]:"-\<infinity> - \<infinity> = - \<infinity>"
apply (simp add:diff_ant_def minus_ant_def inf_ant_def,
cut_tac ant_inf_in_Ainteg,
simp add:Abs_Ainteg_inverse)
apply (simp add:add_ant_def,
cut_tac ant_minf_in_Ainteg,
simp add:Abs_Ainteg_inverse, simp add:zag_pl_def)
done
lemma a_zdz:"(ant m) - (ant n) = ant (m - n)"
apply (simp add:diff_ant_def minus_ant_def ant_def,
cut_tac ant_z_in_Ainteg[of "n"],
simp add:Abs_Ainteg_inverse)
apply (simp add:add_ant_def,
cut_tac ant_z_in_Ainteg[of "m"],
cut_tac ant_z_in_Ainteg[of "-n"],
simp add:Abs_Ainteg_inverse zag_pl_def)
done
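(* The corresponding check for subtraction (illustrative). *)
lemma "ant 5 - ant 3 = ant 2"
apply (simp add:a_zdz)
done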
lemma a_i_i[simp]:"\<infinity> * \<infinity> = \<infinity>"
apply (simp add:mult_ant_def inf_ant_def,
cut_tac ant_inf_in_Ainteg,
simp add:Abs_Ainteg_inverse)
apply (simp add:zag_t_def)
done
lemma a_0_i[simp]:"0 * \<infinity> = 0"
by (simp add:mult_ant_def inf_ant_def Zero_ant_def, simp add:ant_def,
cut_tac ant_inf_in_Ainteg, cut_tac ant_0_in_Ainteg,
simp add:Abs_Ainteg_inverse, simp add:zag_t_def)
lemma a_i_0[simp]:"\<infinity> * 0 = 0"
by (simp add:mult_ant_def inf_ant_def Zero_ant_def, simp add:ant_def,
cut_tac ant_inf_in_Ainteg, cut_tac ant_0_in_Ainteg,
simp add:Abs_Ainteg_inverse, simp add:zag_t_def)
lemma a_0_m[simp]:"0 * (-\<infinity>) = 0"
by (simp add:mult_ant_def inf_ant_def Zero_ant_def, simp add:ant_def,
cut_tac ant_inf_in_Ainteg, cut_tac ant_0_in_Ainteg,
simp add:Abs_Ainteg_inverse, simp add:zag_t_def)
lemma a_m_0[simp]:"(-\<infinity>) * 0 = 0"
by (simp add:mult_ant_def inf_ant_def Zero_ant_def, simp add:ant_def,
cut_tac ant_inf_in_Ainteg, cut_tac ant_0_in_Ainteg,
simp add:Abs_Ainteg_inverse, simp add:zag_t_def)
lemma a_m_i[simp]:"(-\<infinity>) * \<infinity> = -\<infinity>"
by (simp add:mult_ant_def inf_ant_def minus_ant_def,
cut_tac ant_inf_in_Ainteg, cut_tac ant_minf_in_Ainteg,
simp add:Abs_Ainteg_inverse, simp add:zag_t_def)
lemma a_i_m[simp]:"\<infinity> * (-\<infinity>) = - \<infinity>"
by (simp add:mult_ant_def inf_ant_def minus_ant_def,
cut_tac ant_inf_in_Ainteg, cut_tac ant_minf_in_Ainteg,
simp add:Abs_Ainteg_inverse, simp add:zag_t_def)
lemma a_pos_i[simp]:"0 < m \<Longrightarrow> (ant m) * \<infinity> = \<infinity>"
apply (simp add:mult_ant_def inf_ant_def ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_z_in_Ainteg[of "m"],
simp add:Abs_Ainteg_inverse)
apply (simp add:zag_t_def)
done
lemma a_i_pos[simp]:"0 < m \<Longrightarrow> \<infinity> * (ant m) = \<infinity>"
apply (simp add:mult_ant_def inf_ant_def ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_z_in_Ainteg[of "m"],
simp add:Abs_Ainteg_inverse)
apply (simp add:zag_t_def)
done
lemma a_neg_i[simp]:"m < 0 \<Longrightarrow> (ant m) * \<infinity> = -\<infinity>"
apply (simp add:mult_ant_def inf_ant_def ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
cut_tac ant_z_in_Ainteg[of "m"],
simp add:minus_ant_def,
simp add:Abs_Ainteg_inverse)
apply (simp add:zag_t_def)
done
lemma a_i_neg[simp]:"m < 0 \<Longrightarrow> \<infinity> * (ant m) = -\<infinity>"
apply (simp add:mult_ant_def inf_ant_def ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
cut_tac ant_z_in_Ainteg[of "m"],
simp add:minus_ant_def,
simp add:Abs_Ainteg_inverse)
apply (simp add:zag_t_def)
done
lemma a_z_z:"(ant m) * (ant n) = ant (m*n)"
apply (simp add:mult_ant_def ant_def,
cut_tac ant_z_in_Ainteg[of "m"],
cut_tac ant_z_in_Ainteg[of "n"],
simp add:Abs_Ainteg_inverse)
apply (simp add:zag_t_def)
done
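(* And for multiplication (illustrative). *)
lemma "ant 2 * ant 3 = ant 6"
apply (simp add:a_z_z)
done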
lemma a_pos_m[simp]:"0 < m \<Longrightarrow> (ant m) * (-\<infinity>) = -\<infinity>"
apply (simp add:mult_ant_def inf_ant_def minus_ant_def ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
cut_tac ant_z_in_Ainteg[of "m"],
simp add:Abs_Ainteg_inverse)
apply (simp add:zag_t_def)
done
lemma a_m_pos[simp]:"0 < m \<Longrightarrow> (-\<infinity>) * (ant m) = -\<infinity>"
apply (simp add:mult_ant_def inf_ant_def minus_ant_def ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
cut_tac ant_z_in_Ainteg[of "m"],
simp add:Abs_Ainteg_inverse)
apply (simp add:zag_t_def)
done
lemma a_neg_m[simp]:"m < 0 \<Longrightarrow> (ant m) * (-\<infinity>) = \<infinity>"
apply (simp add:mult_ant_def inf_ant_def minus_ant_def ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
cut_tac ant_z_in_Ainteg[of "m"],
simp add:Abs_Ainteg_inverse)
apply (simp add:zag_t_def)
done
lemma neg_a_m[simp]:"m < 0 \<Longrightarrow> (-\<infinity>) * (ant m) = \<infinity>"
apply (simp add:mult_ant_def inf_ant_def minus_ant_def ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
cut_tac ant_z_in_Ainteg[of "m"],
simp add:Abs_Ainteg_inverse)
apply (simp add:zag_t_def)
done
lemma a_m_m[simp]:"(-\<infinity>) * (-\<infinity>) = \<infinity>"
apply (simp add:mult_ant_def inf_ant_def minus_ant_def ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
simp add:Abs_Ainteg_inverse)
apply (simp add:zag_t_def)
done
lemma inj_on_Abs_Ainteg:"inj_on Abs_Ainteg Ainteg"
apply (simp add:inj_on_def)
apply (rule ballI)+
apply (rule impI,
subgoal_tac "Rep_Ainteg (Abs_Ainteg x) = Rep_Ainteg (Abs_Ainteg y)",
thin_tac "Abs_Ainteg x = Abs_Ainteg y",
simp add:Abs_Ainteg_inverse, simp)
done
lemma an_Suc:"an (Suc n) = an n + 1"
apply (subst an_1[THEN sym])
apply (simp del:an_1 add:an_def)
apply (simp del:an_1 add:a_zpz, simp add:add.commute)
done
lemma aeq_zeq [iff]: "(ant m = ant n) = (m = n)"
apply (rule iffI)
apply (subgoal_tac "Rep_Ainteg (ant m) = Rep_Ainteg (ant n)",
thin_tac "ant m = ant n",
cut_tac ant_z_in_Ainteg[of "m"],
cut_tac ant_z_in_Ainteg[of "n"],
simp add:ant_def Abs_Ainteg_inverse)
apply simp+
done
lemma aminus:"- ant m = ant (-m)"
apply (simp add:ant_def minus_ant_def,
cut_tac ant_z_in_Ainteg[of "m"],
simp add:Abs_Ainteg_inverse)
done
lemma aminusZero:"- ant 0 = ant 0"
apply (simp add:aminus)
done
lemma ant_0: "ant 0 = (0::ant)"
by (simp add: Zero_ant_def)
lemma inf_neq_0[simp]:"\<infinity> \<noteq> 0"
apply (cut_tac z_neq_inf[of "0"], frule not_sym)
apply (simp add:ant_0)
done
lemma zero_neq_inf[simp]:"0 \<noteq> \<infinity>"
by (cut_tac inf_neq_0, frule not_sym, simp)
lemma minf_neq_0[simp]:"-\<infinity> \<noteq> 0"
apply (cut_tac z_neq_minf[of "0"], frule not_sym)
apply (simp add:ant_0)
done
lemma zero_neq_minf[simp]:"0 \<noteq> -\<infinity>"
by (cut_tac minf_neq_0, frule not_sym, simp)
lemma a_minus_zero[simp]:"-(0::ant) = 0"
by (cut_tac aminusZero, simp add:ant_0)
lemma a_minus_minus: "- (- z) = (z::ant)"
apply (cut_tac mem_ant[of "z"])
apply (erule disjE, simp add:minf, simp add: minus_ant_def,
cut_tac ant_minf_in_Ainteg,
cut_tac ant_inf_in_Ainteg,
simp add:Abs_Ainteg_inverse)
apply (erule disjE) apply (erule exE, simp add:aminus)
apply (simp add:minf, simp add: minus_ant_def,
cut_tac ant_minf_in_Ainteg,
cut_tac ant_inf_in_Ainteg,
simp add:Abs_Ainteg_inverse,
simp add:inf_ant_def)
done
lemma aminus_0: "- (- 0) = (0::ant)"
apply (simp add:a_minus_minus)
done
lemma a_a_z_0:"\<lbrakk> 0 < z; a * ant z = 0\<rbrakk> \<Longrightarrow> a = 0"
by (rule contrapos_pp, simp+, cut_tac mem_ant[of "a"], erule disjE,
simp, erule disjE, erule exE, simp add:a_z_z,
simp only:ant_0[THEN sym], simp, simp)
lemma adiv_eq:"\<lbrakk> z \<noteq> 0; a * (ant z) = b * (ant z)\<rbrakk> \<Longrightarrow> a = b"
apply (cut_tac mem_ant[of "a"], cut_tac mem_ant[of "b"],
(erule disjE)+, simp, erule disjE, erule exE,
cut_tac less_linear[of "z" "0"], erule disjE, simp add:a_z_z,
frule sym, thin_tac "\<infinity> = ant (za * z)", simp,
simp add:a_z_z, frule sym, thin_tac "- \<infinity> = ant (za * z)", simp,
cut_tac less_linear[of "z" "0"], erule disjE, simp,
simp, erule disjE, erule exE)
apply (erule disjE,
cut_tac less_linear[of "z" "0"], simp,
erule disjE, simp add:a_z_z, simp add:a_z_z,
erule disjE, erule exE, simp add:a_z_z,
cut_tac less_linear[of "z" "0"], simp,
erule disjE, simp add:a_z_z, simp add:a_z_z,
erule disjE,
cut_tac less_linear[of "z" "0"], simp,
erule disjE, simp+)
apply (erule disjE, erule exE, simp add:a_z_z,
cut_tac less_linear[of "z" "0"], simp, erule disjE, simp,
frule sym, thin_tac "- \<infinity> = ant (za * z)", simp,
simp, frule sym, thin_tac "\<infinity> = ant (za * z)", simp,
cut_tac less_linear[of "z" "0"], simp)
done
lemma aminus_add_distrib: "- (z + w) = (- z) + (- w::ant)"
apply (cut_tac mem_ant[of "z"], cut_tac mem_ant[of "w"],
(erule disjE)+, simp add:a_minus_minus,
erule disjE, erule exE, simp,
simp add:a_minus_minus aminus, simp add:a_minus_minus)
apply ((erule disjE)+, erule exE,
simp add:a_minus_minus, simp add:aminus,
simp add:a_minus_minus)
apply ((erule disjE)+, (erule exE)+, simp add:a_zpz aminus,
erule exE, simp add:aminus,
erule disjE, erule exE, simp add:aminus, simp)
done
lemma aadd_commute:"(x::ant) + y = y + x"
apply (cut_tac mem_ant[of "x"], cut_tac mem_ant[of "y"])
apply (erule disjE, erule disjE, simp,
erule disjE, erule exE, simp+,
(erule disjE)+, erule exE, simp+)
apply ((erule disjE)+, (erule exE)+, simp add:a_zpz,
erule exE, simp, erule disjE, erule exE, simp+)
done
definition
aug_inf :: "ant set" ("Z\<^sub>\<infinity>") where
"Z\<^sub>\<infinity> = {(z::ant). z \<noteq> -\<infinity> }"
definition
aug_minf :: "ant set" ("Z\<^sub>-\<^sub>\<infinity>") where
"Z\<^sub>-\<^sub>\<infinity> = {(z::ant). z \<noteq> \<infinity> }"
lemma z_in_aug_inf:"ant z \<in> Z\<^sub>\<infinity>"
apply (simp add:aug_inf_def)
done
lemma Zero_in_aug_inf:"0 \<in> Z\<^sub>\<infinity>"
by (simp only:Zero_ant_def, simp add: aug_inf_def)
lemma z_in_aug_minf:"ant z \<in> Z\<^sub>-\<^sub>\<infinity>"
by (simp add:aug_minf_def)
lemma mem_aug_minf:"a \<in> Z\<^sub>-\<^sub>\<infinity> \<Longrightarrow> a = - \<infinity> \<or> (\<exists>z. a = ant z)"
by (cut_tac mem_ant[of a], simp add:aug_minf_def)
lemma minus_an_in_aug_minf:" - an n \<in> Z\<^sub>-\<^sub>\<infinity>"
apply (simp add:an_def)
apply (simp add:aminus)
apply (simp add:z_in_aug_minf)
done
lemma Zero_in_aug_minf:"0 \<in> Z\<^sub>-\<^sub>\<infinity>"
by (simp add:Zero_ant_def aug_minf_def)
lemma aadd_assoc_i: "\<lbrakk>x \<in> Z\<^sub>\<infinity>; y \<in> Z\<^sub>\<infinity>; z \<in> Z\<^sub>\<infinity>\<rbrakk> \<Longrightarrow> (x + y) + z = x + (y + z)"
apply (cut_tac mem_ant[of "x"],
cut_tac mem_ant[of "y"],
cut_tac mem_ant[of "z"], simp add:aug_inf_def,
(erule disjE)+, (erule exE)+, (simp add:a_zpz)+,
(erule exE)+, simp add:a_zpz)
apply ((erule disjE)+, (erule exE)+, simp,
erule exE, simp,
(erule disjE)+, (erule exE)+, simp add:a_zpz,
erule exE, simp, erule disjE, erule exE, simp)
apply simp
done
lemma aadd_assoc_m: "\<lbrakk>x \<in> Z\<^sub>-\<^sub>\<infinity>; y \<in> Z\<^sub>-\<^sub>\<infinity>; z \<in> Z\<^sub>-\<^sub>\<infinity>\<rbrakk> \<Longrightarrow>
(x + y) + z = x + (y + z)"
apply (cut_tac mem_ant[of "x"],
cut_tac mem_ant[of "y"],
cut_tac mem_ant[of "z"], simp add:aug_minf_def )
apply ((erule disjE)+, simp, erule exE, simp,
erule disjE, erule exE, simp, (erule exE)+, simp add:a_zpz)
apply ((erule disjE)+, erule exE, simp, (erule exE)+, simp,
erule disjE, erule exE, simp, erule exE, simp add:a_zpz)
apply ((erule exE)+, simp add:a_zpz)
done
lemma aadd_0_r: "x + (0::ant) = x"
apply (cut_tac mem_ant[of "x"], simp add:Zero_ant_def)
apply ((erule disjE)+, simp)
apply (erule disjE, erule exE, simp add:a_zpz,
simp)
done
lemma aadd_0_l: "(0::ant) + x = x"
apply (cut_tac mem_ant[of "x"], simp add:Zero_ant_def)
apply ((erule disjE)+, simp)
apply (erule disjE, erule exE, simp, simp add:a_zpz, simp)
done
lemma aadd_minus_inv: "(- x) + x = (0::ant)"  (** to be renamed aadd_minus_l **)
apply (cut_tac mem_ant[of "x"],
erule disjE, simp add:a_minus_minus,
erule disjE, erule exE, simp add:aminus, simp add:a_zpz,
simp add:Zero_ant_def, simp)
done
lemma aadd_minus_r: "x + (- x) = (0::ant)"
apply (cut_tac aadd_minus_inv[of "x"])
apply (simp add:aadd_commute)
done
lemma ant_minus_inj:"ant z \<noteq> ant w \<Longrightarrow> - ant z \<noteq> - ant w"
by (simp add:aminus)
lemma aminus_mult_minus: "(- (ant z)) * (ant w) = - ((ant z) * (ant w))"
apply (simp add:ant_def minus_ant_def,
cut_tac ant_z_in_Ainteg[of "z"],
cut_tac ant_z_in_Ainteg[of "-z"],
cut_tac ant_z_in_Ainteg[of "w"],
simp add:Abs_Ainteg_inverse)
apply (simp add:mult_ant_def) apply (simp add:Abs_Ainteg_inverse,
simp add:zag_t_def,
cut_tac ant_z_in_Ainteg[of "z * w"])
apply (simp add:Abs_Ainteg_inverse)
done
lemma amult_commute: "(x::ant) * y = y * x"
apply (cut_tac mem_ant[of "x"],
cut_tac mem_ant[of "y"])
apply (erule disjE, erule disjE, simp)
apply (erule disjE, erule exE, simp)
apply (cut_tac x = 0 and y = z in less_linear)
apply (erule disjE, simp)
apply (erule disjE, rotate_tac -1, frule sym, thin_tac "0 = z", simp)
apply (simp add:inf_ant_def ant_def, simp add:minus_ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_z_in_Ainteg[of "0"],
cut_tac ant_z_in_Ainteg[of "-1"],
cut_tac ant_minf_in_Ainteg,
simp add:Abs_Ainteg_inverse)
apply (simp add:mult_ant_def, simp add:Abs_Ainteg_inverse,
simp add:zag_t_def, simp)
apply (simp add:inf_ant_def)
apply (simp add:mult_ant_def minus_ant_def,
cut_tac ant_inf_in_Ainteg,
simp add:Abs_Ainteg_inverse,
cut_tac ant_minf_in_Ainteg,
simp add:Abs_Ainteg_inverse, simp add:zag_t_def)
apply (erule disjE, erule disjE, simp)
apply (erule exE,
cut_tac x = 0 and y = z in less_linear)
apply (erule disjE, simp)
apply (erule disjE, rotate_tac -1, thin_tac "0 = z", simp add:mult_ant_def,
simp add:ant_def inf_ant_def minus_ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac z = z in ant_z_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
simp add:Abs_Ainteg_inverse, simp add:zag_t_def,
simp)
apply (simp add:inf_ant_def minus_ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac z = z in ant_z_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
simp add:Abs_Ainteg_inverse,
simp add:mult_ant_def,
simp add:Abs_Ainteg_inverse, simp add:zag_t_def)
apply ((erule disjE)+, (erule exE)+, simp add:a_z_z)
apply (erule exE,
cut_tac x = 0 and y = z in less_linear,
erule disjE, simp)
apply (erule disjE, rotate_tac -1, frule sym, thin_tac "0 = z", simp,
simp add:mult_ant_def ant_def inf_ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_z_in_Ainteg[of "0"],
simp add:Abs_Ainteg_inverse, simp add:zag_t_def,
simp)
apply (erule disjE, erule exE,
cut_tac x = 0 and y = z in less_linear,
erule disjE, simp,
erule disjE, rotate_tac -1, frule sym, thin_tac "0 = z", simp,
simp add:mult_ant_def ant_def inf_ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_z_in_Ainteg[of "0"],
simp add:Abs_Ainteg_inverse, simp add:zag_t_def,
simp+)
done
lemma z_le_i[simp]:"(ant x) \<le> \<infinity> "
apply (simp add:le_ant_def ant_def,
cut_tac ant_z_in_Ainteg[of "0"],
cut_tac ant_z_in_Ainteg[of "x"],
cut_tac ant_inf_in_Ainteg,
simp add:Abs_Ainteg_inverse,
simp add:inf_ant_def,
simp add:Abs_Ainteg_inverse)
done
lemma z_less_i[simp]:"(ant x) < \<infinity> "
apply (cut_tac z_le_i[of "x"],
cut_tac z_neq_inf[of "x"],
simp add:less_ant_def)
done
lemma m_le_z:"-\<infinity> \<le> (ant x) "
apply (simp add:le_ant_def ant_def,
cut_tac ant_z_in_Ainteg[of "0"],
cut_tac ant_z_in_Ainteg[of "x"],
cut_tac ant_minf_in_Ainteg,
cut_tac ant_inf_in_Ainteg,
simp add:Abs_Ainteg_inverse,
simp add:inf_ant_def,
simp add:minus_ant_def,
simp add:Abs_Ainteg_inverse)
done
lemma m_less_z[simp]:"-\<infinity> < (ant x)"
apply (cut_tac m_le_z[of "x"],
cut_tac z_neq_minf[of "x"],
frule not_sym, thin_tac "ant x \<noteq> - \<infinity>",
simp add:less_ant_def)
done
lemma noninf_mem_Z:"\<lbrakk>x \<in> Z\<^sub>\<infinity>; x \<noteq> \<infinity>\<rbrakk> \<Longrightarrow> \<exists>(z::int). x = ant z"
apply (simp add:aug_inf_def)
apply (cut_tac mem_ant[of "x"], simp)
done
lemma z_mem_Z:"ant z \<in> Z\<^sub>\<infinity>"
by (simp add:aug_inf_def)
lemma inf_ge_any[simp]:"x \<le> \<infinity>"
apply (cut_tac mem_ant[of "x"], erule disjE)
apply (simp add:inf_ant_def minus_ant_def,
cut_tac ant_minf_in_Ainteg,
cut_tac ant_inf_in_Ainteg,
simp add:Abs_Ainteg_inverse,
simp add:le_ant_def, simp add:Abs_Ainteg_inverse)
apply (erule disjE, erule exE, simp)
apply (simp add:inf_ant_def,
cut_tac ant_inf_in_Ainteg,
simp add:le_ant_def, simp add:Abs_Ainteg_inverse)
done
lemma zero_lt_inf:"0 < \<infinity>"
by (simp add:less_ant_def)
lemma minf_le_any[simp]:"-\<infinity> \<le> x"
apply (cut_tac mem_ant[of "x"], erule disjE)
apply (simp add:inf_ant_def minus_ant_def,
cut_tac ant_minf_in_Ainteg,
cut_tac ant_inf_in_Ainteg,
simp add:Abs_Ainteg_inverse,
simp add:le_ant_def, simp add:Abs_Ainteg_inverse)
apply (erule disjE, erule exE, simp)
apply (simp add:inf_ant_def, simp add:minus_ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
simp add:le_ant_def, simp add:Abs_Ainteg_inverse)
apply simp
done
lemma minf_less_0:"-\<infinity> < 0"
by (simp add:less_ant_def)
lemma ale_antisym[simp]:"\<lbrakk>(x::ant) \<le> y; y \<le> x \<rbrakk> \<Longrightarrow> x = y"
apply (rule contrapos_pp, simp+)
apply (cut_tac mem_ant[of "x"], cut_tac mem_ant[of "y"])
apply (erule disjE, erule disjE, simp)
apply (erule disjE, erule exE, simp, simp add:ant_def,
simp add:minus_ant_def inf_ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
cut_tac z = z in ant_z_in_Ainteg, simp add:Abs_Ainteg_inverse,
simp add:le_ant_def Abs_Ainteg_inverse)
apply (thin_tac "x \<le> y",
simp add:le_ant_def ant_def minus_ant_def inf_ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
simp add:Abs_Ainteg_inverse)
apply (erule disjE, erule disjE, erule exE,
thin_tac "y \<le> x",
simp add:le_ant_def ant_def minus_ant_def inf_ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
cut_tac z = z in ant_z_in_Ainteg, simp add:Abs_Ainteg_inverse)
apply (thin_tac "y \<le> x",
simp add:le_ant_def ant_def minus_ant_def inf_ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
simp add:Abs_Ainteg_inverse)
apply ((erule disjE)+, (erule exE)+,
cut_tac z = z in ant_z_in_Ainteg,
cut_tac z = za in ant_z_in_Ainteg,
simp add:le_ant_def ant_def,
simp add:Abs_Ainteg_inverse)
apply (erule exE,
simp add:le_ant_def ant_def inf_ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac z = z in ant_z_in_Ainteg, simp add:Abs_Ainteg_inverse)
apply (erule disjE, erule exE, thin_tac "y \<le> x",
simp add:le_ant_def ant_def minus_ant_def inf_ant_def,
cut_tac ant_inf_in_Ainteg,
cut_tac ant_minf_in_Ainteg,
cut_tac z = z in ant_z_in_Ainteg, simp add:Abs_Ainteg_inverse)
apply simp
done
lemma x_gt_inf[simp]:"\<infinity> \<le> x \<Longrightarrow> x = \<infinity>"
apply (cut_tac inf_ge_any[of "x"],
rule ale_antisym[of "x" "\<infinity>"], assumption+)
done
lemma Zinf_pOp_closed:"\<lbrakk>x \<in> Z\<^sub>\<infinity>; y \<in> Z\<^sub>\<infinity>\<rbrakk> \<Longrightarrow> x + y \<in> Z\<^sub>\<infinity>"
apply (cut_tac mem_ant[of "x"], cut_tac mem_ant[of "y"],
simp add:aug_inf_def,
(erule disjE)+, (erule exE)+, simp add:a_zpz,
cut_tac z = "-(z + za)" in z_neq_inf,
rule contrapos_pp, simp+,
cut_tac m1 = "z+za" in aminus[THEN sym], simp add:a_minus_minus,
erule exE, simp, simp add:minf_neq_inf[THEN not_sym],
erule disjE, erule exE, simp,
simp add:minf_neq_inf[THEN not_sym],
simp)
done
lemma Zminf_pOp_closed:"\<lbrakk>x \<in> Z\<^sub>-\<^sub>\<infinity>; y \<in> Z\<^sub>-\<^sub>\<infinity>\<rbrakk> \<Longrightarrow> x + y \<in> Z\<^sub>-\<^sub>\<infinity>"
apply (cut_tac mem_ant[of "x"], cut_tac mem_ant[of "y"],
simp add:aug_minf_def,
(erule disjE)+, simp, erule exE, simp,
erule disjE, erule exE, simp,
(erule exE)+, simp add:a_zpz)
done
lemma amult_distrib1:"(ant z) \<noteq> 0 \<Longrightarrow>
(a + b) * (ant z) = a * (ant z) + b * (ant z)"
apply (cut_tac mem_ant[of "a"], cut_tac mem_ant[of "b"],
(erule disjE)+, simp, cut_tac less_linear[of "z" "0"],
erule disjE, simp, erule disjE, simp, simp add:ant_0, simp,
erule disjE, erule exE, simp,
cut_tac less_linear[of "z" "0"],
erule disjE, simp add:a_z_z, erule disjE, simp add:ant_0,
simp add:a_z_z,
cut_tac less_linear[of "z" "0"], simp,
erule disjE, simp add:ant_0[THEN sym] a_z_z)
apply (erule disjE, simp add:ant_0[THEN sym],
simp, simp add:ant_0[THEN sym], simp add:a_z_z,
(erule disjE)+, (erule exE)+, cut_tac less_linear[of "z" "0"], simp,
erule disjE, simp add:a_z_z,
erule disjE, simp add:ant_0, simp add:a_z_z,
cut_tac less_linear[of "z" "0"],
erule disjE, simp add:ant_0[THEN sym])
apply (simp add:a_z_z, simp,
erule disjE, simp add:ant_0, simp add:ant_0[THEN sym] a_z_z,
(erule disjE)+, (erule exE)+, simp add:a_zpz a_z_z,
simp add: distrib_right, erule exE, simp add:a_z_z,
cut_tac less_linear[of "z" "0"], erule disjE, simp,
erule disjE, simp add:ant_0, simp)
apply (erule disjE, erule exE, simp,
cut_tac less_linear[of "z" "0"], erule disjE, simp add:a_z_z,
erule disjE, simp add:ant_0, simp add:a_z_z,
cut_tac less_linear[of "z" "0"], erule disjE, simp,
erule disjE, simp add:ant_0, simp)
done
lemma amult_0_r:"(ant z) * 0 = 0"
by (simp add:ant_0[THEN sym] a_z_z)
lemma amult_0_l:"0 * (ant z) = 0"
by (simp add:ant_0[THEN sym] a_z_z)
definition
asprod :: "[int, ant] \<Rightarrow> ant" (infixl "*\<^sub>a" 200) where
"m *\<^sub>a x ==
if x = \<infinity> then (if 0 < m then \<infinity> else (if m < 0 then -\<infinity> else
if m = 0 then 0 else undefined))
else (if x = -\<infinity> then
(if 0 < m then -\<infinity> else (if m < 0 then \<infinity> else
if m = 0 then 0 else undefined))
else (ant m) * x)"
lemma asprod_pos_inf[simp]:"0 < m \<Longrightarrow> m *\<^sub>a \<infinity> = \<infinity>"
apply (simp add:asprod_def)
done
lemma asprod_neg_inf[simp]:"m < 0 \<Longrightarrow> m *\<^sub>a \<infinity> = -\<infinity>"
apply (simp add:asprod_def)
done
lemma asprod_pos_minf[simp]:"0 < m \<Longrightarrow> m *\<^sub>a (-\<infinity>) = (-\<infinity>)"
apply (simp add:asprod_def)
done
lemma asprod_neg_minf[simp]:"m < 0 \<Longrightarrow> m *\<^sub>a (-\<infinity>) = \<infinity>"
apply (simp add:asprod_def)
done
lemma asprod_mult:" m *\<^sub>a (ant n) = ant(m * n)"
apply (cut_tac z_neq_inf[of "n"],
cut_tac z_neq_minf[of "n"],
simp add:asprod_def, simp add:a_z_z)
done
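(* Sanity checks for *\<^sub>a on concrete values (illustrative). *)
lemma "(2::int) *\<^sub>a ant 3 = ant 6"
apply (simp add:asprod_mult)
done
lemma "(-2::int) *\<^sub>a \<infinity> = -\<infinity>"
apply simp
done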
lemma asprod_1:"1 *\<^sub>a x = x"
by (cut_tac mem_ant[of "x"], erule disjE, simp,
erule disjE, erule exE, simp add:asprod_mult, simp)
 (** to be merged with asprod_1_x later **)
lemma agsprod_assoc_a:"m *\<^sub>a (n *\<^sub>a (ant x)) = (m * n) *\<^sub>a (ant x)"
apply (simp add:asprod_mult)
done
lemma agsprod_assoc:"\<lbrakk>m \<noteq> 0; n \<noteq> 0\<rbrakk> \<Longrightarrow> m *\<^sub>a (n *\<^sub>a x) = (m * n) *\<^sub>a x"
apply (cut_tac less_linear[of "m" "0"], cut_tac less_linear[of "n" "0"],
cut_tac mem_ant[of "x"],
(erule disjE)+, simp,
frule zmult_neg_neg[of "m" "n"], assumption+, simp)
apply (erule disjE, erule exE, simp add:asprod_mult,
frule zmult_neg_neg[of "m" "n"], assumption+, simp+,
erule disjE, simp,
frule zmult_neg_pos[of "m" "n"], assumption+, simp,
erule disjE, erule exE, simp,
frule zmult_neg_pos[of "m" "n"], assumption+, simp add:asprod_mult,
frule zmult_neg_pos[of "m" "n"], assumption+, simp)
apply (simp, (erule disjE)+,
frule zmult_pos_neg[of "m" "n"], assumption+,
simp,
erule disjE, erule exE, simp add:asprod_mult,
frule zmult_pos_neg[of "m" "n"], assumption+, simp)
apply (frule zmult_pos_pos[of "m" "n"], assumption+,
erule disjE, simp,
erule disjE, erule exE, simp add:asprod_mult, simp)
done
lemma asprod_distrib1:"m \<noteq> 0 \<Longrightarrow> m *\<^sub>a (x + y) = (m *\<^sub>a x) + (m *\<^sub>a y)"
apply (cut_tac mem_ant[of "x"], cut_tac mem_ant[of "y"])
apply (cut_tac less_linear[of "m" "0"],
erule disjE,
erule disjE, erule disjE, simp,
erule disjE, simp add:asprod_def add_ant_def, simp,
simp, (erule disjE)+, erule exE, simp add:asprod_mult,
simp add:Zero_ant_def asprod_mult)
apply (erule disjE, erule exE, simp add:asprod_mult,
simp add: Zero_ant_def asprod_mult,
erule disjE, erule disjE, erule disjE, erule exE,
simp add:asprod_mult,
simp add:Zero_ant_def asprod_mult,
erule disjE, erule exE, simp add:asprod_mult,
simp add:Zero_ant_def asprod_mult)
apply (simp, erule disjE, erule exE, simp,
(erule disjE)+, erule exE, simp add:asprod_mult,
simp add:a_zpz, simp add:asprod_mult distrib_left,
simp add:asprod_mult)
apply (erule disjE, erule exE, simp add:a_zpz asprod_mult,
simp add: distrib_left, simp add:asprod_mult,
(erule disjE)+, erule exE, simp add:asprod_mult, simp,
erule disjE, erule exE, simp add:asprod_mult, simp)
done
lemma asprod_0_x[simp]:"0 *\<^sub>a x = 0"
apply (simp add:asprod_def, (rule impI)+,
cut_tac mem_ant[of "x"], simp, erule exE,
simp add:asprod_def a_z_z, simp add:ant_0)
done
lemma asprod_n_0:"n *\<^sub>a 0 = 0"
apply (simp add:Zero_ant_def asprod_mult)
done
lemma asprod_distrib2:"\<lbrakk>0 < i; 0 < j\<rbrakk> \<Longrightarrow> (i + j) *\<^sub>a x = (i *\<^sub>a x) + (j *\<^sub>a x)"
by (cut_tac mem_ant[of "x"], erule disjE, simp,
erule disjE, erule exE, simp add:asprod_mult,
simp add: distrib_right a_zpz, simp)
lemma asprod_minus:"x \<noteq> -\<infinity> \<and> x \<noteq> \<infinity> \<Longrightarrow> - z *\<^sub>a x = z *\<^sub>a (- x)"
apply (cut_tac mem_ant[of "x"], erule disjE, simp+)
apply (erule exE, simp add:asprod_mult aminus)
done
lemma asprod_div_eq:"\<lbrakk>n \<noteq> 0; n *\<^sub>a x = n *\<^sub>a y\<rbrakk> \<Longrightarrow> x = y"
apply (cut_tac less_linear[of "n" "0"], simp)
apply (cut_tac mem_ant[of "x"], cut_tac mem_ant[of "y"])
apply ((erule disjE)+, simp,
erule disjE, erule exE, rule contrapos_pp, simp+,
simp add:asprod_mult)
apply (cut_tac z1 = "n * z" in z_neq_inf[THEN not_sym], simp+)
apply ((erule disjE)+, erule exE, simp add:asprod_mult,
cut_tac z = "n * z" in z_neq_inf,
rule contrapos_pp, simp, simp,
(erule disjE)+, (erule exE)+, simp add:asprod_mult,
erule exE, simp add: asprod_mult)
apply (erule disjE, erule exE, simp add:asprod_mult,
simp add:z_neq_minf[THEN not_sym], simp)
apply ((erule disjE)+, simp,
erule disjE, erule exE, rule contrapos_pp, simp+,
simp add:asprod_mult,
cut_tac z1 = "n * z" in z_neq_minf[THEN not_sym], simp,
rule contrapos_pp, simp+)
apply ((erule disjE)+, (erule exE)+, simp add:asprod_mult,
erule exE, simp add:asprod_mult,
erule disjE, erule exE, simp add:asprod_mult
z_neq_inf[THEN not_sym], simp)
apply (erule disjE, simp, erule disjE, erule exE, simp add:asprod_mult
z_neq_inf[THEN not_sym], simp)
done
lemma asprod_0:"\<lbrakk>z \<noteq> 0; z *\<^sub>a x = 0 \<rbrakk> \<Longrightarrow> x = 0"
by (rule asprod_div_eq[of "z" "x" "0"], assumption, simp add:asprod_n_0)
lemma asp_z_Z:"z *\<^sub>a ant x \<in> Z\<^sub>\<infinity>"
by (simp add:asprod_mult z_in_aug_inf)
lemma tna_ant:" tna (ant z) = z"
apply (cut_tac z_neq_minf[of "z"], cut_tac z_neq_inf[of "z"],
simp add:ant_def tna_def)
apply (cut_tac ant_z_in_Ainteg[of "z"], simp add:Abs_Ainteg_inverse)
done
lemma ant_tna:"x \<noteq> \<infinity> \<and> x \<noteq> -\<infinity> \<Longrightarrow> ant (tna x) = x"
apply (cut_tac mem_ant[of "x"], simp, erule exE)
apply (simp add:ant_def tna_def)
apply (cut_tac z = z in ant_z_in_Ainteg, simp add:Abs_Ainteg_inverse)
done
lemma ant_sol:"\<lbrakk>a \<in> Z\<^sub>\<infinity>; b \<in> Z\<^sub>\<infinity>; c \<in> Z\<^sub>\<infinity>; b \<noteq> \<infinity>; a = b + c\<rbrakk> \<Longrightarrow> a - b = c"
apply (subgoal_tac "-b \<in> Z\<^sub>\<infinity>", simp add:diff_ant_def,
subgoal_tac "a + (-b) = b + c + (-b)",
subst aadd_commute[of "b" "c"], subst aadd_assoc_i, assumption+,
simp add:aadd_minus_r, simp add:aadd_0_r, simp)
apply (cut_tac mem_ant[of "b"], simp add:aug_inf_def,
erule exE, simp add:aminus)
done
subsection "Ordering of integers and ordering nats"
subsection \<open>The \<open>\<le>\<close> Ordering\<close>
lemma zneq_aneq:"(n \<noteq> m) = ((ant n) \<noteq> (ant m))"
apply (rule iffI)
apply (rule contrapos_pp, simp+)
done
lemma ale:"(n \<le> m) = ((ant n) \<le>(ant m))"
apply (rule iffI)
apply (simp add:ant_def le_ant_def,
cut_tac ant_z_in_Ainteg[of "n"],
cut_tac ant_z_in_Ainteg[of "m"],
simp add:Abs_Ainteg_inverse)+
done
lemma aless:"(n < m) = ((ant n) < (ant m))"
apply (simp add:less_ant_def,
cut_tac ale[of "n" "m"], arith)
done
lemma ale_refl: "w \<le> (w::ant)"
apply (cut_tac mem_ant[of "w"],
erule disjE, simp,
erule disjE, erule exE, simp,
subst ale[THEN sym], simp+)
done
lemma aeq_ale:"(a::ant) = b \<Longrightarrow> a \<le> b"
by (simp add:ale_refl)
lemma ale_trans: "\<lbrakk> (i::ant) \<le> j; j \<le> k \<rbrakk> \<Longrightarrow> i \<le> k"
apply (cut_tac mem_ant[of "i"], cut_tac mem_ant[of "j"],
cut_tac mem_ant[of "k"],
(erule disjE)+, simp add:ale_refl, erule disjE, erule exE, simp+,
(erule disjE)+, simp add:ale_refl, simp add:ale_refl)
apply ((erule disjE)+, erule exE, simp+,
erule exE, simp,
cut_tac x = "ant z" in minf_le_any,
frule_tac x = "ant z" in ale_antisym[of _ "-\<infinity>"], assumption+,
simp+,
cut_tac minf_le_any[of "\<infinity>"], frule ale_antisym[of "-\<infinity>" "\<infinity>"],
simp+)
apply (erule disjE, simp,
(erule disjE)+, (erule exE)+, simp,
cut_tac x = "ant za" in minf_le_any,
frule_tac x = "ant za" in ale_antisym[of _ "-\<infinity>"], assumption+,
simp, erule exE,
cut_tac x = "ant z" in minf_le_any, simp)
apply (cut_tac minf_le_any[of "\<infinity>"],
frule_tac ale_antisym[of "-\<infinity>" "\<infinity>"], assumption+,
simp, erule disjE, erule exE, simp,
cut_tac x = "ant z" in inf_ge_any,
frule_tac x = "ant z" in ale_antisym[of _ "\<infinity>"], assumption+,
simp)
apply (cut_tac minf_le_any[of "\<infinity>"], frule ale_antisym[of "-\<infinity>" "\<infinity>"],
simp+,
(erule disjE)+, (erule exE)+, simp add:ale[THEN sym],
simp, (erule disjE)+, (erule exE)+,
cut_tac x = "ant za" in inf_ge_any,
frule_tac x = "ant za" in ale_antisym[of _ "\<infinity>"],
simp+)
apply (erule disjE, erule exE,
cut_tac inf_ge_any[of "j"],
frule ale_antisym[of "j" "\<infinity>"], assumption+,
cut_tac x = "ant z" in inf_ge_any, simp+)
done
(* Axiom 'order_aless_le_not_le' of class 'order': *)
lemma aless_le_not_le: "((w::ant) < z) = (w \<le> z \<and> \<not> z \<le> w)"
by (auto simp add: less_ant_def)
instance ant :: order
proof qed
(assumption |
rule ale_refl ale_trans ale_antisym aless_le_not_le)+
(* Axiom 'linorder_linear' of class 'linorder': *)
lemma ale_linear: "(z::ant) \<le> w \<or> w \<le> z"
apply (cut_tac mem_ant[of "z"], cut_tac mem_ant[of "w"],
erule disjE, simp,
erule disjE, simp)
apply ((erule disjE)+, (erule exE)+, simp add:ale[THEN sym],
simp add:linorder_linear)
apply simp+
done
instance ant :: linorder
proof qed (rule ale_linear)
lemmas aless_linear = less_linear [where 'a = ant]
lemma ant_eq_0_conv [simp]: "(ant n = 0) = (n = 0)"
apply (simp add:Zero_ant_def)
done
lemma aless_zless: "(ant m < ant n) = (m<n)"
by (simp add: ale ant_def linorder_not_le [symmetric])
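(* A concrete instance of the order embedding (illustrative). *)
lemma "ant 2 < ant 3"
apply (simp add:aless_zless)
done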
lemma a0_less_int_conv [simp]: "(0 < ant n) = (0 < n)"
apply (simp add:Zero_ant_def)
apply (simp add:aless[THEN sym])
done
lemma a0_less_1: "0 < (1::ant)"
apply (simp add:Zero_ant_def One_ant_def)
apply (subst aless_zless) apply simp
done
lemma a0_neq_1 [simp]: "0 \<noteq> (1::ant)"
by (simp only:Zero_ant_def One_ant_def, subst zneq_aneq[THEN sym], simp)
lemma ale_zle [simp]: "((ant i) \<le> (ant j)) = (i\<le>j)"
by (subst ale[of "i" "j"], simp)
lemma ant_1 [simp]: "ant 1 = 1"
by (simp add: One_ant_def)
lemma zpos_apos:"(0 \<le> n) = (0 \<le> (ant n))"
apply (simp only:ale[of "0" "n"], simp only:ant_0[THEN sym])
done
lemma zposs_aposss:"(0 < n) = (0 < (ant n))"
apply (rule iffI)
apply (unfold Zero_ant_def,
subst aless[THEN sym, of "0" "n"], simp,
subst aless[of "0" "n"], simp)
done
lemma an_nat_pos[simp]:"0 \<le> an n"
by (simp add:ant_0[THEN sym] an_def)
lemma amult_one_l:" 1 * (x::ant) = x"
by (cut_tac mem_ant[of "x"], erule disjE, simp
only:ant_1[THEN sym], simp del:ant_1,
erule disjE, erule exE, simp only:ant_1[THEN sym],
simp del:ant_1 add:a_z_z,
simp only:ant_1[THEN sym], simp del:ant_1)
lemma amult_one_r:"(x::ant)* 1 = x"
by (cut_tac amult_one_l[of "x"], simp add:amult_commute)
lemma amult_eq_eq_r:"\<lbrakk>z \<noteq> 0; a * ant z = b * ant z\<rbrakk> \<Longrightarrow> a = b"
apply (cut_tac less_linear[of "z" "0"], simp,
cut_tac mem_ant[of "a"], cut_tac mem_ant[of "b"],
(erule disjE)+, simp,
erule disjE, erule exE, simp add:a_z_z,
frule sym, thin_tac "\<infinity> = ant (za * z)", simp,
simp, (erule disjE)+, simp, erule exE, simp add:a_z_z, simp)
apply ((erule disjE)+, (erule exE)+, simp add:a_z_z,
erule exE, simp add:a_z_z, erule disjE, erule exE,
simp add:a_z_z,
frule sym, thin_tac "- \<infinity> = ant (za * z)", simp, simp,
(erule disjE)+, simp, erule disjE, erule exE, simp add:a_z_z,
frule sym, thin_tac "- \<infinity> = ant (za * z)", simp, simp)
apply ((erule disjE)+, erule exE, simp add:a_z_z, simp,
(erule disjE)+, (erule exE)+, simp add:a_z_z,
erule exE, simp add:a_z_z, erule disjE, erule exE, simp add:a_z_z,
frule sym, thin_tac "\<infinity> = ant (za * z)", simp, simp)
done
lemma amult_eq_eq_l:"\<lbrakk>z \<noteq> 0; (ant z) * a = (ant z) * b\<rbrakk> \<Longrightarrow> a = b"
by (simp add:amult_commute, rule amult_eq_eq_r, assumption+)
lemma amult_pos:"\<lbrakk>0 < b; 0 \<le> x\<rbrakk> \<Longrightarrow> x \<le> (b *\<^sub>a x)"
apply (cut_tac mem_ant[of "x"], erule disjE, simp,
erule disjE, erule exE, simp add:asprod_mult,
simp add:zpos_apos[THEN sym],
frule_tac a = z and b = b in pos_zmult_pos, assumption+,
simp add:mult.commute, simp)
done
lemma asprod_amult:"0 < z \<Longrightarrow> z *\<^sub>a x = (ant z) * x"
apply (simp add:asprod_def)
done
lemma amult_pos1:"\<lbrakk>0 < b; 0 \<le> x\<rbrakk> \<Longrightarrow> x \<le> ((ant b) * x)"
by (frule amult_pos[of "b" "x"], assumption, simp add:asprod_amult)
lemma amult_pos_mono_l:"0 < w \<Longrightarrow> (((ant w) * x) \<le> ((ant w) * y)) = (x \<le> y)"
apply (cut_tac mem_ant[of "x"], cut_tac mem_ant[of "y"],
(erule disjE)+, simp, erule disjE, erule exE, simp, simp,
(erule disjE)+, erule exE, simp add:a_z_z)
apply (rule iffI,
cut_tac x = "ant (w * z)" in minf_le_any, frule_tac x = "ant (w * z)"
in ale_antisym, assumption+, simp,
cut_tac x = "ant z" in minf_le_any, frule_tac x = "ant z"
in ale_antisym, assumption+, simp)
apply simp
apply ((erule disjE)+, (erule exE)+, simp add:a_z_z)
apply (erule exE, simp add:a_z_z)
apply (erule disjE, erule exE, simp add:a_z_z,
rule iffI,
cut_tac x = "ant (w * z)" in inf_ge_any,
frule_tac x = "ant (w * z)" in ale_antisym[of _ "\<infinity>"], assumption+,
simp,
cut_tac x = "ant z" in inf_ge_any,
frule_tac x = "ant z" in ale_antisym[of _ "\<infinity>"], assumption+,
simp, simp)
done
lemma amult_pos_mono_r:"0 < w \<Longrightarrow> ((x * (ant w)) \<le> (y * (ant w))) = (x \<le> y)"
apply (simp add:amult_commute[of _ "ant w"])
apply (rule amult_pos_mono_l, assumption)
done
lemma apos_neq_minf:"0 \<le> a \<Longrightarrow> a \<noteq> -\<infinity>"
by (rule contrapos_pp, simp+,
cut_tac minf_le_any[of "0"],
frule ale_antisym[of "0" "-\<infinity>"], assumption+, simp)
lemma asprod_pos_mono:"0 < w \<Longrightarrow> ((w *\<^sub>a x) \<le> (w *\<^sub>a y)) = (x \<le> y)"
by (simp add:asprod_amult, simp add:amult_pos_mono_l)
lemma a_inv:"(a::ant) + b = 0 \<Longrightarrow> a = -b"
apply (cut_tac mem_ant[of "a"], cut_tac mem_ant[of "b"],
(erule disjE)+, frule sym, thin_tac "a + b = 0",
simp add:ant_0[THEN sym])
apply (erule disjE, erule exE, simp, simp,
(erule disjE)+, erule exE, simp, simp,
simp add:a_minus_minus,
(erule disjE)+, (erule exE)+, simp add:aminus a_zpz,
erule exE, simp,
erule disjE, erule exE, simp, simp)
done
lemma asprod_pos_pos:"0 \<le> x \<Longrightarrow> 0 \<le> int n *\<^sub>a x"
apply (cases "n = 0")
apply simp_all
using asprod_pos_mono [THEN sym, of "int n" "0" "x"]
apply (simp add:asprod_n_0)
done
lemma asprod_1_x[simp]:"1 *\<^sub>a x = x"
apply (simp add:asprod_def)
apply (rule impI)+
apply (cut_tac mem_ant[of "x"], simp, erule exE, simp add:a_z_z)
apply (simp only:ant_1[THEN sym], simp del:ant_1 add:a_z_z)
done
lemma asprod_n_1[simp]:"n *\<^sub>a 1 = ant n"
apply (simp only:ant_1[THEN sym]) apply (simp only:asprod_mult)
apply simp
done
subsection "Aug ordering"
lemma aless_imp_le:" x < (y::ant) \<Longrightarrow> x \<le> y"
by (simp add:less_ant_def)
lemma gt_a0_ge_1:"(0::ant) < x \<Longrightarrow> 1 \<le> x"
apply (cut_tac mem_ant[of "x"],
erule disjE, unfold Zero_ant_def, simp)
apply (cut_tac less_ant_def[of "0" "-\<infinity>"], simp add:ant_0,
cut_tac minf_le_any[of "0"],
frule ale_antisym[of "0" "-\<infinity>"], assumption+,
simp add:ant_0[THEN sym], blast)
apply (erule disjE, erule exE, unfold One_ant_def, simp del:ant_1,
simp add:aless_zless, simp)
done
lemma gt_a0_ge_aN:"\<lbrakk>0 < x; N \<noteq> 0\<rbrakk> \<Longrightarrow> (ant (int N)) \<le> (int N) *\<^sub>a x"
apply (cut_tac mem_ant[of "x"], erule disjE, simp)
apply (cut_tac aless_imp_le[of "0" "-\<infinity>"],
cut_tac minf_le_any[of "0"],
frule ale_antisym[of "0" "-\<infinity>"], simp,
simp only: Zero_ant_def, simp)
apply (erule disjE, erule exE, simp add:asprod_mult, simp)
done
lemma aless_le_trans:"\<lbrakk>(x::ant) < y; y \<le> z\<rbrakk> \<Longrightarrow> x < z"
by auto
lemma ale_less_trans:"\<lbrakk>(x::ant) \<le> y; y < z\<rbrakk> \<Longrightarrow> x < z"
by auto
lemma aless_trans:"\<lbrakk>(x::ant) < y; y < z\<rbrakk> \<Longrightarrow> x < z"
by auto
lemma ale_neq_less:"\<lbrakk> (x::ant)\<le> y; x \<noteq> y\<rbrakk> \<Longrightarrow> x < y"
apply (simp add:less_ant_def)
done
lemma aneg_le:"(\<not> (x::ant) \<le> y) = (y < x)"
apply (cut_tac ale_linear[of "y" "x"])
apply (rule iffI, simp)
apply (rule contrapos_pp, simp+)
done
lemma aneg_less:"(\<not> x < (y::ant)) = (y \<le> x)"
by auto
lemma aadd_le_mono:"x \<le> (y::ant) \<Longrightarrow> x + z \<le> y + z"
apply (cut_tac mem_ant[of "x"], cut_tac mem_ant[of "y"],
cut_tac mem_ant[of "z"],
(erule disjE)+, simp, erule disjE, erule exE, simp+,
(erule disjE)+, erule exE, simp+,
(erule disjE)+, (erule exE)+, simp, erule exE, simp,
erule disjE, erule exE, simp+, (erule disjE)+, simp,
erule exE, simp+,
cut_tac minf_le_any[of "\<infinity>"], frule ale_antisym[of "-\<infinity>" "\<infinity>"],
assumption+, simp, (erule disjE)+, (erule exE)+, simp+,
cut_tac x = "ant za" in minf_le_any,
frule_tac x = "ant za" in ale_antisym[of _ "-\<infinity>"], assumption+, simp)
apply (erule exE, simp,
cut_tac x = "ant za" in minf_le_any,
frule_tac x = "ant za" in ale_antisym[of _ "-\<infinity>"], assumption+, simp,
erule disjE, erule exE, simp+,
cut_tac minf_le_any[of "\<infinity>"], frule ale_antisym[of "-\<infinity>" "\<infinity>"],
assumption+, simp, (erule disjE)+, (erule exE)+, simp+,
erule exE, simp, erule disjE, erule exE, simp+)
apply (cut_tac x = "ant za" in inf_ge_any, frule_tac x = "ant za" in
ale_antisym[of _ "\<infinity>"], assumption+, simp+,
(erule disjE)+, (erule exE)+, simp add:a_zpz,
(erule exE)+, simp add:a_zpz, (erule disjE)+, (erule exE)+,
simp add:a_zpz, erule exE, simp,
(erule disjE)+, (erule exE)+, simp add:a_zpz)
apply (cut_tac x = "ant za" in inf_ge_any, frule_tac x = "ant za" in
ale_antisym[of _ "\<infinity>"], assumption+, simp+,
erule exE, simp, erule disjE, erule exE, simp+)
done
lemma aadd_less_mono_z:"(x::ant) < y \<Longrightarrow> (x + (ant z)) < (y + (ant z))"
apply (simp add:less_ant_def, simp add:aadd_le_mono)
apply (cut_tac mem_ant[of "x"], cut_tac mem_ant[of "y"])
apply auto
apply (metis a_inv a_ipi a_ipz a_zpz aadd_minus_r less_le diff_ant_def minf_less_0)
apply (metis a_inv a_ipi a_ipz a_zpz aadd_minus_r less_le diff_ant_def minf_less_0)
apply (metis a_zpz add_right_cancel aeq_zeq)
apply (metis a_zpz less_le z_less_i)
done
lemma aless_le_suc[simp]:"(a::ant) < b \<Longrightarrow> a + 1 \<le> b"
apply (cut_tac mem_ant[of "b"])
apply (erule disjE,
frule aless_imp_le[of "a" "b"], simp,
cut_tac minf_le_any[of "a"], frule ale_antisym[of "a" "-\<infinity>"],
assumption, simp)
apply (erule disjE, erule exE, cut_tac mem_ant[of "a"], erule disjE,
unfold One_ant_def, simp del:ant_1,
erule disjE, erule exE, simp del:ant_1 add:a_zpz, simp only:aless_zless,
frule aless_imp_le[of "a" "b"], simp del:ant_1, simp)
done
lemma aposs_le_1:"(0::ant) < x \<Longrightarrow> 1 \<le> x"
apply (frule aless_le_suc[of "0" "x"],
simp add:aadd_0_l)
done
lemma pos_in_aug_inf:"(0::ant) \<le> x \<Longrightarrow> x \<in> Z\<^sub>\<infinity>"
apply (simp add:aug_inf_def)
apply (rule contrapos_pp, simp+)
apply (cut_tac minf_le_any[of "0"],
frule ale_antisym[of "0" "-\<infinity>"], assumption+,
unfold Zero_ant_def,
simp )
done
lemma aug_inf_noninf_is_z:"\<lbrakk>x \<in> Z\<^sub>\<infinity>; x \<noteq> \<infinity>\<rbrakk> \<Longrightarrow> \<exists>z. x = ant z"
apply (cut_tac mem_ant[of "x"], simp add:aug_inf_def)
done
lemma aadd_two_pos:"\<lbrakk>0 \<le> (x::ant); 0 \<le> y\<rbrakk> \<Longrightarrow> 0 \<le> x + y"
apply (cut_tac Zero_in_aug_inf,
cut_tac pos_in_aug_inf[of "x"],
cut_tac pos_in_aug_inf[of "y"])
apply (cut_tac aadd_le_mono[of "0" "x" "y"], simp add:aadd_0_l,
assumption+)
done
lemma aadd_pos_poss:"\<lbrakk>(0::ant) \<le> x; 0 < y\<rbrakk> \<Longrightarrow> 0 < (x + y)"
apply (frule aless_imp_le[of "0" "y"],
subst less_ant_def, rule conjI, simp add:aadd_two_pos,
rule contrapos_pp, simp+)
apply (cut_tac Zero_in_aug_inf,
cut_tac pos_in_aug_inf[of "x"],
cut_tac pos_in_aug_inf[of "y"],
case_tac "y = \<infinity>", simp,
cut_tac mem_ant[of "x"], erule disjE,
simp add:aug_inf_def)
apply (erule disjE, erule exE, simp, simp,
case_tac "x = \<infinity>", unfold Zero_ant_def,
frule aug_inf_noninf_is_z[of "y"], assumption, erule exE,
simp, frule sym, thin_tac "\<infinity> = ant 0", simp)
apply (thin_tac "ant 0 \<le> y",
frule aug_inf_noninf_is_z[of "x"], assumption, erule exE,
frule aug_inf_noninf_is_z[of "y"], assumption, erule exE,
simp add:a_zpz, simp add: aless_zless)
apply (simp add:aless_imp_le)+
done
lemma aadd_poss_pos:"\<lbrakk>(0::ant) < x; 0 \<le> y\<rbrakk> \<Longrightarrow> 0 < (x + y)"
apply (subst aadd_commute, rule aadd_pos_poss, assumption+)
done
lemma aadd_pos_le:"0 \<le> (a::ant) \<Longrightarrow> b \<le> a + b"
apply (cut_tac mem_ant[of "a"], (erule disjE)+,
simp, cut_tac minf_le_any[of "0"], frule ale_antisym[of "0" "-\<infinity>"],
assumption+, simp)
apply (erule disjE, erule exE,
simp, thin_tac "a = ant z", cut_tac mem_ant[of "b"],
erule disjE, simp,
erule disjE, erule exE, simp add:a_zpz, simp only:ant_0[THEN sym],
simp only:ale, simp+)
apply (cut_tac mem_ant[of "b"],
erule disjE, simp,
erule disjE, erule exE, simp, simp)
done
lemma aadd_poss_less:"\<lbrakk>b \<noteq> \<infinity>; b \<noteq> -\<infinity>; 0 < a\<rbrakk> \<Longrightarrow> b < a + b"
apply (cut_tac mem_ant[of "b"], simp)
apply (erule exE,
cut_tac mem_ant[of "a"], erule disjE, simp,
thin_tac "a = - \<infinity>",
cut_tac minf_le_any[of "0"],
frule aless_imp_le[of "0" "-\<infinity>"],
frule ale_antisym[of "0" "-\<infinity>"], assumption+,
simp only:ant_0[THEN sym], simp)
apply (erule disjE, erule exE, simp add:a_zpz,
subst aless[THEN sym], simp, simp)
done
lemma ale_neg:"(0::ant) \<le> x \<Longrightarrow> (- x) \<le> 0"
apply (frule pos_in_aug_inf[of "x"])
apply (case_tac "x = \<infinity>", simp,
frule aug_inf_noninf_is_z[of "x"], assumption, erule exE,
simp add:aminus, unfold Zero_ant_def,
simp only:ale_zle)
done
lemma ale_diff_pos:"(x::ant) \<le> y \<Longrightarrow> 0 \<le> (y - x)"
apply (case_tac "y = -\<infinity>", simp,
cut_tac minf_le_any[of "x"],
frule ale_antisym[of "x" "-\<infinity>"], assumption+,
simp add:diff_ant_def a_minus_minus,
cut_tac mem_ant[of "y"], simp, thin_tac "y \<noteq> - \<infinity>",
erule disjE, erule exE)
apply (case_tac "x = \<infinity>", simp,
cut_tac x = "ant z" in inf_ge_any,
frule_tac x = "ant z" in ale_antisym[of _ "\<infinity>"], simp+,
cut_tac mem_ant[of "x"], simp+, erule disjE,
simp add:diff_ant_def a_minus_minus)
apply (erule exE, simp add:a_zdz, unfold Zero_ant_def,
simp only:ale_zle,
cut_tac mem_ant[of "x"], erule disjE,
simp add:diff_ant_def a_minus_minus,
erule disjE, erule exE, simp add:diff_ant_def aminus,
simp add:diff_ant_def ant_0)
done
lemma aless_diff_poss:"(x::ant) < y \<Longrightarrow> 0 < (y - x)"
apply (case_tac "y = -\<infinity>", simp,
cut_tac minf_le_any[of "x"],
frule less_imp_le[of "x" "-\<infinity>"],
frule antisym[of "x" "-\<infinity>"], assumption+,
cut_tac less_le[of "x" "-\<infinity>"], simp)
apply (case_tac "x = -\<infinity>", simp,
case_tac "y = \<infinity>", simp add:diff_ant_def a_minus_minus,
simp add:zero_lt_inf,
cut_tac mem_ant[of "y"], simp, erule exE, simp add:diff_ant_def
a_minus_minus, simp add:zero_lt_inf)
apply (case_tac "x = \<infinity>", simp,
frule aless_imp_le[of "\<infinity>" "y"],
cut_tac inf_ge_any[of "y"], frule ale_antisym[of "y" "\<infinity>"],
assumption+, simp,
cut_tac mem_ant[of "x"], simp, erule exE,
case_tac "y = \<infinity>", simp add:diff_ant_def aminus,
simp add:zero_lt_inf)
apply (cut_tac mem_ant[of "y"], simp, erule exE, simp,
simp add:diff_ant_def, simp add:aminus a_zpz,
simp add:aless_zless)
done
lemma ale_minus:" (x::ant) \<le> y \<Longrightarrow> - y \<le> - x"
apply (cut_tac mem_ant[of "x"], cut_tac mem_ant[of "y"])
apply ((erule disjE)+, simp, erule disjE, erule exE,
simp add:aminus a_minus_minus, simp add:a_minus_minus,
(erule disjE)+, (erule exE)+,
simp, cut_tac x = "ant z" in minf_le_any, frule_tac x = "ant z" in
ale_antisym[of _ "-\<infinity>"], assumption+, simp,
simp, cut_tac x = \<infinity> in minf_le_any,
frule_tac x = \<infinity> in ale_antisym[of _ "-\<infinity>"], assumption+, simp)
apply ((erule disjE)+, (erule exE)+, simp add:aminus, erule exE, simp,
erule disjE, erule exE, simp, cut_tac x = "ant z" in inf_ge_any,
frule_tac x = "ant z" in ale_antisym[of _ "\<infinity>"], assumption+, simp,
simp)
done
lemma aless_minus:"(x::ant) < y \<Longrightarrow> - y < - x"
by (simp add:less_ant_def, erule conjE, simp add:ale_minus,
rule not_sym, rule contrapos_pp, simp+,
cut_tac a_minus_minus[of "x"], simp add:a_minus_minus)
lemma aadd_minus_le:"(a::ant) \<le> 0 \<Longrightarrow> a + b \<le> b"
apply (frule ale_minus[of "a" "0"],
cut_tac aadd_pos_le[of "-a" "-b"], simp add:aminus_0)
apply (frule ale_minus[of "-b" "-a + -b"], simp add:aminus_add_distrib,
simp add:a_minus_minus, simp add:aminus_0)
done
lemma aadd_minus_less:"\<lbrakk>b \<noteq> -\<infinity> \<and> b \<noteq> \<infinity>; (a::ant) < 0\<rbrakk> \<Longrightarrow> a + b < b"
apply (simp add:less_ant_def, erule conjE,
simp add:aadd_minus_le)
apply (rule contrapos_pp, simp+,
cut_tac mem_ant[of "a"], cut_tac mem_ant[of "b"],
simp, erule disjE, erule exE, simp,
frule sym, thin_tac "- \<infinity> = ant z", simp,
erule disjE, (erule exE)+, simp add:a_zpz,
erule exE, simp, frule sym, thin_tac "\<infinity> = ant z", simp)
done
lemma an_inj:"an n = an m \<Longrightarrow> n = m"
apply (simp add:an_def)
done
lemma aneq_natneq:"(an n \<noteq> an m) = (n \<noteq> m)"
apply (simp add:an_def)
done
lemma ale_natle:" (an n \<le> an m) = (n \<le> m)"
apply (simp add:an_def)
done
lemma aless_natless:"(an n < an m) = (n < m)"
apply (simp add:an_def)
apply (simp add:aless_zless)
done
lemma na_an:"na (an n) = n"
by (simp only:na_def an_def,
subgoal_tac "\<not> ant (int n) < 0", simp,
simp add:tna_ant, subst aneg_less[of "ant (int n)" "0"],
simp only:ant_0[THEN sym], subst ale_zle[of "0" "int n"], simp)
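(* Round-trip check for na and an (illustrative). *)
lemma "na (an 5) = 5"
apply (simp add:na_an)
done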
lemma asprod_ge:
"0 < b \<Longrightarrow> N \<noteq> 0 \<Longrightarrow> an N \<le> int N *\<^sub>a b"
apply (frule aposs_le_1[of "b"])
apply simp
using asprod_pos_mono [THEN sym, of "int N" "1" "b"]
apply (simp only: ant_1 [THEN sym], simp add: asprod_amult, simp add:an_def)
done
lemma an_npn:"an (n + m) = an n + an m"
by (unfold an_def, simp add:a_zpz)
lemma an_ndn:"n \<le> m \<Longrightarrow> an (m - n) = an m - an n"
apply (cut_tac an_npn[of "m - n" n], simp)
apply (unfold an_def)
apply (simp add:a_zpz[of "int (m - n)" "int n"])
apply (subst a_zdz[of "int (m - n) + int n" "int n"], simp)
done
section "Amin, amax"
definition
amin :: "[ant, ant] \<Rightarrow> ant" where
"amin x y = (if (x \<le> y) then x else y)"
definition
amax :: "[ant, ant] \<Rightarrow> ant" where
"amax x y = (if (x \<le> y) then y else x)"
primrec Amin :: "[nat, nat \<Rightarrow> ant] \<Rightarrow> ant"
where
Amin_0 : "Amin 0 f = (f 0)"
| Amin_Suc :"Amin (Suc n) f = amin (Amin n f) (f (Suc n))"
primrec Amax :: "[nat, nat \<Rightarrow> ant] \<Rightarrow> ant"
where
Amax_0 : "Amax 0 f = f 0"
| Amax_Suc :"Amax (Suc n) f = amax (Amax n f) (f (Suc n))"
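(* Small unfoldings of amin, amax and the iterated Amin, Amax
   (illustrative). *)
lemma "amin (ant 2) (ant 3) = ant 2"
apply (simp add:amin_def)
done
lemma "Amin (Suc 0) f = amin (f 0) (f (Suc 0))"
apply simp
done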
lemma amin_ge:"x \<le> amin x y \<or> y \<le> amin x y"
apply (simp add:amin_def)
done
lemma amin_le_l:"amin x y \<le> x"
apply (simp add:amin_def, cut_tac ale_linear[of "x" "y"],
rule impI, simp)
done
lemma amin_le_r:"amin x y \<le> y"
apply (simp add:amin_def)
done
lemma amax_le:"amax x y \<le> x \<or> amax x y \<le> y"
apply (simp add:amax_def)
done
lemma amax_le_n:"\<lbrakk>x \<le> n; y \<le> n\<rbrakk> \<Longrightarrow> amax x y \<le> n"
by (simp add:amax_def)
lemma amax_ge_l:"x \<le> amax x y"
apply (simp add:amax_def)
done
lemma amax_ge_r:"y \<le> amax x y"
apply (simp add:amax_def, cut_tac ale_linear[of "x" "y"],
rule impI, simp)
done
lemma amin_mem_i:"\<lbrakk>x \<in> Z\<^sub>\<infinity>; y \<in> Z\<^sub>\<infinity>\<rbrakk> \<Longrightarrow> amin x y \<in> Z\<^sub>\<infinity>"
apply (cut_tac mem_ant[of "x"], cut_tac mem_ant[of "y"], simp add:aug_inf_def,
(erule disjE)+, (erule exE)+, cut_tac amin_ge[of "x" "y"],
rule contrapos_pp, simp+,
erule disjE,
cut_tac x = "ant z" in minf_le_any,
frule_tac x = "ant z" in ale_antisym[of _ "-\<infinity>"], assumption+, simp,
cut_tac x = "ant za" in minf_le_any,
frule_tac x = "ant za" in ale_antisym[of _ "-\<infinity>"], assumption+, simp)
apply (erule exE, simp add:amin_def, erule disjE,
erule exE, simp add:amin_def, simp add:amin_def)
done
lemma amax_mem_m:"\<lbrakk>x \<in> Z\<^sub>-\<^sub>\<infinity>; y \<in> Z\<^sub>-\<^sub>\<infinity>\<rbrakk> \<Longrightarrow> amax x y \<in> Z\<^sub>-\<^sub>\<infinity>"
apply (cut_tac mem_ant[of "x"], cut_tac mem_ant[of "y"],
simp add:aug_minf_def)
apply ((erule disjE)+, simp add:amax_def,
erule exE, simp add:amax_def,
erule disjE, erule exE, simp add:amax_def)
apply ((erule exE)+, cut_tac amax_le[of "x" "y"],
rule contrapos_pp, simp+) apply (erule disjE,
cut_tac x = "ant z" in inf_ge_any,
frule_tac x = "ant z" in ale_antisym[of _ "\<infinity>"], assumption+, simp,
cut_tac x = "ant za" in inf_ge_any,
frule_tac x = "ant za" in ale_antisym[of _ "\<infinity>"], assumption+, simp)
done
lemma amin_commute:"amin x y = amin y x"
apply (cut_tac ale_linear[of "x" "y"], erule disjE, simp add:amin_def)
apply (simp add:amin_def)
done
lemma amin_mult_pos:"0 < z \<Longrightarrow> amin (z *\<^sub>a x) (z *\<^sub>a y) = z *\<^sub>a amin x y"
by (simp add:amin_def, simp add:asprod_pos_mono)
lemma amin_amult_pos:"0 < z \<Longrightarrow>
amin ((ant z) * x) ((ant z) * y) = (ant z) * amin x y"
by (simp add:asprod_amult[THEN sym], simp add:amin_mult_pos)
lemma times_amin:"\<lbrakk>0 < a; amin (x * (ant a)) (y * (ant a)) \<le> z * (ant a)\<rbrakk> \<Longrightarrow>
amin x y \<le> z"
by (frule amin_mult_pos[of "a" "x" "y"], simp add:asprod_amult,
simp add:amult_commute[of "ant a"], simp add:amult_pos_mono_r)
lemma Amin_memTr:"f \<in> {i. i \<le> n} \<rightarrow> Z\<^sub>\<infinity> \<longrightarrow> Amin n f \<in> Z\<^sub>\<infinity>"
apply (induct_tac n,
simp add:Pi_def)
apply (rule impI,
frule_tac func_pre[of "f" _ "Z\<^sub>\<infinity>"],
simp, rule amin_mem_i, assumption+,
simp add:Pi_def)
done
lemma Amin_mem:"f \<in> {i. i \<le> n} \<rightarrow> Z\<^sub>\<infinity> \<Longrightarrow> Amin n f \<in> Z\<^sub>\<infinity>"
apply (simp add:Amin_memTr)
done
lemma Amax_memTr:"f \<in> {i. i \<le> n} \<rightarrow> Z\<^sub>-\<^sub>\<infinity> \<longrightarrow> Amax n f \<in> Z\<^sub>-\<^sub>\<infinity>"
apply (induct_tac n,
simp add:Pi_def)
apply (rule impI,
frule_tac func_pre[of "f" _ "Z\<^sub>-\<^sub>\<infinity>"],
simp, rule amax_mem_m, assumption+,
simp add:Pi_def)
done
lemma Amax_mem:"f \<in> {i. i \<le> n} \<rightarrow> Z\<^sub>-\<^sub>\<infinity> \<Longrightarrow> Amax n f \<in> Z\<^sub>-\<^sub>\<infinity>"
apply (simp add:Amax_memTr)
done
lemma Amin_mem_mem:"\<forall>j\<le> n. f j \<in> Z\<^sub>\<infinity> \<Longrightarrow> Amin n f \<in> Z\<^sub>\<infinity>"
by (rule Amin_mem, simp)
lemma Amax_mem_mem:"\<forall>j \<le> n. f j \<in> Z\<^sub>-\<^sub>\<infinity> \<Longrightarrow> Amax n f \<in> Z\<^sub>-\<^sub>\<infinity>"
by (rule Amax_mem, simp)
lemma Amin_leTr:"f \<in> {i. i \<le> n} \<rightarrow> Z\<^sub>\<infinity> \<longrightarrow> (\<forall>j\<in>{i. i \<le> n}. Amin n f \<le> (f j))"
apply (induct_tac n,
rule impI, rule ballI,
simp)
apply (rule impI, rule ballI,
frule func_pre, simp)
apply (case_tac "j = Suc n", simp, rule amin_le_r)
apply (cut_tac x = j and n = n in Nset_pre, simp, assumption,
cut_tac x = "Amin n f" and y = "f (Suc n)" in amin_le_l,
thin_tac "j \<le> Suc n", simp)
apply (frule_tac x = j in spec,
thin_tac "\<forall>j\<le>n. Amin n f \<le> f j", simp)
done
lemma Amin_le:"\<lbrakk>f \<in> {j. j \<le> n} \<rightarrow> Z\<^sub>\<infinity>; j \<in> {k. k \<le> n}\<rbrakk> \<Longrightarrow> Amin n f \<le> (f j)"
apply (simp add:Amin_leTr)
done
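(* Amax_geTr, the analogue of Amin_leTr needed by Amax_ge just below;
   the statement and proof are reconstructed by symmetry with Amin_leTr. *)
lemma Amax_geTr:"f \<in> {i. i \<le> n} \<rightarrow> Z\<^sub>-\<^sub>\<infinity> \<longrightarrow> (\<forall>j\<in>{i. i \<le> n}. (f j) \<le> (Amax n f))"
apply (induct_tac n,
       rule impI, rule ballI,
       simp)
apply (rule impI, rule ballI,
       frule func_pre, simp)
apply (case_tac "j = Suc n", simp, rule amax_ge_r)
apply (cut_tac x = j and n = n in Nset_pre, simp, assumption,
       cut_tac x = "Amax n f" and y = "f (Suc n)" in amax_ge_l,
       thin_tac "j \<le> Suc n", simp)
apply (frule_tac x = j in spec,
       thin_tac "\<forall>j\<le>n. f j \<le> Amax n f", simp)
done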
lemma Amax_ge:"\<lbrakk>f \<in> {j. j \<le> n} \<rightarrow> Z\<^sub>-\<^sub>\<infinity>; j \<in> {j. j \<le> n}\<rbrakk> \<Longrightarrow>
(f j) \<le> (Amax n f)"
apply (simp add:Amax_geTr)
done
lemma Amin_mem_le:"\<lbrakk>\<forall>j \<le> n. (f j) \<in> Z\<^sub>\<infinity>; j \<in> {j. j \<le> n}\<rbrakk> \<Longrightarrow>
(Amin n f) \<le> (f j)"
by (rule Amin_le, simp, simp)
lemma Amax_mem_le:"\<lbrakk>\<forall>j \<le> n. (f j) \<in> Z\<^sub>-\<^sub>\<infinity>; j \<in> {j. j \<le> n}\<rbrakk> \<Longrightarrow>
(f j) \<le> (Amax n f)"
by (rule Amax_ge, simp, simp)
lemma amin_ge1:"\<lbrakk>(z::ant) \<le> x; z \<le> y \<rbrakk> \<Longrightarrow> z \<le> amin x y"
by (simp add:amin_def)
lemma amin_gt:"\<lbrakk>(z::ant) < x; z < y\<rbrakk> \<Longrightarrow> z < amin x y"
apply (simp add:less_ant_def, (erule conjE)+,
rule conjI, simp add:amin_ge1)
apply (rule contrapos_pp, simp+,
case_tac "x \<le> y", simp add:amin_def, simp add:amin_def)
done
lemma Amin_ge1Tr:"(\<forall>j\<le>(Suc n). (f j) \<in> Z\<^sub>\<infinity> \<and> z \<le> (f j)) \<longrightarrow>
z \<le> (Amin (Suc n) f)"
apply (induct_tac n)
apply (rule impI)
apply (frule_tac x = 0 in spec,
frule_tac x = "Suc 0" in spec,
thin_tac "\<forall>j\<le>Suc 0. f j \<in> Z\<^sub>\<infinity> \<and> z \<le> f j", simp, (erule conjE)+,
simp add:amin_ge1)
apply (rule impI,
simp,
frule_tac a = "Suc (Suc n)" in forall_spec,
thin_tac "\<forall>j\<le>Suc (Suc n). f j \<in> Z\<^sub>\<infinity> \<and> z \<le> f j", simp,
thin_tac "\<forall>j\<le>Suc (Suc n). f j \<in> Z\<^sub>\<infinity> \<and> z \<le> f j", erule conjE)
apply (rule amin_ge1, assumption+)
done
lemma Amin_ge1:"\<lbrakk> \<forall>j \<le> (Suc n). f j \<in> Z\<^sub>\<infinity>; \<forall>j \<le> (Suc n). z \<le> (f j)\<rbrakk> \<Longrightarrow>
z \<le> (Amin (Suc n) f)"
apply (simp del:Amin_Suc add:Amin_ge1Tr)
done
lemma amin_trans1:"\<lbrakk>x \<in> Z\<^sub>\<infinity>; y \<in> Z\<^sub>\<infinity>; z \<in> Z\<^sub>\<infinity>; z \<le> x \<rbrakk> \<Longrightarrow> amin z y \<le> amin x y"
by (simp add:amin_def)
lemma inf_in_aug_inf:"\<infinity> \<in> Z\<^sub>\<infinity>"
apply (simp add:aug_inf_def, simp add:not_sym)
done
subsection "Maximum element of a set of ants"
primrec aasc_seq :: "[ant set, ant, nat] \<Rightarrow> ant"
where
aasc_seq_0 : "aasc_seq A a 0 = a"
| aasc_seq_Suc : "aasc_seq A a (Suc n) =
(SOME b. ((b \<in> A) \<and> (aasc_seq A a n) < b))"
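(* Example (an added sketch, not part of the original development): one
   unfolding of the recursion; both sides reduce by the primrec rules,
   so simp closes the goal. *)
lemma aasc_seq_unfold_ex:"aasc_seq A a (Suc 0) = (SOME b. b \<in> A \<and> a < b)"
by simp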
lemma aasc_seq_mem:"\<lbrakk>a \<in> A; \<not> (\<exists>m. m\<in>A \<and> (\<forall>x\<in>A. x \<le> m))\<rbrakk> \<Longrightarrow>
                      (aasc_seq A a n) \<in> A"
apply (induct_tac n, simp)
apply (simp add:aneg_le,
       frule_tac a = "aasc_seq A a n" in forall_spec, assumption,
       thin_tac "\<forall>m. m \<in> A \<longrightarrow> (\<exists>x\<in>A. m < x)",
       rule someI2_ex, blast, simp)
done
lemma aasc_seqn:"\<lbrakk>a \<in> A; \<not> (\<exists>m. m\<in>A \<and> (\<forall>x\<in>A. x \<le> m))\<rbrakk> \<Longrightarrow>
(aasc_seq A a n) < (aasc_seq A a (Suc n))"
apply (frule aasc_seq_mem [of "a" "A" "n"], assumption+,
simp add:aneg_le,
frule_tac a = "aasc_seq A a n" in forall_spec, assumption+,
thin_tac "\<forall>m. m \<in> A \<longrightarrow> (\<exists>x\<in>A. m < x)", rule someI2_ex, blast, simp)
done
lemma aasc_seqn1:"\<lbrakk>a \<in> A; \<not> (\<exists>m. m\<in>A \<and> (\<forall>x\<in>A. x \<le> m))\<rbrakk> \<Longrightarrow>
(aasc_seq A a n) + 1 \<le> (aasc_seq A a (Suc n))"
by (frule aasc_seqn [of "a" "A" "n"], assumption+, simp)
lemma aubs_ex_n_maxTr:"\<lbrakk>a \<in> A; \<not> (\<exists>m. m\<in>A \<and> (\<forall>x\<in>A. x \<le> m))\<rbrakk> \<Longrightarrow>
(a + an n) \<le> (aasc_seq A a n)"
apply (induct_tac n)
apply (simp add:aadd_0_r,
frule_tac n = n in aasc_seqn1[of "a" "A"], assumption+,
cut_tac x = "a + an n" and y = "aasc_seq A a n" in
aadd_le_mono[of _ _ "1"], assumption, simp,
frule_tac i = "a + an n + 1" and j = "aasc_seq A a n + 1" and
k = "(SOME b. b \<in> A \<and> aasc_seq A a n < b)" in ale_trans, assumption+)
apply (simp add:an_Suc,
case_tac "a = -\<infinity>",
subst ant_1[THEN sym], simp del:ant_1 add:a_zpz an_def,
subgoal_tac "a \<in> Z\<^sub>\<infinity>", subgoal_tac "an n \<in> Z\<^sub>\<infinity>",
subgoal_tac "1 \<in> Z\<^sub>\<infinity>",
subst aadd_assoc_i[THEN sym], assumption+)
apply (subst ant_1[THEN sym], simp del:ant_1 add:aug_inf_def,
(simp add:aug_inf_def an_def)+)
done
lemma aubs_ex_AMax:"\<lbrakk>A \<subseteq> UBset (ant z); A \<noteq> {}\<rbrakk> \<Longrightarrow> \<exists>!m. m\<in>A \<and> (\<forall>x\<in>A. x \<le> m)"
apply (case_tac "A = {-\<infinity>}", simp,
frule not_sub_single[of "A" "-\<infinity>"], assumption+,
frule not_sub[of "A" "{-\<infinity>}"],
erule exE, erule conjE, simp, rename_tac a, rule ex_ex1I)
prefer 2
apply ((erule conjE)+,
frule_tac x = y in bspec, assumption+,
thin_tac "\<forall>x\<in>A. x \<le> m",
frule_tac x = m in bspec, assumption+,
thin_tac "\<forall>x\<in>A. x \<le> y", simp)
apply (rule contrapos_pp, simp,
subgoal_tac "\<exists>w. a = ant w", erule exE,
frule_tac a = a and A = A and n = "nat ((abs w) + (abs z) + 1)" in
aubs_ex_n_maxTr, simp,
frule_tac a = a and n = "nat ((abs w) + (abs z) + 1)" in
aasc_seq_mem[of _ "A"], assumption,
thin_tac "\<not> (\<exists>m. m \<in> A \<and> (\<forall>x\<in>A. x \<le> m))",
simp add:UBset_def)
apply (frule_tac c = "aasc_seq A (ant w) (nat (\<bar>w\<bar> + \<bar>z\<bar> + 1))" in
subsetD[of "A" "{x. x \<le> ant z}"], assumption+,
simp)
apply(frule_tac i = "ant w + an (nat (\<bar>w\<bar> + \<bar>z\<bar> + 1))" and
j = "aasc_seq A (ant w) (nat (\<bar>w\<bar> + \<bar>z\<bar> + 1))" and
k = "ant z" in ale_trans, assumption+)
apply (thin_tac "ant w + an (nat (\<bar>w\<bar> + \<bar>z\<bar> + 1))
\<le> aasc_seq A (ant w) (nat (\<bar>w\<bar> + \<bar>z\<bar> + 1))",
thin_tac "aasc_seq A (ant w) (nat (\<bar>w\<bar> + \<bar>z\<bar> + 1)) \<in> A",
thin_tac "aasc_seq A (ant w) (nat (\<bar>w\<bar> + \<bar>z\<bar> + 1)) \<le> ant z",
simp add:an_def a_zpz)
apply (cut_tac a = a in mem_ant, erule disjE, simp, erule disjE, erule exE,
simp, simp add:UBset_def, frule subsetD[of "A" "{x. x \<le> ant z}" "\<infinity>"],
assumption+, simp, cut_tac inf_ge_any[of "ant z"],
frule_tac ale_antisym[of "ant z" "\<infinity>"], assumption+, simp)
done
definition
AMax :: "ant set \<Rightarrow> ant" where
"AMax A = (THE m. m \<in> A \<and> (\<forall>x\<in>A. x \<le> m))"
definition
AMin::"ant set \<Rightarrow> ant" where
"AMin A = (THE m. m \<in> A \<and> (\<forall>x\<in>A. m \<le> x))"
definition
rev_o :: "ant \<Rightarrow> ant" where
"rev_o x = - x"
lemma AMax:"\<lbrakk>A \<subseteq> UBset (ant z); A \<noteq> {}\<rbrakk> \<Longrightarrow>
(AMax A) \<in> A \<and> (\<forall>x\<in>A. x \<le> (AMax A))"
apply (simp add:AMax_def)
apply (frule aubs_ex_AMax[of "A" "z"], assumption)
apply (rule theI')
apply assumption
done
lemma AMax_mem:"\<lbrakk>A \<subseteq> UBset (ant z); A \<noteq> {}\<rbrakk> \<Longrightarrow> (AMax A) \<in> A"
apply (simp add:AMax[of "A" "z"])
done
lemma rev_map_nonempty:"A \<noteq> {} \<Longrightarrow> rev_o ` A \<noteq> {}"
by (rule contrapos_pp, simp+)
lemma rev_map:"rev_o \<in> LBset (ant (-z)) \<rightarrow> UBset (ant z)"
by (rule Pi_I, simp add:UBset_def LBset_def rev_o_def,
frule_tac x = "ant (-z)" and y = x in ale_minus, simp add:aminus)
lemma albs_ex_AMin:"\<lbrakk>A \<subseteq> LBset (ant z); A \<noteq> {}\<rbrakk> \<Longrightarrow> \<exists>!m. m\<in>A \<and> (\<forall>x\<in>A. m \<le> x)"
apply (rule ex_ex1I)
prefer 2 apply ((erule conjE)+,
frule_tac x = y in bspec, assumption+,
thin_tac "\<forall>x\<in>A. m \<le> x",
frule_tac x = m in bspec, assumption+,
thin_tac "\<forall>x\<in>A. y \<le> x", simp)
apply (subgoal_tac "- AMax (rev_o ` A) \<in> A \<and>
(\<forall>x \<in> A. (- AMax (rev_o ` A)) \<le> x)", blast,
cut_tac rev_map[of "-z"], simp add:a_minus_minus,
frule rev_map_nonempty[of "A"],
frule image_sub[of "rev_o" "LBset (ant z)" "UBset (ant (-z))" "A"],
assumption+, frule AMax[of "rev_o ` A" "-z"], assumption+,
erule conjE,
rule conjI, thin_tac "\<forall>x\<in>rev_o ` A. x \<le> AMax (rev_o ` A)",
thin_tac "rev_o \<in> LBset (ant z) \<rightarrow> UBset (ant (- z))",
thin_tac "rev_o ` A \<noteq> {}",
thin_tac "rev_o ` A \<subseteq> UBset (ant (- z))")
apply (simp add:image_def rev_o_def,
erule bexE, simp add:a_minus_minus, rule ballI,
subgoal_tac "rev_o x \<in> rev_o ` A",
frule_tac x = "rev_o x" in bspec, assumption+,
thin_tac "\<forall>x\<in>rev_o ` A. x \<le> AMax (rev_o ` A)",
thin_tac "rev_o \<in> LBset (ant z) \<rightarrow> UBset (ant (- z))",
thin_tac "rev_o ` A \<noteq> {}",
thin_tac "rev_o ` A \<subseteq> UBset (ant (- z))")
apply (simp add:image_def rev_o_def, erule bexE, simp add:a_minus_minus,
frule_tac x = "-x" and y = "-xa" in ale_minus, simp add:a_minus_minus,
simp add:image_def, blast)
done
lemma AMin:"\<lbrakk>A \<subseteq> LBset (ant z); A \<noteq> {}\<rbrakk> \<Longrightarrow>
(AMin A) \<in> A \<and> (\<forall>x\<in>A. (AMin A) \<le> x)"
apply (simp add:AMin_def)
apply (frule albs_ex_AMin[of "A" "z"], assumption)
apply (rule theI')
apply assumption
done
lemma AMin_mem:"\<lbrakk>A \<subseteq> LBset (ant z); A \<noteq> {}\<rbrakk> \<Longrightarrow> (AMin A) \<in> A"
apply (simp add:AMin)
done
primrec ASum :: "(nat \<Rightarrow> ant) \<Rightarrow> nat \<Rightarrow> ant"
where
ASum_0: "ASum f 0 = f 0"
| ASum_Suc: "ASum f (Suc n) = (ASum f n) + (f (Suc n))"
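(* Example (an added sketch): unfolding ASum twice gives the left-nested
   sum of the first three values of f. *)
lemma ASum_two_ex:"ASum f (Suc (Suc 0)) = f 0 + f (Suc 0) + f (Suc (Suc 0))"
by simp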
lemma age_plus:"\<lbrakk>0 \<le> (a::ant); 0 \<le> b; a + b \<le> c\<rbrakk> \<Longrightarrow> a \<le> c"
apply (frule aadd_le_mono[of "0" "b" "a"])
apply (simp add:aadd_commute[of "b" "a"] aadd_0_l)
done
lemma age_diff_le:"\<lbrakk>(a::ant) \<le> c; 0 \<le> b\<rbrakk> \<Longrightarrow> a - b \<le> c"
apply (frule ale_minus[of "0" "b"], thin_tac "0 \<le> b", simp)
apply (frule aadd_le_mono[of "a" "c" "-b"])
apply (frule aadd_le_mono[of "-b" "0" "c"])
apply (thin_tac "a \<le> c", thin_tac "- b \<le> 0",
simp add:aadd_commute[of "-b" "c"] aadd_0_l)
apply (simp add:diff_ant_def)
done
lemma adiff_le_adiff:"a \<le> (a'::ant) \<Longrightarrow> a - b \<le> a' - b"
apply (simp add:diff_ant_def)
apply (rule aadd_le_mono[of "a" "a'" "-b"], assumption+)
done
lemma aplus_le_aminus:"\<lbrakk> a \<in> Z\<^sub>-\<^sub>\<infinity>; b \<in> Z\<^sub>-\<^sub>\<infinity>; c \<in> Z\<^sub>-\<^sub>\<infinity>; -b \<in> Z\<^sub>-\<^sub>\<infinity>\<rbrakk> \<Longrightarrow>
((a + b) \<le> (c::ant)) = (a \<le> c - b)"
apply (rule iffI)
apply (frule aadd_le_mono[of "a + b" "c" "-b"])
apply (simp add:aadd_assoc_m, simp add:aadd_minus_r)
apply (simp add:aadd_0_r, simp add:diff_ant_def)
apply (frule aadd_le_mono[of "a" "c - b" "b"])
apply (simp add:diff_ant_def)
apply (simp add:aadd_assoc_m)
apply (simp add:aadd_minus_inv[of "b"])
apply (simp add: aadd_0_r)
done
section "Cardinality of sets"
text \<open>Cardinality is defined for finite sets only.\<close>
lemma card_eq:"A = B \<Longrightarrow> card A = card B"
apply simp
done
lemma card0:"card {} = 0"
by simp
lemma card_nonzero:"\<lbrakk>finite A; card A \<noteq> 0\<rbrakk> \<Longrightarrow> A \<noteq> {}"
by (rule contrapos_pp, simp+)
lemma finite1:"finite {a}"
by simp
lemma card1:"card {a} = 1"
by simp
lemma nonempty_card_pos:"\<lbrakk>finite A; A \<noteq> {}\<rbrakk> \<Longrightarrow> 0 < card A"
apply (frule nonempty_ex [of "A"], erule exE,
frule_tac a = x and A = A in singleton_sub)
apply (frule_tac B = A and A = "{x}" in card_mono, assumption+,
simp add:card1)
done
lemma nonempty_card_pos1:"\<lbrakk>finite A; A \<noteq> {}\<rbrakk> \<Longrightarrow> Suc 0 \<le> card A"
apply (frule nonempty_card_pos[of "A"], assumption+)
apply (rule Suc_leI[of "0" "card A"], assumption)
done
lemma card1_tr0:"\<lbrakk> finite A; card A = Suc 0; a \<in> A \<rbrakk> \<Longrightarrow> {a} = A"
apply (cut_tac card1[of "a"])
apply (rule card_seteq[of "A" "{a}"], assumption)
apply (rule singleton_sub[of "a" "A"], assumption)
apply simp
done
lemma card1_tr1:"(constmap {0::nat} {x}) \<in> {0} \<rightarrow> {x} \<and>
surj_to (constmap {0::nat} {x}) {0} {x}"
apply (rule conjI, simp add:constmap_def Pi_def,
simp add:surj_to_def image_def constmap_def)
done
lemma card1_Tr2:"\<lbrakk>finite A; card A = Suc 0\<rbrakk> \<Longrightarrow>
\<exists>f. f \<in> {0::nat} \<rightarrow> A \<and> surj_to f {0} A"
apply (frule card_nonzero[of "A"], simp)
apply (cut_tac nonempty_ex[of "A"], erule exE)
apply (frule_tac a = x in card1_tr0[of "A"], assumption+)
apply (rotate_tac -1, frule sym, thin_tac "{x} = A", simp)
apply (cut_tac x = x in card1_tr1, blast, simp)
done
lemma card2:"\<lbrakk> finite A; a \<in> A; b \<in> A; a \<noteq> b \<rbrakk> \<Longrightarrow> Suc (Suc 0) \<le> card A"
apply (cut_tac card1[of "a"])
apply (frule singleton_sub[of "b" "A"])
apply (frule finite_subset[of "{b}" "A"], assumption)
apply (frule card_insert_disjoint[of "{b}" "a"])
apply simp
apply (simp only:card1)
apply (frule insert_sub[of "{b}" "A" "a"], assumption+)
apply (frule card_mono [of "A" "{a, b}"], assumption)
apply simp
done
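(* Example (an added sketch): a concrete instance of the card2 bound on a
   two-element set; simp evaluates the cardinality. *)
lemma card2_ex:"Suc (Suc 0) \<le> card {0::nat, 1}"
by simp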
lemma card2_inc_two:"\<lbrakk>0 < (n::nat); x \<in> {j. j \<le> n}\<rbrakk> \<Longrightarrow>
\<exists>y \<in> {j. j \<le> n}. x \<noteq> y"
apply (rule contrapos_pp, simp+)
apply (frule_tac m = 0 and n = n in Suc_leI) apply (
frule_tac a = "Suc 0" in forall_spec, assumption)
apply (frule_tac a = 0 in forall_spec)
apply (rule less_imp_le, assumption)
apply simp
done
lemma Nset2_prep1:"\<lbrakk>finite A; card A = Suc (Suc n) \<rbrakk> \<Longrightarrow> \<exists>x. x\<in>A"
apply (frule card_nonzero[of "A"])
apply simp
apply (simp add:nonempty_ex)
done
lemma ex_least_set:"\<lbrakk>A = {H. finite H \<and> P H}; H \<in> A\<rbrakk> \<Longrightarrow>
\<exists>K \<in> A. (LEAST j. j \<in> (card ` A)) = card K"
(* proof by L. C. Paulson *)
by (simp add:image_def, rule LeastI, rule_tac x = "H" in exI, simp)
lemma Nset2_prep2:"x \<in> A \<Longrightarrow> A - {x} \<union> {x} = A"
by auto
lemma Nset2_finiteTr:"\<forall>A. (finite A \<and>(card A = Suc n) \<longrightarrow>
(\<exists>f. f \<in> {i. i \<le> n} \<rightarrow> A \<and> surj_to f {i. i \<le> n} A))"
apply (induct_tac n, rule allI, rule impI, erule conjE)
apply (simp add: card1_Tr2 del: Pi_split_insert_domain)
(* n *)
apply (rule allI, rule impI, erule conjE, frule Nset2_prep1, assumption+)
apply (erule exE)
apply(drule_tac a = "A - {x}" in forall_spec)
apply simp
apply (erule exE)
apply (cut_tac x = x in card1_tr1, (erule conjE)+)
apply (frule_tac f = f and n = n and A = "A - {x}" and
g = "constmap {0} {x}" and m = 0 and B = "{x}" in jointfun_surj,
assumption+)
apply simp+
apply (frule_tac f = f and n = n and A = "A - {x}" and
g = "constmap {0} {x}" and m = 0 and B = "{x}" in jointfun_hom0,
simp,
frule_tac x = x and A = A in Nset2_prep2, simp, blast)
done
lemma Nset2_finite:"\<lbrakk> finite A; card A = Suc n\<rbrakk> \<Longrightarrow>
\<exists>f. f \<in> {i. i \<le> n} \<rightarrow> A \<and> surj_to f {i. i \<le> n} A "
by (simp add:Nset2_finiteTr)
lemma Nset2finite_inj_tr0:"j \<in> {i. i \<le> (n::nat)} \<Longrightarrow>
card ({i. i \<le> n} - {j}) = n"
by simp
lemma Nset2finite_inj_tr1:"\<lbrakk> i \<le> (n::nat); j \<le> n; f i = f j; i \<noteq> j \<rbrakk> \<Longrightarrow>
f ` ({i. i \<le> n} - {j}) = f ` {i. i \<le> n}"
apply (simp add:image_def, rule equalityI, rule subsetI, simp add:CollectI,
erule bexE, case_tac "xa = j", frule sym, thin_tac "f i = f j",
simp, blast)
apply (rule subsetI, simp, erule exE, case_tac "xa = j", frule sym,
thin_tac "f i = f j", blast, blast)
done
lemma Nset2finite_inj:"\<lbrakk>finite A; card A = Suc n; surj_to f {i. i \<le> n} A \<rbrakk> \<Longrightarrow>
inj_on f {i. i \<le> n}"
by (metis card_Collect_le_nat eq_card_imp_inj_on finite_Collect_le_nat surj_to_def)
definition
zmax :: "[int, int] \<Rightarrow> int" where
"zmax x y = (if (x \<le> y) then y else x)"
primrec Zmax :: "[nat, nat \<Rightarrow> int] \<Rightarrow> int"
where
Zmax_0 : "Zmax 0 f = f 0"
| Zmax_Suc :"Zmax (Suc n) f = zmax (Zmax n f) (f (Suc n))"
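(* Examples (added sketches, not in the original text): zmax picks the larger
   integer, and one step of Zmax reduces to zmax of the first two values. *)
lemma zmax_ex:"zmax 2 5 = 5 \<and> zmax 7 3 = 7"
by (simp add:zmax_def)
lemma Zmax_one_ex:"Zmax (Suc 0) f = zmax (f 0) (f (Suc 0))"
by simp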
lemma Zmax_memTr:"f \<in> {i. i \<le> (n::nat)} \<rightarrow> (UNIV::int set) \<longrightarrow>
Zmax n f \<in> f ` {i. i \<le> n}"
apply (induct_tac n)
apply simp
apply (rule impI)
apply (frule func_pre)
apply (frule_tac f = f and A = "{i. i \<le> Suc n}" and B = UNIV and
?A1.0 = "{i. i \<le> n}" and ?A2.0 = "{i. i \<le> Suc n}" in im_set_mono)
apply (rule subsetI, simp, simp, simp)
apply (case_tac "(Zmax n f) \<le> (f (Suc n))", simp add:zmax_def)
apply (simp add:zmax_def)
apply (simp add:subsetD)
done
lemma zmax_ge_r:"y \<le> zmax x y"
by (simp add:zmax_def)
lemma zmax_ge_l:"x \<le> zmax x y"
by (simp add:zmax_def)
lemma Zmax_geTr:"f \<in> {j. j \<le> (n::nat)} \<rightarrow> (UNIV::int set) \<longrightarrow>
(\<forall>j\<in>{j. j \<le> n}. (f j) \<le> Zmax n f)"
apply (induct_tac n,
rule impI, rule ballI,
simp)
apply (rule impI, rule ballI,
frule func_pre, simp,
case_tac "j = Suc n", simp, rule zmax_ge_r,
cut_tac x = j and n = n in Nset_pre, simp, assumption,
thin_tac "j \<le> Suc n",
simp)
apply (cut_tac x = "Zmax n f" and y = "f (Suc n)" in zmax_ge_l,
frule_tac x = j in spec,
thin_tac "\<forall>j\<le>n. f j \<le> Zmax n f")
apply simp
done
lemma Zmax_plus1:"f \<in> {j. j \<le> (n::nat)} \<rightarrow> (UNIV::int set) \<Longrightarrow>
((Zmax n f) + 1) \<notin> f ` {j. j \<le> n}"
apply (cut_tac Zmax_geTr[of f n])
apply (rule contrapos_pp, simp+)
apply (simp add:image_def, erule exE, erule conjE)
apply (frule_tac a = x in forall_spec, assumption,
thin_tac "\<forall>j\<le>n. f j \<le> Zmax n f")
apply (frule sym, thin_tac "Zmax n f + 1 = f x", simp)
done
lemma image_Nsetn_card_pos:" 0 < card (f ` {i. i \<le> (n::nat)})"
apply(rule nonempty_card_pos)
apply auto
done
lemma card_image_Nsetn_Suc
:"\<lbrakk>f \<in> {j. j \<le> Suc n} \<rightarrow> B;
f (Suc n) \<notin> f ` {j. j \<le> n}\<rbrakk> \<Longrightarrow>
card (f ` {j. j \<le> Suc n}) - Suc 0 =
Suc (card (f ` {j. j \<le> n}) - Suc 0)"
apply (simp add:image_Nset_Suc)
apply (cut_tac image_Nsetn_card_pos[of f n], simp)
done
lemma slide_surj:
\<open>surj_to (slide i) {l. l \<le> (j - i)} (nset i j)\<close> if \<open>i < j\<close> for i j :: nat
using that
by (auto simp add: surj_to_def image_def slide_def nset_def) presburger
lemma slide_inj:"i < j \<Longrightarrow> inj_on (slide i) {k. k \<le> (j - i)}"
apply (simp add:inj_on_def, (rule allI)+)
apply (rule impI, rule allI, rule impI, rule impI)
apply (simp add:slide_def)
done
lemma card_nset:"i < (j :: nat) \<Longrightarrow> card (nset i j) = Suc (j - i)"
apply (frule slide_inj [of "i" "j"])
apply (frule card_image [of "slide i" "{k. k \<le> (j - i)}"])
apply (simp, frule slide_surj [of "i" "j"], simp add:surj_to_def)
done
lemma sliden_hom:"i < j \<Longrightarrow> (sliden i) \<in> nset i j \<rightarrow> {k. k \<le> (j - i)}"
by (simp add:Pi_def, rule allI, rule impI, simp add:sliden_def,
simp add:nset_def, erule conjE, simp add:diff_le_mono)
lemma slide_sliden:"(sliden i) (slide i k) = k"
by (simp add:sliden_def slide_def)
lemma sliden_surj:"i < j \<Longrightarrow> surj_to (sliden i) (nset i j) {k. k \<le> (j - i)}"
apply (simp add:surj_to_def image_def, rule equalityI)
apply (rule subsetI, simp, erule bexE, simp add:nset_def sliden_def,
erule conjE, rule_tac m = xa in diff_le_mono[of _ "j" "i"],
assumption+)
apply (rule subsetI, simp add:nset_def sliden_def,
frule_tac i = x in add_le_mono[of _ "j - i" "i" "i"], simp,
simp, subgoal_tac "i \<le> x + i", subgoal_tac "x = (x + i) - i",
blast) apply simp+
done
lemma sliden_inj: "i < j \<Longrightarrow> inj_on (sliden i) (nset i j)"
apply (simp add:inj_on_def, (rule ballI)+, rule impI, simp add:sliden_def)
apply (simp add:nset_def, (erule conjE)+,
subgoal_tac "(x - i = y - i) = (x = y)", blast)
apply (rule eq_diff_iff, assumption+)
done
definition
transpos :: "[nat, nat] \<Rightarrow> (nat \<Rightarrow> nat)" where
"transpos i j = (\<lambda>k. if k = i then j else if k = j then i else k)"
lemma transpos_id:"\<lbrakk> i \<le> n; j \<le> n; i \<noteq> j ; x \<in> {k. k \<le> n} - {i, j} \<rbrakk>
\<Longrightarrow> transpos i j x = x"
proof -
assume p1:"i \<le> n" and p2:"j \<le> n" and p3:" i \<noteq> j" and
p4:"x \<in> {k. k \<le> n} - {i, j}"
from p1 and p2 and p3 and p4 show ?thesis
apply (simp add:transpos_def)
done
qed
lemma transpos_id_1:"\<lbrakk>i \<le> n; j \<le> n; i \<noteq> j; x \<le> n; x \<noteq> i; x \<noteq> j\<rbrakk> \<Longrightarrow>
transpos i j x = x"
proof -
assume p1:"i \<le> n" and p2:"j \<le> n" and p3:"i \<noteq> j" and p4:"x \<le> n" and p5:"x \<noteq> i" and p6:"x \<noteq> j"
from p1 and p2 and p3 and p4 and p5 and p6 show ?thesis
apply (simp add:transpos_def)
done
qed
lemma transpos_id_2:"i \<le> n \<Longrightarrow> transpos i n (Suc n) = Suc n"
by (simp add:transpos_def)
lemma transpos_ij_1:"\<lbrakk>i \<le> n; j \<le> n; i \<noteq> j \<rbrakk> \<Longrightarrow>
transpos i j i = j"
by (simp add:transpos_def)
lemma transpos_ij_2:"\<lbrakk>i \<le> n; j \<le> n; i \<noteq> j\<rbrakk> \<Longrightarrow> transpos i j j = i"
by (simp add:transpos_def)
lemma transpos_hom:"\<lbrakk>i \<le> n; j \<le> n; i \<noteq> j\<rbrakk> \<Longrightarrow>
(transpos i j) \<in> {i. i \<le> n} \<rightarrow> {i. i \<le> n}"
apply (simp add:Pi_def, rule allI, rule impI)
apply (case_tac "x = i", simp add:transpos_def)
apply (case_tac "x = j", simp add:transpos_def,
subst transpos_id, assumption+, simp, assumption)
done
lemma transpos_mem:"\<lbrakk>i \<le> n; j \<le> n; i \<noteq> j; l \<le> n\<rbrakk> \<Longrightarrow>
(transpos i j l) \<le> n"
apply (frule transpos_hom [of "i" "n" "j"], assumption+,
cut_tac funcset_mem[of "transpos i j" "{i. i \<le> n}" "{i. i \<le> n}" l])
apply simp+
done
lemma transpos_inj:"\<lbrakk>i \<le> n; j \<le> n; i \<noteq> j\<rbrakk>
\<Longrightarrow> inj_on (transpos i j) {i. i \<le> n}"
apply (simp add:inj_on_def, (rule allI, rule impI)+, rule impI,
case_tac "x = i", case_tac "y = j",
simp add:transpos_def)
apply (simp add:transpos_ij_1, rule contrapos_pp, simp+,
frule_tac x = y in transpos_id [of "i" "n" "j"], assumption+,
simp+)
apply (case_tac "x = j", simp,
simp add:transpos_ij_2, rule contrapos_pp, simp+,
frule_tac x = y in transpos_id [of "i" "n" "j"], assumption+,
simp, rule contrapos_pp, simp+, simp add:transpos_ij_1)
apply (simp, simp add:transpos_ij_1, simp add:transpos_id_1,
thin_tac "x = transpos i j y",
case_tac "y = i", simp add:transpos_ij_1,
case_tac "y = j", simp add:transpos_ij_2)
apply (simp add:transpos_id_1)
done
lemma transpos_surjec:"\<lbrakk>i \<le> n; j \<le> n; i \<noteq> j\<rbrakk>
\<Longrightarrow> surj_to (transpos i j) {i. i \<le> n} {i. i \<le> n}"
apply (simp add:surj_to_def,
frule transpos_hom [of "i" "n" "j"], assumption+,
frule image_sub [of "transpos i j" "{i. i \<le> n}" "{i. i \<le> n}"
"{i. i \<le> n}"], simp)
apply (frule transpos_inj [of "i" "n" "j"], assumption+,
frule card_image [of "transpos i j" "{i. i \<le> n}"],
simp add:card_seteq)
done
lemma comp_transpos:"\<lbrakk>i \<le> n; j \<le> n; i \<noteq> j\<rbrakk> \<Longrightarrow>
\<forall>k \<le> n. (compose {i. i \<le> n} (transpos i j) (transpos i j)) k = k"
proof -
assume p1:"i \<le> n" and p2:"j \<le> n" and p3:"i \<noteq> j"
from p1 and p2 and p3 show ?thesis
apply (simp add:compose_def)
apply (rule allI)
apply (case_tac "k = i") apply simp
apply (subst transpos_ij_1, assumption+)
apply (rule transpos_ij_2, simp+)
apply (rule impI)
apply (case_tac "k = j") apply simp
apply (subst transpos_ij_2, simp+)
apply (rule transpos_ij_1, simp+)
apply (subst transpos_id_1, assumption+)
apply (simp add:transpos_mem)
apply (simp add:transpos_id_1)+
done
qed
lemma comp_transpos_1:"\<lbrakk>i \<le> n; j \<le> n; i \<noteq> j; k \<le> n\<rbrakk> \<Longrightarrow>
(transpos i j) ((transpos i j) k) = k"
apply (frule comp_transpos [of "i" "n" "j"], assumption+)
apply (simp add:compose_def)
done
lemma cmp_transpos1:"\<lbrakk>i \<le> n; j \<le> n; i \<noteq> j; k \<le> n\<rbrakk> \<Longrightarrow>
(cmp (transpos i j) (transpos i j)) k = k"
apply (simp add:cmp_def)
apply (simp add:comp_transpos_1)
done
lemma cmp_transpos:"\<lbrakk>i \<le> n; i \<noteq> n; a \<le> (Suc n)\<rbrakk> \<Longrightarrow>
(cmp (transpos i n) (cmp (transpos n (Suc n)) (transpos i n))) a =
transpos i (Suc n) a"
apply (simp add:cmp_def)
apply (case_tac "a = Suc n", simp)
apply (simp add:transpos_id_2)
apply (cut_tac transpos_ij_2[of n "Suc n" "Suc n"], simp,
cut_tac transpos_ij_2[of i "Suc n" "Suc n"], simp,
cut_tac transpos_ij_2[of i n n], simp+)
apply (frule le_imp_less_or_eq[of a "Suc n"],
thin_tac "a \<le> Suc n", simp,
frule Suc_less_le[of a n])
apply (case_tac "a = n", simp,
cut_tac transpos_ij_2[of i n n], simp,
cut_tac transpos_id[of i "Suc n" "Suc n" n], simp,
cut_tac transpos_id[of n "Suc n" "Suc n" i], simp,
cut_tac transpos_ij_1[of i n n], simp+)
apply (case_tac "a = i", simp,
cut_tac transpos_ij_1[of i n n], simp+,
cut_tac transpos_ij_1[of i "Suc n" "Suc n"], simp,
cut_tac transpos_ij_1[of n "Suc n" "Suc n"], simp,
cut_tac transpos_id[of i "Suc n" n "Suc n"], simp+)
apply (cut_tac transpos_id[of i n n a], simp,
cut_tac transpos_id[of i "Suc n" "Suc n" a], simp,
cut_tac transpos_id[of n "Suc n" "Suc n" a], simp+)
done
lemma im_Nset_Suc:"insert (f (Suc n)) (f ` {i. i \<le> n}) = f ` {i. i\<le>(Suc n)}"
apply (simp add:image_def)
apply (rule equalityI)
apply (rule subsetI, simp)
apply (erule disjE, blast)
apply (erule exE, erule conjE, simp,
frule_tac i = xa and j = n and k = "Suc n" in le_trans,
simp)
apply blast
apply (rule subsetI, simp, erule exE, erule conjE)
apply (case_tac "xa = Suc n", simp)
apply (metis le_SucE)
done
lemma Nset_injTr0:"\<lbrakk>f \<in> {i. i \<le> (Suc n)} \<rightarrow> {i. i \<le> (Suc n)};
inj_on f {i. i \<le> (Suc n)}; f (Suc n) = Suc n\<rbrakk> \<Longrightarrow>
f \<in> {i. i \<le> n} \<rightarrow> {i. i \<le> n} \<and> inj_on f {i. i \<le> n}"
proof -
assume p1:"f \<in> {i. i \<le> (Suc n)} \<rightarrow> {i. i \<le> (Suc n)}" and
p2:"inj_on f {i. i \<le> (Suc n)}" and p3:"f (Suc n) = Suc n"
have q1:"\<forall>l \<le> n. l \<le> (Suc n)" apply simp done
from p1 and p2 and p3 and q1 have q2:"f \<in> {i. i \<le> n} \<rightarrow> {i. i \<le> n}"
apply (simp add:Pi_def)
apply (rule allI, rule impI)
apply (frule_tac a = x in forall_spec, simp,
thin_tac "\<forall>x\<le>Suc n. f x \<le> Suc n")
apply (rule contrapos_pp, simp+)
apply (simp add:nat_not_le_less)
apply (frule_tac n = "f x" in Suc_leI[of n], thin_tac "n < (f x)")
apply (frule_tac m = "Suc n" and n = "f x" in le_antisym, assumption)
apply(unfold inj_on_def)
apply (frule_tac x = x in bspec, simp,
thin_tac "\<forall>x\<in>{i. i \<le> Suc n}. \<forall>y\<in>{i. i \<le> Suc n}. f x = f y \<longrightarrow> x = y",
frule_tac x = "Suc n" in bspec, simp)
apply (frule_tac r = "f (Suc n)" and s = "Suc n" and t = "f x" in trans,
assumption,
thin_tac "f (Suc n) = Suc n", thin_tac "Suc n = f x",
thin_tac "\<forall>y\<in>{i. i \<le> Suc n}. f x = f y \<longrightarrow> x = y")
apply simp
done
from p2 have q3:"inj_on f {i. i \<le> n}"
apply (simp add:inj_on_def) done
from q2 and q3 show ?thesis apply simp done
qed
lemma inj_surj:"\<lbrakk>f \<in> {i. i \<le> (n::nat)} \<rightarrow> {i. i \<le> n};
inj_on f {i. i \<le> (n::nat)}\<rbrakk> \<Longrightarrow> f ` {i. i \<le> n} = {i. i \<le> n}"
proof -
assume p1:"f \<in> {i. i \<le> n} \<rightarrow> {i. i \<le> n}" and p2:"inj_on f {i. i \<le> n}"
have q1:"0 < Suc 0" apply simp done
from p1 and p2 and q1 show ?thesis
apply simp
apply (frule image_sub [of "f" "{i. i \<le> n}" "{i. i \<le> n}" "{i. i \<le> n}"])
apply simp+
apply (cut_tac card_image [of "f" "{i. i \<le> n}"])
apply (simp add:card_seteq) apply assumption
done
qed
lemma Nset_pre_mem:"\<lbrakk>f \<in> {i. i \<le> (Suc n)} \<rightarrow> {i. i \<le> (Suc n)};
inj_on f {i. i \<le> (Suc n)}; f (Suc n) = Suc n; k \<le> n\<rbrakk> \<Longrightarrow> f k \<in> {i. i \<le> n}"
apply (frule Nset_injTr0[of f n], assumption+, erule conjE)
apply (frule_tac x = k in funcset_mem[of f "{i. i \<le> n}" "{i. i \<le> n}"],
simp, assumption)
done
lemma Nset_injTr1:"\<lbrakk> \<forall>l \<le>(Suc n). f l \<le> (Suc n); inj_on f {i. i \<le> (Suc n)};
f (Suc n) = Suc n \<rbrakk> \<Longrightarrow> inj_on f {i. i \<le> n}"
by (cut_tac Nset_injTr0[of f n], simp, simp)
lemma Nset_injTr2:"\<lbrakk>\<forall>l\<le> (Suc n). f l \<le> (Suc n); inj_on f {i. i \<le> (Suc n)};
f (Suc n) = Suc n\<rbrakk> \<Longrightarrow> \<forall>l \<le> n. f l \<le> n"
apply (rule allI, rule impI)
apply (cut_tac k = l in Nset_pre_mem[of f n])
apply (simp+)
done
lemma TR_inj_inj:"\<lbrakk>\<forall>l\<le> (Suc n). f l \<le> (Suc n); inj_on f {i. i \<le> (Suc n)};
i \<le> (Suc n); j \<le> (Suc n); i < j \<rbrakk> \<Longrightarrow>
inj_on (compose {i. i \<le> (Suc n)} (transpos i j) f) {i. i \<le> (Suc n)}"
apply (frule transpos_inj[of i "Suc n" j], assumption+,
simp )
apply (rule comp_inj [of f "{i. i \<le> (Suc n)}" "{i. i \<le> (Suc n)}"
"transpos i j" "{i. i \<le> (Suc n)}"])
apply (simp, assumption,
rule transpos_hom[of i "Suc n" j], simp+)
done
lemma enumeration:"\<lbrakk>f \<in> {i. i \<le> (n::nat)} \<rightarrow> {i. i \<le> m}; inj_on f {i. i \<le> n}\<rbrakk>
\<Longrightarrow> n \<le> m"
apply (frule image_sub[of f "{i. i \<le> n}" "{i. i \<le> m}" "{i. i \<le> n}"])
apply simp
apply (frule card_image[of f "{i. i \<le> n}"])
apply(drule card_mono[OF finite_Collect_le_nat])
apply simp
done
lemma enumerate_1:"\<lbrakk>\<forall>j \<le> (n::nat). f j \<in> A; \<forall>j \<le> (m::nat). g j \<in> A;
inj_on f {i. i \<le> n}; inj_on g {j. j \<le> m}; f `{j. j \<le> n} = A;
g ` {j. j \<le> m} = A \<rbrakk> \<Longrightarrow> n = m"
apply (frule card_image[of f "{i. i \<le> n}"],
frule card_image[of g "{i. i \<le> m}"])
apply simp
done
definition
ninv :: "[nat, (nat \<Rightarrow> nat)] \<Rightarrow> (nat \<Rightarrow> nat)" where
"ninv n f = (\<lambda>y\<in>{i. i \<le> n}. (SOME x. (x \<le> n \<and> y = f x)))"
lemma ninv_hom:"\<lbrakk>f \<in> {i. i \<le> (n::nat)} \<rightarrow> {i. i \<le> n}; inj_on f {i. i \<le> n}\<rbrakk> \<Longrightarrow>
       ninv n f \<in> {i. i \<le> n} \<rightarrow> {i. i \<le> n}"
apply (simp add:ninv_def)
apply (rule Pi_I, simp)
apply (frule inj_surj, assumption+)
apply (cut_tac a = x in eq_set_inc[of _ "{i. i \<le> n}" "f ` {i. i \<le> n}"],
       simp, rule sym, assumption)
apply (thin_tac "f ` {i. i \<le> n} = {i. i \<le> n}",
       simp add:image_def, erule exE, erule conjE)
apply (rule someI2_ex, blast, simp)
done
lemma ninv_r_inv:"\<lbrakk>f \<in> {i. i \<le> (n::nat)} \<rightarrow> {i. i \<le> n}; inj_on f {i. i \<le> n};
b \<le> n\<rbrakk> \<Longrightarrow> f (ninv n f b) = b "
apply (simp add:ninv_def)
apply (frule inj_surj, assumption+)
apply (cut_tac a = b in eq_set_inc[of _ "{i. i \<le> n}" "f ` {i. i \<le> n}"])
apply (simp, rule sym, assumption)
apply (thin_tac "f ` {i. i \<le> n} = {i. i \<le> n}", simp add:image_def,
erule exE, erule conjE, frule sym, thin_tac "b = f x")
apply (rule someI2_ex, blast)
apply (erule conjE, rule sym, assumption)
done
lemma ninv_inj:"\<lbrakk>f \<in> {i. i \<le> n} \<rightarrow> {i. i \<le> n}; inj_on f {i. i \<le> n}\<rbrakk> \<Longrightarrow>
inj_on (ninv n f) {i. i \<le> n}"
apply (subst inj_on_def, simp)
apply ((rule allI, rule impI)+, rule impI)
apply (frule ninv_hom[of f n], assumption,
frule_tac x = x in funcset_mem[of "ninv n f" "{i. i \<le> n}" "{i. i \<le> n}"], simp,
frule_tac x = y in funcset_mem[of "ninv n f" "{i. i \<le> n}" "{i. i \<le> n}"],
simp,
frule_tac b = x in ninv_r_inv [of f n], assumption+)
apply (simp add:ninv_r_inv)
done
subsection "Lemmas required in Algebra6.thy"
lemma ge2_zmult_pos:
"2 \<le> m \<Longrightarrow> 0 < z \<Longrightarrow> 1 < int m * z"
proof -
assume a1: "0 < z"
assume a2: "2 \<le> m"
have "int m + - 1 * (int m * z) \<le> 0"
using a1 by (simp add: pos_zmult_pos)
then show ?thesis
using a2 by linarith
qed
lemma zmult_pos_mono:"\<lbrakk> (0::int) < w; w * z \<le> w * z'\<rbrakk> \<Longrightarrow> z \<le> z'"
apply (rule contrapos_pp, simp+)
done
lemma zmult_pos_mono_r:
"\<lbrakk>(0::int) < w; z * w \<le> z' * w\<rbrakk> \<Longrightarrow> z \<le> z'"
apply (simp add:mult.commute)
done
lemma an_neq_inf:"an n \<noteq> \<infinity>"
by (simp add:an_def)
lemma an_neq_minf:"an n \<noteq> -\<infinity>"
by (simp add:an_def)
lemma aeq_mult:"\<lbrakk>z \<noteq> 0; a = b\<rbrakk> \<Longrightarrow> a * ant z = b * ant z"
by simp
lemma tna_0[simp]:"tna 0 = 0"
by (simp add:ant_0[THEN sym] tna_ant)
lemma ale_nat_le:"(an n \<le> an m) = (n \<le> m)"
by (simp add:an_def)
lemma aless_nat_less:"(an n < an m) = (n < m)"
by (simp add:an_def, subst aless_zless[of "int n" "int m"], simp)
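(* Example (an added sketch): the embedding an of the naturals into the ants
   preserves both orders, checked on a concrete instance. *)
lemma an_compare_ex:"an 2 \<le> an 3 \<and> an 2 < an 3"
by (simp add:ale_nat_le aless_nat_less)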
lemma apos_natpos:"\<lbrakk>a \<noteq> \<infinity>; 0 \<le> a\<rbrakk> \<Longrightarrow> 0 \<le> na a"
by (cut_tac ale_nat_le[of "0" "na a"], simp add:na_def an_def)
lemma apos_tna_pos:"\<lbrakk>n \<noteq> \<infinity>; 0 \<le> n\<rbrakk> \<Longrightarrow> 0 \<le> tna n"
by (subst tna_0[THEN sym],
subst ale_zle[THEN sym, of "tna 0" "tna n"],
frule apos_neq_minf[of "n"],
simp add:ant_tna ant_0)
lemma apos_na_pos:"\<lbrakk>n \<noteq> \<infinity>; 0 \<le> n\<rbrakk> \<Longrightarrow> 0 \<le> na n"
by (frule apos_tna_pos[of "n"], assumption,
cut_tac tna_0[THEN sym], simp del:tna_0)
lemma aposs_tna_poss:"\<lbrakk>n \<noteq> \<infinity>; 0 < n\<rbrakk> \<Longrightarrow> 0 < tna n"
apply (subst tna_0[THEN sym],
subst aless_zless[THEN sym, of "tna 0" "tna n"],
frule aless_imp_le[of "0" "n"],
frule apos_neq_minf[of "n"],
simp add:ant_tna ant_0)
done
lemma aposs_na_poss:"\<lbrakk>n \<noteq> \<infinity>; 0 < n\<rbrakk> \<Longrightarrow> 0 < na n"
apply (frule aless_imp_le[of "0" "n"],
simp add:aneg_less[THEN sym, of "0" "n"],
simp add:na_def)
apply (rule aposs_tna_poss, assumption+)
done
lemma nat_0_le: "0 \<le> z \<Longrightarrow> int (nat z) = z"
apply simp
done
lemma int_eq:"m = n \<Longrightarrow> int m = int n"
by simp
lemma box_equation:"\<lbrakk>a = b; a = c\<rbrakk> \<Longrightarrow> b = c"
apply simp
done
lemma aeq_nat_eq:"\<lbrakk>n \<noteq> \<infinity>; 0 \<le> n; m \<noteq> \<infinity>; 0 \<le> m\<rbrakk> \<Longrightarrow>
(n = m) = (na n = na m)"
apply (rule iffI, simp)
apply (cut_tac aneg_less[THEN sym, of "0" "n"],
cut_tac aneg_less[THEN sym, of "0" "m"], simp,
simp add:na_def,
frule apos_neq_minf[of "n"],
frule apos_neq_minf[of "m"])
apply (cut_tac mem_ant[of "m"],
cut_tac mem_ant[of "n"], simp,
(erule exE)+, simp,
simp add:tna_ant,
simp only:ant_0[THEN sym],
simp only:ale_zle)
done
lemma na_minf:"na (-\<infinity>) = 0"
apply (simp add:na_def, rule impI,
cut_tac minf_less_0, simp)
done
lemma an_na:"\<lbrakk>a \<noteq> \<infinity>; 0 \<le> a\<rbrakk> \<Longrightarrow> an (na a) = a"
apply (frule apos_tna_pos[of "a"], assumption,
frule apos_neq_minf[of "a"],
cut_tac mem_ant[of "a"], simp, erule exE,
simp, simp add:an_def na_def)
apply (cut_tac y = 0 and x = "ant z" in aneg_less, simp,
simp only:ant_0[THEN sym],
simp only:ale_zle, simp add:tna_ant)
done
lemma not_na_le_minf:"\<not> (an n \<le> -\<infinity> )"
apply (rule contrapos_pp, simp+)
apply (cut_tac minf_le_any[of "an n"], frule ale_antisym[of "an n" "-\<infinity>"],
assumption+, simp add:an_def)
done
lemma not_na_less_minf:"\<not> (an n < -\<infinity>)"
apply (simp add:aneg_less)
done
lemma not_na_ge_inf:"\<not> \<infinity> \<le> (an n)"
apply (simp add:aneg_le, unfold an_def)
apply (simp add:z_less_i[of "int n"])
done
lemma an_na_le:"j \<le> an n \<Longrightarrow> na j \<le> n"
by (metis ale_nat_le an_0 an_na an_nat_pos aneg_le na_def not_na_ge_inf)
lemma aless_neq :"(x::ant) < y \<Longrightarrow> x \<noteq> y"
by (rule contrapos_pp, simp+)
chapter "Ordered Set"
(* In this chapter, I prove Zorn's lemma in its general form. *)
section "Basic Concepts of Ordered Sets"
record 'a carrier =
carrier :: "'a set"
record 'a Order = "'a carrier" +
rel :: "('a \<times> 'a) set"
locale Order =
fixes D (structure)
assumes closed: "rel D \<subseteq> carrier D \<times> carrier D"
and refl: "a \<in> carrier D \<Longrightarrow> (a, a) \<in> rel D"
and antisym: "\<lbrakk>a \<in> carrier D; b \<in> carrier D; (a, b) \<in> rel D;
(b, a) \<in> rel D\<rbrakk> \<Longrightarrow> a = b"
and trans: "\<lbrakk>a \<in> carrier D; b \<in> carrier D; c \<in> carrier D;
(a, b) \<in> rel D; (b, c) \<in> rel D\<rbrakk> \<Longrightarrow> (a, c) \<in> rel D"
(* print_locale Order *)
definition
ole :: "_ \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<preceq>\<index>" 60) where
"a \<preceq>\<^bsub>D\<^esub> b \<longleftrightarrow> (a, b) \<in> rel D"
definition
oless :: "_ \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<prec>\<index>" 60) where
"a \<prec>\<^bsub>D\<^esub> b \<equiv> a \<preceq>\<^bsub>D\<^esub> b \<and> a \<noteq> b"
lemma Order_component:"(E::'a Order) = \<lparr> carrier = carrier E, rel = rel E \<rparr>"
by simp (** An ordered set consists of two components **)
lemma Order_comp_eq:"\<lbrakk>carrier (E::'a Order) = carrier (F::'a Order);
rel E = rel F\<rbrakk> \<Longrightarrow> E = F"
by simp (* if the components coincide, then the ordered sets coincide *)
lemma (in Order) le_rel:"\<lbrakk>a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow>
(a \<preceq> b) = ((a, b) \<in> rel D)"
by (simp add:ole_def)
lemma (in Order) less_imp_le:
"\<lbrakk>a \<in> carrier D; b \<in> carrier D; a \<prec> b \<rbrakk> \<Longrightarrow> a \<preceq> b"
by (simp add:oless_def)
lemma (in Order) le_refl:"a \<in> carrier D \<Longrightarrow> a \<preceq> a"
apply (unfold ole_def)
apply (rule refl, assumption)
done
lemma (in Order) le_antisym:"\<lbrakk>a \<in> carrier D; b \<in> carrier D;
a \<preceq> b; b \<preceq> a \<rbrakk> \<Longrightarrow> a = b"
apply (unfold ole_def)
apply (rule antisym)
apply assumption+
done
lemma (in Order) le_trans:"\<lbrakk>a \<in> carrier D; b \<in> carrier D; c \<in> carrier D;
a \<preceq> b; b \<preceq> c \<rbrakk> \<Longrightarrow> a \<preceq> c"
apply (unfold ole_def)
apply (rule_tac a = a and b = b and c = c in trans)
apply assumption+
done
lemma (in Order) less_trans:"\<lbrakk>a \<in> carrier D; b \<in> carrier D; c \<in> carrier D;
a \<prec> b; b \<prec> c \<rbrakk> \<Longrightarrow> a \<prec> c"
apply (unfold oless_def)
apply (erule conjE)+
apply (simp add:le_trans[of a b c])
apply (rule contrapos_pp, simp+)
apply (frule_tac le_antisym[of b c], assumption+)
apply simp
done
lemma (in Order) le_less_trans:"\<lbrakk>a \<in> carrier D; b \<in> carrier D; c \<in> carrier D;
a \<preceq> b; b \<prec> c \<rbrakk> \<Longrightarrow> a \<prec> c"
apply (simp add:oless_def)
apply (erule conjE)
apply (simp add:le_trans[of a b c])
apply (rule contrapos_pp, simp+)
apply (frule le_antisym[of "b" "c"])
apply assumption+
apply simp
done
lemma (in Order) less_le_trans:"\<lbrakk>a \<in> carrier D; b \<in> carrier D; c \<in> carrier D;
a \<prec> b; b \<preceq> c \<rbrakk> \<Longrightarrow> a \<prec> c"
apply (simp add:oless_def)
apply ( erule conjE)
apply (simp add:le_trans[of a b c])
apply (rule contrapos_pp, simp+)
apply (frule le_antisym[of "b" "c"])
apply assumption+
apply simp
done
lemma (in Order) le_imp_less_or_eq:
"\<lbrakk>a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow> (a \<preceq> b) = (a \<prec> b \<or> a = b)"
apply (simp add:oless_def)
apply (rule iffI)
apply simp
apply (erule disjE)
apply simp
apply simp
apply (rule le_refl)
apply assumption
done
lemma (in Order) less_neq: "a \<prec> b \<Longrightarrow> a \<noteq> b"
by (simp add: oless_def)
lemma (in Order) le_neq_less: "\<lbrakk>a \<preceq> b; a \<noteq> b\<rbrakk> \<Longrightarrow> a \<prec> b"
by (simp add: oless_def)
lemma (in Order) less_irrefl: "\<lbrakk>a \<in> carrier D; a \<prec> a\<rbrakk> \<Longrightarrow> C"
by (simp add:oless_def)
lemma (in Order) less_irrefl': "a \<in> carrier D \<Longrightarrow> \<not> a \<prec> a"
by (simp add:oless_def)
lemma (in Order) less_asym:
"a \<in> carrier D \<Longrightarrow> b \<in> carrier D \<Longrightarrow> a \<prec> b \<Longrightarrow> b \<prec> a \<Longrightarrow> C"
apply (simp add:oless_def)
apply (erule conjE)+
apply (frule le_antisym[of "a" "b"])
apply assumption+
apply simp
done
lemma (in Order) less_asym':
"a \<in> carrier D \<Longrightarrow> b \<in> carrier D \<Longrightarrow> a \<prec> b \<Longrightarrow> \<not> b \<prec> a"
apply (rule contrapos_pp, simp+)
apply (simp add:oless_def)
apply (erule conjE)+
apply (frule le_antisym[of "a" "b"])
apply assumption+
apply simp
done
lemma (in Order) gt_than_any_outside:"\<lbrakk>A \<subseteq> carrier D; b \<in> carrier D;
\<forall>x\<in>A. x \<prec> b\<rbrakk> \<Longrightarrow> b \<notin> A"
apply (rule contrapos_pp, simp+)
apply (frule_tac x = b in bspec)
apply (assumption,
thin_tac "\<forall>x\<in>A. x \<prec> b", simp add:oless_def)
done
definition
Iod :: "_ \<Rightarrow> 'a set \<Rightarrow> _" where
"Iod D T =
D \<lparr>carrier := T, rel := {(a, b). (a, b) \<in> rel D \<and> a \<in> T \<and> b \<in> T}\<rparr>"
definition
SIod :: "'a Order \<Rightarrow> 'a set \<Rightarrow> 'a Order" where
"SIod D T = \<lparr>carrier = T, rel = {(a, b). (a, b) \<in> rel D \<and> a \<in> T \<and> b \<in> T}\<rparr>"
lemma (in Order) Iod_self: "D = Iod D (carrier D)"
apply (unfold Iod_def)
apply (cases D)
apply (insert closed)
apply (simp add:Iod_def)
apply (rule equalityI)
apply (rule subsetI)
apply auto
done
lemma SIod_self:"Order D \<Longrightarrow> D = SIod D (carrier D)"
apply (unfold SIod_def)
apply (cases D)
apply (cut_tac Order.closed[of "D"])
apply auto
done
lemma (in Order) Od_carrier:"carrier (D\<lparr>carrier := S, rel := R\<rparr>) = S"
by simp
lemma (in Order) Od_rel:"rel (D\<lparr>carrier := S, rel := R\<rparr>) = R"
by simp
lemma (in Order) Iod_carrier:
"T \<subseteq> carrier D \<Longrightarrow> carrier (Iod D T) = T"
by (simp add: Iod_def)
lemma SIod_carrier:"\<lbrakk>Order D; T \<subseteq> carrier D\<rbrakk> \<Longrightarrow> carrier (SIod D T) = T"
by (simp add:SIod_def)
lemma (in Order) Od_compare:"(S = S' \<and> R = R') = (D\<lparr>carrier := S, rel := R\<rparr> = D\<lparr>carrier := S', rel := R'\<rparr>)"
apply (rule iffI)
apply simp
apply (cut_tac Od_carrier[of R S], cut_tac Od_carrier[of R' S'], simp)
apply (cut_tac Od_rel[of R S], cut_tac Od_rel[of R' S'])
apply (thin_tac "S' = S")
apply simp
done
lemma (in Order) Iod_le:
"\<lbrakk>T \<subseteq> carrier D; a \<in> T; b \<in> T\<rbrakk> \<Longrightarrow> (a \<preceq>\<^bsub>Iod D T\<^esub> b) = (a \<preceq> b)"
apply (simp add: Iod_def)
apply (simp add:ole_def)
done
lemma SIod_le:"\<lbrakk>T \<subseteq> carrier D; a \<in> T; b \<in> T\<rbrakk> \<Longrightarrow>
(a \<preceq>\<^bsub>SIod D T\<^esub> b) = (a \<preceq>\<^bsub>D\<^esub> b)"
apply (simp add:SIod_def)
apply (simp add:ole_def)
done
lemma (in Order) Iod_less:
"\<lbrakk>T \<subseteq> carrier D; a \<in> T; b \<in> T\<rbrakk> \<Longrightarrow> (a \<prec>\<^bsub>Iod D T\<^esub> b) = (a \<prec> b)"
apply (simp add:oless_def)
apply (simp add:Iod_le)
done
lemma SIod_less:"\<lbrakk>T \<subseteq> carrier D; a \<in> T; b \<in> T\<rbrakk> \<Longrightarrow>
(a \<prec>\<^bsub>SIod D T\<^esub> b) = (a \<prec>\<^bsub>D\<^esub> b)"
by (simp add:oless_def SIod_le)
lemma (in Order) Iod_Order:
"T \<subseteq> carrier D \<Longrightarrow> Order (Iod D T)"
apply (rule Order.intro)
apply (simp add:Iod_def)
apply (rule subsetI)
apply (unfold split_paired_all)
apply simp
apply (simp add:Iod_carrier)
apply (simp add:Iod_def)
apply (rule refl)
apply (rule subsetD, assumption+)
apply (simp add:Iod_carrier)
apply (simp add:Iod_def)
apply (rule_tac a = a and b = b in antisym)
apply (simp add:subsetD[of "T" "carrier D"])+
apply (simp add:Iod_def)
apply (rule_tac a = a and b = b and c = c in trans)
apply (simp add:subsetD[of "T" "carrier D"])+
done
lemma SIod_Order:"\<lbrakk> Order D; T \<subseteq> carrier D\<rbrakk> \<Longrightarrow> Order (SIod D T)"
apply (rule Order.intro)
apply (rule subsetI)
apply (simp add:SIod_def)
apply (unfold split_paired_all)
apply simp
apply (simp add:SIod_def)
apply (frule_tac c = a in subsetD[of T "carrier D"], assumption+)
apply (simp add:Order.refl[of D])
apply (simp add:SIod_def)
apply (rule Order.antisym[of D], assumption+)
apply (simp add:subsetD)+
apply (simp add:SIod_def)
apply (frule_tac c = a in subsetD[of T "carrier D"], assumption+,
frule_tac c = b in subsetD[of T "carrier D"], assumption+,
frule_tac c = c in subsetD[of T "carrier D"], assumption+)
apply (rule_tac a = a and b = b and c = c in Order.trans[of D], assumption+)
done
lemma (in Order) emptyset_Iod:"Order (Iod D {})"
apply (rule Iod_Order)
apply simp
done
lemma (in Order) Iod_sub_sub:
"\<lbrakk>S \<subseteq> T; T \<subseteq> carrier D\<rbrakk> \<Longrightarrow> Iod (Iod D T) S = Iod D S"
apply (simp add:Iod_def)
apply (subst Od_compare[THEN sym])
apply simp
apply blast
done
lemma SIod_sub_sub:
"\<lbrakk>S \<subseteq> T; T \<subseteq> carrier D\<rbrakk> \<Longrightarrow> SIod (SIod D T) S = SIod D S"
apply (simp add:SIod_def)
apply blast
done
lemma rel_SIod:"\<lbrakk>Order D; Order E; carrier E \<subseteq> carrier D;
\<forall>a\<in>carrier E. \<forall>b\<in>carrier E. (a \<preceq>\<^bsub>E\<^esub> b) = (a \<preceq>\<^bsub>D\<^esub> b)\<rbrakk> \<Longrightarrow>
rel E = rel (SIod D (carrier E))"
apply (rule equalityI) (* show the equality of the sets *)
apply (rule subsetI)
apply (unfold split_paired_all)
apply (simp add:ole_def)
apply (simp add:SIod_def)
apply (cut_tac Order.closed[of "E"])
apply blast
apply assumption
apply (rule subsetI)
apply (unfold split_paired_all)
apply (simp add:SIod_def)
apply (simp add:ole_def)
done
lemma SIod_self_le:"\<lbrakk>Order D; Order E;
carrier E \<subseteq> carrier D;
\<forall>a\<in>carrier E. \<forall>b\<in>carrier E. (a \<preceq>\<^bsub>E\<^esub> b) = (a \<preceq>\<^bsub>D\<^esub> b) \<rbrakk> \<Longrightarrow>
E = SIod D (carrier E)"
apply (rule Order_comp_eq[of "E" "SIod D (carrier E)"])
apply (simp add:SIod_carrier)
apply (rule rel_SIod[of "D" "E"], assumption+)
done
subsection \<open>Total ordering\<close>
locale Torder = Order +
assumes le_linear: "\<lbrakk>a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow>
a \<preceq> b \<or> b \<preceq> a"
lemma (in Order) Iod_empty_Torder:"Torder (Iod D {})"
apply (rule Torder.intro)
apply(simp add:emptyset_Iod)
apply (rule Torder_axioms.intro)
apply (simp add:Iod_carrier)
done
lemma (in Torder) le_cases:
"\<lbrakk>a \<in> carrier D; b \<in> carrier D; (a \<preceq> b \<Longrightarrow> C); (b \<preceq> a \<Longrightarrow> C)\<rbrakk> \<Longrightarrow> C"
by (cut_tac le_linear[of "a" "b"], blast, assumption+)
lemma (in Torder) Order:"Order D"
apply (rule Order_axioms)
done
lemma (in Torder) less_linear:
"a \<in> carrier D \<Longrightarrow> b \<in> carrier D \<Longrightarrow> a \<prec> b \<or> a = b \<or> b \<prec> a"
apply (simp add:oless_def)
apply (rule le_cases[of "a" "b"])
apply assumption+
apply blast
apply blast
done
lemma (in Torder) not_le_less:
"a \<in> carrier D \<Longrightarrow> b \<in> carrier D \<Longrightarrow>
(\<not> a \<preceq> b) = (b \<prec> a)"
apply (unfold oless_def)
apply (cut_tac le_linear[of a b])
apply (rule iffI)
apply simp
apply (rule contrapos_pp, simp+)
apply (rule contrapos_pp, simp+)
apply (erule conjE)
apply (frule le_antisym[of b a])
apply assumption+
apply simp+
done
lemma (in Torder) not_less_le:
"a \<in> carrier D \<Longrightarrow> b \<in> carrier D \<Longrightarrow>
(\<not> a \<prec> b) = (b \<preceq> a)"
apply (unfold oless_def)
apply (rule iffI)
apply (simp only:de_Morgan_conj[of "a \<preceq> b" "a \<noteq> b"])
apply (simp only:not_le_less[of "a" "b"])
apply (erule disjE)
apply (simp add:less_imp_le)
apply (simp add:le_imp_less_or_eq)
apply (rule contrapos_pp, simp+)
apply (erule conjE)
apply (frule le_antisym[of "a" "b"])
apply assumption+
apply simp
done
lemma (in Order) Iod_not_le_less:"\<lbrakk>T \<subseteq> carrier D; a \<in> T; b \<in> T;
Torder (Iod D T)\<rbrakk> \<Longrightarrow> (\<not> a \<preceq>\<^bsub>(Iod D T)\<^esub> b) = b \<prec>\<^bsub>(Iod D T)\<^esub> a"
apply (subst Torder.not_le_less)
apply assumption+
apply (simp add:Iod_carrier)+
done
lemma (in Order) Iod_not_less_le:"\<lbrakk>T \<subseteq> carrier D; a \<in> T; b \<in> T;
Torder (Iod D T)\<rbrakk> \<Longrightarrow> (\<not> a \<prec>\<^bsub>(Iod D T)\<^esub> b) = b \<preceq>\<^bsub>(Iod D T)\<^esub> a"
apply (subst Torder.not_less_le)
apply assumption+
apply (simp add:Iod_carrier)+
done
subsection \<open>Two ordered sets\<close>
definition
Order_Pow :: "'a set \<Rightarrow> 'a set Order" ("(po _)" [999] 1000) where
"po A =
\<lparr>carrier = Pow A,
rel = {(X, Y). X \<in> Pow A \<and> Y \<in> Pow A \<and> X \<subseteq> Y}\<rparr>"
interpretation order_Pow: Order "po A"
apply (unfold Order_Pow_def)
apply (rule Order.intro)
apply (rule subsetI)
apply (unfold split_paired_all)
apply simp
apply simp
apply simp
apply simp
done
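(* Example (an added sketch): in the power-set order po A, the relation
   \<preceq> is just set inclusion between subsets of A. *)
lemma po_le_ex:"\<lbrakk>X \<subseteq> A; Y \<subseteq> A; X \<subseteq> Y\<rbrakk> \<Longrightarrow> X \<preceq>\<^bsub>po A\<^esub> Y"
by (simp add:Order_Pow_def ole_def)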
definition
Order_fs :: "'a set \<Rightarrow> 'b set \<Rightarrow> ('a set * ('a \<Rightarrow> 'b)) Order" where
"Order_fs A B =
\<lparr>carrier = {Z. \<exists>A1 f. A1 \<in> Pow A \<and> f \<in> A1 \<rightarrow> B \<and>
f \<in> extensional A1 \<and> Z = (A1, f)},
rel = {Y. Y \<in> ({Z. \<exists>A1 f. A1 \<in> Pow A \<and> f \<in> A1 \<rightarrow> B \<and> f \<in> extensional A1
\<and> Z = (A1, f)}) \<times> ({Z. \<exists>A1 f. A1 \<in> Pow A \<and> f \<in> A1 \<rightarrow> B \<and> f \<in> extensional A1
\<and> Z = (A1, f)}) \<and> fst (fst Y) \<subseteq> fst (snd Y) \<and>
(\<forall>a\<in> (fst (fst Y)). (snd (fst Y)) a = (snd (snd Y)) a)}\<rparr>"
lemma Order_fs:"Order (Order_fs A B)"
apply (simp add:Order_fs_def)
apply (rule Order.intro)
apply (rule subsetI)
apply (unfold split_paired_all)
apply (auto intro: funcset_eq)
done
subsection \<open>Homomorphism of ordered sets\<close>
definition
ord_inj :: "[('a, 'm0) Order_scheme, ('b, 'm1) Order_scheme,
'a \<Rightarrow> 'b] \<Rightarrow> bool" where
"ord_inj D E f \<longleftrightarrow> f \<in> extensional (carrier D) \<and>
f \<in> (carrier D) \<rightarrow> (carrier E) \<and>
(inj_on f (carrier D)) \<and>
(\<forall>a\<in>carrier D. \<forall>b\<in>carrier D. (a \<prec>\<^bsub>D\<^esub> b) = ((f a) \<prec>\<^bsub>E\<^esub> (f b)))"
definition
ord_isom :: "[('a, 'm0) Order_scheme, ('b, 'm1) Order_scheme,
'a \<Rightarrow> 'b] \<Rightarrow> bool" where
"ord_isom D E f \<longleftrightarrow> ord_inj D E f \<and>
(surj_to f (carrier D) (carrier E))"
lemma (in Order) ord_inj_func:"\<lbrakk>Order E; ord_inj D E f\<rbrakk> \<Longrightarrow>
f \<in> carrier D \<rightarrow> carrier E"
by (simp add:ord_inj_def)
lemma (in Order) ord_isom_func:"\<lbrakk>Order E; ord_isom D E f\<rbrakk> \<Longrightarrow>
f \<in> carrier D \<rightarrow> carrier E"
by (simp add:ord_isom_def ord_inj_func)
lemma (in Order) ord_inj_restrict_isom:"\<lbrakk>Order E; ord_inj D E f; T \<subseteq> carrier D\<rbrakk>
\<Longrightarrow> ord_isom (Iod D T) (Iod E (f ` T)) (restrict f T)"
apply (subst ord_isom_def) (* first, record two facts used throughout the proof *)
apply (frule ord_inj_func[of E f], assumption,
frule image_sub[of f "carrier D" "carrier E" "T"], assumption+)
apply (rule conjI)
apply (subst ord_inj_def)
apply (simp add:Iod_carrier Order.Iod_carrier)
apply (rule conjI)
apply (rule restrict_inj[of f "carrier D" "T"])
apply (simp add:ord_inj_def, assumption+)
apply (rule ballI)+
apply (frule_tac x = a in elem_in_image2[of f "carrier D" "carrier E" T],
assumption+,
frule_tac x = b in elem_in_image2[of f "carrier D" "carrier E" T],
assumption+)
apply (simp add:Iod_less Order.Iod_less)
apply (frule_tac c = a in subsetD[of T "carrier D"], assumption+,
frule_tac c = b in subsetD[of T "carrier D"], assumption+)
apply (simp add:ord_inj_def)
apply (subst surj_to_def)
apply (simp add:Iod_carrier Order.Iod_carrier)
done
lemma ord_inj_Srestrict_isom:"\<lbrakk>Order D; Order E; ord_inj D E f; T \<subseteq> carrier D\<rbrakk>
\<Longrightarrow> ord_isom (SIod D T) (SIod E (f ` T)) (restrict f T)"
apply (subst ord_isom_def)
apply (frule Order.ord_inj_func[of D E f], assumption+,
frule image_sub[of f "carrier D" "carrier E" "T"], assumption+)
apply (rule conjI)
apply (subst ord_inj_def)
apply (simp add:SIod_carrier)
apply (rule conjI)
apply (rule restrict_inj[of f "carrier D" "T"])
apply (simp add:ord_inj_def, assumption+)
apply (rule ballI)+
apply (frule_tac x = a in elem_in_image2[of f "carrier D" "carrier E" T],
assumption+,
frule_tac x = b in elem_in_image2[of f "carrier D" "carrier E" T],
assumption+)
apply (simp add:SIod_less)
apply (frule_tac c = a in subsetD[of T "carrier D"], assumption+,
frule_tac c = b in subsetD[of T "carrier D"], assumption+)
apply (simp add:ord_inj_def)
apply (simp add:SIod_carrier)
apply (simp add:surj_to_def)
done
lemma (in Order) id_ord_isom:"ord_isom D D (idmap (carrier D))"
apply (simp add:ord_isom_def)
apply (cut_tac idmap_bij[of "carrier D"])
apply (simp add:bij_to_def)
apply (simp add:ord_inj_def)
apply (simp add:idmap_def[of "carrier D"])
done
lemma (in Order) ord_isom_bij_to:"\<lbrakk>Order E; ord_isom D E f\<rbrakk> \<Longrightarrow>
bij_to f (carrier D) (carrier E)"
by (simp add:bij_to_def ord_isom_def,
simp add:ord_inj_def)
lemma (in Order) ord_inj_mem:"\<lbrakk>Order E; ord_inj D E f; a \<in> carrier D\<rbrakk> \<Longrightarrow>
(f a) \<in> carrier E"
apply (simp add:ord_inj_def, (erule conjE)+)
apply (simp add:Pi_def)
done
lemma (in Order) ord_isom_mem:"\<lbrakk>Order E; ord_isom D E f; a \<in> carrier D\<rbrakk> \<Longrightarrow>
(f a) \<in> carrier E"
apply (simp add:ord_isom_def, (erule conjE)+)
apply (simp add:ord_inj_mem)
done
lemma (in Order) ord_isom_surj:"\<lbrakk>Order E; ord_isom D E f; b \<in> carrier E\<rbrakk> \<Longrightarrow>
\<exists>a\<in>carrier D. b = f a"
apply (simp add:ord_isom_def, (erule conjE)+)
apply (simp add:surj_to_def image_def)
apply (frule sym, thin_tac "{y. \<exists>x\<in>carrier D. y = f x} = carrier E",
simp)
done
lemma (in Order) ord_isom_surj_forall:"\<lbrakk>Order E; ord_isom D E f\<rbrakk> \<Longrightarrow>
\<forall>b \<in> carrier E. \<exists>a\<in>carrier D. b = f a"
apply (rule ballI)
apply (rule ord_isom_surj[of "E" "f"], assumption+)
done
lemma (in Order) ord_isom_onto:"\<lbrakk>Order E; ord_isom D E f\<rbrakk> \<Longrightarrow>
f ` (carrier D) = carrier E "
apply (frule ord_isom_bij_to[of "E" "f"], assumption+)
apply(simp add:bij_to_def surj_to_def)
done
lemma (in Order) ord_isom_inj_on:"\<lbrakk>Order E; ord_isom D E f\<rbrakk> \<Longrightarrow>
inj_on f (carrier D)"
by (simp add:ord_isom_def ord_inj_def)
lemma (in Order) ord_isom_inj:"\<lbrakk>Order E; ord_isom D E f;
a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow> (a = b) = ((f a) = (f b))"
apply (frule ord_isom_inj_on[of E f], assumption)
apply (simp add:injective_iff)
done
lemma (in Order) ord_isom_surj_to:"\<lbrakk>Order E; ord_isom D E f\<rbrakk> \<Longrightarrow>
surj_to f (carrier D) (carrier E)"
by (simp add:ord_isom_def)
lemma (in Order) ord_inj_less:"\<lbrakk>Order E; ord_inj D E f; a \<in> carrier D;
b \<in> carrier D\<rbrakk> \<Longrightarrow> (a \<prec>\<^bsub>D\<^esub> b) = ((f a) \<prec>\<^bsub>E\<^esub> (f b))"
by (simp add:ord_inj_def)
lemma (in Order) ord_isom_less:"\<lbrakk>Order E; ord_isom D E f;
a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow> (a \<prec>\<^bsub>D\<^esub> b) = ((f a) \<prec>\<^bsub>E\<^esub> (f b))"
by (simp add:ord_isom_def ord_inj_less)
lemma (in Order) ord_isom_less_forall:"\<lbrakk>Order E; ord_isom D E f\<rbrakk> \<Longrightarrow>
\<forall>a \<in> carrier D. \<forall> b \<in> carrier D. (a \<prec>\<^bsub>D\<^esub> b) = ((f a) \<prec>\<^bsub>E\<^esub> (f b))"
by ((rule ballI)+,
simp add:ord_isom_less)
lemma (in Order) ord_isom_le:"\<lbrakk>Order E; ord_isom D E f;
a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow> (a \<preceq>\<^bsub>D\<^esub> b) = ((f a) \<preceq>\<^bsub>E\<^esub> (f b))"
apply (frule_tac a = a in ord_isom_mem[of "E" "f"], assumption+,
frule_tac a = b in ord_isom_mem[of "E" "f"], assumption+)
apply (simp add:le_imp_less_or_eq Order.le_imp_less_or_eq[of "E"])
apply (simp add:ord_isom_less ord_isom_inj)
done
lemma (in Order) ord_isom_le_forall:"\<lbrakk>Order E; ord_isom D E f\<rbrakk> \<Longrightarrow>
\<forall>a \<in> carrier D. \<forall> b \<in> carrier D. (a \<preceq> b) = ((f a) \<preceq>\<^bsub>E\<^esub> (f b))"
by ((rule ballI)+,
rule ord_isom_le, assumption+)
lemma (in Order) ord_isom_convert:"\<lbrakk>Order E; ord_isom D E f;
x \<in> carrier D; a \<in> carrier D\<rbrakk> \<Longrightarrow> (\<forall>y\<in>carrier D. (x \<prec> y \<longrightarrow> \<not> y \<prec> a)) =
(\<forall>z\<in>carrier E. ((f x) \<prec>\<^bsub>E\<^esub> z \<longrightarrow> \<not> z \<prec>\<^bsub>E\<^esub> (f a)))"
apply (rule iffI)
apply (rule ballI, rule impI)
apply (frule_tac b = z in ord_isom_surj[of "E" "f"], assumption+,
erule bexE)
apply ( simp add:ord_isom_less[THEN sym, of "E" "f"])
apply (rule ballI, rule impI)
apply (simp add:ord_isom_less[of "E" "f"])
apply (frule_tac a = y in ord_isom_mem[of "E" "f"], assumption+)
apply simp
done
lemma (in Order) ord_isom_sym:"\<lbrakk>Order E; ord_isom D E f\<rbrakk> \<Longrightarrow>
ord_isom E D (invfun (carrier D) (carrier E) f)"
apply (frule ord_isom_func[of E f], assumption+,
frule ord_isom_inj_on[of E f], assumption+,
frule ord_isom_surj_to[of E f], assumption+)
apply (subst ord_isom_def, subst ord_inj_def)
apply (simp add:inv_func)
apply (simp add:invfun_inj)
apply (simp add:invfun_surj)
apply (rule conjI)
apply (simp add:invfun_def extensional_def)
apply (rule ballI)+
apply (frule_tac b = a in invfun_mem[of "f" "carrier D" "carrier E"],
assumption+,
frule_tac b = b in invfun_mem[of "f" "carrier D" "carrier E"],
assumption+)
apply (frule_tac a = "(f\<inverse>\<^bsub>carrier E,carrier D\<^esub>) a" and b = "(f\<inverse>\<^bsub>carrier E,carrier D\<^esub>) b"
in ord_isom_less[of E f], assumption+)
apply (simp add:invfun_r)
done
lemma (in Order) ord_isom_trans:"\<lbrakk>Order E; Order F; ord_isom D E f;
ord_isom E F g \<rbrakk> \<Longrightarrow> ord_isom D F (compose (carrier D) g f)"
apply (frule ord_isom_func[of E f], assumption+,
frule ord_isom_inj_on[of E f], assumption+,
frule ord_isom_surj_to[of E f], assumption+,
frule Order.ord_isom_func[of E F g], assumption+,
frule Order.ord_isom_inj_on[of E F g], assumption+,
frule Order.ord_isom_surj_to[of E F g], assumption+)
(* lemmas concerning compose require assumptions given above *)
apply (subst ord_isom_def, subst ord_inj_def)
apply (simp add:composition)
apply (simp add:comp_inj[of "f" "carrier D" "carrier E" "g" "carrier F"])
apply (simp add:compose_surj)
apply (rule ballI)+
apply (frule_tac x = a in funcset_mem[of f "carrier D" "carrier E"],
assumption+,
frule_tac x = b in funcset_mem[of f "carrier D" "carrier E"],
assumption+)
apply (frule_tac a = a and b = b in ord_isom_less[of E f], assumption+,
frule_tac a = "f a" and b = "f b" in Order.ord_isom_less[of E F g],
assumption+)
apply (simp add:compose_def)
done
definition
ord_equiv :: "[_, ('b, 'm1) Order_scheme] \<Rightarrow> bool" where
"ord_equiv D E \<longleftrightarrow> (\<exists>f. ord_isom D E f)"
lemma (in Order) ord_equiv:"\<lbrakk>Order E; ord_isom D E f\<rbrakk> \<Longrightarrow> ord_equiv D E"
by (simp add:ord_equiv_def, blast)
lemma (in Order) ord_equiv_isom:"\<lbrakk>Order E; ord_equiv D E\<rbrakk> \<Longrightarrow>
\<exists>f. ord_isom D E f"
by (simp add:ord_equiv_def)
lemma (in Order) ord_equiv_reflex:"ord_equiv D D"
apply (simp add:ord_equiv_def)
apply (cut_tac id_ord_isom, blast)
done
lemma (in Order) eq_ord_equiv:"\<lbrakk>Order E; D = E\<rbrakk> \<Longrightarrow> ord_equiv D E"
apply (frule sym, thin_tac "D = E")
apply ( simp add:ord_equiv_reflex)
done
lemma (in Order) ord_equiv_trans:"\<lbrakk>Order E; Order F; ord_equiv D E;
ord_equiv E F\<rbrakk> \<Longrightarrow> ord_equiv D F"
apply (simp add:ord_equiv_def)
apply (erule exE)+
apply (frule_tac f = f and g = fa in ord_isom_trans [of "E" "F"],
assumption+, blast)
done
lemma (in Order) ord_equiv_sym:"\<lbrakk>Order E; ord_equiv D E\<rbrakk> \<Longrightarrow> ord_equiv E D"
apply (simp add:ord_equiv_def)
apply (erule exE)
apply (frule_tac f = f in ord_isom_sym[of E], assumption+)
apply blast
done
lemma (in Order) ord_equiv_box:"\<lbrakk>Order E; Order F; ord_equiv D E;
ord_equiv D F\<rbrakk> \<Longrightarrow> ord_equiv E F"
apply (rule Order.ord_equiv_trans[of E D F])
apply assumption
apply (rule Order_axioms)
apply assumption
apply (rule ord_equiv_sym) apply assumption+
done
lemma SIod_isom_Iod:"\<lbrakk>Order D; T \<subseteq> carrier D \<rbrakk> \<Longrightarrow>
ord_isom (SIod D T) (Iod D T) (\<lambda>x\<in>T. x)"
apply (simp add:ord_isom_def ord_inj_def)
apply (simp add:SIod_carrier Order.Iod_carrier)
apply (rule conjI)
apply (fold idmap_def[of T])
apply (simp add:SIod_less Order.Iod_less)
apply (cut_tac A = T in idmap_bij,
simp add:bij_to_def)
done
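(** minimum_elem D X a states that a lies in X and is a lower bound of X
    with respect to the order of D. **)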
definition
minimum_elem :: "[_ , 'a set, 'a] \<Rightarrow> bool" where
"minimum_elem = (\<lambda>D X a. a \<in> X \<and> (\<forall>x\<in>X. a \<preceq>\<^bsub>D\<^esub> x))"
locale Worder = Torder +
assumes ex_minimum: "\<forall>X. X \<subseteq> (carrier D) \<and> X \<noteq> {} \<longrightarrow>
(\<exists>x. minimum_elem D X x)"
lemma (in Worder) Order:"Order D"
by (rule Order)
lemma (in Worder) Torder:"Torder D"
apply (rule Torder_axioms)
done
lemma (in Worder) Worder:"Worder D"
apply (rule Worder_axioms)
done
lemma (in Worder) equiv_isom:"\<lbrakk>Worder E; ord_equiv D E\<rbrakk> \<Longrightarrow>
\<exists>f. ord_isom D E f"
by (insert Order, frule Worder.Order[of "E"], simp add:ord_equiv_def)
lemma (in Order) minimum_elem_mem:"\<lbrakk>X \<subseteq> carrier D; minimum_elem D X a\<rbrakk>
\<Longrightarrow> a \<in> X"
by (simp add:minimum_elem_def)
lemma (in Order) minimum_elem_unique:"\<lbrakk>X \<subseteq> carrier D; minimum_elem D X a1;
minimum_elem D X a2\<rbrakk> \<Longrightarrow> a1 = a2"
apply (frule minimum_elem_mem[of "X" "a1"], assumption+,
frule minimum_elem_mem[of "X" "a2"], assumption+)
apply (simp add:minimum_elem_def)
apply (drule_tac x = a2 in bspec, assumption)
apply (drule_tac x = a1 in bspec, assumption)
apply (rule le_antisym[of a1 a2])
apply (simp add:subsetD)+
done
lemma (in Order) compare_minimum_elements:"\<lbrakk>S \<subseteq> carrier D; T \<subseteq> carrier D;
S \<subseteq> T; minimum_elem D S s; minimum_elem D T t \<rbrakk> \<Longrightarrow> t \<preceq> s"
apply (frule minimum_elem_mem[of "S" "s"], assumption+)
apply (frule subsetD[of "S" "T" "s"], assumption+)
apply (simp add:minimum_elem_def)
done
lemma (in Order) minimum_elem_sub:"\<lbrakk>T \<subseteq> carrier D; X \<subseteq> T\<rbrakk>
\<Longrightarrow> minimum_elem D X a = minimum_elem (Iod D T) X a"
apply (simp add:minimum_elem_def)
apply (simp add:subset_eq[of X T])
apply (rule iffI, erule conjE)
apply simp
apply (rule ballI)
apply (simp add:Iod_le)
apply simp
apply (rule ballI)
apply (erule conjE)
apply (simp add:Iod_le)
done
(* Statement reconstructed (assumed form): the SIod analogue of
   minimum_elem_sub above, matching the later uses of minimum_elem_Ssub. *)
lemma minimum_elem_Ssub:"\<lbrakk>Order D; T \<subseteq> carrier D; X \<subseteq> T\<rbrakk> \<Longrightarrow>
       minimum_elem D X a = minimum_elem (SIod D T) X a"
apply (simp add:minimum_elem_def)
apply (rule iffI)
apply simp
apply (rule ballI, erule conjE)
apply (drule_tac x = x in bspec, assumption)
apply (frule_tac c = x in subsetD[of "X" "T"], assumption+,
frule_tac c = a in subsetD[of "X" "T"], assumption+)
apply (simp add:SIod_le)
apply simp
apply (rule ballI, erule conjE)
apply (drule_tac x = x in bspec, assumption)
apply (frule_tac c = x in subsetD[of "X" "T"], assumption+,
frule_tac c = a in subsetD[of "X" "T"], assumption+)
apply (simp add:SIod_le)
done
lemma (in Order) augmented_set_minimum:"\<lbrakk>X \<subseteq> carrier D; a \<in> carrier D;
      Y - {a} \<subseteq> X; Y - {a} \<noteq> {}; minimum_elem (Iod D X) (Y - {a}) x;
\<forall>x\<in>X. x \<preceq> a\<rbrakk> \<Longrightarrow> minimum_elem (Iod D (insert a X)) Y x"
apply (frule insert_mono[of "Y - {a}" "X" "a"])
apply simp
apply (frule insert_sub[of X "carrier D" a], assumption+)
apply (simp add:minimum_elem_sub[THEN sym, of "insert a X" Y],
simp add:minimum_elem_sub[THEN sym, of X "Y - {a}"])
apply (simp add:subset_eq[of "Y - {a}" X])
apply (simp add:minimum_elem_def, (erule conjE)+)
apply (rule ballI)
apply blast
done
lemma augmented_Sset_minimum:"\<lbrakk>Order D; X \<subseteq> carrier D; a \<in> carrier D;
      Y - {a} \<subseteq> X; Y - {a} \<noteq> {}; minimum_elem (SIod D X) (Y - {a}) x;
\<forall>x\<in>X. x \<preceq>\<^bsub>D\<^esub> a\<rbrakk> \<Longrightarrow> minimum_elem (SIod D (insert a X)) Y x"
apply (frule insert_mono[of "Y - {a}" "X" "a"])
apply simp
apply (frule insert_sub[of X "carrier D" a], assumption+)
apply (simp add:minimum_elem_Ssub[THEN sym, of D "insert a X" Y],
simp add:minimum_elem_Ssub[THEN sym, of D X "Y - {a}"])
apply (simp add:subset_eq[of "Y - {a}" X])
apply (simp add:minimum_elem_def, (erule conjE)+)
apply (rule ballI)
apply blast
done
lemma (in Order) ord_isom_minimum:"\<lbrakk>Order E; ord_isom D E f;
S \<subseteq> carrier D; a \<in> carrier D; minimum_elem D S a\<rbrakk> \<Longrightarrow>
minimum_elem E (f`S) (f a)"
apply (subst minimum_elem_def,
frule minimum_elem_mem[of "S" "a"], assumption+)
apply (simp add:ord_isom_mem)
apply (rule ballI)
apply (simp add:minimum_elem_def)
apply (frule_tac x = x in bspec, assumption,
thin_tac "Ball S ((\<preceq>) a)")
apply (frule_tac b = x in ord_isom_le[of E f a], assumption+)
apply (simp add:subsetD)
apply simp
done
lemma (in Worder) pre_minimum:"\<lbrakk>T \<subseteq> carrier D; minimum_elem D T t;
s \<in> carrier D; s \<prec>\<^bsub>D\<^esub> t \<rbrakk> \<Longrightarrow> \<not> s \<in> T"
apply (rule contrapos_pp, simp+)
apply (simp add:minimum_elem_def, (erule conjE)+)
apply (frule_tac x = s in bspec, assumption+,
thin_tac "\<forall>x\<in>T. t \<preceq>\<^bsub>D\<^esub> x")
apply (simp add:oless_def, erule conjE)
apply (frule le_antisym[of s t])
apply (simp add:subsetD[of "T" "carrier D"], assumption+)
apply simp
done
lemma bex_nonempty_subset:"\<exists>a. a \<in> A \<and> P a \<Longrightarrow>
{x. x \<in> A \<and> P x} \<subseteq> A \<and> {x. x \<in> A \<and> P x} \<noteq> {}"
apply (erule exE, rule conjI)
apply (rule subsetI, simp)
apply (rule_tac A = "{x \<in> A. P x}" in nonempty, simp)
done
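(** No order isomorphism from a well-ordered set onto (the induced order of)
    a subset of itself moves an element downwards: a \<preceq> f a for every a.
    The proof applies ex_minimum to the set {a. a \<in> carrier D \<and> \<not> a \<preceq> f a}
    of would-be counterexamples. **)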
lemma (in Worder) to_subset:"\<lbrakk>T \<subseteq> carrier D; ord_isom D (Iod D T) f\<rbrakk> \<Longrightarrow>
\<forall>a. a \<in> carrier D \<longrightarrow> a \<preceq> (f a)"
apply (rule contrapos_pp, simp+)
apply (cut_tac ex_minimum)
apply (drule_tac a = "{a. a \<in> carrier D \<and> \<not> a \<preceq> f a}" in forall_spec) (*
thin_tac "\<forall>X. X \<subseteq> carrier D \<and> X \<noteq> {} \<longrightarrow> (\<exists>x. minimum_elem D X x)") *)
apply (rule conjI)
apply (rule subsetI, simp)
apply (rule ex_nonempty, simp)
apply ((erule exE)+, simp add:minimum_elem_def, (erule conjE)+)
apply (frule Iod_Order[of "T"],
frule_tac a = x in ord_isom_mem[of "Iod D T" "f"], assumption+)
apply (frule_tac a = x and b = "f x" in ord_isom_le[of "Iod D T" "f"],
assumption+)
apply (simp add:Iod_carrier subsetD)
apply (frule Iod_carrier[of "T"],
frule_tac a = "f x" in eq_set_inc[of _ "carrier (Iod D T)" "T"],
assumption+)
apply (frule_tac c = "f x" in subsetD[of "T" "carrier D"], assumption+)
apply (frule_tac a = "f x" in ord_isom_mem[of "Iod D T" "f"], assumption+)
apply (frule_tac a = "f (f x)" in eq_set_inc[of _ "carrier (Iod D T)" "T"],
assumption+)
apply (drule_tac a = "f x" in forall_spec)
apply (simp add:subsetD Iod_le)
apply simp
done
lemma to_subsetS:"\<lbrakk>Worder D; T \<subseteq> carrier D; ord_isom D (SIod D T) f\<rbrakk> \<Longrightarrow>
\<forall>a. a \<in> carrier D \<longrightarrow> a \<preceq>\<^bsub>D\<^esub> (f a)"
apply (frule Worder.Order[of "D"],
frule SIod_isom_Iod[of "D" "T"], assumption+,
frule Order.ord_isom_trans[of "D" "SIod D T" "Iod D T" f "\<lambda>x\<in>T. x"])
apply (simp add:SIod_Order, simp add:Order.Iod_Order, assumption+)
apply (frule_tac D = D and T = T and f = "compose (carrier D) (\<lambda>x\<in>T. x) f"
in Worder.to_subset, assumption+)
apply (rule allI, rule impI)
apply (drule_tac a = a in forall_spec, simp)
apply (frule_tac a = a in Order.ord_isom_mem[of "D" "SIod D T" "f"])
apply (simp add:SIod_Order, assumption+)
apply (simp add:SIod_carrier)
apply (simp add:compose_def)
done
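(** The isomorphic image of a well-ordered set is well ordered: a nonempty
    X \<subseteq> carrier T is pulled back along the inverse isomorphism, its minimum
    is taken in D, and ord_isom_minimum pushes that minimum forward to X. **)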
lemma (in Worder) isom_Worder:"\<lbrakk>Order T; ord_isom D T f\<rbrakk> \<Longrightarrow> Worder T"
apply (rule Worder.intro)
apply (rule Torder.intro)
apply assumption
apply (rule Torder_axioms.intro)
apply (frule_tac b = a in ord_isom_surj[of T f], assumption+,
frule_tac b = b in ord_isom_surj[of T f], assumption+,
(erule bexE)+)
apply (cut_tac Torder_axioms, simp add:Torder_axioms_def)
apply (meson le_cases ord_isom_le)
apply (rule Worder_axioms.intro)
apply (rule allI, rule impI, erule conjE)
apply (frule ord_isom_func[of "T" "f"], assumption+)
apply (frule ord_isom_bij_to[of "T" "f"], assumption+)
apply (frule ord_isom_sym[of "T" "f"], assumption+,
frule Order.ord_isom_func[of "T" "D"
"invfun (carrier D) (carrier T) f"])
apply (rule Order, assumption)
apply (frule_tac ?A1.0 = X in image_sub[of
"invfun (carrier D) (carrier T) f" "carrier T" "carrier D"],
assumption+,
frule_tac ?A1.0 = X in image_nonempty[of "invfun (carrier D)
(carrier T) f" "carrier T" "carrier D"], assumption+)
apply (cut_tac ex_minimum) (** Because D is well ordered **)
apply (drule_tac a = "invfun (carrier D) (carrier T) f ` X" in forall_spec,
(* thin_tac "\<forall>X. X \<subseteq> carrier D \<and> X \<noteq> {} \<longrightarrow> (\<exists>x. minimum_elem D X x)", *)
simp) apply (
(* thin_tac "\<forall>X. X \<subseteq> carrier D \<and> X \<noteq> {} \<longrightarrow> (\<exists>x. minimum_elem D X x)", *)
erule exE)
apply (frule_tac S = "invfun (carrier D) (carrier T) f ` X" and a = x in
ord_isom_minimum[of "T" "f"], assumption+)
apply (frule_tac X = "invfun (carrier D) (carrier T) f ` X" and a = x in
minimum_elem_mem, assumption+)
apply (simp add:subsetD) apply assumption
apply (simp add:invfun_set, blast)
done
lemma (in Worder) equiv_Worder:"\<lbrakk>Order T; ord_equiv D T\<rbrakk> \<Longrightarrow> Worder T"
by (simp add:ord_equiv_def,
erule exE, simp add:isom_Worder)
lemma (in Worder) equiv_Worder1:"\<lbrakk>Order T; ord_equiv T D\<rbrakk> \<Longrightarrow> Worder T"
apply (cut_tac Worder,
frule Worder.Order[of D],
frule Order.ord_equiv_sym[of T D], assumption+)
apply (rule equiv_Worder, assumption+)
done
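(** A well-ordered set admits no order automorphism other than the identity:
    to_subset applied to f and to its inverse yields x \<preceq> f x and
    x \<preceq> f\<inverse> x, from which f x = x follows by antisymmetry. **)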
lemma (in Worder) ord_isom_self_id:"ord_isom D D f \<Longrightarrow> f = idmap (carrier D)"
apply (cut_tac Order,
frule ord_isom_sym [of "D" "f"], assumption+,
frule ord_isom_func[of "D" "f"], assumption+)
apply (rule funcset_eq[of "f" "carrier D" "idmap (carrier D)"])
apply (simp add:ord_isom_def ord_inj_def, simp add:idmap_def)
apply (rule ballI)
apply (simp add:idmap_def)
apply (cut_tac subset_self[of "carrier D"],
frule to_subset [of "carrier D" "f"],
simp add:Iod_self[THEN sym])
apply (drule_tac a = x in forall_spec, assumption)
apply (frule to_subset [of "carrier D" "invfun (carrier D) (carrier D) f"])
apply (simp add:Iod_self[THEN sym])
apply (drule_tac a = x in forall_spec, assumption)
apply (frule_tac x = x in funcset_mem [of "f" "carrier D" "carrier D"],
assumption+)
apply (frule_tac a = x in ord_isom_mem[of "D"
"invfun (carrier D) (carrier D) f"], assumption+)
apply (frule_tac a = x and b = "invfun (carrier D) (carrier D) f x" in
ord_isom_le[of "D" "f"], assumption+)
apply simp
apply (frule ord_isom_bij_to[of "D" "f"], assumption+,
simp add:bij_to_def, erule conjE)
apply (simp add:invfun_r[of "f" "carrier D" "carrier D"])
apply (rule_tac a = "f x" and b = x in le_antisym,
assumption+)
done
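(** Hence an order isomorphism between well-ordered sets is unique: if f
    and g both witness the isomorphism, then g\<inverse> \<circ> f is an order
    automorphism of D, hence the identity by ord_isom_self_id, so f = g. **)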
lemma (in Worder) isom_unique:"\<lbrakk>Worder E; ord_isom D E f; ord_isom D E g\<rbrakk>
\<Longrightarrow> f = g"
apply (frule Worder.Order[of "E"])
apply (insert Order,
frule ord_isom_sym[of "E" "g"], assumption+,
frule ord_isom_trans [of "E" "D" "f"
"invfun (carrier D) (carrier E) g"], assumption+,
frule ord_isom_func[of "D"
"compose (carrier D) (invfun (carrier D) (carrier E) g) f"], assumption+)
apply (frule ord_isom_self_id [of
"compose (carrier D) (invfun (carrier D) (carrier E) g) f"])
apply (thin_tac "ord_isom E D (invfun (carrier D) (carrier E) g)")
apply (cut_tac id_ord_isom, insert Order,
frule ord_isom_func[of "D" "idmap (carrier D)"], assumption+)
apply (rule funcset_eq[of "f" "carrier D" "g"])
apply (simp add:ord_isom_def ord_inj_def)
apply (simp add:ord_isom_def ord_inj_def)
apply (rule ballI)
apply (frule_tac x = x in eq_funcs[of
"compose (carrier D) (invfun (carrier D) (carrier E) g) f"
"carrier D" "carrier D" "idmap (carrier D)"], assumption+)
apply (frule_tac a = x in ord_isom_mem [of "E" "f"], assumption+,
thin_tac " compose (carrier D) (invfun (carrier D) (carrier E) g) f =
idmap (carrier D)",
simp add:idmap_def compose_def)
apply (simp add:ord_isom_def[of _ "E" "g"] ord_inj_def, (erule conjE)+)
apply (frule_tac b = "f x" in invfun_r[of "g" "carrier D" "carrier E"],
assumption+)
apply simp
done
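(** Initial segments.  segment D a is the set of elements of D strictly
    below a; by convention it is the whole carrier when a \<notin> carrier D.
    Ssegment is the same construction for the concrete type 'a Order,
    mirroring the Iod/SIod pairing used above. **)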
definition
segment :: "_ \<Rightarrow> 'a \<Rightarrow> 'a set" where
"segment D a = (if a \<notin> carrier D then carrier D else
{x. x \<prec>\<^bsub>D\<^esub> a \<and> x \<in> carrier D})"
definition
Ssegment :: "'a Order \<Rightarrow> 'a \<Rightarrow> 'a set" where
"Ssegment D a = (if a \<notin> carrier D then carrier D else
{x. x \<prec>\<^bsub>D\<^esub> a \<and> x \<in> carrier D})"
lemma (in Order) segment_sub:"segment D a \<subseteq> carrier D"
apply (rule subsetI, simp add:segment_def)
apply (case_tac "a \<notin> carrier D", simp)
apply (simp add:segment_def)
done
lemma Ssegment_sub:"Ssegment D a \<subseteq> carrier D"
by (rule subsetI, simp add:Ssegment_def,
case_tac "a \<notin> carrier D", simp, simp add:Ssegment_def)
lemma (in Order) segment_free:"a \<notin> carrier D \<Longrightarrow>
segment D a = carrier D"
by (simp add:segment_def)
lemma Ssegment_free:"a \<notin> carrier D \<Longrightarrow>
Ssegment D a = carrier D"
by (simp add:Ssegment_def)
lemma (in Order) segment_sub_sub:"\<lbrakk>S \<subseteq> carrier D; d \<in> S\<rbrakk> \<Longrightarrow>
segment (Iod D S) d \<subseteq> segment D d"
apply (rule subsetI)
apply (frule_tac c = d in subsetD[of "S" "carrier D"], assumption+)
apply (simp add:segment_def)
apply (simp add:Iod_carrier)
apply (erule conjE, simp add:Iod_less[of "S"])
apply (simp add:subsetD)
done
lemma Ssegment_sub_sub:"\<lbrakk>Order D; S \<subseteq> carrier D; d \<in> S\<rbrakk> \<Longrightarrow>
Ssegment (SIod D S) d \<subseteq> Ssegment D d"
apply (rule subsetI)
apply (frule_tac c = d in subsetD[of "S" "carrier D"], assumption+)
apply (simp add:Ssegment_def)
apply (simp add:SIod_carrier, erule conjE, simp add:SIod_less[of "S"])
apply (simp add:subsetD)
done
lemma (in Order) a_notin_segment:"a \<notin> segment D a"
by (simp add:segment_def oless_def)
lemma a_notin_Ssegment:"a \<notin> Ssegment D a"
by (simp add:Ssegment_def oless_def)
lemma (in Order) Iod_carr_segment:
"carrier (Iod D (segment D a)) = segment D a"
by (cut_tac segment_sub[of "a"], simp add:Iod_carrier)
lemma SIod_carr_Ssegment:"Order D \<Longrightarrow>
carrier (SIod D (Ssegment D a)) = Ssegment D a"
apply (cut_tac Ssegment_sub[of "D" "a"])
apply (simp add:SIod_carrier)
done
lemma (in Order) segment_inc:"\<lbrakk>a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow>
(a \<prec> b) = (a \<in> segment D b)"
by (simp add:segment_def)
lemma Ssegment_inc:"\<lbrakk>Order D; a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow>
(a \<prec>\<^bsub>D\<^esub> b) = (a \<in> Ssegment D b)"
by (simp add:Ssegment_def)
lemma (in Order) segment_inc1:"b \<in> carrier D \<Longrightarrow>
(a \<prec> b \<and> a \<in> carrier D) = (a \<in> segment D b)"
by (simp add:segment_def)
lemma Ssegment_inc1:"\<lbrakk>Order D; b \<in> carrier D\<rbrakk> \<Longrightarrow>
(a \<prec>\<^bsub>D\<^esub> b \<and> a \<in> carrier D) = (a \<in> Ssegment D b)"
by (simp add:Ssegment_def)
lemma Ssegment_inc_if:"\<lbrakk>Order D; b \<in> carrier D; a \<in> Ssegment D b\<rbrakk> \<Longrightarrow>
a \<prec>\<^bsub>D\<^esub> b"
by (simp add:Ssegment_def)
lemma (in Order) segment_inc_less:"\<lbrakk>W \<subseteq> carrier D; a \<in> carrier D;
y \<in> W; x \<in> segment (Iod D W) a; y \<prec> x\<rbrakk> \<Longrightarrow> y \<in> segment (Iod D W) a"
apply (frule Iod_Order[of "W"],
frule Order.segment_sub[of "Iod D W" "a"],
frule subsetD[of "segment (Iod D W) a" "carrier (Iod D W)" x],
assumption+, simp add:Iod_carrier)
apply (case_tac "a \<in> carrier (Iod D W)")
apply (subst Order.segment_inc[THEN sym, of "Iod D W" "y" "a"], assumption,
simp add:Iod_carrier, simp add:Iod_carrier)
apply (simp add:Iod_carrier, simp add:Iod_less)
apply (rule less_trans[of y x a], (simp add:subsetD)+)
apply (frule Order.segment_inc[THEN sym, of "Iod D W" "x" "a"],
(simp add:Iod_carrier)+,
frule_tac Order.segment_sub[of "Iod D W" x],
frule subsetD[of "segment (Iod D W) a" "W" "x"], assumption+,
simp add:Iod_carrier,
frule_tac subsetD[of "segment (Iod D W) a" W x], assumption+,
simp add:Iod_less)
apply (simp add:Order.segment_free[of "Iod D W" a], simp add:Iod_carrier)
done
lemma (in Order) segment_order_less:"\<forall>b\<in>carrier D. \<forall>x\<in> segment D b.
      \<forall>y\<in> segment D b. (x \<prec> y) = (x \<prec>\<^bsub>(Iod D (segment D b))\<^esub> y)"
by ((rule ballI)+,
cut_tac a = b in segment_sub, simp add:Iod_less)
lemma Ssegment_order_less:"Order D \<Longrightarrow>
\<forall>b\<in>carrier D. \<forall>x\<in> Ssegment D b. \<forall>y\<in> Ssegment D b.
(x \<prec>\<^bsub>D\<^esub> y) = (x \<prec>\<^bsub>(SIod D (Ssegment D b))\<^esub> y)"
by ((rule ballI)+,
cut_tac a = b in Ssegment_sub[of "D"], simp add:SIod_less)
lemma (in Order) segment_order_le:"\<forall>b\<in>carrier D. \<forall>x\<in> segment D b.
\<forall>y\<in> segment D b. (x \<preceq> y) = (x \<preceq>\<^bsub>(Iod D (segment D b))\<^esub> y)"
by ((rule ballI)+,
cut_tac a = b in segment_sub, simp add:Iod_le)
lemma Ssegment_order_le:"\<forall>b\<in>carrier D. \<forall>x\<in> Ssegment D b.
\<forall>y\<in> Ssegment D b. (x \<preceq>\<^bsub>D\<^esub> y) = (x \<preceq>\<^bsub>(SIod D (Ssegment D b))\<^esub> y)"
by ((rule ballI)+,
cut_tac a = b in Ssegment_sub[of "D"], simp add:SIod_le)
lemma (in Torder) Iod_Torder:"X \<subseteq> carrier D \<Longrightarrow> Torder (Iod D X)"
apply (rule Torder.intro)
apply (simp add:Iod_Order)
apply (rule Torder_axioms.intro)
apply (simp add:Iod_carrier Iod_le)
apply (meson contra_subsetD le_cases)
done
lemma SIod_Torder:"\<lbrakk>Torder D; X \<subseteq> carrier D\<rbrakk> \<Longrightarrow> Torder (SIod D X)"
apply (simp add:Torder_def, simp add:SIod_Order, simp add:Torder_axioms_def)
apply ((rule allI, rule impI)+,
simp add:SIod_carrier SIod_le) apply (erule conjE)
apply (frule_tac c = a in subsetD[of "X" "carrier D"], assumption+,
frule_tac c = b in subsetD[of "X" "carrier D"], assumption+)
apply blast
done
lemma (in Order) segment_not_inc:"\<lbrakk>a \<in> carrier D; b \<in> carrier D;
a \<prec> b\<rbrakk> \<Longrightarrow> b \<notin> segment D a"
apply (rule contrapos_pp, simp+, simp add:segment_def)
apply (simp add:oless_def, (erule conjE)+)
apply (frule le_antisym[of "a" "b"], assumption+, simp)
done
lemma Ssegment_not_inc:"\<lbrakk>Order D; a \<in> carrier D; b \<in> carrier D; a \<prec>\<^bsub>D\<^esub> b\<rbrakk> \<Longrightarrow>
b \<notin> Ssegment D a"
apply (rule contrapos_pp, simp+, simp add:Ssegment_def)
apply (simp add:oless_def, (erule conjE)+)
apply (frule Order.le_antisym[of "D" "a" "b"], assumption+, simp)
done
lemma (in Torder) segment_not_inc_iff:"\<lbrakk>a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow>
(a \<preceq> b) = (b \<notin> segment D a)"
apply (rule iffI)
apply (simp add:le_imp_less_or_eq,
erule disjE, simp add:segment_not_inc, simp add:a_notin_segment)
apply (simp add:segment_def, simp add:not_less_le[THEN sym, of "b" "a"])
done
lemma Ssegment_not_inc_iff:"\<lbrakk>Torder D; a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow>
(a \<preceq>\<^bsub>D\<^esub> b) = (b \<notin> Ssegment D a)"
apply (rule iffI)
apply (frule Torder.Order[of "D"])
apply (simp add:Order.le_imp_less_or_eq,
erule disjE, rule Ssegment_not_inc, assumption+)
apply (simp add: a_notin_Ssegment)
apply (simp add:Ssegment_def)
apply (simp add:Torder.not_less_le[THEN sym, of "D" "b" "a"])
done
lemma (in Torder) minimum_segment_of_sub:"\<lbrakk>X \<subseteq> carrier D;
minimum_elem D (segment (Iod D X) d) m \<rbrakk> \<Longrightarrow> minimum_elem D X m"
apply (case_tac "d \<notin> carrier (Iod D X)")
apply (simp add:segment_def)
apply (simp add:Iod_carrier)
apply (simp add:Iod_carrier)
apply (subst minimum_elem_def)
apply (frule Iod_Order[of "X"],
frule Order.segment_sub[of "Iod D X" "d"],
simp add:Iod_carrier,
frule subset_trans[of "segment (Iod D X) d" "X" "carrier D"],
assumption+,
frule minimum_elem_mem[of "segment (Iod D X) d" m], assumption)
apply (simp add:subsetD[of "segment (Iod D X) d" "X" m])
apply (rule ballI)
apply (simp add:minimum_elem_def)
apply (case_tac "x \<in> segment (Iod D X) d")
apply (frule_tac a1 = x in Order.segment_inc[THEN sym, of "Iod D X" _ d])
apply (simp add:Iod_carrier subsetD)
apply (simp add:Iod_carrier)
apply (simp add:Iod_less)
apply (frule Iod_Torder[of "X"])
apply (frule_tac b1 = x in Torder.segment_not_inc_iff[THEN sym,
of "Iod D X" d])
apply (simp add:Iod_carrier)
apply (simp add:Iod_carrier)
apply simp
apply (frule Order.segment_inc[THEN sym, of "Iod D X" m d],
thin_tac "x \<notin> segment (Iod D X) d",
frule Order.segment_sub[of "Iod D X" "d"])
apply (simp add:Iod_carrier subsetD)
apply (simp add:Iod_carrier)
apply simp
apply (frule subsetD[of "segment (Iod D X) d" "X" m], assumption)
apply (simp add:Iod_le Iod_less)
apply (frule subsetD[of X "carrier D" m], assumption+,
frule subsetD[of X "carrier D" d], assumption+,
frule_tac c = x in subsetD[of X "carrier D"], assumption+)
apply (frule_tac c = x in less_le_trans[of m d], assumption+)
apply (simp add:less_imp_le)
done
lemma (in Torder) segment_out:"\<lbrakk>a \<in> carrier D; b \<in> carrier D;
a \<prec> b\<rbrakk> \<Longrightarrow> segment (Iod D (segment D a)) b = segment D a"
apply (subst segment_def[of "Iod D (segment D a)"])
apply (frule segment_not_inc[of "a" "b"], assumption+)
apply (cut_tac segment_sub[of "a"])
apply (simp add:Iod_carrier)
done
lemma (in Torder) segment_minimum_minimum:"\<lbrakk>X \<subseteq> carrier D; d \<in> X;
minimum_elem (Iod D (segment D d)) (X \<inter> (segment D d)) m\<rbrakk> \<Longrightarrow>
minimum_elem D X m"
apply (cut_tac segment_sub[of d])
apply (subst minimum_elem_def)
apply (cut_tac Order.minimum_elem_mem[of "Iod D (segment D d)"
"X \<inter> (segment D d)" m])
apply (cut_tac Int_lower1[of X "segment D d"],
frule_tac subsetD[of "X \<inter> segment D d" X m], assumption+, simp)
apply (rule ballI)
apply (case_tac "x \<in> segment D d")
apply (simp add:minimum_elem_def)
apply (drule_tac x = x in bspec, simp)
apply (simp add:Iod_le)
apply (frule subsetD[of X "carrier D" d], assumption+,
frule subsetD[of X "carrier D" m], assumption+,
frule_tac c = x in subsetD[of X "carrier D"], assumption+)
apply (simp add:segment_inc[THEN sym, of _ d],
simp add:not_less_le)
apply (frule_tac c = x in less_le_trans[of m d], assumption+)
apply (simp add:less_imp_le)
apply (simp add:Iod_Order)
apply (simp add:Iod_carrier)
apply (simp add:Int_lower2)
done
lemma (in Torder) segment_mono:"\<lbrakk>a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow>
(a \<prec> b) = (segment D a \<subset> segment D b)"
apply (rule iffI)
apply (rule psubsetI, rule subsetI)
apply (simp add:segment_def, erule conjE)
apply (rule_tac a = x and b = a and c = b in less_trans,
assumption+)
apply (cut_tac a_notin_segment[of "a"],
simp add:segment_inc[of "a" "b"], blast)
apply (simp add:psubset_eq, erule conjE,
frule not_sym[of "segment D a" "segment D b"],
thin_tac "segment D a \<noteq> segment D b",
frule sets_not_eq[of "segment D b" "segment D a"], assumption+)
apply (erule bexE)
apply (thin_tac "segment D a \<subseteq> segment D b",
thin_tac "segment D b \<noteq> segment D a")
apply (simp add:segment_def, (erule conjE)+)
apply (frule_tac a = aa and b = a in not_less_le, assumption+,
simp, simp add:oless_def, (erule conjE)+)
apply (frule_tac a = a and b = aa and c = b in le_trans,
assumption+, simp)
apply (rule contrapos_pp, simp+)
done
lemma Ssegment_mono:"\<lbrakk>Torder D; a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow>
(a \<prec>\<^bsub>D\<^esub> b) = (Ssegment D a \<subset> Ssegment D b)"
apply (frule Torder.Order)
apply (rule iffI)
apply (rule psubsetI, rule subsetI)
apply (simp add:Ssegment_def, erule conjE)
apply (rule_tac a = x and b = a and c = b in Order.less_trans,
assumption+)
apply (cut_tac a_notin_Ssegment[of "a"],
simp add:Ssegment_inc[of "D" "a" "b"], blast)
apply (simp add:psubset_eq, erule conjE,
frule not_sym[of "Ssegment D a" "Ssegment D b"],
thin_tac "Ssegment D a \<noteq> Ssegment D b",
frule sets_not_eq[of "Ssegment D b" "Ssegment D a"], assumption+)
apply (erule bexE)
apply (thin_tac "Ssegment D a \<subseteq> Ssegment D b",
thin_tac "Ssegment D b \<noteq> Ssegment D a")
apply (simp add:Ssegment_def, (erule conjE)+)
apply (frule_tac a = aa and b = a in Torder.not_less_le, assumption+,
simp, simp add:oless_def, (erule conjE)+)
apply (frule_tac a = a and b = aa and c = b in Order.le_trans,
assumption+, simp)
apply (rule contrapos_pp, simp+)
done
lemma (in Torder) segment_le_mono:"\<lbrakk>a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow>
(a \<preceq> b) = (segment D a \<subseteq> segment D b)"
apply (simp add:le_imp_less_or_eq[of "a" "b"])
apply (rule iffI)
apply (erule disjE)
apply (simp add:segment_mono[of "a" "b"], simp)
apply (frule segment_mono[THEN sym, of "a" "b"], assumption+)
apply (simp add:psubset_eq)
apply (case_tac "segment D a \<noteq> segment D b", simp)
apply simp
apply (rule contrapos_pp, simp+,
frule less_linear[of "a" "b"], assumption+, simp,
simp add:segment_mono[of "b" "a"])
done
lemma Ssegment_le_mono:"\<lbrakk>Torder D; a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow>
(a \<preceq>\<^bsub>D\<^esub> b) = (Ssegment D a \<subseteq> Ssegment D b)"
apply (cut_tac Torder.Order[of "D"])
apply (simp add:Order.le_imp_less_or_eq[of "D" "a" "b"])
apply (rule iffI)
apply (erule disjE)
apply (simp add: Ssegment_mono[of "D" "a" "b"])
apply (frule Ssegment_mono[THEN sym, of "D" "a" "b"], assumption+)
apply (simp add:psubset_eq)
apply (case_tac "Ssegment D a \<noteq> Ssegment D b")
apply (cut_tac Ssegment_mono[THEN sym, of "D" "a" "b"])
apply (simp add:psubset_eq, assumption+)
apply simp
apply (cut_tac a_notin_Ssegment[of "a" "D"], simp)
apply (simp add:Ssegment_not_inc_iff[THEN sym, of "D" "b" "a"])
apply (frule sym, thin_tac "Ssegment D a = Ssegment D b")
apply (cut_tac a_notin_Ssegment[of "b" "D"], simp)
apply (simp add:Ssegment_not_inc_iff[THEN sym, of "D" "a" "b"])
apply (frule Order.le_antisym[of "D" "a" "b"], assumption+, simp+)
done
lemma (in Torder) segment_inj:"\<lbrakk>a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow>
(a = b) = (segment D a = segment D b)"
apply (rule iffI)
apply simp
apply (rule equalityE[of "segment D a" "segment D b"], assumption)
apply (thin_tac "segment D a = segment D b")
apply (simp add:segment_le_mono[THEN sym, of "a" "b"])
apply (simp add:segment_le_mono[THEN sym, of "b" "a"])
apply (simp add:le_antisym)
done
lemma Ssegment_inj:"\<lbrakk>Torder D; a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow>
(a = b) = (Ssegment D a = Ssegment D b)"
apply (rule iffI)
apply simp
apply (rule equalityE[of "Ssegment D a" "Ssegment D b"], assumption)
apply (thin_tac "Ssegment D a = Ssegment D b")
apply (simp add:Ssegment_le_mono[THEN sym, of "D" "a" "b"])
apply (simp add:Ssegment_le_mono[THEN sym, of "D" "b" "a"])
apply (cut_tac Torder.Order[of "D"])
apply (simp add:Order.le_antisym, assumption)
done
lemma (in Torder) segment_inj_neq:"\<lbrakk>a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow>
(a \<noteq> b) = (segment D a \<noteq> segment D b)"
by (simp add:segment_inj)
lemma Ssegment_inj_neq:"\<lbrakk>Torder D; a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow>
(a \<noteq> b) = (Ssegment D a \<noteq> Ssegment D b)"
by (simp add:Ssegment_inj)
lemma (in Order) segment_inc_psub:"\<lbrakk>x \<in> segment D a\<rbrakk> \<Longrightarrow>
segment D x \<subset> segment D a"
apply (simp add:psubset_eq)
apply (rule conjI, rule subsetI)
apply (simp add:segment_def)
apply (case_tac "a \<notin> carrier D", simp)
apply (simp, (erule conjE)+)
apply (rule_tac a = xa and b = x and c = a in less_trans, assumption+)
apply (cut_tac a_notin_segment[of "x"]) apply blast
done
lemma Ssegment_inc_psub:"\<lbrakk>Order D; x \<in> Ssegment D a\<rbrakk> \<Longrightarrow>
Ssegment D x \<subset> Ssegment D a"
apply (simp add:psubset_eq)
apply (rule conjI, rule subsetI)
apply (simp add:Ssegment_def)
apply (case_tac "a \<notin> carrier D", simp)
apply (simp, (erule conjE)+)
apply (rule_tac a = xa and b = x and c = a in Order.less_trans[of "D"],
assumption+)
apply (cut_tac a_notin_Ssegment[of "x"]) apply blast
done
lemma (in Order) segment_segment:"\<lbrakk>b \<in> carrier D; a \<in> segment D b\<rbrakk> \<Longrightarrow>
segment (Iod D (segment D b)) a = segment D a"
apply (rule equalityI)
apply (rule subsetI)
apply (simp add:segment_def[of "Iod D (segment D b)" "a"])
apply (cut_tac segment_sub[of "b"], simp add:Iod_carrier)
apply (erule conjE)
apply (simp add:Iod_less)
apply (frule_tac c = x in subsetD[of "segment D b" "carrier D"], assumption+,
frule_tac c = a in subsetD[of "segment D b" "carrier D"], assumption+)
apply (simp add:segment_inc[of _ "a"])
apply (rule subsetI)
apply (simp add:segment_def[of "Iod D (segment D b)" "a"])
apply (cut_tac segment_sub[of "b"], simp add:Iod_carrier)
apply (frule segment_inc_psub[of "a" "b"],
frule psubset_imp_subset[of "segment D a" "segment D b"],
thin_tac "segment D a \<subset> segment D b",
frule_tac c = x in subsetD[of "segment D a" "segment D b"],
assumption+)
apply (simp add:Iod_less) apply (simp add:segment_def)
done
lemma Ssegment_Ssegment:"\<lbrakk>Order D; b \<in> carrier D; a \<in> Ssegment D b\<rbrakk> \<Longrightarrow>
Ssegment (SIod D (Ssegment D b)) a = Ssegment D a"
apply (rule equalityI)
apply (rule subsetI)
apply (simp add:Ssegment_def[of "SIod D (Ssegment D b)" "a"])
apply (cut_tac Ssegment_sub[of "D" "b"], simp add:SIod_carrier)
apply (erule conjE)
apply (simp add:SIod_less)
apply (frule_tac c = x in subsetD[of "Ssegment D b" "carrier D"], assumption+,
frule_tac c = a in subsetD[of "Ssegment D b" "carrier D"], assumption+)
apply (simp add:Ssegment_inc[of "D"_ "a"])
apply (rule subsetI)
apply (simp add:Ssegment_def[of "SIod D (Ssegment D b)" "a"])
apply (cut_tac Ssegment_sub[of "D" "b"], simp add:SIod_carrier)
apply (frule Ssegment_inc_psub[of "D" "a" "b"], assumption,
frule psubset_imp_subset[of "Ssegment D a" "Ssegment D b"],
thin_tac "Ssegment D a \<subset> Ssegment D b",
frule_tac c = x in subsetD[of "Ssegment D a" "Ssegment D b"],
assumption+)
apply (simp add:SIod_less) apply (simp add:Ssegment_def)
done
lemma (in Order) Iod_segment_segment:"a \<in> carrier (Iod D (segment D b)) \<Longrightarrow>
Iod (Iod D (segment D b)) (segment (Iod D (segment D b)) a) =
Iod D (segment D a)"
apply (case_tac "b \<in> carrier D")
apply (cut_tac segment_sub[of "b"])
apply (simp add:Iod_carrier)
apply (frule segment_inc_psub[of "a" "b"],
frule psubset_imp_subset[of "segment D a" "segment D b"],
thin_tac "segment D a \<subset> segment D b")
apply (simp add:segment_segment[of "b" "a"])
apply (simp add:Iod_sub_sub[of "segment D a" "segment D b"])
apply (simp add:segment_def[of D b])
apply (simp add:Iod_self[THEN sym])
done
lemma SIod_Ssegment_Ssegment:"\<lbrakk>Order D; a \<in> carrier (SIod D (Ssegment D b))\<rbrakk>
\<Longrightarrow>
SIod (SIod D (Ssegment D b)) (Ssegment (SIod D (Ssegment D b)) a) =
SIod D (Ssegment D a)"
apply (case_tac "b \<in> carrier D")
apply (cut_tac Ssegment_sub[of "D" "b"])
apply (simp add:SIod_carrier[of "D"])
apply (frule Ssegment_inc_psub[of "D" "a" "b"], simp add:subsetD)
apply (frule psubset_imp_subset[of "Ssegment D a" "Ssegment D b"],
       thin_tac "Ssegment D a \<subset> Ssegment D b")
apply (simp add:Ssegment_Ssegment[of "D" "b" "a"])
apply (simp add:SIod_sub_sub[of "Ssegment D a" "Ssegment D b"])
apply (simp add:Ssegment_def[of D b], simp add:SIod_self[THEN sym])
done
lemma (in Order) ord_isom_segment_mem:"\<lbrakk>Order E;
ord_isom D E f; a \<in> carrier D; x \<in> segment D a \<rbrakk> \<Longrightarrow>
(f x) \<in> segment E (f a)"
apply (frule segment_inc_if[of "a" "x"], assumption+)
apply (frule ord_isom_less[of "E" "f" "x" "a"], assumption+)
apply (simp add:segment_def, assumption, simp)
apply (frule ord_isom_mem[of "E" "f" "x"], assumption+,
simp add:segment_def,
frule ord_isom_mem[of "E" "f" "a"], assumption+)
apply (simp add:Order.segment_inc[of "E" "f x" "f a"])
done
lemma ord_isom_Ssegment_mem:"\<lbrakk>Order D; Order E;
ord_isom D E f; a \<in> carrier D; x \<in> Ssegment D a\<rbrakk> \<Longrightarrow>
(f x) \<in> Ssegment E (f a)"
apply (frule Ssegment_inc_if[of "D" "a" "x"], assumption+)
apply (frule Order.ord_isom_less[of "D" "E" "f" "x" "a"], assumption+)
apply (simp add:Ssegment_def, assumption, simp)
apply (frule Order.ord_isom_mem[of "D" "E" "f" "x"], assumption+,
simp add:Ssegment_def,
frule Order.ord_isom_mem[of "D" "E" "f" "a"], assumption+)
apply (simp add:Ssegment_def)
done
lemma (in Order) ord_isom_segment_segment:"\<lbrakk>Order E;
ord_isom D E f; a \<in> carrier D \<rbrakk> \<Longrightarrow>
ord_isom (Iod D (segment D a)) (Iod E (segment E (f a)))
(\<lambda>x\<in>carrier (Iod D (segment D a)). f x)"
apply (frule ord_isom_inj_on[of E f], assumption+)
apply (cut_tac segment_sub[of a])
apply (frule restrict_inj[of f "carrier D" "segment D a"], assumption)
apply (frule ord_isom_surj_to[of E f], assumption+)
apply (subst ord_isom_def, subst ord_inj_def)
apply (simp add:Iod_carr_segment Order.Iod_carr_segment)
apply (subgoal_tac "restrict f (segment D a) \<in>
segment D a \<rightarrow> segment E (f a)", simp)
defer
apply (simp add:ord_isom_segment_mem)
apply (rule conjI)
defer
apply (rule surj_to_test, assumption+)
apply (rule ballI, simp)
apply (frule ord_isom_func[of E f], assumption+)
apply (frule surj_to_el[of f "carrier D" "carrier E"], assumption+,
frule ord_isom_mem[of E f a], assumption+,
frule Order.segment_sub[of E "f a"],
frule_tac c = b in subsetD[of "segment E (f a)" "carrier E"],
assumption+,
       drule_tac x = b in bspec, assumption,
       erule bexE)
apply (simp add:Order.segment_inc[THEN sym, of E _ "f a"],
rotate_tac -1, frule sym, thin_tac "f aa = b", simp,
frule_tac a1 = aa and b1 = a in ord_isom_less[THEN sym, of E f],
assumption+, simp,
simp add:segment_inc[of _ a], blast)
apply (rule ballI)+
apply (frule ord_isom_mem[of E f a], assumption+,
frule Order.segment_sub[of E "f a"])
apply (frule_tac x = aa in ord_isom_segment_mem[of E f a], assumption+,
frule_tac x = b in ord_isom_segment_mem[of E f a], assumption+,
simp add:Iod_less Order.Iod_less,
subst ord_isom_less[of E f], assumption+, (simp add:subsetD)+)
done
lemma ord_isom_Ssegment_Ssegment:"\<lbrakk>Order D; Order E;
ord_isom D E f; a \<in> carrier D \<rbrakk> \<Longrightarrow>
ord_isom (SIod D (Ssegment D a)) (SIod E (Ssegment E (f a)))
(\<lambda>x\<in>carrier (SIod D (Ssegment D a)). f x)"
apply (frule_tac a = a in Order.ord_isom_mem[of D E f], assumption+)
apply (cut_tac Ssegment_sub[of D a],
cut_tac Ssegment_sub[of "E" "f a"])
apply (subst ord_isom_def, simp add:ord_inj_def)
apply (rule conjI)
apply (rule Pi_I)
apply (simp add:SIod_carrier)
apply (frule_tac c = x in subsetD[of "Ssegment D a" "carrier D"], assumption+)
apply (frule_tac a = x in Order.ord_isom_mem[of D E f], assumption+)
apply (subst Ssegment_inc[THEN sym, of "E" _ "f a"], assumption+)
apply (subst Order.ord_isom_less[THEN sym, of D E f _ a], assumption+)
apply (subst Ssegment_inc[of D _ a], assumption+)
apply (rule conjI)
apply (simp add:SIod_carrier)
apply (simp add:ord_isom_def bij_to_def, (erule conjE)+)
apply (simp add:ord_inj_def, (erule conjE)+)
apply (rule restrict_inj[of "f" "carrier D" "Ssegment D a"], assumption+)
apply (rule conjI)
apply (rule ballI)+
apply (simp add:SIod_carrier)
apply (frule_tac c = aa in subsetD[of "Ssegment D a" "carrier D"],
assumption+,
frule_tac c = b in subsetD[of "Ssegment D a" "carrier D"], assumption+)
apply (frule_tac a1 = aa and b1 = a in Ssegment_inc[THEN sym], assumption+,
frule_tac a1 = b and b1 = a in Ssegment_inc[THEN sym], assumption+,
simp)
apply (simp add:Order.ord_isom_less[of D E f])
apply (frule_tac a = a in Order.ord_isom_mem[of D E f], assumption+,
frule_tac a = aa in Order.ord_isom_mem[of D E f], assumption+,
frule_tac a = b in Order.ord_isom_mem[of D E f], assumption+)
apply (simp add:Ssegment_inc[of E])
apply (simp add:SIod_less Order.ord_isom_less)
apply (simp add:surj_to_def,
simp add:SIod_carrier)
apply (rule equalityI)
apply (rule subsetI, simp add:image_def, erule bexE)
apply (frule_tac c = xa in subsetD[of "Ssegment D a" "carrier D"],
assumption+)
apply (frule_tac a = xa in Ssegment_inc[of D _ a], assumption+, simp)
apply (simp add:Order.ord_isom_less[of D E f _ a])
apply (frule_tac a = xa in Order.ord_isom_mem[of D E f], assumption+)
apply (subst Ssegment_inc[THEN sym], assumption+)
apply (rule subsetI)
apply (frule_tac c = x in subsetD[of "Ssegment E (f a)" "carrier E"],
assumption+)
apply (simp add:Ssegment_inc[THEN sym])
apply (frule_tac b = x in Order.ord_isom_surj[of D E f], assumption+,
erule bexE, simp, thin_tac "x = f aa")
apply (simp add:Order.ord_isom_less[THEN sym])
apply (simp add:Ssegment_inc[of D])
done
lemma (in Order) ord_equiv_segment_segment:
"\<lbrakk>Order E; ord_equiv D E; a \<in> carrier D\<rbrakk>
\<Longrightarrow> \<exists>t\<in>carrier E. ord_equiv (Iod D (segment D a)) (Iod E (segment E t))"
apply (simp add:ord_equiv_def, erule exE)
apply (frule_tac f = f in ord_isom_segment_segment[of E _ a], assumption+)
apply (frule_tac f = f in ord_isom_mem[of E _ a], assumption+)
apply blast
done
lemma ord_equiv_Ssegment_Ssegment:
"\<lbrakk>Order D; Order E; ord_equiv D E; a \<in> carrier D\<rbrakk>
\<Longrightarrow> \<exists>t\<in>carrier E. ord_equiv (SIod D (Ssegment D a)) (SIod E (Ssegment E t))"
apply (simp add:ord_equiv_def, erule exE)
apply (frule_tac f = f in ord_isom_Ssegment_Ssegment[of "D" "E" _ "a"],
assumption+)
apply (frule_tac f = f in Order.ord_isom_mem[of D E _ a], assumption+)
apply blast
done
lemma (in Order) ord_isom_restricted:
"\<lbrakk>Order E; ord_isom D E f; D1 \<subseteq> carrier D\<rbrakk> \<Longrightarrow>
ord_isom (Iod D D1) (Iod E (f ` D1)) (\<lambda>x\<in>D1. f x)"
apply (simp add:ord_isom_def[of D E f], erule conjE)
apply (simp add:ord_inj_restrict_isom[of E f D1])
done
lemma ord_isom_restrictedS:
"\<lbrakk>Order D; Order E; ord_isom D E f; D1 \<subseteq> carrier D\<rbrakk> \<Longrightarrow>
ord_isom (SIod D D1) (SIod E (f ` D1)) (\<lambda>x\<in>D1. f x)"
apply (simp add:ord_isom_def[of D E f], erule conjE)
apply (simp add:ord_inj_Srestrict_isom[of D E f D1])
done
lemma (in Order) ord_equiv_induced:
"\<lbrakk>Order E; ord_isom D E f; D1 \<subseteq> carrier D \<rbrakk> \<Longrightarrow>
ord_equiv (Iod D D1) (Iod E (f ` D1))"
apply (simp add:ord_equiv_def)
apply (frule ord_isom_restricted [of "E" "f" "D1"], assumption+)
apply blast
done
lemma ord_equiv_inducedS:
"\<lbrakk>Order D; Order E; ord_isom D E f; D1 \<subseteq> carrier D \<rbrakk> \<Longrightarrow>
ord_equiv (SIod D D1) (SIod E (f ` D1))"
apply (simp add:ord_equiv_def)
apply (frule ord_isom_restrictedS [of "D" "E" "f" "D1"], assumption+)
apply blast
done
lemma (in Order) equiv_induced_by_inj:"\<lbrakk>Order E; ord_inj D E f;
D1 \<subseteq> carrier D\<rbrakk> \<Longrightarrow> ord_equiv (Iod D D1) (Iod E (f ` D1))"
apply (simp add:ord_equiv_def)
apply (frule ord_inj_restrict_isom [of E f D1], assumption+)
apply blast
done
lemma equiv_induced_by_injS:"\<lbrakk>Order D; Order E; ord_inj D E f;
D1 \<subseteq> carrier D\<rbrakk> \<Longrightarrow> ord_equiv (SIod D D1) (SIod E (f ` D1))"
apply (simp add:ord_equiv_def)
apply (frule ord_inj_Srestrict_isom[of D E f D1], assumption+)
apply blast
done
lemma (in Torder) le_segment_segment:"\<lbrakk>a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow>
(a \<preceq> b) = (segment (Iod D (segment D b)) a = segment D a)"
apply (cut_tac segment_sub[of b],
frule Iod_Order[of "segment D b"])
apply (case_tac "a = b") apply simp
apply (simp add:le_refl)
apply (cut_tac a_notin_segment[of "b"])
apply (subst Order.segment_free[of "Iod D (segment D b)" b], assumption)
apply (simp add:Iod_carrier)
apply (simp add:Iod_carrier)
apply (subst le_imp_less_or_eq[of "a" "b"], assumption+, simp)
apply (rule iffI)
apply (rule equalityI)
apply (rule subsetI)
apply (frule_tac a1 = x in Order.segment_inc[THEN sym,
of "Iod D (segment D b)" _ a])
apply (frule_tac Order.segment_sub[of "Iod D (segment D b)" a])
apply (rule subsetD, assumption+)
apply (simp add:Iod_carrier) apply (simp add:segment_inc)
apply simp
apply (subst segment_inc[THEN sym])
apply (simp add:segment_def Iod_def) apply assumption
apply (simp add:segment_inc)
apply (frule Order.segment_sub[of "Iod D (segment D b)" a])
apply (simp add:Iod_carrier)
apply (simp add:subsetD Iod_less)
apply (rule subsetI)
apply (subst Order.segment_inc[THEN sym, of "Iod D (segment D b)"],
assumption+)
apply (simp add:Iod_carrier)
apply (simp add:segment_mono[of a b] psubset_eq, erule conjE)
apply (rule subsetD[of "segment D a" "segment D b"], assumption+)
apply (simp add:Iod_carrier segment_inc)
apply (frule segment_inc[of a b], assumption, simp)
apply (frule segment_mono[of a b], assumption, simp)
apply (simp add:psubset_eq, (erule conjE)+)
apply (frule_tac c = x in subsetD[of "segment D a" "segment D b"],
assumption+)
apply (simp add:Iod_less)
apply (subst segment_inc) apply (simp add:subsetD) apply assumption+
apply (rule contrapos_pp, simp+)
apply (simp add:not_less_le)
apply (simp add:le_imp_less_or_eq)
apply (frule segment_not_inc[of b a], assumption+)
apply (frule Order.segment_free[of "Iod D (segment D b)" a])
apply (simp add:Iod_carrier)
apply (simp add:Iod_carrier)
apply (simp add:segment_inj[THEN sym, of b a])
done
lemma le_Ssegment_Ssegment:"\<lbrakk>Torder D; a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow>
(a \<preceq>\<^bsub>D\<^esub> b) = (Ssegment (SIod D (Ssegment D b)) a = Ssegment D a)"
apply (frule Torder.Order[of "D"])
apply (case_tac "a = b") apply simp
apply (simp add:Order.le_refl)
apply (cut_tac Ssegment_sub[of "D" "b"])
apply (frule SIod_Order[of "D" "Ssegment D b"], assumption)
apply (cut_tac a_notin_Ssegment[of "b" "D"])
apply (frule SIod_carrier[THEN sym, of "D" "Ssegment D b"], assumption+)
apply (frule eq_set_not_inc[of "b" "Ssegment D b"
"carrier (SIod D (Ssegment D b))"], assumption+)
apply (thin_tac "b \<notin> Ssegment D b",
thin_tac "Ssegment D b = carrier (SIod D (Ssegment D b))")
apply (cut_tac Ssegment_free[of "b" "SIod D (Ssegment D b)" ])
apply (simp add:SIod_carrier) apply assumption+
apply (subst Order.le_imp_less_or_eq[of "D" "a" "b"], assumption+)
apply simp
apply (cut_tac Ssegment_sub[of "D" "b"])
apply (subst Ssegment_def[of "SIod D (Ssegment D b)"],
subst SIod_carrier[of "D" "Ssegment D b"], assumption+)
apply (subst Ssegment_inc[of "D" "a" "b"], assumption+)
apply (rule iffI) apply simp
apply (simp add:SIod_carrier)
apply (rule equalityI)
apply (rule subsetI)
apply (simp, erule conjE)
apply (simp add:SIod_less)
apply (subst Ssegment_def, simp add:Ssegment_def)
apply (rule subsetI, simp)
apply (simp add:Ssegment_inc[THEN sym, of "D" "a" "b"])
apply (cut_tac a1 = x in Ssegment_inc[THEN sym, of "D" _ "a"], assumption+)
apply (simp add:Ssegment_def, assumption, simp)
apply (cut_tac a = x in Order.less_trans[of "D" _ "a" "b"], assumption)
apply (simp add:Ssegment_def, assumption+)
apply (cut_tac a = x in Ssegment_inc[of "D" _ "b"], assumption)
apply (simp add:Ssegment_def)
apply assumption+
apply simp
apply (cut_tac a = a in Ssegment_inc[of "D" _ "b"])
apply assumption+
apply simp
apply (simp add:SIod_less)
apply (rule contrapos_pp, simp+)
apply (simp add:SIod_carrier)
apply (frule sym, thin_tac "Ssegment D b = Ssegment D a", simp)
apply (simp add:Ssegment_inc[THEN sym, of "D" "a" "b"])
apply (simp add:Torder.not_less_le[of "D" "a" "b"])
apply (frule not_sym, thin_tac "a \<noteq> b")
apply (simp add:Order.le_imp_less_or_eq[of "D" "b" "a"])
apply (simp add:Ssegment_inc[of "D" "b" "a"])
apply (simp add:a_notin_Ssegment[of "b" "D"])
done
lemma (in Torder) inc_segment_segment:"\<lbrakk>b \<in> carrier D;
a \<in> segment D b\<rbrakk> \<Longrightarrow> segment (Iod D (segment D b)) a = segment D a"
apply (cut_tac segment_sub[of "b"],
frule subsetD[of "segment D b" "carrier D" "a"], assumption)
apply (subst le_segment_segment[THEN sym, of "a" "b"],
assumption+)
apply (simp add:segment_inc[THEN sym])
apply (simp add:less_imp_le)
done
lemma (in Torder) segment_segment:"\<lbrakk>a \<in> carrier D; b \<in> carrier D\<rbrakk> \<Longrightarrow>
(segment (Iod D (segment D b)) a = segment D a) =
((segment D a) \<subseteq> (segment D b))"
apply (subst le_segment_segment[THEN sym, of "a" "b"],
assumption+)
apply (simp add:segment_le_mono[of "a" "b"])
done
lemma (in Torder) less_in_Iod:"\<lbrakk>a \<in> carrier D; b \<in> carrier D; a \<prec> b\<rbrakk>
\<Longrightarrow> (a \<prec> b) = (a \<in> carrier (Iod D (segment D b)))"
apply (simp add:Iod_def segment_inc)
done
definition
SS :: "_ \<Rightarrow> 'a set Order" where
"SS D = \<lparr>carrier = {X. \<exists>a\<in>carrier D. X = segment D a}, rel =
{XX. XX \<in> {X. \<exists>a\<in>carrier D. X = segment D a} \<times>
{X. \<exists>a\<in>carrier D. X = segment D a} \<and> ((fst XX) \<subseteq> (snd XX))} \<rparr>"
(** Ordered set consisting of segments **)
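(** segmap D sends each element of D to the initial segment it determines;
    ord_isom_segmap below shows it is an order isomorphism from D onto
    SS D, where segments are ordered by inclusion. **)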
definition
segmap::"_ \<Rightarrow> 'a \<Rightarrow> 'a set" where
"segmap D = (\<lambda>x\<in>(carrier D). segment D x)"
lemma segmap_func:"segmap D \<in> carrier D \<rightarrow> carrier (SS D)"
by (simp add:SS_def segmap_def Pi_def) blast
lemma (in Worder) ord_isom_segmap:" ord_isom D (SS D) (segmap D)"
apply (simp add:ord_isom_def)
apply (rule conjI)
apply (simp add:ord_inj_def)
apply (rule conjI)
apply (simp add:segmap_def)
apply (rule conjI)
apply (simp add:segmap_func)
apply (rule conjI)
apply (simp add:inj_on_def)
apply ((rule ballI)+, rule impI, simp add:segmap_def,
simp add:segment_inj[THEN sym])
apply (rule ballI)+
apply (simp add:oless_def[of "SS D"]) apply (simp add:ole_def SS_def)
apply (rule iffI)
apply (simp add:oless_def, erule conjE)
apply (frule_tac a = a and b = b in segment_le_mono, assumption+)
apply (simp add:segment_inj segmap_def)
apply blast
apply (erule conjE)+
apply (thin_tac "\<exists>aa\<in>carrier D. segmap D a = segment D aa",
thin_tac " \<exists>a\<in>carrier D. segmap D b = segment D a")
apply (simp add:segmap_def segment_inj[THEN sym])
apply (simp add:segment_le_mono[THEN sym])
apply (simp add:oless_def)
apply (rule surj_to_test[of "segmap D" "carrier D" "carrier (SS D)"])
apply (simp add:segmap_func)
apply (rule ballI)
apply (simp add:SS_def, erule bexE, simp)
apply (simp add:segmap_def, blast)
done
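(** A well-ordered set is never order-equivalent to one of its own initial
    segments: an isomorphism f onto Iod D (segment D a) would give
    a \<preceq> f a together with f a \<prec> a, a contradiction. **)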
lemma (in Worder) nonequiv_segment:"a \<in> carrier D \<Longrightarrow>
\<not> ord_equiv D (Iod D (segment D a))"
apply (rule contrapos_pp, simp+)
apply (simp add:ord_equiv_def)
apply (erule exE)
apply (cut_tac segment_sub[of "a"])
apply (frule Iod_Order[of "segment D a"])
apply (frule_tac f = f in ord_isom_func[of "Iod D (segment D a)"],
assumption+)
apply (frule_tac f = f and a = a in ord_isom_mem[of "Iod D (segment D a)"]
, assumption+)
apply (frule_tac f = f in to_subset [of "segment D a"], assumption+)
apply (drule_tac a = a in forall_spec, assumption)
apply (simp add:Iod_carrier)
apply (frule_tac c = "f a" in subsetD[of "segment D a" "carrier D" ],
assumption+)
apply (simp add:segment_inc[THEN sym])
apply (simp add:not_le_less[THEN sym, of "a" _])
done
lemma nonequiv_Ssegment:"\<lbrakk>Worder D; a \<in> carrier D\<rbrakk> \<Longrightarrow>
\<not> ord_equiv D (SIod D (Ssegment D a))"
apply (frule Worder.Order[of "D"], frule Worder.Torder[of "D"])
apply (rule contrapos_pp, simp+)
apply (simp add:ord_equiv_def)
apply (erule exE)
apply (cut_tac Ssegment_sub[of "D" "a"])
apply (frule SIod_Order[of "D" "Ssegment D a"], assumption)
apply (frule_tac f = f in Order.ord_isom_func[of "D" "SIod D (Ssegment D a)"],
assumption+,
frule_tac f = f and a = a in Order.ord_isom_mem[of "D"
"SIod D (Ssegment D a)"], assumption+)
apply (frule_tac f = f in to_subsetS [of "D" "Ssegment D a"], assumption+)
apply (drule_tac a = a in forall_spec, assumption)
apply (simp add:SIod_carrier)
apply (frule_tac c = "f a" in subsetD[of "Ssegment D a" "carrier D"],
assumption+)
apply (simp add:Ssegment_inc[THEN sym])
apply (simp add:Torder.not_le_less[THEN sym, of "D" "a" _])
done
lemma (in Worder) subset_Worder:" T \<subseteq> carrier D \<Longrightarrow>
Worder (Iod D T)"
apply (rule Worder.intro)
apply (simp add: Iod_Torder)
apply (rule Worder_axioms.intro)
apply (rule allI, rule impI)
apply (simp add:Iod_carrier, erule conjE)
apply (cut_tac ex_minimum)
apply (frule_tac A = X and B = T and C = "carrier D" in subset_trans,
assumption+)
apply (frule_tac a = X in forall_spec, simp,
thin_tac "\<forall>X. X \<subseteq> carrier D \<and> X \<noteq> {} \<longrightarrow> (\<exists>x. minimum_elem D X x)")
apply (erule exE)
apply (simp add:minimum_elem_sub)
apply blast
done
lemma SIod_Worder:"\<lbrakk>Worder D; T \<subseteq> carrier D\<rbrakk> \<Longrightarrow> Worder (SIod D T)"
apply (frule Worder.Order[of "D"],
frule Worder.Torder[of "D"])
apply (rule Worder.intro)
apply (simp add: SIod_Torder)
apply (rule Worder_axioms.intro)
apply (rule allI, rule impI, erule conjE, simp add:SIod_carrier)
apply (frule Worder.ex_minimum)
apply (frule_tac A = X and B = T and C = "carrier D" in subset_trans,
assumption+)
apply (frule_tac a = X in forall_spec, simp,
thin_tac "\<forall>X. X \<subseteq> carrier D \<and> X \<noteq> {} \<longrightarrow> (\<exists>x. minimum_elem D X x)")
apply (simp add:minimum_elem_Ssub)
done
lemma Ssegment_Worder:"Worder D \<Longrightarrow>Worder (SIod D (Ssegment D a))"
apply (rule SIod_Worder, assumption)
apply (rule Ssegment_sub[of "D" "a"])
done
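(** Distinct elements of a well-ordered set have inequivalent initial
    segments, so an element is determined by the equivalence class of its
    segment (segment_unique and Ssegment_unique below). **)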
lemma (in Worder) segment_unique1:"\<lbrakk>a \<in> carrier D; b \<in> carrier D; a \<prec> b\<rbrakk> \<Longrightarrow>
\<not> ord_equiv (Iod D (segment D b)) (Iod D (segment D a))"
apply (cut_tac segment_Worder[of b],
cut_tac segment_sub[of b],
frule segment_mono[of a b], assumption, simp add:psubset_eq,
erule conjE)
apply (simp add:segment_inc,
frule Worder.nonequiv_segment[of "Iod D (segment D b)" a],
simp add:Iod_carrier)
apply (frule segment_segment[THEN sym, of a b], assumption, simp)
apply (simp add:Iod_sub_sub[of "segment D a" "segment D b"])
done
lemma Ssegment_unique1:"\<lbrakk>Worder D; a \<in> carrier D; b \<in> carrier D; a \<prec>\<^bsub>D\<^esub> b\<rbrakk> \<Longrightarrow>
\<not> ord_equiv (SIod D (Ssegment D b)) (SIod D (Ssegment D a))"
apply (frule Worder.Order[of "D"], frule Worder.Torder[of "D"],
frule Ssegment_inc[of "D" "a" "b"], assumption+, simp,
frule Ssegment_Worder [of "D" "b"])
apply (cut_tac Ssegment_sub[of "D" "b"]) apply (
frule Ssegment_mono[of D a b], assumption+, simp)
apply (frule nonequiv_Ssegment[of "SIod D (Ssegment D b)" "a"])
apply (simp add:SIod_carrier)
apply (frule le_Ssegment_Ssegment[of D a b], assumption+)
apply (simp add:oless_def psubset_eq, (erule conjE)+)
apply (simp add:SIod_sub_sub[of "Ssegment D a" "Ssegment D b"])
done
lemma (in Worder) segment_unique:"\<lbrakk>a \<in> carrier D; b \<in> carrier D;
ord_equiv (Iod D (segment D a)) (Iod D (segment D b)) \<rbrakk> \<Longrightarrow> a = b"
apply (cut_tac segment_sub[of a],
frule_tac Iod_Order[of "segment D a"],
cut_tac segment_sub[of b],
frule_tac Iod_Order[of "segment D b"])
apply (rule contrapos_pp, simp+)
apply (frule less_linear[of "a" "b"], assumption+)
apply simp
apply (erule disjE)
apply (frule segment_unique1[of "a" "b"], assumption+)
apply (simp add:Order.ord_equiv_sym[of "Iod D (segment D a)"
"Iod D (segment D b)"])
apply (simp add:segment_unique1[of "b" "a"])
done
lemma Ssegment_unique:"\<lbrakk>Worder D; a \<in> carrier D; b \<in> carrier D;
ord_equiv (SIod D (Ssegment D a)) (SIod D (Ssegment D b)) \<rbrakk> \<Longrightarrow> a = b"
apply (frule Worder.Order[of "D"], frule Worder.Torder[of "D"],
cut_tac Ssegment_sub[of "D" "b"],
cut_tac Ssegment_sub[of "D" "a"],
frule SIod_Order[of "D" "Ssegment D a"], assumption,
frule SIod_Order[of "D" "Ssegment D b"], assumption)
apply (rule contrapos_pp, simp+)
apply (frule Torder.less_linear[of "D" "a" "b"], assumption+)
apply simp
apply (erule disjE)
apply (frule Ssegment_unique1[of "D" "a" "b"], assumption+)
apply (simp add:Order.ord_equiv_sym[of "SIod D (Ssegment D a)"
"SIod D (Ssegment D b)"])
apply (simp add:Ssegment_unique1[of "D" "b" "a"])
done
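(** A downward-closed subset T with nonempty complement equals the segment
    of the minimum element of that complement. **)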
lemma (in Worder) subset_segment:"\<lbrakk>T \<subseteq> carrier D;
\<forall>b\<in>T. \<forall>x. x \<prec> b \<and> x \<in> carrier D \<longrightarrow> x \<in> T;
minimum_elem D (carrier D - T) a\<rbrakk> \<Longrightarrow> T = segment D a"
apply (cut_tac Diff_subset[of "carrier D" T],
frule minimum_elem_mem [of "carrier D - T" a], assumption,
simp, erule conjE)
apply (rule equalityI)
apply (rule subsetI)
apply (frule_tac c = x in subsetD[of T "carrier D"], assumption+)
apply (subst segment_inc[THEN sym], assumption+)
apply (frule_tac x = x in bspec, assumption,
thin_tac "\<forall>b\<in>T. \<forall>x. x \<prec> b \<and> x \<in> carrier D \<longrightarrow> x \<in> T")
apply (rule contrapos_pp, simp+)
apply (frule_tac a = x and b = a in not_less_le, assumption+)
apply (simp add:le_imp_less_or_eq, thin_tac "\<not> x \<prec> a")
apply (erule disjE)
apply (frule_tac a = a in forall_spec)
apply (thin_tac "\<forall>xa. xa \<prec> x \<and> xa \<in> carrier D \<longrightarrow> xa \<in> T")
apply simp
apply simp
apply simp
apply (rule subsetI)
apply (cut_tac a = a in segment_sub)
apply (frule_tac c = x and A = "segment D a" in subsetD[of _ "carrier D"],
assumption+)
apply (thin_tac "\<forall>b\<in>T. \<forall>x. x \<prec> b \<and> x \<in> carrier D \<longrightarrow> x \<in> T")
apply (rule contrapos_pp, simp+)
apply (simp add:minimum_elem_def)
apply (frule_tac x = x in bspec, simp)
apply (simp add:segment_inc[THEN sym])
apply (simp add:not_le_less[THEN sym])
done
lemma subset_Ssegment:"\<lbrakk>Worder D; T \<subseteq> carrier D;
\<forall>b\<in>T. \<forall>x. x \<prec>\<^bsub>D\<^esub> b \<and> x \<in> carrier D \<longrightarrow> x \<in> T;
minimum_elem D (carrier D - T) a\<rbrakk> \<Longrightarrow> T = Ssegment D a"
apply (cut_tac Diff_subset[of "carrier D" T],
frule Worder.Torder[of D],
frule Worder.Order[of D],
frule Order.minimum_elem_mem [of D "carrier D - T" a], assumption+,
simp, erule conjE)
apply (rule equalityI)
apply (rule subsetI)
apply (frule_tac c = x in subsetD[of T "carrier D"], assumption+)
apply (subst Ssegment_inc[THEN sym], assumption+)
apply (frule_tac x = x in bspec, assumption,
thin_tac "\<forall>b\<in>T. \<forall>x. x \<prec>\<^bsub>D\<^esub> b \<and> x \<in> carrier D \<longrightarrow> x \<in> T")
apply (rule contrapos_pp, simp+)
apply (frule_tac a = x and b = a in Torder.not_less_le, assumption+)
apply (simp add:Order.le_imp_less_or_eq, thin_tac "\<not> x \<prec>\<^bsub>D\<^esub> a")
apply (erule disjE)
apply (frule_tac a = a in forall_spec)
apply (thin_tac "\<forall>xa. xa \<prec>\<^bsub>D\<^esub> x \<and> xa \<in> carrier D \<longrightarrow> xa \<in> T")
apply simp
apply simp
apply simp
apply (rule subsetI)
apply (cut_tac a = a in Ssegment_sub[of D])
apply (frule_tac c = x and A = "Ssegment D a" in subsetD[of _ "carrier D"],
assumption+)
apply (thin_tac "\<forall>b\<in>T. \<forall>x. x \<prec>\<^bsub>D\<^esub> b \<and> x \<in> carrier D \<longrightarrow> x \<in> T")
apply (rule contrapos_pp, simp+)
apply (simp add:minimum_elem_def)
apply (frule_tac x = x in bspec, simp,
thin_tac "Ball (carrier D - T) ((\<preceq>\<^bsub>D\<^esub>) a)")
apply (simp add:Ssegment_inc[THEN sym])
apply (simp add:Torder.not_le_less[THEN sym])
done
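(** Classification of downward-closed subsets of a well-ordered set: every
    such subset is either the whole carrier or an initial segment. **)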
lemma (in Worder) segmentTr:"\<lbrakk>T \<subseteq> carrier D;
\<forall>b \<in> T. (\<forall>x. (x \<prec> b \<and> x \<in> (carrier D) \<longrightarrow> x \<in> T))\<rbrakk> \<Longrightarrow>
(T = carrier D) \<or> (\<exists>a. a \<in> (carrier D) \<and> T = segment D a)"
apply (case_tac "T = carrier D")
apply simp
apply simp
apply (frule not_sym, thin_tac "T \<noteq> carrier D",
frule diff_nonempty[of "carrier D" "T"], assumption)
apply (cut_tac ex_minimum)
apply (frule_tac a = "carrier D - T" in forall_spec, simp)
apply (thin_tac "\<forall>X. X \<subseteq> carrier D \<and> X \<noteq> {} \<longrightarrow> (\<exists>x. minimum_elem D X x)")
apply (erule exE, rename_tac a)
apply (thin_tac "carrier D \<noteq> T", thin_tac "carrier D - T \<noteq> {}")
apply (cut_tac Diff_subset[of "carrier D" "T"])
apply (frule_tac a = a in minimum_elem_mem[of "carrier D - T"],
assumption+,
thin_tac "carrier D - T \<subseteq> carrier D")
apply (simp only:Diff_iff, erule conjE)
apply (frule_tac a = a in subset_segment[of T], assumption+)
apply blast
done
lemma SsegmentTr:"\<lbrakk>Worder D; T \<subseteq> carrier D;
\<forall>b \<in> T. (\<forall>x. (x \<prec>\<^bsub>D\<^esub> b \<and> x \<in> (carrier D) \<longrightarrow> x \<in> T))\<rbrakk> \<Longrightarrow>
(T = carrier D) \<or> (\<exists>a. a \<in> (carrier D) \<and> T = Ssegment D a)"
apply (case_tac "T = carrier D")
apply simp
apply simp
apply (frule not_sym, thin_tac "T \<noteq> carrier D",
frule diff_nonempty[of "carrier D" "T"], assumption)
apply (cut_tac Worder.ex_minimum[of D])
apply (frule_tac a = "carrier D - T" in forall_spec, simp)
apply (thin_tac "\<forall>X. X \<subseteq> carrier D \<and> X \<noteq> {} \<longrightarrow> (\<exists>x. minimum_elem D X x)")
apply (erule exE, rename_tac a)
apply (thin_tac "carrier D \<noteq> T", thin_tac "carrier D - T \<noteq> {}")
apply (cut_tac Diff_subset[of "carrier D" "T"])
apply (frule Worder.Order[of D])
apply (frule_tac a = a in Order.minimum_elem_mem[of D "carrier D - T"],
assumption+,
thin_tac "carrier D - T \<subseteq> carrier D")
apply (simp only:Diff_iff, erule conjE)
apply (subgoal_tac "T = Ssegment D a")
apply blast
apply (rule equalityI)
apply (rule subsetI)
apply (frule_tac c = x in subsetD[of T "carrier D"], assumption+)
apply (subst Ssegment_inc[THEN sym], assumption+)
apply (frule_tac x = x in bspec, assumption,
thin_tac "\<forall>b\<in>T. \<forall>x. x \<prec>\<^bsub>D\<^esub> b \<and> x \<in> carrier D \<longrightarrow> x \<in> T")
apply (rule contrapos_pp, simp+)
apply (frule Worder.Torder[of D],
frule_tac a = x and b = a in Torder.not_less_le[of D], assumption+)
apply (simp add:Order.le_imp_less_or_eq, thin_tac "\<not> x \<prec>\<^bsub>D\<^esub> a")
apply (erule disjE)
apply (frule_tac a = a in forall_spec)
apply (thin_tac "\<forall>xa. xa \<prec>\<^bsub>D\<^esub> x \<and> xa \<in> carrier D \<longrightarrow> xa \<in> T")
apply simp
apply simp
apply simp
apply (rule subsetI)
apply (frule Worder.Torder[of D],
frule Torder.Order[of D])
apply (cut_tac a = a in Ssegment_sub[of D])
apply (frule_tac c = x and A = "Ssegment D a" in subsetD[of _ "carrier D"],
assumption+)
apply (thin_tac "\<forall>b\<in>T. \<forall>x. x \<prec>\<^bsub>D\<^esub> b \<and> x \<in> carrier D \<longrightarrow> x \<in> T")
apply (rule contrapos_pp, simp+)
apply (simp add:minimum_elem_def)
apply (frule_tac x = x in bspec, simp)
apply (simp add:Ssegment_inc[THEN sym])
apply (simp add:Torder.not_le_less[THEN sym])
apply assumption
done
lemma (in Worder) ord_isom_segment_segment:"\<lbrakk>Worder E;
ord_isom D E f; a \<in> carrier D \<rbrakk> \<Longrightarrow>
ord_isom (Iod D (segment D a)) (Iod E (segment E (f a)))
(\<lambda>x\<in>carrier (Iod D (segment D a)). f x)"
by (frule Worder.Order[of "E"],
rule ord_isom_segment_segment[of "E" "f" "a"], assumption+)
definition
Tw :: "[_ , ('b, 'm1) Order_scheme] \<Rightarrow> 'a \<Rightarrow> 'b" ("(2Tw\<^bsub>_,_\<^esub>)" [60,61]60) where
"Tw\<^bsub>D,T\<^esub> = (\<lambda>a\<in> carrier D. SOME x. x\<in>carrier T \<and>
ord_equiv (Iod D (segment D a)) (Iod T (segment T x)))"
lemma (in Worder) Tw_func:"\<lbrakk>Worder T;
\<forall>a\<in>carrier D. \<exists>b\<in>carrier T. ord_equiv (Iod D (segment D a))
(Iod T (segment T b))\<rbrakk> \<Longrightarrow> Tw\<^bsub>D,T\<^esub> \<in> carrier D \<rightarrow> carrier T"
apply (rule Pi_I)
apply (simp add:Tw_def)
apply (rule someI2_ex) apply blast apply simp
done
lemma (in Worder) Tw_mem:"\<lbrakk>Worder E; x \<in> carrier D;
\<forall>a\<in>carrier D. \<exists>b\<in>carrier E. ord_equiv (Iod D (segment D a))
(Iod E (segment E b))\<rbrakk> \<Longrightarrow> (Tw\<^bsub>D,E\<^esub>) x \<in> carrier E"
by (frule Tw_func[of E], assumption,
simp add:Pi_def)
lemma (in Worder) Tw_equiv:"\<lbrakk>Worder T;
\<forall>a\<in>carrier D. \<exists>b\<in>carrier T. ord_equiv (Iod D (segment D a))
(Iod T (segment T b)); x \<in> carrier D \<rbrakk> \<Longrightarrow>
ord_equiv (Iod D (segment D x)) (Iod T (segment T ((Tw\<^bsub>D,T\<^esub>) x)))"
apply (frule_tac x = x in bspec, assumption+,
thin_tac "\<forall>a\<in>carrier D.
\<exists>b\<in>carrier T. ord_equiv (Iod D (segment D a)) (Iod T (segment T b))")
apply (simp add:Tw_def)
apply (rule someI2_ex)
apply blast apply simp
done
lemma (in Worder) Tw_inj:"\<lbrakk>Worder E;
\<forall>a\<in>carrier D. \<exists>b\<in>carrier E. ord_equiv (Iod D (segment D a))
(Iod E (segment E b))\<rbrakk> \<Longrightarrow> inj_on (Tw\<^bsub>D,E\<^esub>) (carrier D)"
apply (simp add:inj_on_def)
apply (rule ballI)+ apply (rule impI)
apply (frule_tac x = x in Tw_equiv [of "E"], assumption+)
apply simp
apply (frule Tw_func[of "E"], assumption)
apply (frule_tac x = x in funcset_mem[of "Tw D E" "carrier D" "carrier E"],
assumption+,
frule_tac x = y in funcset_mem[of "Tw D E" "carrier D" "carrier E"],
assumption+)
apply (frule Worder.Order[of "E"],
cut_tac a = x in segment_sub,
cut_tac a = y in segment_sub,
cut_tac a = "Tw D E y" in Order.segment_sub[of "E"], assumption)
apply (frule_tac T = "segment D x" in Iod_Order,
frule_tac T = "segment D y" in Iod_Order,
frule_tac T = "segment E (Tw D E y)" in Order.Iod_Order[of "E"],
assumption)
apply (thin_tac "Tw D E x = Tw D E y")
apply (frule_tac x = y in Tw_equiv[of "E"], assumption+)
apply (frule_tac D = "Iod D (segment D y)" and
E = "Iod E (segment E (Tw D E y))" in Order.ord_equiv_sym,
assumption+,
thin_tac "ord_equiv (Iod D (segment D y))
(Iod E (segment E (Tw D E y)))")
apply (frule_tac D = "Iod D (segment D x)" and
E = "Iod E (segment E (Tw D E y))" and
F = "Iod D (segment D y)" in Order.ord_equiv_trans, assumption+)
apply (simp add:segment_unique)
done
lemma (in Worder) Tw_eq_ord_isom:"\<lbrakk>Worder E;
\<forall>a\<in>carrier D. \<exists>b\<in>carrier E.
ord_equiv (Iod D (segment D a)) (Iod E (segment E b)); a \<in> carrier D;
ord_isom (Iod D (segment D a)) (Iod E (segment E (Tw D E a))) f;
x \<in> segment D a \<rbrakk> \<Longrightarrow> f x = Tw D E x"
apply (cut_tac segment_sub[of a])
apply (frule_tac c = x in subsetD[of "segment D a" "carrier D"], assumption+,
frule Tw_equiv[of E x], assumption+)
apply (frule Worder.Torder[of E],
frule Torder.Order[of E])
apply (cut_tac a = x in segment_Worder,
frule_tac D = "Iod D (segment D x)" in Worder.Torder,
frule_tac D = "Iod D (segment D x)" in Worder.Order)
apply (frule_tac T = "segment D a" in Iod_Order)
apply (frule_tac x = a in Tw_mem[of E], assumption+)
apply (frule_tac a = "Tw D E x" in Order.segment_sub[of E])
apply (frule_tac a = "Tw D E a" in Worder.segment_Worder,
frule_tac D = "Iod E (segment E (Tw D E a))" in Worder.Order)
apply (frule_tac f = f and a = x in Order.ord_isom_segment_segment[of
"Iod D (segment D a)" "Iod E (segment E (Tw D E a))"], assumption+)
apply (simp add:Iod_carrier)
apply (frule_tac a = x and b = a in segment_le_mono, assumption+)
apply (frule_tac a1 = x and b1 = a in segment_inc[THEN sym], assumption+)
apply (simp add:oless_def)
apply (frule_tac a1 = x and b1 = a in segment_segment[THEN sym], assumption+)
apply simp
apply (simp add:Iod_sub_sub)
apply (frule_tac f = f and a = x in Order.ord_isom_mem[of
"Iod D (segment D a)" "Iod E (segment E (Tw D E a))"],
simp add:Iod_carrier,
frule Order.segment_sub[of E "Tw D E a"],
simp add:Order.Iod_carrier, simp add:Iod_carrier,
frule Order.segment_sub[of E "Tw D E a"],
simp add:Order.Iod_carrier[of E],
frule_tac c = "f x" in subsetD[of "segment E (Tw D E a)"
"carrier E"], assumption+)
apply (frule_tac a1 = "f x" in Order.segment_inc[THEN sym, of E _
"Tw D E a"], assumption+, simp)
apply (simp add:oless_def, (erule conjE)+)
apply (frule_tac a = "f x" and b = "Tw D E a" in
Torder.segment_le_mono [of E], assumption+, simp)
apply (frule_tac a = "f x" and b = "Tw D E a" in
Order.segment_segment[of E], assumption+)
apply simp
apply (simp add:Order.Iod_sub_sub)
apply (frule_tac D = "Iod D (segment D x)" in Torder.Order)
apply (frule_tac D = "Iod D (segment D x)" and E = "Iod E (segment E (f x))"
and F = "Iod E (segment E (Tw D E x))" in Order.ord_equiv_box)
apply (frule_tac a = "f x" in Order.segment_sub[of E])
apply (frule_tac T = "segment E (f x)" in Order.Iod_Order[of E], assumption+)
apply (frule_tac a = "f x" in Order.segment_sub[of E])
apply (frule Tw_mem[of E x], assumption+)
apply (frule Order.segment_sub[of E "Tw D E x"])
apply (rule Order.Iod_Order[of E], assumption+)
apply (simp add:ord_equiv_def, blast)
apply assumption
apply (frule_tac a = "f x" and b = "Tw D E x" in
Worder.segment_unique[of E], assumption+)
apply (frule_tac x = x in Tw_mem[of E], assumption+)
done
lemma (in Worder) Tw_ord_injTr:"\<lbrakk>Worder E;
\<forall>a\<in>carrier D. \<exists>b\<in>carrier E.
ord_equiv (Iod D (segment D a)) (Iod E (segment E b));
a \<in> carrier D; b \<in> carrier D; a \<prec> b\<rbrakk> \<Longrightarrow>
Tw D E a \<prec>\<^bsub>E\<^esub> (Tw D E b)"
apply (frule_tac x = b in Tw_equiv [of "E"], assumption+)
apply (simp add:segment_inc)
apply (simp add:ord_equiv_def, erule exE, fold ord_equiv_def)
apply (frule_tac f = f in Tw_eq_ord_isom[of E b _ a], assumption+)
apply (cut_tac segment_sub[of b])
apply (frule Iod_Order[of "segment D b"])
apply (frule Worder.Order[of E],
frule Tw_mem[of E b], assumption+,
frule Order.segment_sub[of E "Tw D E b"],
frule Order.Iod_Order[of E "segment E (Tw D E b)"], assumption)
apply (frule_tac f = f and a = a in Order.ord_isom_mem[of
"Iod D (segment D b)" "Iod E (segment E (Tw D E b))"], assumption+)
apply (simp add:Iod_carrier)
apply (simp add:Order.Iod_carrier)
apply (subst Order.segment_inc[of E], assumption+)
apply (simp add:Tw_mem)+
done
lemma (in Worder) Tw_ord_inj:"\<lbrakk>Worder E;
\<forall>a\<in>carrier D. \<exists>b\<in>carrier E. ord_equiv (Iod D (segment D a))
(Iod E (segment E b))\<rbrakk> \<Longrightarrow> ord_inj D E (Tw D E)"
apply (simp add:ord_inj_def)
apply (rule conjI)
apply (simp add:Tw_def extensional_def)
apply (simp add:Tw_func)
apply (rule conjI)
apply (simp add:Tw_inj)
apply (rule ballI)+
apply (rule iffI)
apply (simp add:Tw_ord_injTr)
apply (rule contrapos_pp, simp+)
apply (simp add:not_less_le)
apply (simp add:le_imp_less_or_eq)
apply (erule disjE)
apply (frule_tac a = b and b = a in Tw_ord_injTr[of "E"], assumption+)
apply (frule Tw_func [of "E"], assumption+)
apply (frule_tac x = a in funcset_mem[of "Tw D E" "carrier D" "carrier E"],
assumption+,
frule_tac x = b in funcset_mem[of "Tw D E" "carrier D" "carrier E"],
assumption+)
apply (frule Worder.Torder[of "E"],
frule_tac a1 = "Tw D E b" and b1 = "Tw D E a" in
Torder.not_le_less[THEN sym, of "E"], assumption+, simp)
apply (frule Worder.Order[of "E"],
frule_tac a = "Tw D E b" and b = "Tw D E a" in
Order.less_imp_le[of "E"], assumption+, simp)
apply (simp add:oless_def)
done
lemma (in Worder) ord_isom_restricted_by_Tw:"\<lbrakk>Worder E;
\<forall>a\<in>carrier D. \<exists>b\<in>carrier E.
ord_equiv (Iod D (segment D a)) (Iod E (segment E b));
D1 \<subseteq> carrier D\<rbrakk> \<Longrightarrow>
ord_isom (Iod D D1) (Iod E ((Tw D E) ` D1))
(restrict (Tw D E) D1)"
apply (frule Tw_ord_inj [of "E"], assumption+)
apply (frule Worder.Order[of E])
apply (rule ord_inj_restrict_isom [of E "Tw D E" "D1"], assumption+)
done
lemma (in Worder) Tw_segment_segment:"\<lbrakk>Worder E;
\<forall>a\<in>carrier D.\<exists>b\<in>carrier E.
ord_equiv (Iod D (segment D a)) (Iod E (segment E b)); a \<in> carrier D\<rbrakk>
\<Longrightarrow> Tw D E ` (segment D a) = segment E (Tw D E a)"
apply (rule equalityI)
apply (rule subsetI)
apply (simp add:image_def, erule bexE)
apply (frule Tw_equiv[of "E" "a"], assumption+)
apply (simp add:ord_equiv_def, erule exE, fold ord_equiv_def)
apply (frule_tac x = xa in Tw_eq_ord_isom[of E a], assumption+)
apply (rotate_tac -1, frule sym, thin_tac "f xa = Tw D E xa", simp)
apply (cut_tac segment_sub[of a],
frule Iod_Order[of "segment D a"])
apply (frule Worder.Order[of E],
frule_tac a = "Tw D E a" in Order.segment_sub[of E],
frule Tw_mem[of E a], assumption+,
frule Order.segment_sub[of E "Tw D E a"])
apply (frule_tac T = "segment E (Tw D E a)" in Order.Iod_Order[of E],
assumption+)
apply (frule_tac a = xa and f = f and D = "Iod D (segment D a)" and
E = "Iod E (segment E (Tw D E a))" in Order.ord_isom_mem,
assumption+)
apply (simp add:Iod_carrier)
apply (simp add:Order.Iod_carrier)
apply (rule subsetI)
apply (simp add:image_def)
apply (frule Tw_equiv[of "E" "a"], assumption+)
apply (simp add:ord_equiv_def, erule exE, fold ord_equiv_def)
apply (cut_tac segment_sub[of a],
frule Iod_Order[of "segment D a"])
apply (frule Worder.Order[of E],
frule_tac a = "Tw D E a" in Order.segment_sub[of E],
frule Tw_mem[of E a], assumption+,
frule Order.segment_sub[of E "Tw D E a"])
apply (frule_tac T = "segment E (Tw D E a)" in Order.Iod_Order[of E],
assumption+)
apply (frule Iod_Order[of "segment D a"])
apply (frule_tac b = x in Order.ord_isom_surj [of "Iod D (segment D a)"
"Iod E (segment E (Tw D E a))"], assumption+)
apply (simp add:Order.Iod_carrier)
apply (erule bexE, simp add:Iod_carrier)
apply (frule_tac f = f and x = aa in Tw_eq_ord_isom[of E a], assumption+)
apply (simp, blast)
done
lemma (in Worder) ord_isom_Tw_segment:"\<lbrakk>Worder E;
\<forall>a\<in>carrier D. \<exists>b\<in>carrier E.
ord_equiv (Iod D (segment D a)) (Iod E (segment E b)); a\<in>carrier D\<rbrakk> \<Longrightarrow>
ord_isom (Iod D (segment D a)) (Iod E (segment E (Tw D E a)))
(restrict (Tw D E) (segment D a))"
apply (cut_tac segment_sub[of "a"],
frule ord_isom_restricted_by_Tw[of "E" "segment D a"], assumption+,
simp add:Tw_segment_segment[of "E" "a"])
done
lemma (in Worder) well_ord_compare1:"\<lbrakk>Worder E;
\<forall>a\<in>carrier D. \<exists>b\<in>carrier E.
ord_equiv (Iod D (segment D a)) (Iod E (segment E b))\<rbrakk> \<Longrightarrow>
(ord_equiv D E) \<or> (\<exists>c\<in>carrier E. ord_equiv D (Iod E (segment E c)))"
apply (frule Tw_ord_inj [of "E"], assumption+)
apply (frule Tw_func[of "E"], assumption+)
apply (frule ord_isom_restricted_by_Tw [of "E" "carrier D"], assumption+,
simp)
apply (simp add:Iod_self[THEN sym])
apply (frule image_sub0[of "Tw D E" "carrier D" "carrier E"],
frule Worder.segmentTr [of "E" "(Tw D E) ` (carrier D)"],
assumption)
apply (rule ballI, rule allI, rule impI, erule conjE)
apply (thin_tac "ord_isom D (Iod E (Tw D E ` carrier D))
(restrict (Tw D E) (carrier D))")
apply (thin_tac "Tw D E ` carrier D \<subseteq> carrier E",
simp add:image_def, erule bexE)
apply (frule_tac a = xa in ord_isom_Tw_segment[of "E"], assumption+)
apply (rename_tac b x c)
apply (frule_tac x = c in funcset_mem[of "Tw D E" "carrier D" "carrier E"],
assumption, simp, thin_tac "b = Tw D E c")
apply (frule Worder.Order[of "E"],
frule_tac a = "Tw D E c" in Order.segment_sub[of "E"],
cut_tac a = c in segment_Worder,
cut_tac a = "Tw D E c" in Worder.segment_Worder[of "E"],
assumption,
frule_tac D = "Iod D (segment D c)" in Worder.Order,
frule_tac D = "Iod E (segment E (Tw D E c))" in Worder.Order)
apply (frule_tac D = "Iod D (segment D c)" and
E = "Iod E (segment E (Tw D E c))" and
f = "restrict (Tw D E) (segment D c)" and b = x in
Order.ord_isom_surj, assumption+)
apply (simp add:Order.Iod_carrier[of "E"])
apply (frule_tac a = x and b = "Tw D E c" in Order.segment_inc[of "E"],
assumption+, simp)
apply (insert Order,
cut_tac a = c in segment_sub,
simp add:Iod_carrier, erule bexE, blast)
apply (erule disjE)
apply simp
apply (frule Worder.Order[of "E"],
simp add:Order.Iod_self[THEN sym, of "E"],
simp add:ord_equiv)
apply (erule exE, erule conjE, simp,
frule Worder.Order[of "E"],
frule_tac a = a in Order.segment_sub[of "E"],
cut_tac a = a in Worder.segment_Worder[of "E"],
assumption,
frule_tac D = "Iod E (segment E a)" in Worder.Order,
frule_tac E = "Iod E (segment E a)" in ord_equiv, simp, blast)
done
lemma bex_nonempty_set:"\<exists>x \<in> A. P x \<Longrightarrow> {x. x \<in> A \<and> P x } \<noteq> {}"
by blast
lemma nonempty_set_sub:"{x. x \<in> A \<and> P x } \<noteq> {} \<Longrightarrow>
{x. x \<in> A \<and> P x} \<subseteq> A"
by (rule subsetI, simp)
lemma (in Torder) less_minimum:"\<lbrakk>minimum_elem D {x. x \<in> carrier D \<and> P x} d\<rbrakk>
\<Longrightarrow> \<forall>a. (((a \<prec> d) \<and> a \<in> carrier D) \<longrightarrow> \<not> (P a))"
apply (rule allI, rule impI, erule conjE)
apply (rule contrapos_pp, simp+)
apply (simp add:minimum_elem_def, (erule conjE)+)
apply (frule_tac a = a in forall_spec, simp,
thin_tac "\<forall>x. x \<in> carrier D \<and> P x \<longrightarrow> d \<preceq> x")
apply (simp add:not_le_less[THEN sym, of "d"])
done
lemma (in Torder) segment_minimum_empty:"\<lbrakk>X \<subseteq> carrier D; d \<in> X\<rbrakk> \<Longrightarrow>
(minimum_elem D X d) = (segment (Iod D X) d = {})"
apply (rule iffI)
apply (rule contrapos_pp, simp+)
apply (frule nonempty_ex[of "segment (Iod D X) d"], erule exE,
thin_tac "segment (Iod D X) d \<noteq> {}",
frule minimum_elem_mem[of "X" "d"], assumption+,
frule_tac c = d in subsetD[of "X" "carrier D"], assumption+)
apply (simp add:segment_def,
simp add:Iod_carrier, erule conjE,
simp add:Iod_less[of "X"])
apply (simp add:minimum_elem_def,
frule_tac x = x in bspec, assumption,
frule_tac c = x in subsetD[of "X" "carrier D"], assumption+,
frule_tac a1 = x and b1 = d in not_less_le[THEN sym], assumption+)
apply simp
apply (simp add:minimum_elem_def)
by (metis Iod_Order Iod_Torder Iod_carrier Iod_le Iod_not_less_le Order.segment_inc1 nonempty)
end
|
Northwest Alaska Back Country Outfitters offers discerning fly fishermen the opportunity to tackle the world's best fishing for Dolly Varden!
Northwest of Kotzebue, Alaska, are numerous drainages entering the Chukchi Sea. These great watersheds hold Dolly Varden in the 15-20 pound range and include the Wulik, Kivalina, and the Kelly River. The current world record is 27.7 pounds, taken on the Wulik River.
These rivers can only be reached by bush plane and are fished using rafts and inflatable canoes. We can arrange your flights and all of your gear. Most of these world-class fish have never seen a fly, and you will have the entire drainage to yourself!
Call for prices and gear information. In Northwest Alaska the best time to target Dolly Varden is during the spawn, which runs from the first week of August through the third week. Most of our clients land between 75 and 100 fish in the 15+ pound range, and most groups will land a few fish over 20 pounds!
All gear rentals are based on 7-day minimum trips.
Read about our service in Fly Fisherman magazine.
"Two rod-and-reel outfits suit the trip. Use a 5-weight for the eager grayling that are often rising, and an 8- or 10-weight outfit for char, salmon, and sheefish. Use a floating line on the 5-weight, and a multi-tip line for the heavier rod (RIO Coldwater VersiTip or Scientific Anglers Tri Tip).
Bring #2-6 streamers such as Barr’s Meat Whistles (black, white, tan), #1/0 to #4 Clouser Minnows (chartreuse over white), and Egg-Sucking Bunny Leeches. Realistic egg imitations (cheese-colored, 6 to 8mm), mouse patterns, poppers, and large rubber-legged stoneflies are also valuable."
SOAR 16-foot canoes... perfect for Dolly trips into the Brooks Range!
2018 Fall Dolly Spawn: lots of really nice Dollies landed, with the largest of the season being 38 inches! That is a big Dolly! |
{-# OPTIONS --without-K --safe #-}
private
variable
A : Set
a : A
variable
A : Set
private
A = B
where
B = Set
|
If $g$ is a nonzero polynomial, then $(f \cdot g) / g = f$. |
The support of a sum divided by a scalar is the support of the sum of the scalar divided by the scalar. |
import Aoc
import Data.List
import Data.SortedMap
import Data.Strings
%default total
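-- The "memory game": each turn's number is the gap between the two most recent
-- turns on which the previous number was spoken, or 0 if it was new.
-- E.g. from the seed [0,3,6] the sequence continues 0,3,3,1,0,4,0,...
-- and the 2020th number spoken is 436.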
play : List Int -> Nat -> Int
play input finalTurn =
let n = length input
d0 = fromList $ zip input [1..cast n]
in go d0 (cast n+1) 0 (minus finalTurn (n+1))
where
||| We're on turn `t`, and its number is `n`.
||| `d` maps numbers to the last turn they were said.
||| The game ends `fuel` turns from now.
go : SortedMap Int Int -> (turn : Int) -> (num : Int) -> (fuel : Nat) -> Int
go d t n Z = n
go d t n (S k) = go (insert n t d) (t+1) (maybe 0 (t-) (lookup n d)) k
main : IO ()
main = do
input <- parseLines parseInteger
putStr "* "; printLn (play input 2020)
putStr "** "; printLn (play input 30000000)
|
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE ScopedTypeVariables #-}
module Bio.ChromVAR.Background (getBackgroundPeaks) where
import qualified Data.Vector.Unboxed as U
import qualified Data.Vector.Storable as S
import qualified Data.Vector as V
import qualified Data.Matrix.Unboxed as MU
import qualified Data.Vector.Unboxed.Mutable as UM
import Control.Monad
import Data.Ord
import Data.List
import Control.Arrow
import qualified Data.Matrix.Static.Dense as D
import qualified Data.Matrix.Static.Generic as D
import Data.Matrix.Dynamic (Dynamic(..), matrix)
import Statistics.Sample
import System.Random.MWC
import System.Random.MWC.Distributions (categorical)
import Bio.ChromVAR.Utils
type PeakGroup = ( MU.Matrix Double -- ^ Group to group distance
, V.Vector (U.Vector Int) -- ^ Peaks in each group
, U.Vector Int -- ^ Peak-group membership
)
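-- Sampling scheme: peaks are bucketed on their whitened 2-D coordinates
-- (in chromVAR these are GC content and mean accessibility). A background
-- peak for peak i is drawn by first picking a bucket with probability
-- proportional to its `weight` similarity to i's own bucket, then picking
-- a peak uniformly inside that bucket.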
getBackgroundPeaks :: Int -> U.Vector (Double, Double) -> GenIO -> IO [U.Vector Int]
getBackgroundPeaks n xs gen =
let pg = mkPeakGroup xs
in replicateM n $ getBackgroundPeak gen pg
getBackgroundPeak :: GenIO -> PeakGroup -> IO (U.Vector Int)
getBackgroundPeak gen (weightMat, bins, membership) = U.generateM n $ \i -> do
let ws = weightMat `MU.takeRow` (membership U.! i)
grp <- categorical ws gen
let peaks = bins V.! grp
idx <- uniformR (0, U.length peaks - 1) gen
return $ peaks U.! idx
where
n = U.length membership
{-# INLINE getBackgroundPeak #-}
-- | Make peak groups.
mkPeakGroup :: U.Vector (Double, Double) -> PeakGroup
mkPeakGroup raw = (weights, V.fromList groups, membership)
where
transformed = case matrix (map (\(x,y) -> [x,y]) $ U.toList raw) of
Dynamic mat@(D.Matrix _) -> U.fromList $
map ((\[x,y] -> (x,y)) . S.toList) $ D.toRows $ whiten Cholesky mat
weights = MU.generate (U.length points, U.length points) $ \(i,j) ->
weight (points U.! i) (points U.! j)
where
points = U.fromList $ flip map groups $ \is -> mean *** mean $
U.unzip $ U.map (transformed U.!) is
membership = U.create $ do
v <- UM.new $ U.length transformed
forM_ (zip [0..] groups) $ \(x, is) -> U.forM_ is $ \i ->
UM.unsafeWrite v i x
return v
groups = go [] 0 0 $ sortBy (comparing fst) $ zip (U.toList transformed) [0..]
where
go acc i j (((x,y), idx) : rest)
| null acc || i' == i || j' == j = go (idx : acc) i' j' rest
| i' > i || j' > j = U.fromList acc : go [idx] i' j' rest
| otherwise = error "Impossible"
where
i' = truncate $ (x - x_min) / x_step :: Int
j' = truncate $ (y - y_min) / y_step :: Int
-- emit the final, still-open bin once the input is exhausted
go acc _ _ [] = if null acc then [] else [U.fromList acc]
(xs, ys) = U.unzip transformed
x_min = U.minimum xs
x_max = U.maximum xs
x_step = (x_max - x_min) / n
y_min = U.minimum ys
y_max = U.maximum ys
y_step = (y_max - y_min) / n
n = 50
{-# INLINE mkPeakGroup #-} |
{-# OPTIONS --omega-in-omega --no-termination-check --overlapping-instances #-}
module Light.Implementation.Standard where
module Data where
module Empty where open import Light.Implementation.Standard.Data.Empty public
module Unit where open import Light.Implementation.Standard.Data.Unit public
module These where open import Light.Implementation.Standard.Data.These public
module Product where open import Light.Implementation.Standard.Data.Product public
module Relation where
module Decidable where open import Light.Implementation.Standard.Relation.Decidable public
module Sets where open import Light.Implementation.Standard.Relation.Sets public
|
{-# OPTIONS --safe --warning=error --without-K #-}
open import LogicalFormulae
open import Groups.Lemmas
open import Groups.Definition
open import Setoids.Orders.Partial.Definition
open import Setoids.Orders.Total.Definition
open import Setoids.Setoids
open import Functions.Definition
open import Sets.EquivalenceRelations
open import Rings.Definition
open import Rings.Orders.Total.Definition
open import Rings.Orders.Partial.Definition
open import Numbers.Naturals.Semiring
open import Numbers.Naturals.Order
open import Orders.Total.Definition
open import Rings.IntegralDomains.Definition
module Rings.Orders.Total.Lemmas {n m p : _} {A : Set n} {S : Setoid {n} {m} A} {_+_ : A → A → A} {_*_ : A → A → A} {R : Ring S _+_ _*_} {_<_ : Rel {_} {p} A} {pOrder : SetoidPartialOrder S _<_} {pOrderRing : PartiallyOrderedRing R pOrder} (order : TotallyOrderedRing pOrderRing) where
open Ring R
open Group additiveGroup
open Setoid S
open SetoidPartialOrder pOrder
open TotallyOrderedRing order
open SetoidTotalOrder total
open PartiallyOrderedRing pOrderRing
open import Rings.Lemmas R
open import Rings.Orders.Partial.Lemmas pOrderRing
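-- The abstract block below develops basic order arithmetic: negation flips
-- signs (lemm2, lemm2', ringMinusFlipsOrder*), positive/negative factors can
-- be cancelled, signed products have the expected sign, and 0 < 1 whenever
-- the ring is nontrivial.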
abstract
lemm2 : (a : A) → a < 0G → 0G < inverse a
lemm2 a a<0 with totality 0R (inverse a)
lemm2 a a<0 | inl (inl 0<-a) = 0<-a
lemm2 a a<0 | inl (inr -a<0) = exFalso (irreflexive {0G} (SetoidPartialOrder.<Transitive pOrder (<WellDefined (invLeft {a}) (identLeft {a}) (orderRespectsAddition -a<0 a)) a<0))
lemm2 a a<0 | inr 0=-a = exFalso (irreflexive {0G} (<WellDefined (Equivalence.transitive eq (Equivalence.symmetric eq identRight) t) (Equivalence.reflexive eq) a<0))
where
t : a + 0G ∼ 0G
t = Equivalence.transitive eq (+WellDefined (Equivalence.reflexive eq) 0=-a) (invRight {a})
lemm2' : (a : A) → 0G < a → inverse a < 0G
lemm2' a 0<a with totality 0R (inverse a)
lemm2' a 0<a | inl (inl 0<-a) = exFalso (irreflexive {0G} (SetoidPartialOrder.<Transitive pOrder 0<a (<WellDefined (identLeft {a}) (invLeft {a}) (orderRespectsAddition 0<-a a))))
lemm2' a 0<a | inl (inr -a<0) = -a<0
lemm2' a 0<a | inr 0=-a = exFalso (irreflexive {0G} (<WellDefined (Equivalence.reflexive eq) (Equivalence.transitive eq (Equivalence.symmetric eq identRight) t) 0<a))
where
t : a + 0G ∼ 0G
t = Equivalence.transitive eq (+WellDefined (Equivalence.reflexive eq) 0=-a) (invRight {a})
ringMinusFlipsOrder : {x : A} → (Ring.0R R) < x → (Group.inverse (Ring.additiveGroup R) x) < (Ring.0R R)
ringMinusFlipsOrder {x = x} 0<x with totality (Ring.0R R) (Group.inverse (Ring.additiveGroup R) x)
ringMinusFlipsOrder {x} 0<x | inl (inl 0<inv) = exFalso (SetoidPartialOrder.irreflexive pOrder bad')
where
bad : (Group.0G (Ring.additiveGroup R) + Group.0G (Ring.additiveGroup R)) < (x + Group.inverse (Ring.additiveGroup R) x)
bad = ringAddInequalities 0<x 0<inv
bad' : (Group.0G (Ring.additiveGroup R)) < (Group.0G (Ring.additiveGroup R))
bad' = SetoidPartialOrder.<WellDefined pOrder (Group.identRight (Ring.additiveGroup R)) (Group.invRight (Ring.additiveGroup R)) bad
ringMinusFlipsOrder {x} 0<x | inl (inr inv<0) = inv<0
ringMinusFlipsOrder {x} 0<x | inr 0=inv = exFalso (SetoidPartialOrder.irreflexive pOrder (SetoidPartialOrder.<WellDefined pOrder (Equivalence.reflexive (Setoid.eq S)) (groupLemmaMove0G (Ring.additiveGroup R) 0=inv) 0<x))
ringMinusFlipsOrder' : {x : A} → (Group.inverse (Ring.additiveGroup R) x) < (Ring.0R R) → (Ring.0R R) < x
ringMinusFlipsOrder' {x} -x<0 with totality (Ring.0R R) x
ringMinusFlipsOrder' {x} -x<0 | inl (inl 0<x) = 0<x
ringMinusFlipsOrder' {x} -x<0 | inl (inr x<0) = exFalso (SetoidPartialOrder.irreflexive pOrder (SetoidPartialOrder.<WellDefined pOrder (Group.invLeft (Ring.additiveGroup R)) (Group.identRight (Ring.additiveGroup R)) bad))
where
bad : ((Group.inverse (Ring.additiveGroup R) x) + x) < (Group.0G (Ring.additiveGroup R) + Group.0G (Ring.additiveGroup R))
bad = ringAddInequalities -x<0 x<0
ringMinusFlipsOrder' {x} -x<0 | inr 0=x = exFalso (SetoidPartialOrder.irreflexive pOrder (SetoidPartialOrder.<WellDefined pOrder (symmetric (groupLemmaMove0G' (Ring.additiveGroup R) (symmetric 0=x))) (Equivalence.reflexive (Setoid.eq S)) -x<0))
where
open Equivalence eq
ringMinusFlipsOrder'' : {x : A} → x < (Ring.0R R) → (Ring.0R R) < Group.inverse (Ring.additiveGroup R) x
ringMinusFlipsOrder'' {x} x<0 = ringMinusFlipsOrder' (SetoidPartialOrder.<WellDefined pOrder {x} {Group.inverse (Ring.additiveGroup R) (Group.inverse (Ring.additiveGroup R) x)} {Ring.0R R} {Ring.0R R} (Equivalence.symmetric (Setoid.eq S) (invInv (Ring.additiveGroup R))) (Equivalence.reflexive (Setoid.eq S)) x<0)
ringMinusFlipsOrder''' : {x : A} → (Ring.0R R) < (Group.inverse (Ring.additiveGroup R) x) → x < (Ring.0R R)
ringMinusFlipsOrder''' {x} 0<-x = SetoidPartialOrder.<WellDefined pOrder (invInv (Ring.additiveGroup R)) (Equivalence.reflexive (Setoid.eq S)) (ringMinusFlipsOrder 0<-x)
ringCanCancelPositive : {x y c : A} → (Ring.0R R) < c → (x * c) < (y * c) → x < y
ringCanCancelPositive {x} {y} {c} 0<c xc<yc = SetoidPartialOrder.<WellDefined pOrder (Group.identLeft additiveGroup) (Equivalence.transitive eq (symmetric (Group.+Associative additiveGroup)) (Equivalence.transitive eq (Group.+WellDefined additiveGroup reflexive (Group.invLeft additiveGroup)) (Group.identRight additiveGroup))) q''
where
open Equivalence (Setoid.eq S)
have : 0R < ((y * c) + (Group.inverse additiveGroup (x * c)))
have = SetoidPartialOrder.<WellDefined pOrder (Group.invRight additiveGroup) reflexive (orderRespectsAddition xc<yc (Group.inverse additiveGroup _))
p1 : 0R < ((y * c) + ((Group.inverse additiveGroup x) * c))
p1 = SetoidPartialOrder.<WellDefined pOrder reflexive (Group.+WellDefined additiveGroup reflexive (symmetric (Equivalence.transitive eq (*Commutative) (Equivalence.transitive eq ringMinusExtracts (inverseWellDefined additiveGroup *Commutative))))) have
q : 0R < ((y + Group.inverse additiveGroup x) * c)
q = SetoidPartialOrder.<WellDefined pOrder reflexive (Equivalence.transitive eq (Equivalence.transitive eq (Group.+WellDefined additiveGroup *Commutative *Commutative) (symmetric *DistributesOver+)) *Commutative) p1
q' : 0R < (y + Group.inverse additiveGroup x)
q' with totality 0R (y + Group.inverse additiveGroup x)
q' | inl (inl pr) = pr
q' | inl (inr y-x<0) = exFalso (SetoidPartialOrder.irreflexive pOrder (SetoidPartialOrder.<WellDefined pOrder reflexive (Equivalence.transitive eq *Commutative (Ring.timesZero R)) k))
where
f : ((y + inverse x) + (inverse (y + inverse x))) < (0G + inverse (y + inverse x))
f = orderRespectsAddition y-x<0 _
g : 0G < inverse (y + inverse x)
g = SetoidPartialOrder.<WellDefined pOrder invRight identLeft f
h : (0G * c) < ((inverse (y + inverse x)) * c)
h = ringCanMultiplyByPositive 0<c g
i : (0R + (0G * c)) < (((y + inverse x) * c) + ((inverse (y + inverse x)) * c))
i = ringAddInequalities q h
j : 0R < (((y + inverse x) + (inverse (y + inverse x))) * c)
j = SetoidPartialOrder.<WellDefined pOrder (Equivalence.transitive eq identLeft (Equivalence.transitive eq *Commutative (Ring.timesZero R))) (symmetric (Equivalence.transitive eq *Commutative (Equivalence.transitive eq *DistributesOver+ (Group.+WellDefined additiveGroup *Commutative *Commutative)))) i
k : 0R < (0R * c)
k = SetoidPartialOrder.<WellDefined pOrder reflexive (*WellDefined invRight reflexive) j
q' | inr 0=y-x = exFalso (SetoidPartialOrder.irreflexive pOrder (SetoidPartialOrder.<WellDefined pOrder (*WellDefined x=y reflexive) reflexive xc<yc))
where
f : inverse 0G ∼ inverse (y + inverse x)
f = inverseWellDefined additiveGroup 0=y-x
g : 0G ∼ (inverse y) + x
g = Equivalence.transitive eq (symmetric (invIdent additiveGroup)) (Equivalence.transitive eq f (Equivalence.transitive eq (Equivalence.transitive eq (invContravariant additiveGroup) groupIsAbelian) (+WellDefined reflexive (invInv additiveGroup))))
x=y : x ∼ y
x=y = transferToRight additiveGroup (symmetric (Equivalence.transitive eq g groupIsAbelian))
q'' : (0R + x) < ((y + Group.inverse additiveGroup x) + x)
q'' = orderRespectsAddition q' x
ringCanCancelNegative : {x y c : A} → c < (Ring.0R R) → (x * c) < (y * c) → y < x
ringCanCancelNegative {x} {y} {c} c<0 xc<yc = r
where
open Equivalence eq
p0 : 0R < ((y * c) + inverse (x * c))
p0 = SetoidPartialOrder.<WellDefined pOrder invRight reflexive (orderRespectsAddition xc<yc (inverse (x * c)))
p1 : 0R < ((y * c) + ((inverse x) * c))
p1 = SetoidPartialOrder.<WellDefined pOrder reflexive (Group.+WellDefined additiveGroup reflexive (Equivalence.transitive eq (inverseWellDefined additiveGroup *Commutative) (Equivalence.transitive eq (symmetric ringMinusExtracts) *Commutative))) p0
p2 : 0R < ((y + inverse x) * c)
p2 = SetoidPartialOrder.<WellDefined pOrder reflexive (Equivalence.transitive eq (Group.+WellDefined additiveGroup *Commutative *Commutative) (Equivalence.transitive eq (symmetric *DistributesOver+) *Commutative)) p1
q : (y + inverse x) < 0R
q with totality 0R (y + inverse x)
q | inl (inl pr) = exFalso (SetoidPartialOrder.irreflexive pOrder (SetoidPartialOrder.<Transitive pOrder bad c<0))
where
bad : 0R < c
bad = ringCanCancelPositive pr (SetoidPartialOrder.<WellDefined pOrder (symmetric (Equivalence.transitive eq *Commutative (Ring.timesZero R))) *Commutative p2)
q | inl (inr pr) = pr
q | inr 0=y-x = exFalso (SetoidPartialOrder.irreflexive pOrder (SetoidPartialOrder.<WellDefined pOrder (*WellDefined x=y reflexive) reflexive xc<yc))
where
x=y : x ∼ y
x=y = Equivalence.transitive eq (symmetric identLeft) (Equivalence.transitive eq (Group.+WellDefined additiveGroup 0=y-x reflexive) (Equivalence.transitive eq (symmetric (Group.+Associative additiveGroup)) (Equivalence.transitive eq (Group.+WellDefined additiveGroup reflexive invLeft) identRight)))
r : y < x
r = SetoidPartialOrder.<WellDefined pOrder (Equivalence.transitive eq (symmetric (Group.+Associative additiveGroup)) (Equivalence.transitive eq (Group.+WellDefined additiveGroup reflexive (invLeft)) identRight)) (Group.identLeft additiveGroup) (orderRespectsAddition q x)
posTimesNeg : (a b : A) → (0G < a) → (b < 0G) → (a * b) < 0G
posTimesNeg a b 0<a b<0 with orderRespectsMultiplication 0<a (lemm2 _ b<0)
... | bl = <WellDefined (invTwice additiveGroup _) (Equivalence.reflexive eq) (lemm2' _ (<WellDefined (Equivalence.reflexive eq) ringMinusExtracts bl))
negTimesPos : (a b : A) → (a < 0G) → (b < 0G) → 0G < (a * b)
negTimesPos a b a<0 b<0 with orderRespectsMultiplication (lemm2 _ a<0) (lemm2 _ b<0)
... | bl = <WellDefined (Equivalence.reflexive eq) twoNegativesTimes bl
halvePositive : (a : A) → 0R < (a + a) → 0R < a
halvePositive a 0<2a with totality 0R a
halvePositive a 0<2a | inl (inl x) = x
halvePositive a 0<2a | inl (inr a<0) = exFalso (irreflexive {a + a} (SetoidPartialOrder.<Transitive pOrder (<WellDefined (Equivalence.reflexive eq) identRight (ringAddInequalities a<0 a<0)) 0<2a))
halvePositive a 0<2a | inr x = exFalso (irreflexive {0G} (<WellDefined (Equivalence.reflexive eq) (Equivalence.transitive eq (+WellDefined (Equivalence.symmetric eq x) (Equivalence.symmetric eq x)) identRight) 0<2a))
halvePositive' : {a b : A} → (a + a) ∼ b → 0R < b → 0R < a
halvePositive' {a} {b} pr 0<b = halvePositive a (<WellDefined (Equivalence.reflexive eq) (Equivalence.symmetric eq pr) 0<b)
0<1 : (0R ∼ 1R → False) → 0R < 1R
0<1 0!=1 with totality 0R 1R
0<1 0!=1 | inl (inl x) = x
0<1 0!=1 | inl (inr x) = <WellDefined (Equivalence.reflexive eq) (Equivalence.transitive eq twoNegativesTimes identIsIdent) (orderRespectsMultiplication (lemm2 1R x) (lemm2 1R x))
0<1 0!=1 | inr x = exFalso (0!=1 x)
1<0False : (1R < 0R) → False
1<0False 1<0 with orderRespectsMultiplication (lemm2 _ 1<0) (lemm2 _ 1<0)
... | bl = exFalso (irreflexive (SetoidPartialOrder.<Transitive pOrder 1<0 (<WellDefined (Equivalence.reflexive eq) (Equivalence.transitive eq (twoNegativesTimes) identIsIdent) bl)))
orderedImpliesCharNot2 : (0R ∼ 1R → False) → 1R + 1R ∼ 0R → False
orderedImpliesCharNot2 0!=1 x = irreflexive (<WellDefined (identRight {0R}) x (ringAddInequalities (0<1 0!=1) (0<1 0!=1)))
open import Rings.InitialRing R
open Equivalence eq
fromNPreservesOrder' : (0R ∼ 1R → False) → {a b : ℕ} → (fromN a) < (fromN b) → a <N b
fromNPreservesOrder' nontrivial {a} {b} a<b with TotalOrder.totality ℕTotalOrder a b
... | inl (inl x) = x
... | inl (inr x) = exFalso (irreflexive (<Transitive a<b (fromNPreservesOrder (0<1 nontrivial) x)))
... | inr x = exFalso (irreflexive (<WellDefined (fromNWellDefined x) reflexive a<b))
reciprocalPositive : (a b : A) → .(0<a : 0R < a) → (a * b ∼ 1R) → 0R < b
reciprocalPositive a 1/a 0<a ab=1 with totality 0G 1/a
... | inl (inl x) = x
... | inl (inr x) = exFalso (1<0False (<WellDefined (transitive *Commutative ab=1) timesZero' (ringCanMultiplyByPositive 0<a x)))
... | inr x = exFalso (anyComparisonImpliesNontrivial 0<a (transitive (transitive (symmetric timesZero) (*WellDefined reflexive x)) ab=1))
reciprocalPositive' : (a b : A) → .(0<a : 0R < a) → (b * a ∼ 1R) → 0R < b
reciprocalPositive' a 1/a 0<a ab=1 = reciprocalPositive a 1/a 0<a (transitive *Commutative ab=1)
reciprocal<1 : (a b : A) → .(1<a : 1R < a) → (a * b ∼ 1R) → b < 1R
reciprocal<1 a b 0<a ab=1 with totality b 1R
... | inl (inl x) = x
... | inr b=1 = exFalso (irreflexive (<WellDefined (symmetric ab=1) (transitive (symmetric identIsIdent) (transitive *Commutative ((*WellDefined reflexive (symmetric b=1))))) 0<a))
... | inl (inr x) = exFalso (irreflexive (<WellDefined identIsIdent ab=1 (ringMultiplyPositives (0<1 (anyComparisonImpliesNontrivial 0<a)) (0<1 (anyComparisonImpliesNontrivial 0<a)) 0<a x)))
isIntDom : (nonempty : 1R ∼ 0R → False) → IntegralDomain R
IntegralDomain.intDom (isIntDom n) {a} {b} ab=0 a!=0 with totality 0R b
... | inr 0=b = symmetric 0=b
IntegralDomain.intDom (isIntDom n) {a} {b} ab=0 a!=0 | inl (inl 0<b) with totality 0R a
... | inl (inl x) = exFalso (irreflexive (<WellDefined reflexive ab=0 (orderRespectsMultiplication x 0<b)))
... | inl (inr x) = exFalso (irreflexive (<WellDefined ab=0 timesZero' (ringCanMultiplyByPositive 0<b x)))
... | inr x = exFalso (a!=0 (symmetric x))
IntegralDomain.intDom (isIntDom n) {a} {b} ab=0 a!=0 | inl (inr b<0) with totality 0R a
... | inl (inl x) = exFalso (irreflexive (<WellDefined (transitive *Commutative ab=0) timesZero' (ringCanMultiplyByPositive x b<0)))
... | inl (inr x) = exFalso (irreflexive (<WellDefined reflexive (transitive twoNegativesTimes ab=0) (orderRespectsMultiplication (lemm2 a x) (lemm2 b b<0))))
... | inr x = exFalso (a!=0 (symmetric x))
IntegralDomain.nontrivial (isIntDom n) = n
|
import .dvd
import .nat_sub
import .induction
namespace hidden
namespace mynat
private lemma mod_lemma {m n : mynat} :
0 < n ∧ n ≤ m → m - n < m :=
begin
assume h,
cases h with h0n hnm,
have : 0 < m,
apply lt_le_chain n,
assumption,
assumption,
rw zero_lt_iff_succ at h0n,
cases h0n with k hk,
rw hk,
apply sub_succ_lt,
from nzero_iff_zero_lt.mpr this,
end
-- I copied this from Lean source :(
private def mod.F (m : mynat)
(f : Π k, k < m → mynat → mynat) (n : mynat) : mynat :=
if h : 0 < n ∧ n ≤ m then f (m - n) (mod_lemma h) n else m
def mod := well_founded.fix lt_well_founded mod.F
instance: has_mod mynat := ⟨mod⟩
lemma mod_def_aux (x y : mynat) : x % y = if h : 0 < y ∧ y ≤ x then (x - y) % y else x :=
congr_fun (well_founded.fix_eq lt_well_founded mod.F x) y
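-- Example unfolding: `7 % 3 = (7 - 3) % 3 = (4 - 3) % 3 = 1 % 3 = 1`,
-- since `0 < 3 ∧ 3 ≤ 7` and `0 < 3 ∧ 3 ≤ 4` hold but `3 ≤ 1` fails.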
variables {m n k : mynat}
theorem mod_zero: ∀ {m : mynat}, m % 0 = m
| zero := rfl
| (succ m) := rfl
theorem zero_mod: ∀ {m : mynat}, 0 % m = 0
| zero := rfl
| (succ m) := rfl
theorem mod_cancel: (m + n) % n = m % n :=
begin
rw mod_def_aux,
cases n, {
rw dif_neg _, {
rw [zz, mod_zero],
refl,
}, {
from (λ ⟨h, _⟩, lt_nrefl h),
},
}, {
rw dif_pos _, {
rw add_sub,
}, {
split, {
from zero_lt_succ,
}, {
rw add_comm,
from le_to_add,
}
}
}
end
theorem mod_cancel_lots: (m + k * n) % n = m % n :=
begin
induction k with k' hk', {
rw [zz, zero_mul, add_zero],
}, {
rw [succ_mul, ←add_assoc, mod_cancel, hk'],
},
end
theorem zero_mod_iff_dvd: m % n = 0 ↔ n ∣ m :=
begin
split, {
-- probably we'll need to define div before we can do this
sorry,
}, {
assume hnm,
cases hnm with k hk,
rw hk,
rw ←zero_add (k * n),
rw mod_cancel_lots,
rw zero_mod,
},
end
end mynat
end hidden
|
namespace hidden
def divides (m n : ℕ) : Prop := ∃ k, m * k = n
instance : has_dvd nat := ⟨divides⟩
def even (n : ℕ) : Prop := 2 ∣ n
-- BEGIN
def prime (n : ℕ) : Prop :=
n ≥ 2 ∧ ¬∃ m, m > 1 ∧ m < n ∧ (m ∣ n)
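-- e.g. `prime 7` holds: 7 ≥ 2 and no m with 1 < m < 7 divides 7.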
def infinitely_many_primes : Prop :=
∀ n, ∃ p, p > n ∧ prime p
def Fermat_number (n : ℕ) : Prop :=
∃ k : ℕ, 2^(2^k) + 1 = n
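-- The first few Fermat numbers are 3, 5, 17, 257, 65537 (k = 0,…,4).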
def Fermat_prime (n : ℕ) : Prop :=
prime n ∧ Fermat_number n
def infinitely_many_Fermat_primes : Prop :=
∀ n, ∃ fp, fp > n ∧ Fermat_prime fp
-- Every even integer greater than 2 can be expressed as the sum of two primes
def goldbach_conjecture : Prop :=
∀ n, even n ∧ n > 2 → ∃ p q, p + q = n ∧ prime p ∧ prime q
-- Every odd number greater than 5 can be expressed as the sum of three primes
def Goldbach's_weak_conjecture : Prop :=
∀ n, ¬ even n ∧ n > 5 → ∃ p p' p'', p + p' + p'' = n ∧ prime p ∧ prime p' ∧ prime p''
-- no three positive integers a, b, and c satisfy the equation an + bn = cn for
-- any integer value of n greater than 2
def Fermat's_last_theorem : Prop :=
∀ n, n > 2 → ¬∃ a b c, a^n + b^n = c^n ∧ a > 0 ∧ b > 0 ∧ c > 0
-- END
end hidden
|
= = Tenth commandment = =
|
theory Category imports FreeLogic
abbrevs "morphism" = ":\<rightarrow>" and
"wedge" = "\<leftarrow>-()-\<rightarrow>" and
"ptimes" = "\<^bold>\<times>"
begin
(*Begin: some useful parameter settings*)
declare [[ smt_solver = cvc4, smt_oracle = true, smt_timeout = 120]] declare [[ show_types ]]
sledgehammer_params [provers = cvc4 z3 spass e vampire]
nitpick_params [user_axioms, show_all, format = 2]
(*nitpick_params[user_axioms, show_all, format = 2, expect = genuine]*)
(*End: some useful parameter settings*)
section \<open>The basis of category theory\<close>
class category =
fixes domain:: "'a \<Rightarrow> 'a" ("dom _" [108] 109) and
codomain:: "'a\<Rightarrow> 'a" ("cod _" [110] 111) and
composition:: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infix "\<cdot>" 110)
assumes
S1: "E(dom x) \<^bold>\<rightarrow> E x" and
S2: "E(cod y) \<^bold>\<rightarrow> E y" and
S3: "E(x\<cdot>y) \<^bold>\<leftrightarrow> dom x \<simeq> cod y" and
S4: "x\<cdot>(y\<cdot>z) \<cong> (x\<cdot>y)\<cdot>z" and
S5: "x\<cdot>(dom x) \<cong> x" and
S6: "(cod y)\<cdot>y \<cong> y"
context category
begin
subsection\<open>Basic definitions\<close>
definition type where "type x \<equiv> x \<cong> dom x"
definition arrow ("(_):(_)\<rightarrow>(_)" [120,120,120] 119) where
"x:a\<rightarrow>b \<equiv> dom x \<simeq> a \<and> cod x \<simeq> b"
definition wedge ("_\<leftarrow>_- (_) -_\<rightarrow>_" [120,0,0,0,120] 119) where
"a \<leftarrow>f- (x) -g\<rightarrow> b \<equiv> dom f \<simeq> x \<and> cod f \<simeq> a \<and> dom g \<simeq> x \<and> cod g \<simeq> b"
definition monic::"'a \<Rightarrow> bool" where
"monic m \<equiv> \<forall>f g. m\<cdot>f \<simeq> m\<cdot>g \<longrightarrow> f \<simeq> g"
definition epi::"'a \<Rightarrow> bool" where
"epi m \<equiv> \<forall>f g. f\<cdot>m \<simeq> g\<cdot>m \<longrightarrow> f \<simeq> g"
definition isomorphism::"'a \<Rightarrow> bool" where
"isomorphism f \<equiv> \<exists>g. f\<cdot>g \<cong> (dom g) \<and> g\<cdot>f \<cong> (dom f)"
definition isomorphic::"'a \<Rightarrow> 'a \<Rightarrow> bool" where
"isomorphic z y \<equiv> \<exists>f. dom f \<cong> z \<and> cod f \<cong> y \<and> isomorphism f"
definition initial::"'a \<Rightarrow> bool" where
"initial z \<equiv> \<^bold>\<forall>t. (\<exists>!f. dom f \<simeq> z \<and> cod f \<simeq> t)"
definition final::"'a \<Rightarrow> bool" where
"final z \<equiv> \<^bold>\<forall>t. (\<exists>!f. dom f \<simeq> t \<and> cod f \<simeq> z)"
\<comment>\<open>Checking equivalences of definitions\<close>
lemma StrongerInitial1: "(initial z) \<longrightarrow> (\<^bold>\<forall>t. (\<^bold>\<exists>!f. dom f \<simeq> z \<and> cod f \<simeq> t))"
by (metis (no_types) S1 initial_def)
lemma StrongerInitial2: "(\<^bold>\<forall>t. (\<^bold>\<exists>!f. dom f \<simeq> z \<and> cod f \<simeq> t)) \<longrightarrow> initial z"
by (smt S2 initial_def)
lemma WeakerInitial1: "(\<^bold>\<forall>t. (\<exists>!f. dom f \<cong> z \<and> cod f \<cong> t)) \<longrightarrow> initial z"
by (smt S5 category.S2 category.S3 category_axioms initial_def)
lemma WeakerInitial2: "initial z \<longrightarrow> (\<^bold>\<forall>t. (\<exists>!f. dom f \<cong> z \<and> cod f \<cong> t))"
by (smt initial_def)
(*The same as above would do for final*)
end
subsection\<open>Initial and Final Types | Mono and Epi Maps\<close>
lemma (in category) InitialsAreIsomorphic: "initial z \<and> initial y \<longrightarrow> isomorphic z y" (*sledgehammer sledgehammer [remote_leo3]*)
by (smt S1 S3 S5 S6 category_axioms epi_def initial_def isomorphic_def isomorphism_def)
lemma (in category) InitialIsUnique: "\<forall>z. \<forall>f. initial z \<and> (dom f \<cong> z \<and> cod f \<cong> z) \<longrightarrow> z \<cong> f" (*sledgehammer sledgehammer [remote_leo3]*)
by (metis(no_types) S3 S5 S6 initial_def)
lemma (in category) FinalsAreIsomorphic: "final z \<and> final y \<longrightarrow> isomorphic z y" (*sledgehammer sledgehammer [remote_leo3]*)
by (smt S2 S3 S5 final_def isomorphic_def isomorphism_def)
lemma (in category) FinalIsUnique: "\<forall>z. \<forall>f. final z \<and> ( dom f \<cong> z \<and> cod f \<cong> z) \<longrightarrow> z \<cong> f" (*sledgehammer sledgehammer [remote_leo3]*)
by (metis(no_types) S3 S5 S6 final_def)
lemma (in category) TwoMonicsBetweenTypes: "(\<^bold>\<forall>(m::'a) (n::'a). monic m \<and> monic n \<and> dom m \<simeq> dom n \<and> cod m \<simeq> cod n \<longrightarrow> (m\<simeq>n))" nitpick oops
(*Relationship between isomorphisms and epic and monic maps*)
proposition (in category) "isomorphism m \<longrightarrow> monic m \<and> epi m" \<comment>\<open>cvc4 and Leo-III prove this\<close>
(*sledgehammer sledgehammer [remote_leo3]*)
by (smt S1 S3 S4 S5 S6 epi_def isomorphism_def monic_def)
proposition (in category) "monic m \<and> epi m \<longrightarrow> isomorphism m" nitpick oops
subsection \<open>Products\<close>
definition (in category) product::"'a \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool" where
"product a b c p1 p2 \<equiv> p1:c\<rightarrow>a \<and> p2:c\<rightarrow>b \<and>
(\<^bold>\<forall>x f g. (a\<leftarrow>f-(x)-g\<rightarrow>b) \<longrightarrow> (\<^bold>\<exists>!h. h:x\<rightarrow>c \<and> f \<simeq> p1\<cdot>h \<and> g \<simeq> p2\<cdot>h))"
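\<comment>\<open>c with projections p1:c\<rightarrow>a and p2:c\<rightarrow>b is a product of a and b:
   every wedge a \<leftarrow>f-(x)-g\<rightarrow> b factors uniquely through c.\<close>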
lemma (in category) prodIso: "(\<^bold>\<exists>p1 p2. (product a b c p1 p2) \<and> (\<^bold>\<exists>p1 p2. product a b d p1 p2)) \<longrightarrow> isomorphic c d"
unfolding isomorphic_def isomorphism_def product_def arrow_def wedge_def
(* sledgehammer sledgehammer [remote_leo3] *)
by (smt S3 S4 S5)
lemma (in category) sym_prod: "(\<^bold>\<exists>p1 p2. product b a c p1 p2) \<longrightarrow> (\<^bold>\<exists>p1 p2. product a b c p1 p2)"
unfolding product_def arrow_def wedge_def
(* sledgehammer sledgehammer [remote_leo3] *)
by smt
\<comment> \<open>Solved the exercise but writing down proof will take time.\<close>
(*lemma (in category) "((product a b u) \<and> (product u c v) \<and> (product a s t) \<and> (product b c s)) \<longrightarrow> isomorphic v t" using prodIso
unfolding isomorphic_def isomorphism_def product_def arrow_def wedge_def sorry*)
lemma (in category) zero_prod: "\<^bold>\<forall>x a. initial (x) \<longrightarrow> (\<^bold>\<exists>p1 p2. product x a x p1 p2)"
unfolding initial_def product_def arrow_def wedge_def
by (smt local.S3 local.S5 local.S6)
lemma (in category) unit_prod: "\<^bold>\<forall>u a. final (u) \<longrightarrow> (\<^bold>\<exists>p1 p2. product u a a p1 p2)"
unfolding final_def product_def arrow_def wedge_def
by (smt local.S3 local.S5 local.S6)
section \<open>Functor\<close>
definition isFunctor::"('c::category \<Rightarrow> 'd::category) \<Rightarrow> bool" where
"isFunctor F \<equiv> (\<forall>x. E x \<longleftrightarrow> E (F x)) \<and>
(\<forall>x. type x \<longrightarrow> type (F x)) \<and>
(\<forall>x. F (dom x) \<cong> dom (F x)) \<and>
(\<^bold>\<forall>x y. E(x\<cdot>y) \<longrightarrow> (F (x\<cdot>y) \<cong> (F x) \<cdot> (F y)))"
lemma isFunc_codOnCod: "isFunctor F \<longrightarrow> (\<forall>x. F (cod x) \<cong> cod (F x))" unfolding isFunctor_def type_def
by (metis (full_types) S2 S3 S6)
class categoryObj = category +
fixes fstar1::'a ("\<^bold>*\<^sub>1") and
fstar2::'a ("\<^bold>*\<^sub>2")
assumes O1: "E \<^bold>*\<^sub>1" and
O2: "\<not>(E \<^bold>*\<^sub>2)" and
O3: "type \<^bold>*\<^sub>1" and
O4: "type \<^bold>*\<^sub>2"
typedef (overloaded) ('c::category, 'd::categoryObj) Functor = "{F::('c \<Rightarrow>'d). isFunctor F}"
morphisms "getFunctor" "setFunctor"
proof
show "(\<lambda>m. if E m then \<^bold>*\<^sub>1 else \<^bold>*\<^sub>2) \<in> {F::'c \<Rightarrow> 'd. isFunctor F}" unfolding isFunctor_def
apply standard
by (smt O1 O2 O3 S1 S3 S5 type_def)
qed
section \<open>Natural Transformations\<close>
(*This should be a type in order to build the functor category*)
definition isNaturalTransformation::"('c, 'd) Functor \<Rightarrow> ('c, 'd) Functor \<Rightarrow> ('c::category \<Rightarrow> 'd::categoryObj) \<Rightarrow> bool" ("natTrans")
where "natTrans F G \<upsilon> \<equiv> \<^bold>\<forall>f::'c. (\<upsilon> (cod f))\<cdot>(getFunctor F f) \<simeq> (getFunctor G f)\<cdot>(\<upsilon> (dom f))"
\<comment> \<open>Checking the domain of \<upsilon>\<close>
lemma assumes "natTrans F G \<upsilon>" shows "\<^bold>\<forall>f. (getFunctor F) (dom f) \<simeq> dom (\<upsilon> (dom f))"
by (smt S3 S5 assms getFunctor isFunc_codOnCod isNaturalTransformation_def mem_Collect_eq)
\<comment> \<open>It is perhaps better to not introduce the type Functor\<close>
definition isNaturalTransformation2::"('c \<Rightarrow> 'd) \<Rightarrow> ('c \<Rightarrow> 'd) \<Rightarrow> ('c::category \<Rightarrow> 'd::category) \<Rightarrow> bool" ("natTrans2")
where "natTrans2 F G \<upsilon> \<equiv> isFunctor F \<and> isFunctor G \<and>
(\<^bold>\<forall>f::'c. (\<upsilon> (cod f))\<cdot>(F f) \<simeq> (G f)\<cdot>(\<upsilon> (dom f)))"
lemma assumes "natTrans2 F G \<upsilon>" shows "\<^bold>\<forall>f. (F) (dom f) \<simeq> dom (\<upsilon> (dom f))" unfolding isNaturalTransformation2_def
by (smt S3 S5 assms isFunc_codOnCod isNaturalTransformation2_def)
(*
instantiation prod :: (category, category) category
begin
instance sorry
end
definition productFunctor::"('c::category, 'c::category) prod \<Rightarrow> 'c::category" where
"productFunctor \<equiv> \<lambda>(a,b). (SOME f. product (fst (a,b)) (snd (a,b)) f)"
lemma "isFunctor productFunctor" oops
*)
section \<open>Cartesian category\<close>
class categoryProduct = category +
fixes product_func::"'a \<Rightarrow> 'a \<Rightarrow> 'a" (infix "\<^bold>\<times>" 105)
assumes catProd1: "E (a \<^bold>\<times> b) \<longrightarrow> (E a \<and> E b)" and
catProd2: "\<^bold>\<forall>a b. \<^bold>\<exists>p1 p2. product a b (a \<^bold>\<times> b) p1 p2"
\<comment> \<open>e is the equalizer for f and g.\<close>
definition (in category) isEqualizer::"'a \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool"
where "isEqualizer f g e \<equiv> f \<cdot> e \<simeq> g \<cdot> e \<and>
(\<^bold>\<forall>z. f \<cdot> z \<simeq> g \<cdot> z \<longrightarrow> (\<^bold>\<exists>!u. u:(dom z)\<rightarrow>(dom g) \<and> e \<cdot> u \<simeq> z))"
class cartesianCategory = categoryProduct +
fixes finalObject::"'a" ("\<^bold>1")
assumes
cartesian1: "\<forall>f g. (f:(dom g)\<rightarrow>(cod g)) \<longrightarrow> (\<exists>e. isEqualizer f g e)" and
cartesian2: "final \<^bold>1"
begin
lemma "True" nitpick[satisfy] oops
lemma "isomorphic (\<^bold>1 \<^bold>\<times> A) A"
by (metis local.S1 local.S2 local.S3 catProd1 catProd2 local.cartesian2 local.final_def local.isomorphic_def local.isomorphism_def local.prodIso local.unit_prod)
lemma "\<forall> f g e. isEqualizer f g e \<longrightarrow> E e" unfolding isEqualizer_def
using local.S2 local.S3 by blast
end
end
|
lemma kuhn_labelling_lemma':
  assumes "(\<forall>x::nat\<Rightarrow>real. P x \<longrightarrow> P (f x))"
    and "\<forall>x. P x \<longrightarrow> (\<forall>i::nat. Q i \<longrightarrow> 0 \<le> x i \<and> x i \<le> 1)"
  shows "\<exists>l. (\<forall>x i. l x i \<le> (1::nat)) \<and>
    (\<forall>x i. P x \<and> Q i \<and> x i = 0 \<longrightarrow> l x i = 0) \<and>
    (\<forall>x i. P x \<and> Q i \<and> x i = 1 \<longrightarrow> l x i = 1) \<and>
    (\<forall>x i. P x \<and> Q i \<and> l x i = 0 \<longrightarrow> x i \<le> f x i) \<and>
    (\<forall>x i. P x \<and> Q i \<and> l x i = 1 \<longrightarrow> f x i \<le> x i)"
|
State Before: α : Type u
β : Type v
inst✝ : UniformSpace α
s : Set α
⊢ TotallyBounded s ↔ ∀ (f : Ultrafilter α), ↑f ≤ 𝓟 s → Cauchy ↑f
State After: α : Type u
β : Type v
inst✝ : UniformSpace α
s : Set α
H : ∀ (f : Ultrafilter α), ↑f ≤ 𝓟 s → Cauchy ↑f
⊢ ∀ (f : Filter α), NeBot f → f ≤ 𝓟 s → ∃ c, c ≤ f ∧ Cauchy c
Tactic: refine' ⟨fun hs f => f.cauchy_of_totallyBounded hs, fun H => totallyBounded_iff_filter.2 _⟩
State Before: α : Type u
β : Type v
inst✝ : UniformSpace α
s : Set α
H : ∀ (f : Ultrafilter α), ↑f ≤ 𝓟 s → Cauchy ↑f
⊢ ∀ (f : Filter α), NeBot f → f ≤ 𝓟 s → ∃ c, c ≤ f ∧ Cauchy c
State After: α : Type u
β : Type v
inst✝ : UniformSpace α
s : Set α
H : ∀ (f : Ultrafilter α), ↑f ≤ 𝓟 s → Cauchy ↑f
f : Filter α
hf : NeBot f
hfs : f ≤ 𝓟 s
⊢ ∃ c, c ≤ f ∧ Cauchy c
Tactic: intro f hf hfs
State Before: α : Type u
β : Type v
inst✝ : UniformSpace α
s : Set α
H : ∀ (f : Ultrafilter α), ↑f ≤ 𝓟 s → Cauchy ↑f
f : Filter α
hf : NeBot f
hfs : f ≤ 𝓟 s
⊢ ∃ c, c ≤ f ∧ Cauchy c
State After: no goals
Tactic: exact ⟨Ultrafilter.of f, Ultrafilter.of_le f, H _ ((Ultrafilter.of_le f).trans hfs)⟩
|
/***********************************************************************
Copyright (c) 2015, Carnegie Mellon University
All rights reserved.
Authors: Matthew Klingensmith <[email protected]>
Michael Koval <[email protected]>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*************************************************************************/
#include <qapplication.h>
#include <QFileDialog>
#include <QMainWindow>
#include <QMessageBox>
#include <QMenuBar>
#include <QString>
#include <QTimer>
#include <OgreRenderWindow.h>
#include <OgreHardwarePixelBuffer.h>
#include <boost/format.hpp>
#include <rviz/display_group.h>
#include <rviz/render_panel.h>
#include <rviz/visualization_manager.h>
#include "util/ogre_conversions.h"
#include "util/ros_conversions.h"
#include "util/ScopedConnection.h"
#include "RVizViewer.h"
using boost::format;
using boost::str;
static double const kRefreshRate = 30;
static std::string const kOffscreenCameraName = "OffscreenCamera";
static std::string const kInteractiveMarkersDisplayName = "OpenRAVE Markers";
static std::string const kEnvironmentDisplayName = "OpenRAVE Environment";
namespace or_rviz {
/*
* Helpers
*/
namespace detail {
OffscreenRenderRequest::OffscreenRenderRequest()
: done(false)
, width(0)
, height(0)
, depth(0)
, memory(NULL)
{
}
template <typename T>
T *getOrCreateDisplay(::rviz::VisualizationManager *manager,
std::string const &class_name,
std::string const &name,
bool enabled)
{
::rviz::DisplayGroup *display_group = manager->getRootDisplayGroup();
BOOST_ASSERT(display_group);
// Search for an existing display with the same name.
::rviz::Display *this_display = NULL;
for (size_t i = 0; i < display_group->numDisplays(); ++i) {
::rviz::Display *display = display_group->getDisplayAt(i);
BOOST_ASSERT(display);
if (display->getNameStd() == name) {
this_display = display;
break;
}
}
// Update the existing display.
if (this_display) {
std::string const matched_class = this_display->getClassId().toStdString();
if (matched_class != class_name) {
throw OpenRAVE::openrave_exception(
str(format(
"Unable to create RViz display '%s' of type '%s': There is"
" already a display of type '%s' with this name. Try deleting"
" '$HOME/.rviz/config'.\n")
% name % class_name % matched_class),
OpenRAVE::ORE_InvalidState
);
}
this_display->setEnabled(enabled);
RAVELOG_DEBUG("Re-using existing RViz diplay '%s' of type '%s'.\n",
name.c_str(), class_name.c_str()
);
} else {
this_display = manager->createDisplay(
QString::fromStdString(class_name),
QString::fromStdString(name),
true
);
RAVELOG_DEBUG("Creating new RViz diplay '%s' of type '%s'.\n",
name.c_str(), class_name.c_str()
);
}
// Cast to the display subclass. Ideally, we would use a dynamic_cast here
// to check that the Display is of the correct type. Unfortunately, that is
// not possible because recent builds of RViz do not include RTTI. And,
// even if they did, RTTI does not work reliably between shared objects.
return static_cast<T *>(this_display);
}
}
/*
* Public
*/
RVizViewer::RVizViewer(OpenRAVE::EnvironmentBasePtr env,
std::string const &topic_name,
bool anonymize)
: InteractiveMarkerViewer(env, GenerateTopicName(topic_name, anonymize))
, timer_(NULL)
{
initialize();
rviz_manager_ = getManager();
rviz_main_panel_ = rviz_manager_->getRenderPanel();
rviz_scene_manager_ = rviz_manager_->getSceneManager();
markers_display_ = InitializeInteractiveMarkers();
environment_display_ = InitializeEnvironmentDisplay(env);
InitializeOffscreenRendering();
InitializeLighting();
InitializeMenus();
installEventFilter(this);
}
int RVizViewer::main(bool bShow)
{
qApp->setActiveWindow(this);
timer_ = new QTimer(this);
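// A 33 ms interval approximates the 30 Hz kRefreshRate declared above
// (1000 / 30 is roughly 33.3 ms).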
timer_->setInterval(33);
timer_->setSingleShot(false);
timer_->start();
connect(timer_, SIGNAL(timeout()), this, SLOT(EnvironmentSyncSlot()));
running_ = true;
show();
return qApp->exec();
}
void RVizViewer::quitmainloop()
{
// TODO: Disconnect the timer.
running_ = false;
qApp->quit();
}
void RVizViewer::EnvironmentSync()
{
InteractiveMarkerViewer::EnvironmentSync();
environment_display_->EnvironmentSync();
}
void RVizViewer::SetBkgndColor(OpenRAVE::RaveVector<float> const &color)
{
render_panel_->setBackgroundColor(
Ogre::ColourValue(color.x, color.y, color.z));
}
void RVizViewer::SetSize(int w, int h)
{
resize(w, h);
}
void RVizViewer::Move(int x, int y)
{
move(x, y);
}
std::string const &RVizViewer::GetName() const
{
// We need to store the string in a member variable to avoid returning a
// reference to a temporary. This isn't great, but it's the best we can do.
window_title_ = windowTitle().toStdString();
return window_title_;
}
void RVizViewer::SetName(std::string const &name)
{
setWindowTitle(QString::fromStdString(name));
}
OpenRAVE::UserDataPtr RVizViewer::RegisterViewerImageCallback(
OpenRAVE::ViewerBase::ViewerImageCallbackFn const &cb)
{
boost::signals2::connection const con = viewer_image_callbacks_.connect(cb);
return boost::make_shared<util::ScopedConnection>(con);
}
void RVizViewer::SetCamera(OpenRAVE::RaveTransform<float> &trans,
float focalDistance)
{
SetCamera(rviz_main_panel_->getCamera(), trans, focalDistance);
}
OpenRAVE::RaveTransform<float> RVizViewer::GetCameraTransform() const
{
Ogre::Camera *const camera = rviz_main_panel_->getCamera();
OpenRAVE::RaveTransform<float> pose;
pose.trans = util::toORVector<float>(camera->getPosition());
pose.rot = util::toORQuaternion<float>(camera->getOrientation());
return pose;
}
OpenRAVE::geometry::RaveCameraIntrinsics<float> RVizViewer::GetCameraIntrinsics() const
{
Ogre::Camera* camera = rviz_main_panel_->getCamera();
Ogre::Matrix4 projection_matrix = camera->getProjectionMatrix();
OpenRAVE::geometry::RaveCameraIntrinsics<float> intrinsics;
intrinsics.focal_length = camera->getFocalLength();
intrinsics.fx = projection_matrix[0][0];
intrinsics.fy = projection_matrix[1][1];
intrinsics.cx = projection_matrix[0][2];
intrinsics.cy = projection_matrix[1][2];
intrinsics.distortion_model = "";
return intrinsics;
}
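// Usage sketch (hypothetical caller, not part of this file): `camera_pose`
// and the intrinsics values below are illustrative.
//
//   std::vector<uint8_t> buffer;
//   OpenRAVE::SensorBase::CameraIntrinsics intrinsics;
//   intrinsics.focal_length = 0.01;
//   viewer->GetCameraImage(buffer, 640, 480, camera_pose, intrinsics);
//
// The call blocks until the GUI thread services the request in
// ProcessOffscreenRenderRequests() below.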
bool RVizViewer::GetCameraImage(
std::vector<uint8_t> &memory, int width, int height,
OpenRAVE::RaveTransform<float> const &t,
OpenRAVE::SensorBase::CameraIntrinsics const &intrinsics)
{
static int const depth = 24;
BOOST_ASSERT(width > 0);
BOOST_ASSERT(height > 0);
BOOST_ASSERT(depth >= 8 && depth % 8 == 0);
memory.resize(width * height * (depth / 8), 0x00);
detail::OffscreenRenderRequest request;
request.done = false;
request.width = width;
request.height = height;
request.depth = depth;
request.memory = &memory.front();
request.extrinsics = t;
request.intrinsics = intrinsics;
RAVELOG_DEBUG("Submitting OffscreenRenderRequest(%p).\n", &request);
{
// Make the request.
boost::mutex::scoped_lock lock(offscreen_mutex_);
offscreen_requests_.push_back(&request);
// Wait for the request to finish. At this point, the output buffer has
// been populated by the render thread.
while (!request.done) {
offscreen_condition_.wait(lock);
}
}
RAVELOG_DEBUG("Completed OffscreenRenderRequest(%p).\n", &request);
return true;
}
void RVizViewer::ProcessOffscreenRenderRequests()
{
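// Runs on the GUI thread (called from EnvironmentSyncSlot). Each queued
// request corresponds to a GetCameraImage() call blocked on
// offscreen_condition_; we wake it below once request->done is set.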
while (!offscreen_requests_.empty()) {
detail::OffscreenRenderRequest *request;
{
boost::mutex::scoped_lock lock(offscreen_mutex_);
request = offscreen_requests_.front();
offscreen_requests_.pop_front();
}
RAVELOG_DEBUG(
"Processing OffscreenRenderRequest(%p): size = [%d x %d],"
" depth = %d, focal_length = %f.\n",
request, request->width, request->height,
request->depth, request->intrinsics.focal_length
);
// Setup the camera.
//float const &focal_length = request->intrinsics.focal_length;
float const focal_length = 0.785;
SetCamera(offscreen_camera_, request->extrinsics, focal_length);
#if 0
offscreen_camera_->setNearClipDistance(focal_length);
offscreen_camera_->setFarClipDistance(focal_length * 10000);
offscreen_camera_->setAspectRatio(
(request->intrinsics.fy / static_cast<float>(request->height))
/ (request->intrinsics.fx / static_cast<float>(request->width))
);
offscreen_camera_->setFOVy(
Ogre::Radian(2.0f * std::atan(
0.5f * request->height / request->intrinsics.fy))
);
#endif
// Render the texture into a texture.
std::string const name = str(format("offscreen[%p]") % request);
Ogre::PixelFormat const pixel_format = GetPixelFormat(request->depth);
Ogre::TexturePtr const texture
= Ogre::TextureManager::getSingleton().createManual(
name, Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME,
Ogre::TEX_TYPE_2D, request->width, request->height, 0,
pixel_format, Ogre::TU_RENDERTARGET
);
BOOST_ASSERT(!texture.isNull());
// Render into the texture.
Ogre::RenderTexture *render_texture
= texture->getBuffer()->getRenderTarget();
BOOST_ASSERT(render_texture);
render_texture->addViewport(offscreen_camera_);
// Copy the texture into the output buffer.
Ogre::Box const extents(0, 0, request->width, request->height);
Ogre::PixelBox const pb(extents, pixel_format, request->memory);
render_texture->update();
render_texture->copyContentsToMemory(pb, Ogre::RenderTarget::FB_AUTO);
// Delete the texture.
Ogre::TextureManager::getSingleton().unload(name);
Ogre::TextureManager::getSingleton().remove(name);
{
boost::mutex::scoped_lock lock(offscreen_mutex_);
request->done = true;
offscreen_condition_.notify_all();
}
}
}
bool RVizViewer::eventFilter(QObject *o, QEvent *e)
{
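// On every paint event, capture the freshly rendered frame and hand it to
// any callbacks registered via RegisterViewerImageCallback().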
bool result = ::rviz::VisualizationFrame::eventFilter(o, e);
if (e->type() == QEvent::Paint) {
if (!viewer_image_callbacks_.empty()) {
int width, height, bytes_per_pixel;
unsigned char *data = WriteCurrentView(&width, &height,
&bytes_per_pixel);
viewer_image_callbacks_(data, width, height, bytes_per_pixel);
}
}
return result;
}
/*
* Slots
*/
void RVizViewer::LoadEnvironmentSlot()
{
QString file = QFileDialog::getOpenFileName(this, "Load", ".");
if (file.count() > 0) {
if (!GetEnv()->Load(file.toStdString())) {
QMessageBox::warning(this, "Load", "Failed to load environment.");
}
}
}
void RVizViewer::EnvironmentSyncSlot()
{
if (running_) {
if(do_sync_) {
EnvironmentSync();
}
ProcessOffscreenRenderRequests();
viewer_callbacks_();
}
}
/*
* Private
*/
void RVizViewer::InitializeLighting()
{
rviz_scene_manager_->setShadowTechnique(Ogre::SHADOWTYPE_NONE);
Ogre::Light* mainDirectional = rviz_scene_manager_->getLight( "MainDirectional" );
mainDirectional->setCastShadows(false);
Ogre::Light *light = rviz_scene_manager_->createLight("FillLight");
light->setType(Ogre::Light::LT_DIRECTIONAL);
light->setDiffuseColour(0.6, 0.55, 0.5);
light->setSpecularColour(1, 1, 1);
light->setDirection(0.05, 0.01, -1);
light->setCastShadows(true);
Ogre::Light *light2 = rviz_scene_manager_->createLight("BackLight");
light2->setType(Ogre::Light::LT_DIRECTIONAL);
light2->setDiffuseColour(0.2, 0.25, 0.3);
light2->setSpecularColour(1, 1, 1);
light2->setDirection(-0.1, -0.1, 0.05);
light2->setCastShadows(false);
Ogre::Light *light3 = rviz_scene_manager_->createLight("KeyLight");
light3->setType(Ogre::Light::LT_DIRECTIONAL);
light3->setDiffuseColour(0.4, 0.4, 0.4);
light3->setSpecularColour(1, 1, 1);
light3->setDirection(0.1, 0.1, -0.05);
light3->setCastShadows(false);
rviz_scene_manager_->setAmbientLight(Ogre::ColourValue(0.3, 0.3, 0.3));
rviz_scene_manager_->setShadowColour(Ogre::ColourValue(0.3, 0.3, 0.3, 1.0));
}
void RVizViewer::InitializeOffscreenRendering()
{
offscreen_panel_ = new ::rviz::RenderPanel(this);
offscreen_panel_->setVisible(false);
offscreen_main_panel_ = offscreen_panel_->getRenderWindow();
offscreen_main_panel_->setVisible(false);
offscreen_camera_ = rviz_scene_manager_->createCamera(kOffscreenCameraName);
}
void RVizViewer::InitializeMenus()
{
menu_openrave_ = new QMenu("OpenRAVE", this);
menu_openrave_->addAction(LoadEnvironmentAction());
menu_environments_ = menu_openrave_->addMenu("Environments");
menuBar()->addMenu(menu_openrave_);
}
::rviz::InteractiveMarkerDisplay *RVizViewer::InitializeInteractiveMarkers()
{
auto *const display =
detail::getOrCreateDisplay< ::rviz::InteractiveMarkerDisplay>(
rviz_manager_, "rviz/InteractiveMarkers",
kInteractiveMarkersDisplayName, true);
std::string const update_topic = str(format("%s/update") % topic_name_);
display->setTopic(QString::fromStdString(update_topic), "");
return display;
}
rviz::EnvironmentDisplay *RVizViewer::InitializeEnvironmentDisplay(
OpenRAVE::EnvironmentBasePtr const &env)
{
auto *const display =
detail::getOrCreateDisplay<rviz::EnvironmentDisplay>(
rviz_manager_, "or_rviz::rviz::EnvironmentDisplay",
kEnvironmentDisplayName, true);
//display->set_environment(env);
set_parent_frame(rviz_manager_->getFixedFrame().toStdString());
environment_change_handle_
= display->RegisterEnvironmentChangeCallback(
boost::bind(&RVizViewer::set_environment, this, _1));
return display;
}
QAction *RVizViewer::LoadEnvironmentAction()
{
QAction* toReturn = new QAction("Load", this);
connect(toReturn, SIGNAL(triggered(bool)), this, SLOT(LoadEnvironmentSlot()));
return toReturn;
}
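// Note: the buffer returned below is allocated with new[]; ownership passes
// to the caller, which is responsible for releasing it with delete[].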
unsigned char *RVizViewer::WriteCurrentView(int *width, int *height, int *depth)
{
BOOST_ASSERT(width && height && depth);
int left, top;
render_panel_->getViewport()->getActualDimensions(left, top, *width, *height);
Ogre::PixelFormat format = Ogre::PF_BYTE_RGBA;
int outWidth = *width;
int outHeight = *height;
*depth = Ogre::PixelUtil::getNumElemBytes(format);
unsigned char *data = new unsigned char[outWidth * outHeight * *depth];
Ogre::Box extents(left, top, left + *width, top + *height);
Ogre::PixelBox pb(extents, format, data);
render_panel_->getRenderWindow()->copyContentsToMemory(
pb, Ogre::RenderTarget::FB_AUTO);
return data;
}
Ogre::PixelFormat RVizViewer::GetPixelFormat(int depth) const
{
switch (depth) {
case 8:
return Ogre::PF_L8;
case 16:
return Ogre::PF_FLOAT16_GR;
case 24:
return Ogre::PF_R8G8B8;
case 32:
return Ogre::PF_R8G8B8A8;
default:
RAVELOG_ERROR(
"Error: Unsupported depth %d. Supported depths: 8 (gray byte),"
" 16 (float16 gray), 24 (RGB bytes), 32 (RGBA bytes)\n",
depth
);
return Ogre::PF_R8G8B8;
}
}
std::string RVizViewer::GenerateTopicName(std::string const &base_name,
bool anonymize) const
{
if (anonymize) {
return str(format("%s_%p") % base_name % this);
} else {
return base_name;
}
}
void RVizViewer::SetCamera(Ogre::Camera *camera,
OpenRAVE::RaveTransform<float> const &trans,
float focalDistance) const
{
camera->setPosition(util::toOgreVector(trans.trans));
camera->setOrientation(util::toOgreQuaternion(trans.rot));
camera->setFocalLength(std::max(focalDistance, 0.01f));
}
}
|
Formal statement is: lemma convex_on_inverse: assumes "A \<subseteq> {0<..}" shows "convex_on A (inverse :: real \<Rightarrow> real)" Informal statement is: The function $x \mapsto 1/x$ is convex on any set $A$ of positive reals.
# Phase 2: Data import
require(dplyr)
require(tidyr)
require(readr)
require(readxl)
sl <- locale("sl", decimal_mark=",", grouping_mark=".")
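# Slovenian locale (decimal comma, dot as grouping mark), available for
# parsing Slovenian-formatted numbers.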
vremenskipodatki <- read_csv2("podatki/vremenski_podatki_za_analizo_pridelave_hrane/prenos",
col_names=c("st_postaje","ime_postaje","obdobje","","povprecna_temperatura","maksimalna_temperatura"),
skip=3, na="-",
locale=locale(encoding="Windows-1250"))
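
# A quick aggregation example (illustrative only, not in the original script;
# assumes the import above succeeded and dplyr is attached):
povprecja_po_postajah <- vremenskipodatki %>%
  group_by(ime_postaje) %>%
  summarise(povprecna_maksimalna = mean(maksimalna_temperatura, na.rm = TRUE))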
|
/-
Copyright (c) 2022 Michael Stoll. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Michael Stoll
-/
import data.fin.tuple.sort
import data.fintype.perm
import order.well_founded
/-!
# "Bubble sort" induction
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
We implement the following induction principle `tuple.bubble_sort_induction`
on tuples with values in a linear order `α`.
Let `f : fin n → α` and let `P` be a predicate on `fin n → α`. Then we can show that
`f ∘ sort f` satisfies `P` if `f` satisfies `P`, and whenever some `g : fin n → α`
satisfies `P` and `g i > g j` for some `i < j`, then `g ∘ swap i j` also satisfies `P`.
We deduce it from a stronger variant `tuple.bubble_sort_induction'`, which
requires the assumption only for `g` that are permutations of `f`.
The latter is proved by well-founded induction via `well_founded.induction_bot'`
with respect to the lexicographic ordering on the finite set of all permutations of `f`.
-/
namespace tuple
/-- *Bubble sort induction*: Prove that the sorted version of `f` has some property `P`
if `f` satisfies `P` and `P` is preserved on permutations of `f` when swapping two
antitone values. -/
lemma bubble_sort_induction' {n : ℕ} {α : Type*} [linear_order α] {f : fin n → α}
{P : (fin n → α) → Prop} (hf : P f)
(h : ∀ (σ : equiv.perm (fin n)) (i j : fin n),
i < j → (f ∘ σ) j < (f ∘ σ) i → P (f ∘ σ) → P (f ∘ σ ∘ equiv.swap i j)) :
P (f ∘ sort f) :=
begin
letI := @preorder.lift _ (lex (fin n → α)) _ (λ σ : equiv.perm (fin n), to_lex (f ∘ σ)),
refine @well_founded.induction_bot' _ _ _
(@finite.preorder.well_founded_lt (equiv.perm (fin n)) _ _)
(equiv.refl _) (sort f) P (λ σ, f ∘ σ) (λ σ hσ hfσ, _) hf,
obtain ⟨i, j, hij₁, hij₂⟩ := antitone_pair_of_not_sorted' hσ,
exact ⟨σ * equiv.swap i j, pi.lex_desc hij₁ hij₂, h σ i j hij₁ hij₂ hfσ⟩,
end
/-- *Bubble sort induction*: Prove that the sorted version of `f` has some property `P`
if `f` satisfies `P` and `P` is preserved when swapping two antitone values. -/
lemma bubble_sort_induction {n : ℕ} {α : Type*} [linear_order α] {f : fin n → α}
{P : (fin n → α) → Prop} (hf : P f)
(h : ∀ (g : fin n → α) (i j : fin n), i < j → g j < g i → P g → P (g ∘ equiv.swap i j)) :
P (f ∘ sort f) :=
bubble_sort_induction' hf (λ σ, h _)
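/- Usage sketch (illustrative, not part of mathlib): instantiate `P` with the
property of interest and discharge the swap case. For example, with
`P g := ∀ i, g i ∈ set.range f`, the swap hypothesis holds because
`g ∘ equiv.swap i j` merely permutes the values of `g`, so the conclusion
says that every entry of the sorted tuple is a value of `f`. -/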
end tuple
|
(*
* Copyright 2014, General Dynamics C4 Systems
*
* SPDX-License-Identifier: GPL-2.0-only
*)
theory IpcCancel_AI
imports ArchSchedule_AI
begin
context begin interpretation Arch .
requalify_facts
arch_stit_invs
arch_post_cap_deletion_pred_tcb_at
end
declare arch_post_cap_deletion_pred_tcb_at[wp]
lemma blocked_cancel_ipc_simple:
"\<lbrace>tcb_at t\<rbrace> blocked_cancel_ipc ts t \<lbrace>\<lambda>rv. st_tcb_at simple t\<rbrace>"
by (simp add: blocked_cancel_ipc_def | wp sts_st_tcb_at')+
lemma cancel_signal_simple:
"\<lbrace>\<top>\<rbrace> cancel_signal t ntfn \<lbrace>\<lambda>rv. st_tcb_at simple t\<rbrace>"
by (simp add: cancel_signal_def | wp sts_st_tcb_at')+
crunch typ_at: cancel_all_ipc "\<lambda>s. P (typ_at T p s)" (wp: crunch_wps mapM_x_wp)
lemma cancel_all_helper:
" \<lbrace>valid_objs and
(\<lambda>s. \<forall>t \<in> set queue. st_tcb_at (\<lambda>st. \<not> halted st) t s) \<rbrace>
mapM_x (\<lambda>t. do y \<leftarrow> set_thread_state t Structures_A.Restart;
do_extended_op (tcb_sched_enqueue_ext t) od) queue
\<lbrace>\<lambda>rv. valid_objs\<rbrace>"
apply (rule hoare_strengthen_post)
apply (rule mapM_x_wp [where S="set queue", simplified])
apply (wp, simp, wp hoare_vcg_const_Ball_lift sts_st_tcb_at_cases, simp)
apply (clarsimp elim: pred_tcb_weakenE)
apply (erule(1) my_BallE)
apply (clarsimp simp: st_tcb_def2)
apply (frule(1) valid_tcb_objs)
apply (clarsimp simp: valid_tcb_def valid_tcb_state_def
cte_wp_at_cases tcb_cap_cases_def
dest!: get_tcb_SomeD)
apply simp+
done
lemma cancel_all_ipc_valid_objs:
"\<lbrace>valid_objs and (\<lambda>s. sym_refs (state_refs_of s))\<rbrace>
cancel_all_ipc ptr \<lbrace>\<lambda>_. valid_objs\<rbrace>"
apply (simp add: cancel_all_ipc_def)
apply (rule hoare_seq_ext [OF _ get_simple_ko_sp])
apply (case_tac ep, simp_all add: get_ep_queue_def)
apply (wp, simp)
apply (wp cancel_all_helper hoare_vcg_const_Ball_lift
| clarsimp simp: ep_queued_st_tcb_at obj_at_def valid_ep_def)+
done
crunch typ_at: cancel_all_signals "\<lambda>s. P (typ_at T p s)" (wp: crunch_wps mapM_x_wp)
lemma unbind_notification_valid_objs_helper:
"valid_ntfn ntfn s \<longrightarrow> valid_ntfn (ntfn_set_bound_tcb ntfn None) s "
by (clarsimp simp: valid_bound_tcb_def valid_ntfn_def
split: option.splits ntfn.splits)
lemma unbind_notification_valid_objs:
"\<lbrace>valid_objs\<rbrace>
unbind_notification ptr \<lbrace>\<lambda>rv. valid_objs\<rbrace>"
unfolding unbind_notification_def
apply (wp thread_set_valid_objs_triv set_simple_ko_valid_objs hoare_drop_imp | wpc
| simp add: tcb_cap_cases_def
| strengthen unbind_notification_valid_objs_helper)+
apply (wp thread_get_wp' | simp add:get_bound_notification_def)+
apply (clarsimp)
apply (erule (1) obj_at_valid_objsE)
apply (clarsimp simp:valid_obj_def valid_tcb_def)+
done
lemma cancel_all_signals_valid_objs:
"\<lbrace>valid_objs and (\<lambda>s. sym_refs (state_refs_of s))\<rbrace>
cancel_all_signals ptr \<lbrace>\<lambda>rv. valid_objs\<rbrace>"
apply (simp add: cancel_all_signals_def unbind_maybe_notification_def)
apply (rule hoare_seq_ext [OF _ get_simple_ko_sp])
apply (rule hoare_pre)
apply (wp unbind_notification_valid_objs | wpc | simp_all add:unbind_maybe_notification_def)+
apply (wp cancel_all_helper hoare_vcg_const_Ball_lift
set_simple_ko_valid_objs unbind_notification_valid_objs
| clarsimp simp: ntfn_queued_st_tcb_at obj_at_def
valid_ntfn_def valid_bound_tcb_def
| wpc)+
apply (clarsimp split: option.splits)
apply (erule (1) valid_objsE)
apply (simp add: valid_obj_def valid_ntfn_def)
done
lemma get_ep_queue_inv[wp]:
"\<lbrace>P\<rbrace> get_ep_queue ep \<lbrace>\<lambda>_. P\<rbrace>"
by (cases ep, simp_all add: get_ep_queue_def)
lemma cancel_all_ipc_st_tcb_at:
assumes x[simp]: "P Structures_A.Restart" shows
"\<lbrace>st_tcb_at P t\<rbrace> cancel_all_ipc epptr \<lbrace>\<lambda>rv. st_tcb_at P t\<rbrace>"
unfolding cancel_all_ipc_def
by (wp ep_cases_weak_wp mapM_x_wp' sts_st_tcb_at_cases | simp)+
lemmas cancel_all_ipc_makes_simple[wp] =
cancel_all_ipc_st_tcb_at[where P=simple, simplified]
lemma unbind_notification_st_tcb_at[wp]:
"\<lbrace>st_tcb_at P t\<rbrace> unbind_notification t' \<lbrace>\<lambda>rv. st_tcb_at P t\<rbrace>"
unfolding unbind_notification_def
by (wp thread_set_no_change_tcb_state hoare_drop_imps | wpc | simp)+
lemma unbind_maybe_notification_st_tcb_at[wp]:
"\<lbrace>st_tcb_at P t\<rbrace> unbind_maybe_notification r \<lbrace>\<lambda>rv. st_tcb_at P t \<rbrace>"
unfolding unbind_maybe_notification_def
apply (rule hoare_pre)
apply (wp thread_set_no_change_tcb_state hoare_drop_imps| wpc | simp)+
done
lemma cancel_all_signals_st_tcb_at:
assumes x[simp]: "P Structures_A.Restart" shows
"\<lbrace>st_tcb_at P t\<rbrace> cancel_all_signals ntfnptr \<lbrace>\<lambda>rv. st_tcb_at P t\<rbrace>"
unfolding cancel_all_signals_def unbind_maybe_notification_def
by (wp ntfn_cases_weak_wp mapM_x_wp' sts_st_tcb_at_cases
hoare_drop_imps unbind_notification_st_tcb_at
| simp | wpc)+
lemmas cancel_all_signals_makes_simple[wp] =
cancel_all_signals_st_tcb_at[where P=simple, simplified]
lemma get_blocking_object_inv[wp]:
"\<lbrace>P\<rbrace> get_blocking_object st \<lbrace>\<lambda>_. P\<rbrace>"
by (cases st, simp_all add: get_blocking_object_def)
lemma blocked_ipc_st_tcb_at_general:
"\<lbrace>st_tcb_at P t' and K (t = t' \<longrightarrow> P Structures_A.Inactive)\<rbrace>
blocked_cancel_ipc st t
\<lbrace>\<lambda>rv. st_tcb_at P t'\<rbrace>"
apply (simp add: blocked_cancel_ipc_def)
apply (wp sts_st_tcb_at_cases static_imp_wp, simp+)
done
lemma cancel_signal_st_tcb_at_general:
"\<lbrace>st_tcb_at P t' and K (t = t' \<longrightarrow> (P Structures_A.Running \<and> P Structures_A.Inactive))\<rbrace>
cancel_signal t ntfn
\<lbrace>\<lambda>rv. st_tcb_at P t'\<rbrace>"
apply (simp add: cancel_signal_def)
apply (wp sts_st_tcb_at_cases ntfn_cases_weak_wp static_imp_wp)
apply simp
done
lemma fast_finalise_misc[wp]:
"\<lbrace>st_tcb_at simple t \<rbrace> fast_finalise a b \<lbrace>\<lambda>_. st_tcb_at simple t\<rbrace>"
apply (case_tac a,simp_all)
apply (wp|clarsimp)+
done
locale IpcCancel_AI =
fixes state_ext :: "('a::state_ext) itself"
assumes arch_post_cap_deletion_typ_at[wp]:
"\<And>P T p acap. \<lbrace>\<lambda>(s :: 'a state). P (typ_at T p s)\<rbrace> arch_post_cap_deletion acap \<lbrace>\<lambda>rv s. P (typ_at T p s)\<rbrace>"
assumes arch_post_cap_deletion_idle_thread[wp]:
"\<And>P acap. \<lbrace>\<lambda>(s :: 'a state). P (idle_thread s)\<rbrace> arch_post_cap_deletion acap \<lbrace>\<lambda>rv s. P (idle_thread s)\<rbrace>"
crunches update_restart_pc
for typ_at[wp]: "\<lambda>s. P (typ_at ty ptr s)"
(* NB: Q needed for following has_reply_cap proof *)
and cte_wp_at[wp]: "\<lambda>s. Q (cte_wp_at P cte s)"
and idle_thread[wp]: "\<lambda>s. P (idle_thread s)"
and pred_tcb_at[wp]: "\<lambda>s. pred_tcb_at P proj tcb s"
and invs[wp]: "\<lambda>s. invs s"
lemma update_restart_pc_has_reply_cap[wp]:
"\<lbrace>\<lambda>s. \<not> has_reply_cap t s\<rbrace> update_restart_pc t \<lbrace>\<lambda>_ s. \<not> has_reply_cap t s\<rbrace>"
apply (simp add: has_reply_cap_def)
apply (wp hoare_vcg_all_lift)
done
crunch st_tcb_at_simple[wp]: reply_cancel_ipc "st_tcb_at simple t"
(wp: crunch_wps select_wp sts_st_tcb_at_cases thread_set_no_change_tcb_state
simp: crunch_simps unless_def)
lemma cancel_ipc_simple [wp]:
"\<lbrace>\<top>\<rbrace> cancel_ipc t \<lbrace>\<lambda>rv. st_tcb_at simple t\<rbrace>"
apply (simp add: cancel_ipc_def)
apply (rule hoare_seq_ext [OF _ gts_sp])
apply (case_tac state, simp_all)
apply (wp hoare_strengthen_post [OF blocked_cancel_ipc_simple]
hoare_strengthen_post [OF cancel_signal_simple]
hoare_strengthen_post
[OF reply_cancel_ipc_st_tcb_at_simple [where t=t]]
sts_st_tcb_at_cases
hoare_drop_imps
| clarsimp elim!: pred_tcb_weakenE pred_tcb_at_tcb_at)+
done
lemma blocked_cancel_ipc_typ_at[wp]:
"\<lbrace>\<lambda>s. P (typ_at T p s)\<rbrace> blocked_cancel_ipc st t \<lbrace>\<lambda>rv s. P (typ_at T p s)\<rbrace>"
apply (simp add: blocked_cancel_ipc_def get_blocking_object_def get_ep_queue_def
get_simple_ko_def)
apply (wp get_object_wp|wpc)+
apply simp
done
lemma blocked_cancel_ipc_tcb_at [wp]:
"\<lbrace>tcb_at t\<rbrace> blocked_cancel_ipc st t' \<lbrace>\<lambda>rv. tcb_at t\<rbrace>"
by (simp add: tcb_at_typ) wp
context IpcCancel_AI begin
crunch typ_at[wp]: cancel_ipc, reply_cancel_ipc, unbind_maybe_notification
"\<lambda>(s :: 'a state). P (typ_at T p s)"
(wp: crunch_wps hoare_vcg_if_splitE select_wp
simp: crunch_simps unless_def)
lemma cancel_ipc_tcb [wp]:
"\<lbrace>tcb_at t\<rbrace> cancel_ipc t' \<lbrace>\<lambda>rv. (tcb_at t) :: 'a state \<Rightarrow> bool\<rbrace>"
by (simp add: tcb_at_typ) wp
end
lemma gbep_ret:
"\<lbrakk> st = Structures_A.BlockedOnReceive epPtr pl' \<or>
st = Structures_A.BlockedOnSend epPtr pl \<rbrakk> \<Longrightarrow>
get_blocking_object st = return epPtr"
by (auto simp add: get_blocking_object_def)
lemma st_tcb_at_valid_st2:
"\<lbrakk> st_tcb_at ((=) st) t s; valid_objs s \<rbrakk> \<Longrightarrow> valid_tcb_state st s"
apply (clarsimp simp add: valid_objs_def get_tcb_def pred_tcb_at_def
obj_at_def)
apply (drule_tac x=t in bspec)
apply (erule domI)
apply (simp add: valid_obj_def valid_tcb_def)
done
definition
"emptyable \<equiv> \<lambda>p s. (tcb_at (fst p) s \<and> snd p = tcb_cnode_index 2) \<longrightarrow>
st_tcb_at halted (fst p) s"
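(* Informally: a slot is emptyable if, whenever it is the master reply slot
   (tcb_cnode_index 2) of a TCB, that thread is already halted; clearing such
   a slot therefore cannot strand a live thread that is awaiting a reply.
   The locale below packages the facts about cap_delete_one that the generic
   lemmas in this theory rely on: it preserves the invariants on emptyable
   slots, it really empties the slot, and it changes caps_of_state only at
   the deleted slot. *)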
locale delete_one_abs = IpcCancel_AI state_ext
for state_ext :: "('a :: state_ext) itself" +
assumes delete_one_invs:
"\<And>p. \<lbrace>invs and emptyable p\<rbrace> (cap_delete_one p :: (unit,'a) s_monad) \<lbrace>\<lambda>rv. invs\<rbrace>"
assumes delete_one_deletes:
"\<lbrace>\<top>\<rbrace> (cap_delete_one sl :: (unit,'a) s_monad) \<lbrace>\<lambda>rv. cte_wp_at (\<lambda>c. c = cap.NullCap) sl\<rbrace>"
assumes delete_one_caps_of_state:
"\<And>P p. \<lbrace>\<lambda>s. cte_wp_at can_fast_finalise p s
\<longrightarrow> P ((caps_of_state s) (p \<mapsto> cap.NullCap))\<rbrace>
(cap_delete_one p :: (unit,'a) s_monad)
\<lbrace>\<lambda>rv s. P (caps_of_state s)\<rbrace>"
lemma reply_master_no_descendants_no_reply:
"\<lbrakk> valid_mdb s; valid_reply_masters s; tcb_at t s \<rbrakk> \<Longrightarrow>
descendants_of (t, tcb_cnode_index 2) (cdt s) = {} \<longrightarrow> \<not> has_reply_cap t s"
by (fastforce simp: invs_def valid_state_def valid_mdb_def has_reply_cap_def
cte_wp_at_caps_of_state reply_mdb_def is_reply_cap_to_def
reply_caps_mdb_def descendants_of_def cdt_parent_defs
dest: reply_master_caps_of_stateD tranclD)
lemma reply_cap_unique_descendant:
"\<lbrakk> invs s; tcb_at t s \<rbrakk> \<Longrightarrow>
\<forall>sl\<in>descendants_of (t, tcb_cnode_index 2) (cdt s). \<forall>sl'. sl' \<noteq> sl \<longrightarrow>
sl' \<notin> descendants_of (t, tcb_cnode_index 2) (cdt s)"
apply (subgoal_tac "cte_wp_at (\<lambda>c. (is_master_reply_cap c \<and> obj_ref_of c = t)
\<or> c = cap.NullCap)
(t, tcb_cnode_index 2) s")
apply (clarsimp simp: invs_def valid_state_def valid_mdb_def2 is_cap_simps
valid_reply_caps_def cte_wp_at_caps_of_state)
apply (erule disjE)
apply (fastforce simp: reply_mdb_def is_cap_simps dest: unique_reply_capsD)
apply (fastforce dest: mdb_cte_at_Null_descendants)
apply (clarsimp simp: tcb_cap_wp_at invs_valid_objs
tcb_cap_cases_def is_cap_simps)
done
lemma reply_master_one_descendant:
"\<lbrakk> invs s; tcb_at t s; descendants_of (t, tcb_cnode_index 2) (cdt s) \<noteq> {} \<rbrakk>
\<Longrightarrow> \<exists>sl. descendants_of (t, tcb_cnode_index 2) (cdt s) = {sl}"
by (fastforce elim: construct_singleton dest: reply_cap_unique_descendant)
lemma ep_redux_simps2:
"ep \<noteq> Structures_A.IdleEP \<Longrightarrow>
valid_ep (case xs of [] \<Rightarrow> Structures_A.endpoint.IdleEP
| a # list \<Rightarrow> update_ep_queue ep xs)
= (\<lambda>s. distinct xs \<and> (\<forall>t\<in>set xs. tcb_at t s))"
"ep \<noteq> Structures_A.IdleEP \<Longrightarrow>
ep_q_refs_of (case xs of [] \<Rightarrow> Structures_A.endpoint.IdleEP
| a # list \<Rightarrow> update_ep_queue ep xs)
= (set xs \<times> {case ep of Structures_A.SendEP xs \<Rightarrow> EPSend | Structures_A.RecvEP xs \<Rightarrow> EPRecv})"
by (cases ep, simp_all cong: list.case_cong add: ep_redux_simps)+
lemma gbi_ep_sp:
"\<lbrace>P\<rbrace>
get_blocking_object st
\<lbrace>\<lambda>ep. P and K ((\<exists>d. st = Structures_A.BlockedOnReceive ep d)
\<or> (\<exists>d. st = Structures_A.BlockedOnSend ep d))\<rbrace>"
apply (cases st, simp_all add: get_blocking_object_def)
apply (wp | simp)+
done
lemma get_epq_sp:
"\<lbrace>P\<rbrace>
get_ep_queue ep
\<lbrace>\<lambda>q. P and K (ep \<in> {Structures_A.SendEP q, Structures_A.RecvEP q})\<rbrace>"
apply (simp add: get_ep_queue_def)
apply (cases ep)
apply (wp|simp)+
done
lemma refs_in_tcb_bound_refs:
"(x, ref) \<in> tcb_bound_refs ntfn \<Longrightarrow> ref = TCBBound"
by (auto simp: tcb_bound_refs_def split: option.splits)
lemma refs_in_ntfn_bound_refs:
"(x, ref) \<in> ntfn_bound_refs tcb \<Longrightarrow> ref = NTFNBound"
by (auto simp: ntfn_bound_refs_def split: option.splits)
lemma blocked_cancel_ipc_invs:
"\<lbrace>invs and st_tcb_at ((=) st) t\<rbrace> blocked_cancel_ipc st t \<lbrace>\<lambda>rv. invs\<rbrace>"
apply (simp add: blocked_cancel_ipc_def)
apply (rule hoare_seq_ext [OF _ gbi_ep_sp])
apply (rule hoare_seq_ext [OF _ get_simple_ko_sp])
apply (rule hoare_seq_ext [OF _ get_epq_sp])
apply (simp add: invs_def valid_state_def valid_pspace_def)
apply (rule hoare_pre, wp valid_irq_node_typ sts_only_idle)
apply (simp add: valid_tcb_state_def)
apply (strengthen reply_cap_doesnt_exist_strg)
apply simp
apply (wp valid_irq_node_typ valid_ioports_lift)
apply (subgoal_tac "ep \<noteq> Structures_A.IdleEP")
apply (clarsimp simp: ep_redux_simps2 cong: if_cong)
apply (frule(1) if_live_then_nonz_capD, (clarsimp simp: live_def)+)
apply (frule ko_at_state_refs_ofD)
apply (erule(1) obj_at_valid_objsE, clarsimp simp: valid_obj_def)
apply (frule st_tcb_at_state_refs_ofD)
apply (subgoal_tac "epptr \<notin> set (remove1 t queue)")
apply (case_tac ep, simp_all add: valid_ep_def)[1]
apply (auto elim!: delta_sym_refs pred_tcb_weaken_strongerE
simp: obj_at_def is_ep_def2 idle_not_queued refs_in_tcb_bound_refs
dest: idle_no_refs
split: if_split_asm)[2]
apply (case_tac ep, simp_all add: valid_ep_def)[1]
apply (clarsimp, drule(1) bspec, clarsimp simp: obj_at_def is_tcb_def)+
apply fastforce
done
lemma symreftype_inverse':
"symreftype ref = ref' \<Longrightarrow> ref = symreftype ref'"
by (cases ref) simp_all
lemma cancel_signal_invs:
"\<lbrace>invs and st_tcb_at ((=) (Structures_A.BlockedOnNotification ntfn)) t\<rbrace>
cancel_signal t ntfn
\<lbrace>\<lambda>rv. invs\<rbrace>"
apply (simp add: cancel_signal_def
invs_def valid_state_def valid_pspace_def)
apply (rule hoare_seq_ext [OF _ get_simple_ko_sp])
apply (case_tac "ntfn_obj ntfna", simp_all)[1]
apply (rule hoare_pre)
apply (wp set_simple_ko_valid_objs valid_irq_node_typ sts_only_idle valid_ioports_lift
| simp add: valid_tcb_state_def
| strengthen reply_cap_doesnt_exist_strg
| wpc)+
apply (clarsimp simp: ep_redux_simps cong: list.case_cong if_cong)
apply (frule(1) if_live_then_nonz_capD, (clarsimp simp: live_def)+)
apply (frule ko_at_state_refs_ofD)
apply (frule st_tcb_at_state_refs_ofD)
apply (erule(1) obj_at_valid_objsE, clarsimp simp: valid_obj_def valid_ntfn_def)
apply (case_tac ntfna)
apply clarsimp
apply (rule conjI)
apply (clarsimp simp: pred_tcb_at_def obj_at_def)
apply clarsimp
apply (rule conjI)
apply (clarsimp split:option.split)
apply (rule conjI, erule delta_sym_refs)
apply (clarsimp split: if_split_asm)+
apply (fastforce dest: refs_in_tcb_bound_refs refs_in_ntfn_bound_refs symreftype_inverse')
apply (fastforce simp: obj_at_def is_ntfn idle_not_queued
dest: idle_no_refs elim: pred_tcb_weakenE)
done
lemma reply_mdb_cte_at_master_None:
"\<lbrakk> reply_mdb m cs; mdb_cte_at (\<lambda>p. \<exists>c. cs p = Some c \<and> cap.NullCap \<noteq> c) m;
cs ptr = Some cap; is_master_reply_cap cap \<rbrakk> \<Longrightarrow>
m ptr = None"
unfolding reply_mdb_def reply_masters_mdb_def
by (fastforce simp: is_cap_simps)
lemma reply_slot_not_descendant:
"\<And>ptr. \<lbrakk> invs s; tcb_at t s \<rbrakk> \<Longrightarrow>
(t, tcb_cnode_index 2) \<notin> descendants_of ptr (cdt s)"
apply (subgoal_tac "cte_wp_at (\<lambda>c. is_master_reply_cap c \<or> c = cap.NullCap)
(t, tcb_cnode_index 2) s")
apply (fastforce simp: invs_def valid_state_def valid_mdb_def2
cte_wp_at_caps_of_state
dest: reply_mdb_cte_at_master_None mdb_cte_at_Null_None
descendants_of_NoneD)
apply (clarsimp simp: tcb_cap_wp_at invs_valid_objs
tcb_cap_cases_def)
done
lemma reply_cancel_ipc_invs:
assumes delete: "\<And>p. \<lbrace>invs and emptyable p\<rbrace>
(cap_delete_one p :: (unit,'z::state_ext) s_monad) \<lbrace>\<lambda>rv. invs\<rbrace>"
shows "\<lbrace>invs\<rbrace> (reply_cancel_ipc t :: (unit,'z::state_ext) s_monad) \<lbrace>\<lambda>rv. invs\<rbrace>"
apply (simp add: reply_cancel_ipc_def)
apply (wp delete select_wp)
apply (rule_tac Q="\<lambda>rv. invs" in hoare_post_imp)
apply (fastforce simp: emptyable_def dest: reply_slot_not_descendant)
apply (wp thread_set_invs_trivial)
apply (auto simp: tcb_cap_cases_def)+
done
lemma (in delete_one_abs) cancel_ipc_invs[wp]:
"\<lbrace>invs\<rbrace> (cancel_ipc t :: (unit,'a) s_monad) \<lbrace>\<lambda>rv. invs\<rbrace>"
apply (simp add: cancel_ipc_def)
apply (rule hoare_seq_ext [OF _ gts_sp])
apply (case_tac state, simp_all)
apply (auto intro!: hoare_weaken_pre [OF return_wp]
hoare_weaken_pre [OF blocked_cancel_ipc_invs]
hoare_weaken_pre [OF cancel_signal_invs]
hoare_weaken_pre [OF reply_cancel_ipc_invs]
delete_one_invs
elim!: pred_tcb_weakenE)
done
context IpcCancel_AI begin
lemma cancel_ipc_valid_cap:
"\<lbrace>valid_cap c\<rbrace> cancel_ipc p \<lbrace>\<lambda>_. (valid_cap c) :: 'a state \<Rightarrow> bool\<rbrace>"
by (wp valid_cap_typ)
lemma cancel_ipc_cte_at[wp]:
"\<lbrace>cte_at p\<rbrace> cancel_ipc t \<lbrace>\<lambda>_. (cte_at p) :: 'a state \<Rightarrow> bool\<rbrace>"
by (wp valid_cte_at_typ)
end
lemma valid_ep_queue_subset:
"\<lbrace>\<lambda>s. valid_ep ep s\<rbrace>
get_ep_queue ep
\<lbrace>\<lambda>queue s. valid_ep (case (remove1 t queue) of [] \<Rightarrow> Structures_A.endpoint.IdleEP
| a # list \<Rightarrow> update_ep_queue ep (remove1 t queue)) s\<rbrace>"
apply (simp add: get_ep_queue_def)
apply (case_tac ep, simp_all)
apply wp
apply (clarsimp simp: ep_redux_simps2 valid_ep_def)
apply wp
apply (clarsimp simp: ep_redux_simps2 valid_ep_def)
done
lemma blocked_cancel_ipc_valid_objs[wp]:
"\<lbrace>valid_objs\<rbrace> blocked_cancel_ipc st t \<lbrace>\<lambda>_. valid_objs\<rbrace>"
apply (simp add: blocked_cancel_ipc_def)
apply (wp get_simple_ko_valid[where f=Endpoint, simplified valid_ep_def2[symmetric]]
valid_ep_queue_subset
| simp only: valid_inactive simp_thms
cong: imp_cong
| rule hoare_drop_imps
| clarsimp)+
done
lemma cancel_signal_valid_objs[wp]:
"\<lbrace>valid_objs\<rbrace> cancel_signal t ntfnptr \<lbrace>\<lambda>_. valid_objs\<rbrace>"
apply (simp add: cancel_signal_def)
apply (rule hoare_seq_ext [OF _ get_simple_ko_sp])
apply (rule hoare_pre)
apply (wp set_simple_ko_valid_objs
| simp only: valid_inactive
| simp
| wpc)+
apply (clarsimp simp: ep_redux_simps cong: list.case_cong)
apply (erule(1) obj_at_valid_objsE)
apply (clarsimp simp: valid_obj_def valid_ntfn_def)
apply (auto split: option.splits list.splits)
done
lemma tcb_in_valid_state:
"\<lbrakk> st_tcb_at P t s; valid_objs s \<rbrakk> \<Longrightarrow> \<exists>st. P st \<and> valid_tcb_state st s"
apply (clarsimp simp add: valid_objs_def pred_tcb_at_def obj_at_def)
apply (erule my_BallE, erule domI)
apply (simp add: valid_obj_def valid_tcb_def)
apply blast
done
lemma no_refs_simple_strg:
"st_tcb_at simple t s \<and> P {} \<longrightarrow> st_tcb_at (\<lambda>st. P (tcb_st_refs_of st)) t s"
by (fastforce elim!: pred_tcb_weakenE)+
crunch it[wp]: cancel_all_ipc "\<lambda>s. P (idle_thread s)"
(wp: crunch_wps select_wp simp: unless_def crunch_simps)
crunch it[wp]: cancel_all_signals, fast_finalise, unbind_notification "\<lambda>s. P (idle_thread s)"
(wp: crunch_wps select_wp simp: unless_def crunch_simps)
context IpcCancel_AI begin
crunch it[wp]: reply_cancel_ipc "\<lambda>(s::'a state). P (idle_thread s)"
(wp: crunch_wps select_wp simp: unless_def crunch_simps)
crunch it[wp]: cancel_ipc "\<lambda>(s :: 'a state). P (idle_thread s)"
end
lemma reply_cap_descends_from_master:
"\<lbrakk> invs s; tcb_at t s \<rbrakk> \<Longrightarrow>
\<forall>sl\<in>descendants_of (t, tcb_cnode_index 2) (cdt s). \<forall>sl' R. sl' \<noteq> sl \<longrightarrow>
caps_of_state s sl' \<noteq> Some (cap.ReplyCap t False R)"
apply (subgoal_tac "cte_wp_at (\<lambda>c. (is_master_reply_cap c \<and> obj_ref_of c = t)
\<or> c = cap.NullCap)
(t, tcb_cnode_index 2) s")
apply (clarsimp simp: invs_def valid_state_def valid_mdb_def2 is_cap_simps
valid_reply_caps_def cte_wp_at_caps_of_state)
apply (erule disjE)
apply (unfold reply_mdb_def reply_masters_mdb_def)[1]
apply (elim conjE)
apply (erule_tac x="(t, tcb_cnode_index 2)" in allE)
apply (erule_tac x=t in allE)+
apply (fastforce simp: unique_reply_caps_def is_cap_simps)
apply (fastforce dest: mdb_cte_at_Null_descendants)
apply (clarsimp simp: tcb_cap_wp_at invs_valid_objs
tcb_cap_cases_def is_cap_simps)
done
lemma (in delete_one_abs) reply_cancel_ipc_no_reply_cap[wp]:
notes hoare_pre
shows "\<lbrace>invs and tcb_at t\<rbrace> (reply_cancel_ipc t :: (unit,'a) s_monad) \<lbrace>\<lambda>rv s. \<not> has_reply_cap t s\<rbrace>"
apply (simp add: reply_cancel_ipc_def)
apply wp
apply (rule_tac Q="\<lambda>rvp s. cte_wp_at (\<lambda>c. c = cap.NullCap) x s \<and>
(\<forall>sl R. sl \<noteq> x \<longrightarrow>
caps_of_state s sl \<noteq> Some (cap.ReplyCap t False R))"
in hoare_strengthen_post)
apply (wp hoare_vcg_conj_lift hoare_vcg_all_lift
delete_one_deletes delete_one_caps_of_state)
apply (clarsimp simp: has_reply_cap_def cte_wp_at_caps_of_state is_reply_cap_to_def)
apply (case_tac "(aa, ba) = (a, b)",simp_all)[1]
apply (wp hoare_vcg_all_lift select_wp | simp del: split_paired_All)+
apply (rule_tac Q="\<lambda>_ s. invs s \<and> tcb_at t s" in hoare_post_imp)
apply (erule conjE)
apply (frule(1) reply_cap_descends_from_master)
apply (auto dest: reply_master_no_descendants_no_reply[rotated -1])[1]
apply (wp thread_set_invs_trivial | clarsimp simp: tcb_cap_cases_def)+
done
lemma (in delete_one_abs) cancel_ipc_no_reply_cap[wp]:
shows "\<lbrace>invs and tcb_at t\<rbrace> (cancel_ipc t :: (unit,'a) s_monad) \<lbrace>\<lambda>rv s. \<not> has_reply_cap t s\<rbrace>"
apply (simp add: cancel_ipc_def)
apply (wpsimp wp: hoare_post_imp [OF invs_valid_reply_caps]
reply_cancel_ipc_no_reply_cap
cancel_signal_invs cancel_signal_st_tcb_at_general
blocked_cancel_ipc_invs blocked_ipc_st_tcb_at_general
| strengthen reply_cap_doesnt_exist_strg)+
apply (rule_tac Q="\<lambda>rv. st_tcb_at ((=) rv) t and invs" in hoare_strengthen_post)
apply (wpsimp wp: gts_st_tcb)
apply (fastforce simp: invs_def valid_state_def st_tcb_at_tcb_at
elim!: pred_tcb_weakenE)+
done
lemma (in delete_one_abs) suspend_invs[wp]:
"\<lbrace>invs and tcb_at t and (\<lambda>s. t \<noteq> idle_thread s)\<rbrace>
(suspend t :: (unit,'a) s_monad) \<lbrace>\<lambda>rv. invs\<rbrace>"
by (wp sts_invs_minor user_getreg_inv as_user_invs sts_invs_minor cancel_ipc_invs
cancel_ipc_no_reply_cap
| strengthen no_refs_simple_strg
| simp add: suspend_def)+
context IpcCancel_AI begin
lemma suspend_typ_at [wp]:
"\<lbrace>\<lambda>(s::'a state). P (typ_at T p s)\<rbrace> suspend t \<lbrace>\<lambda>rv s. P (typ_at T p s)\<rbrace>"
by (wpsimp simp: suspend_def)
lemma suspend_valid_cap:
"\<lbrace>valid_cap c\<rbrace> suspend tcb \<lbrace>\<lambda>_. (valid_cap c) :: 'a state \<Rightarrow> bool\<rbrace>"
by (wp valid_cap_typ)
lemma suspend_tcb[wp]:
"\<lbrace>tcb_at t'\<rbrace> suspend t \<lbrace>\<lambda>rv. (tcb_at t') :: 'a state \<Rightarrow> bool\<rbrace>"
by (simp add: tcb_at_typ) wp
end
declare if_cong [cong del]
crunch cte_wp_at[wp]: blocked_cancel_ipc "cte_wp_at P p"
(wp: crunch_wps)
crunch cte_wp_at[wp]: cancel_signal "cte_wp_at P p"
(wp: crunch_wps)
locale delete_one_pre =
fixes state_ext_type :: "('a :: state_ext) itself"
assumes delete_one_cte_wp_at_preserved:
"(\<And>cap. P cap \<Longrightarrow> \<not> can_fast_finalise cap) \<Longrightarrow>
\<lbrace>cte_wp_at P sl\<rbrace> (cap_delete_one sl' :: (unit,'a) s_monad) \<lbrace>\<lambda>rv. cte_wp_at P sl\<rbrace>"
lemma (in delete_one_pre) reply_cancel_ipc_cte_wp_at_preserved:
"(\<And>cap. P cap \<Longrightarrow> \<not> can_fast_finalise cap) \<Longrightarrow>
\<lbrace>cte_wp_at P p\<rbrace> (reply_cancel_ipc t :: (unit,'a) s_monad) \<lbrace>\<lambda>rv. cte_wp_at P p\<rbrace>"
unfolding reply_cancel_ipc_def
apply (wpsimp wp: select_wp delete_one_cte_wp_at_preserved)
apply (rule_tac Q="\<lambda>_. cte_wp_at P p" in hoare_post_imp, clarsimp)
apply (wpsimp wp: thread_set_cte_wp_at_trivial simp: ran_tcb_cap_cases)
apply assumption
done
lemma (in delete_one_pre) cancel_ipc_cte_wp_at_preserved:
"(\<And>cap. P cap \<Longrightarrow> \<not> can_fast_finalise cap) \<Longrightarrow>
\<lbrace>cte_wp_at P p\<rbrace> (cancel_ipc t :: (unit,'a) s_monad) \<lbrace>\<lambda>rv. cte_wp_at P p\<rbrace>"
apply (simp add: cancel_ipc_def)
apply (rule hoare_pre)
apply (wp reply_cancel_ipc_cte_wp_at_preserved | wpcw | simp)+
done
lemma (in delete_one_pre) suspend_cte_wp_at_preserved:
"(\<And>cap. P cap \<Longrightarrow> \<not> can_fast_finalise cap) \<Longrightarrow>
\<lbrace>cte_wp_at P p\<rbrace> (suspend tcb :: (unit,'a) s_monad) \<lbrace>\<lambda>_. cte_wp_at P p\<rbrace>"
by (simp add: suspend_def) (wpsimp wp: cancel_ipc_cte_wp_at_preserved)+
(* FIXME - eliminate *)
lemma obj_at_exst_update:
"obj_at P p (trans_state f s) = obj_at P p s"
by (rule more_update.obj_at_update)
lemma set_thread_state_bound_tcb_at[wp]:
"\<lbrace>bound_tcb_at P t\<rbrace> set_thread_state p ts \<lbrace>\<lambda>_. bound_tcb_at P t\<rbrace>"
unfolding set_thread_state_def set_object_def get_object_def
by (wpsimp simp: pred_tcb_at_def obj_at_def get_tcb_def)
crunch bound_tcb_at[wp]: cancel_all_ipc, empty_slot, is_final_cap, get_cap "bound_tcb_at P t"
(wp: mapM_x_wp_inv)
lemma fast_finalise_bound_tcb_at:
"\<lbrace>\<lambda>s. bound_tcb_at P t s \<and> (\<exists>tt b R. cap = ReplyCap tt b R) \<rbrace> fast_finalise cap final \<lbrace>\<lambda>_. bound_tcb_at P t\<rbrace>"
by (case_tac cap, simp_all)
lemma get_cap_reply_cap_helper:
"\<lbrace>\<lambda>s. \<exists>t b R. Some (ReplyCap t b R) = caps_of_state s slot \<rbrace> get_cap slot \<lbrace>\<lambda>rv s. \<exists>t b R. rv = ReplyCap t b R\<rbrace>"
by (auto simp: valid_def get_cap_caps_of_state[symmetric])
lemma cap_delete_one_bound_tcb_at:
"\<lbrace>\<lambda>s. bound_tcb_at P t s \<and> (\<exists>t b R. caps_of_state s c = Some (ReplyCap t b R)) \<rbrace>
cap_delete_one c
\<lbrace>\<lambda>_. bound_tcb_at P t\<rbrace>"
apply (clarsimp simp: unless_def cap_delete_one_def)
apply (wp fast_finalise_bound_tcb_at)
apply (wp hoare_vcg_if_lift2, simp)
apply (wp hoare_conjI)
apply (wp only:hoare_drop_imp)
apply (wp hoare_vcg_conj_lift)
apply (wp get_cap_reply_cap_helper hoare_drop_imp | clarsimp)+
done
lemma valid_mdb_impl_reply_masters:
"valid_mdb s \<Longrightarrow> reply_masters_mdb (cdt s) (caps_of_state s)"
by (auto simp: valid_mdb_def reply_mdb_def)
lemma caps_of_state_tcb_index_trans:
"\<lbrakk>get_tcb p s = Some tcb \<rbrakk> \<Longrightarrow> caps_of_state s (p, tcb_cnode_index n) = (tcb_cnode_map tcb) (tcb_cnode_index n)"
apply (drule get_tcb_SomeD)
apply (clarsimp simp: caps_of_state_def)
apply (safe)
apply (clarsimp simp: get_object_def get_cap_def)
apply (simp add:set_eq_iff)
apply (drule_tac x=cap in spec)
apply (drule_tac x=s in spec)
apply (clarsimp simp: in_monad)
apply (clarsimp simp: get_cap_def get_object_def)
apply (rule ccontr)
apply (drule not_sym)
apply (auto simp: set_eq_iff in_monad)
done
lemma descendants_of_nullcap:
"\<lbrakk>descendants_of (a, b) (cdt s) \<noteq> {}; valid_mdb s \<rbrakk> \<Longrightarrow> caps_of_state s (a, b) \<noteq> Some NullCap"
apply clarsimp
apply (drule_tac cs="caps_of_state s" and p="(a,b)" and m="(cdt s)" in mdb_cte_at_Null_descendants)
apply (clarsimp simp: valid_mdb_def2)+
done
lemma reply_cancel_ipc_bound_tcb_at[wp]:
"\<lbrace>bound_tcb_at P t and valid_mdb and valid_objs and tcb_at p \<rbrace>
reply_cancel_ipc p
\<lbrace>\<lambda>_. bound_tcb_at P t\<rbrace>"
unfolding reply_cancel_ipc_def
apply (wpsimp wp: cap_delete_one_bound_tcb_at select_inv select_wp)
apply (rule_tac Q="\<lambda>_. bound_tcb_at P t and valid_mdb and valid_objs and tcb_at p" in hoare_strengthen_post)
apply (wpsimp wp: thread_set_no_change_tcb_pred thread_set_mdb)
apply (fastforce simp:tcb_cap_cases_def)
apply (wpsimp wp: thread_set_valid_objs_triv simp: ran_tcb_cap_cases)
apply clarsimp
apply (frule valid_mdb_impl_reply_masters)
apply (clarsimp simp: reply_masters_mdb_def)
apply (spec p)
apply (spec "tcb_cnode_index 2")
apply (spec p)
apply (clarsimp simp:tcb_at_def)
apply (frule(1) valid_tcb_objs)
apply (clarsimp simp: valid_tcb_def)
apply (erule impE)
apply (simp add: caps_of_state_tcb_index_trans tcb_cnode_map_def)
apply (clarsimp simp: tcb_cap_cases_def is_master_reply_cap_def split:cap.splits )
apply (subgoal_tac "descendants_of (p, tcb_cnode_index 2) (cdt s) \<noteq> {}")
prefer 2
apply simp
apply (drule descendants_of_nullcap, simp)
apply (simp add: caps_of_state_tcb_index_trans tcb_cnode_map_def)
apply fastforce
apply simp
done
crunch bound_tcb_at[wp]: cancel_ipc "bound_tcb_at P t"
(ignore: set_object thread_set wp: mapM_x_wp_inv)
context IpcCancel_AI begin
lemma suspend_unlive:
"\<lbrace>\<lambda>(s::'a state).
(bound_tcb_at ((=) None) t and valid_mdb and valid_objs) s \<rbrace>
suspend t
\<lbrace>\<lambda>rv. obj_at (Not \<circ> live0) t\<rbrace>"
apply (simp add: suspend_def set_thread_state_def set_object_def get_object_def)
(* avoid creating two copies of obj_at *)
supply hoare_vcg_if_split[wp_split del] if_split[split del]
apply (wp | simp only: obj_at_exst_update)+
apply (simp add: obj_at_def)
apply (rule_tac Q="\<lambda>_. bound_tcb_at ((=) None) t" in hoare_strengthen_post)
supply hoare_vcg_if_split[wp_split]
apply wp
apply (auto simp: pred_tcb_def2)[1]
apply (simp flip: if_split)
apply wpsimp+
apply (simp add: pred_tcb_at_tcb_at)
done
end
definition bound_refs_of_tcb :: "'a state \<Rightarrow> machine_word \<Rightarrow> (machine_word \<times> reftype) set"
where
"bound_refs_of_tcb s t \<equiv> case kheap s t of
Some (TCB tcb) \<Rightarrow> tcb_bound_refs (tcb_bound_notification tcb)
| _ \<Rightarrow> {}"
lemma bound_refs_of_tcb_trans:
"bound_refs_of_tcb (trans_state f s) x = bound_refs_of_tcb s x"
by (clarsimp simp:bound_refs_of_tcb_def trans_state_def)
lemma cancel_all_invs_helper:
"\<lbrace>all_invs_but_sym_refs
and (\<lambda>s. (\<forall>x\<in>set q. ex_nonz_cap_to x s)
\<and> sym_refs (\<lambda>x. if x \<in> set q then {r \<in> state_refs_of s x. snd r = TCBBound}
else state_refs_of s x)
\<and> sym_refs (\<lambda>x. state_hyp_refs_of s x)
\<and> (\<forall>x\<in>set q. st_tcb_at (Not \<circ> (halted or awaiting_reply)) x s))\<rbrace>
mapM_x (\<lambda>t. do y \<leftarrow> set_thread_state t Structures_A.thread_state.Restart;
do_extended_op (tcb_sched_enqueue_ext t) od) q
\<lbrace>\<lambda>rv. invs\<rbrace>"
apply (simp add: invs_def valid_state_def valid_pspace_def)
apply (rule mapM_x_inv_wp2)
apply (clarsimp simp: )
apply wp
apply (simp add:bound_refs_of_tcb_trans)
apply wp[1]
apply (rule hoare_pre, wp hoare_vcg_const_Ball_lift
valid_irq_node_typ sts_only_idle)
apply (rule sts_st_tcb_at_cases, simp)
apply (strengthen reply_cap_doesnt_exist_strg)
apply (auto simp: valid_tcb_state_def idle_no_ex_cap o_def if_split_asm
elim!: rsubst[where P=sym_refs] st_tcb_weakenE)
done
lemma ep_no_bound_refs:
"ep_at p s \<Longrightarrow> {r \<in> state_refs_of s p. snd r = TCBBound} = {}"
by (clarsimp simp: obj_at_def state_refs_of_def refs_of_def is_ep ep_q_refs_of_def split: endpoint.splits)
lemma obj_at_conj_distrib:
"obj_at (\<lambda>ko. P ko \<and> Q ko) p s \<Longrightarrow> obj_at (\<lambda>ko. P ko) p s \<and> obj_at (\<lambda>ko. Q ko) p s"
by (auto simp:obj_at_def)
lemma ep_q_refs_of_no_ntfn_bound:
"(x, tp) \<in> ep_q_refs_of ep \<Longrightarrow> tp \<noteq> NTFNBound"
by (auto simp: ep_q_refs_of_def split:endpoint.splits)
lemma ep_q_refs_no_NTFNBound:
"(x, NTFNBound) \<notin> ep_q_refs_of ep"
by (clarsimp simp: ep_q_refs_of_def split:endpoint.splits)
lemma ep_list_tcb_at:
"\<lbrakk>ep_at p s; valid_objs s; state_refs_of s p = set q \<times> {k}; x \<in> set q \<rbrakk> \<Longrightarrow> tcb_at x s"
apply (erule (1) obj_at_valid_objsE)
apply (fastforce simp:is_ep valid_obj_def valid_ep_def state_refs_of_def split:endpoint.splits)
done
lemma tcb_at_no_ntfn_bound:
"\<lbrakk> valid_objs s; tcb_at x s \<rbrakk> \<Longrightarrow> (t, NTFNBound) \<notin> state_refs_of s x"
by (auto elim!: obj_at_valid_objsE
simp: state_refs_of_def is_tcb valid_obj_def tcb_bound_refs_def tcb_st_refs_of_def
split:thread_state.splits option.splits)
lemma ep_no_ntfn_bound:
"\<lbrakk>is_ep ko; refs_of ko = set q \<times> {NTFNBound}; y \<in> set q \<rbrakk> \<Longrightarrow> False"
apply (subst (asm) set_eq_iff)
apply (drule_tac x="(y, NTFNBound)" in spec)
apply (clarsimp simp: refs_of_rev is_ep)
done
lemma cancel_all_ipc_invs_helper:
assumes x: "\<And>x ko. (x, symreftype k) \<in> refs_of ko
\<Longrightarrow> (refs_of ko = {(x, symreftype k)} \<or>
(\<exists>y. refs_of ko = {(x, symreftype k), (y, TCBBound)}))"
shows
"\<lbrace>invs and obj_at (\<lambda>ko. is_ep ko \<and> refs_of ko = set q \<times> {k}) p\<rbrace>
do y \<leftarrow> set_endpoint p Structures_A.endpoint.IdleEP;
y \<leftarrow> mapM_x (\<lambda>t. do y \<leftarrow> set_thread_state t Structures_A.thread_state.Restart;
do_extended_op (tcb_sched_action (tcb_sched_enqueue) t) od) q;
do_extended_op reschedule_required
od \<lbrace>\<lambda>rv. invs\<rbrace>"
apply (subst bind_assoc[symmetric])
apply (rule hoare_seq_ext)
apply wp
apply simp
apply (rule hoare_pre)
apply (wp cancel_all_invs_helper hoare_vcg_const_Ball_lift valid_irq_node_typ valid_ioports_lift)
apply (clarsimp simp: invs_def valid_state_def valid_pspace_def valid_ep_def live_def)
apply (rule conjI)
apply (fastforce simp: live_def is_ep_def elim!: obj_at_weakenE split: kernel_object.splits)
apply (rule conjI)
apply clarsimp
apply (drule(1) sym_refs_obj_atD, clarsimp)
apply (drule(1) bspec, erule(1) if_live_then_nonz_capD)
apply (rule refs_of_live, clarsimp)
apply (rule conjI[rotated])
apply (subgoal_tac "\<exists>ep. ko_at (Endpoint ep) p s", clarsimp)
apply (subgoal_tac "\<exists>rt. (x, rt) \<in> ep_q_refs_of ep", clarsimp)
apply (fastforce elim!: ep_queued_st_tcb_at)
apply (clarsimp simp: obj_at_def is_ep_def)+
apply (case_tac ko, simp_all)
apply (frule(1) sym_refs_obj_atD)
apply (frule obj_at_state_refs_ofD)
apply (clarsimp dest!:obj_at_conj_distrib)
apply (thin_tac "obj_at (\<lambda>ko. refs_of ko = set q \<times> {k}) p s")
apply (erule delta_sym_refs)
apply (clarsimp simp: if_split_asm)+
apply (safe)
apply (fastforce dest!:symreftype_inverse' ep_no_ntfn_bound)
apply (clarsimp dest!: symreftype_inverse')
apply (frule (3) ep_list_tcb_at)
apply (frule_tac t=y in tcb_at_no_ntfn_bound, simp+)[1]
apply simp
subgoal
apply (clarsimp dest!: symreftype_inverse')
apply (frule (3) ep_list_tcb_at)
by (clarsimp simp: obj_at_def is_tcb is_ep)
apply (fastforce dest!: obj_at_state_refs_ofD x)
apply (fastforce dest!: obj_at_state_refs_ofD x)
apply (fastforce dest!: symreftype_inverse' ep_no_ntfn_bound)
apply (clarsimp)
apply (fastforce dest!: symreftype_inverse' ep_no_ntfn_bound)
done
lemma cancel_all_ipc_invs:
"\<lbrace>invs\<rbrace> cancel_all_ipc epptr \<lbrace>\<lambda>rv. invs\<rbrace>"
apply (simp add: cancel_all_ipc_def)
apply (rule hoare_seq_ext [OF _ get_simple_ko_sp])
apply (case_tac ep, simp_all add: get_ep_queue_def)
apply (wp, fastforce)
apply (rule hoare_pre, rule cancel_all_ipc_invs_helper[where k=EPSend])
apply (fastforce simp: refs_of_rev tcb_bound_refs_def split: option.splits)
apply (clarsimp elim!: obj_at_weakenE simp: is_ep)
apply (rule hoare_pre, rule cancel_all_ipc_invs_helper[where k=EPRecv])
apply (fastforce simp: refs_of_rev tcb_bound_refs_def split: option.splits)
apply (clarsimp elim!: obj_at_weakenE simp: is_ep)
done
lemma ntfn_q_refs_no_NTFNBound:
"(x, NTFNBound) \<notin> ntfn_q_refs_of ntfn"
by (auto simp: ntfn_q_refs_of_def split:ntfn.splits)
lemma ntfn_q_refs_no_TCBBound:
"(x, TCBBound) \<notin> ntfn_q_refs_of ntfn"
by (auto simp: ntfn_q_refs_of_def split:ntfn.splits)
lemma ntfn_bound_tcb_get_set[simp]:
"ntfn_bound_tcb (ntfn_set_bound_tcb ntfn ntfn') = ntfn'"
by auto
lemma ntfn_obj_tcb_get_set[simp]:
"ntfn_obj (ntfn_set_bound_tcb ntfn ntfn') = ntfn_obj ntfn"
by auto
lemma valid_ntfn_set_bound_None:
"valid_ntfn ntfn s \<Longrightarrow> valid_ntfn (ntfn_set_bound_tcb ntfn None) s"
by (auto simp: valid_ntfn_def split:ntfn.splits)
lemma ntfn_bound_tcb_at:
"\<lbrakk>sym_refs (state_refs_of s); valid_objs s; kheap s ntfnptr = Some (Notification ntfn);
ntfn_bound_tcb ntfn = Some tcbptr; P (Some ntfnptr)\<rbrakk>
\<Longrightarrow> bound_tcb_at P tcbptr s"
apply (drule_tac x=ntfnptr in sym_refsD[rotated])
apply (fastforce simp: state_refs_of_def)
apply (fastforce simp: pred_tcb_at_def obj_at_def valid_obj_def valid_ntfn_def is_tcb
state_refs_of_def refs_of_rev
simp del: refs_of_simps)
done
lemma bound_tcb_bound_notification_at:
"\<lbrakk>sym_refs (state_refs_of s); valid_objs s; kheap s ntfnptr = Some (Notification ntfn);
bound_tcb_at (\<lambda>ptr. ptr = (Some ntfnptr)) tcbptr s \<rbrakk>
\<Longrightarrow> ntfn_bound_tcb ntfn = Some tcbptr"
apply (drule_tac x=tcbptr in sym_refsD[rotated])
apply (fastforce simp: state_refs_of_def pred_tcb_at_def obj_at_def)
apply (auto simp: pred_tcb_at_def obj_at_def valid_obj_def valid_ntfn_def is_tcb
state_refs_of_def refs_of_rev
simp del: refs_of_simps)
done
lemma unbind_notification_invs:
shows "\<lbrace>invs\<rbrace> unbind_notification t \<lbrace>\<lambda>rv. invs\<rbrace>"
apply (simp add: unbind_notification_def invs_def valid_state_def valid_pspace_def)
apply (rule hoare_seq_ext [OF _ gbn_sp])
apply (case_tac ntfnptr, clarsimp, wp, simp)
apply clarsimp
apply (rule hoare_seq_ext [OF _ get_simple_ko_sp])
apply (wp valid_irq_node_typ set_simple_ko_valid_objs valid_ioports_lift
| clarsimp split del: if_split)+
apply (intro conjI impI;
(match conclusion in "sym_refs r" for r \<Rightarrow> \<open>-\<close>
| auto elim!: obj_at_weakenE obj_at_valid_objsE if_live_then_nonz_capD2
simp: live_def valid_ntfn_set_bound_None is_ntfn valid_obj_def
)?)
apply (clarsimp simp: if_split)
apply (rule delta_sym_refs, assumption)
apply (fastforce simp: obj_at_def is_tcb
dest!: pred_tcb_at_tcb_at ko_at_state_refs_ofD
split: if_split_asm)
apply (clarsimp split: if_split_asm)
apply (frule pred_tcb_at_tcb_at)
apply (frule_tac p=t in obj_at_ko_at, clarsimp)
apply (subst (asm) ko_at_state_refs_ofD, assumption)
apply (fastforce simp: obj_at_def is_tcb ntfn_q_refs_no_NTFNBound tcb_at_no_ntfn_bound refs_of_rev
tcb_ntfn_is_bound_def
dest!: pred_tcb_at_tcb_at bound_tcb_at_state_refs_ofD)
apply (subst (asm) ko_at_state_refs_ofD, assumption)
apply (fastforce simp: ntfn_bound_refs_def obj_at_def ntfn_q_refs_no_TCBBound
elim!: pred_tcb_weakenE
dest!: bound_tcb_bound_notification_at refs_in_ntfn_bound_refs symreftype_inverse'
split: option.splits)
done
crunch bound_tcb_at[wp]: cancel_all_signals "bound_tcb_at P t"
(wp: mapM_x_wp_inv)
lemma waiting_ntfn_list_tcb_at:
"\<lbrakk>valid_objs s; ko_at (Notification ntfn) ntfnptr s; ntfn_obj ntfn = WaitingNtfn x\<rbrakk> \<Longrightarrow> \<forall>t \<in> set x. tcb_at t s"
by (fastforce elim!: obj_at_valid_objsE simp:valid_obj_def valid_ntfn_def)
lemma tcb_at_ko_at:
"tcb_at p s \<Longrightarrow> \<exists>tcb. ko_at (TCB tcb) p s"
by (fastforce simp: obj_at_def is_tcb)
lemma tcb_state_refs_no_tcb:
"\<lbrakk>valid_objs s; tcb_at y s; (x, ref) \<in> state_refs_of s y\<rbrakk> \<Longrightarrow> ~ tcb_at x s"
apply (clarsimp simp: ko_at_state_refs_ofD dest!: tcb_at_ko_at)
apply (erule (1) obj_at_valid_objsE)
apply (clarsimp simp: is_tcb valid_obj_def)
apply (erule disjE)
apply (clarsimp simp: tcb_st_refs_of_def valid_tcb_def valid_tcb_state_def obj_at_def is_ep is_ntfn
split:thread_state.splits)
apply (clarsimp simp: tcb_bound_refs_def valid_tcb_def obj_at_def is_ntfn
split:option.splits)
done
lemma cancel_all_signals_invs:
"\<lbrace>invs\<rbrace> cancel_all_signals ntfnptr \<lbrace>\<lambda>rv. invs\<rbrace>"
apply (simp add: cancel_all_signals_def)
apply (rule hoare_seq_ext [OF _ get_simple_ko_sp])
apply (rule hoare_pre)
apply (wp cancel_all_invs_helper set_simple_ko_valid_objs valid_irq_node_typ
hoare_vcg_const_Ball_lift valid_ioports_lift
| wpc
| simp add: live_def)+
apply (clarsimp simp: invs_def valid_state_def valid_pspace_def)
apply (rule conjI)
apply (fastforce simp: valid_obj_def valid_ntfn_def elim!: obj_at_valid_objsE)
apply (rule conjI)
apply (fastforce simp: live_def elim!: if_live_then_nonz_capD)
apply (rule conjI)
apply (fastforce simp: is_ntfn elim!: ko_at_weakenE)
apply (rule conjI)
apply (fastforce simp: st_tcb_at_refs_of_rev
dest: bspec sym_refs_ko_atD
elim: st_tcb_ex_cap)
apply (rule conjI[rotated])
apply (fastforce elim!: ntfn_queued_st_tcb_at)
apply (rule delta_sym_refs, assumption)
apply (fastforce dest!: refs_in_ntfn_bound_refs ko_at_state_refs_ofD
split: if_split_asm)
apply (clarsimp split:if_split_asm)
apply (fastforce dest: waiting_ntfn_list_tcb_at refs_in_ntfn_bound_refs
simp: obj_at_def is_tcb_def)
apply (rule conjI)
apply (fastforce dest: refs_in_ntfn_bound_refs symreftype_inverse')
apply (frule (2) waiting_ntfn_list_tcb_at)
apply (fastforce simp: st_tcb_at_refs_of_rev refs_in_tcb_bound_refs
dest: bspec sym_refs_ko_atD st_tcb_at_state_refs_ofD)
apply (fastforce simp: ntfn_bound_refs_def valid_obj_def valid_ntfn_def refs_of_rev
dest!: symreftype_inverse' ko_at_state_refs_ofD
split: option.splits
elim!: obj_at_valid_objsE)
done
lemma cancel_all_unlive_helper:
"\<lbrace>obj_at (\<lambda>obj. \<not> live obj \<and> (\<forall>tcb. obj \<noteq> TCB tcb)) ptr\<rbrace>
mapM_x (\<lambda>t. do y \<leftarrow> set_thread_state t Structures_A.Restart;
do_extended_op (tcb_sched_enqueue_ext t) od) q
\<lbrace>\<lambda>rv. obj_at (Not \<circ> live) ptr\<rbrace>"
apply (rule hoare_strengthen_post [OF mapM_x_wp'])
apply (simp add: set_thread_state_def set_object_def get_object_def)
apply (wp | simp only: obj_at_exst_update)+
apply (clarsimp dest!: get_tcb_SomeD)
apply (clarsimp simp: obj_at_def)
apply (clarsimp elim!: obj_at_weakenE)
done
lemma cancel_all_ipc_unlive[wp]:
"\<lbrace>\<top>\<rbrace> cancel_all_ipc ptr \<lbrace>\<lambda> rv. obj_at (Not \<circ> live) ptr\<rbrace>"
apply (simp add: cancel_all_ipc_def)
apply (rule hoare_seq_ext [OF _ get_simple_ko_sp])
apply (case_tac ep, simp_all add: set_simple_ko_def get_ep_queue_def)
apply wp
apply (clarsimp simp: live_def elim!: obj_at_weakenE)
apply (wp cancel_all_unlive_helper set_object_at_obj3 | simp only: obj_at_exst_update)+
apply (clarsimp simp: live_def)
apply (wp cancel_all_unlive_helper set_object_at_obj3 | simp only: obj_at_exst_update)+
apply (clarsimp simp: live_def)
done
(* This lemma should be sufficient provided that each notification object is unbound BEFORE the 'cancel_all_signals' function is invoked. TODO: check against the abstract specification and the C and Haskell implementations that this is indeed the case. *)
lemma cancel_all_signals_unlive[wp]:
"\<lbrace>\<lambda>s. obj_at (\<lambda>ko. \<exists>ntfn. ko = Notification ntfn \<and> ntfn_bound_tcb ntfn = None) ntfnptr s\<rbrace>
cancel_all_signals ntfnptr
\<lbrace>\<lambda> rv. obj_at (Not \<circ> live) ntfnptr\<rbrace>"
apply (simp add: cancel_all_signals_def)
apply (rule hoare_seq_ext [OF _ get_simple_ko_sp])
apply (rule hoare_pre)
apply (wp
| wpc
| simp add: unbind_maybe_notification_def)+
apply (rule_tac Q="\<lambda>_. obj_at (is_ntfn and Not \<circ> live) ntfnptr" in hoare_post_imp)
apply (fastforce elim: obj_at_weakenE)
apply (wp mapM_x_wp' sts_obj_at_impossible
| simp add: is_ntfn)+
apply (simp add: set_simple_ko_def)
apply (wp get_object_wp obj_set_prop_at)
apply (auto simp: live_def pred_tcb_at_def obj_at_def)
done
crunch cte_wp_at[wp]: cancel_all_ipc "cte_wp_at P p"
(wp: crunch_wps mapM_x_wp)
crunch cte_wp_at[wp]: cancel_all_signals "cte_wp_at P p"
(wp: crunch_wps mapM_x_wp thread_set_cte_wp_at_trivial
simp: tcb_cap_cases_def)
lemma cancel_badged_sends_filterM_helper':
"\<forall>ys.
\<lbrace>\<lambda>s. all_invs_but_sym_refs s \<and> sym_refs (state_hyp_refs_of s) \<and> distinct (xs @ ys) \<and> ep_at epptr s
\<and> ex_nonz_cap_to epptr s
\<and> sym_refs ((state_refs_of s) (epptr := ((set (xs @ ys)) \<times> {EPSend})))
\<and> (\<forall>x \<in> set (xs @ ys). {r \<in> state_refs_of s x. snd r \<noteq> TCBBound} = {(epptr, TCBBlockedSend)})\<rbrace>
filterM (\<lambda>t. do st \<leftarrow> get_thread_state t;
if blocking_ipc_badge st = badge
then do y \<leftarrow> set_thread_state t Structures_A.thread_state.Restart;
y \<leftarrow> do_extended_op (tcb_sched_action action t);
return False
od
else return True
od) xs
\<lbrace>\<lambda>rv s. all_invs_but_sym_refs s \<and> sym_refs (state_hyp_refs_of s)
\<and> ep_at epptr s \<and> (\<forall>x \<in> set (xs @ ys). tcb_at x s)
\<and> ex_nonz_cap_to epptr s
\<and> (\<forall>y \<in> set ys. {r \<in> state_refs_of s y. snd r \<noteq> TCBBound} = {(epptr, TCBBlockedSend)})
\<and> distinct rv \<and> distinct (xs @ ys) \<and> (set rv \<subseteq> set xs)
\<and> sym_refs ((state_refs_of s) (epptr := ((set rv \<union> set ys) \<times> {EPSend})))\<rbrace>"
apply (rule rev_induct[where xs=xs])
apply (rule allI, simp)
apply wp
apply clarsimp
apply (drule(1) bspec, drule singleton_eqD, clarsimp, drule state_refs_of_elemD)
apply (clarsimp simp: st_tcb_at_refs_of_rev pred_tcb_at_def is_tcb
elim!: obj_at_weakenE)
apply (clarsimp simp: filterM_append bind_assoc simp del: set_append distinct_append)
apply (drule spec, erule hoare_seq_ext[rotated])
apply (rule hoare_seq_ext [OF _ gts_sp])
apply (rule hoare_pre,
wpsimp wp: valid_irq_node_typ sts_only_idle hoare_vcg_const_Ball_lift)
apply (clarsimp simp: valid_tcb_state_def)
apply (rule conjI[rotated])
apply blast
apply clarsimp
apply (thin_tac "obj_at f epptr s" for f s)
apply (thin_tac "tcb_at x s" for x s)
apply (thin_tac "sym_refs (state_hyp_refs_of s)" for s)
apply (frule singleton_eqD, clarify, drule state_refs_of_elemD)
apply (frule(1) if_live_then_nonz_capD)
apply (rule refs_of_live, clarsimp)
apply (clarsimp simp: st_tcb_at_refs_of_rev)
apply (clarsimp simp: pred_tcb_def2 valid_idle_def)
apply (rule conjI, clarsimp)
apply (rule conjI, clarsimp)
apply (drule(1) valid_reply_capsD, clarsimp simp: st_tcb_def2)
apply (rule conjI, blast)
apply (rule conjI, blast)
apply (erule delta_sym_refs)
apply (auto dest!: get_tcb_ko_atD ko_at_state_refs_ofD symreftype_inverse'
refs_in_tcb_bound_refs
split: if_split_asm)[2]
done
lemmas cancel_badged_sends_filterM_helper
= spec [where x=Nil, OF cancel_badged_sends_filterM_helper', simplified]
lemma cancel_badged_sends_invs_helper:
"{r. snd r \<noteq> TCBBound \<and>
(r \<in> tcb_st_refs_of ts \<or> r \<in> tcb_bound_refs ntfnptr)} =
tcb_st_refs_of ts"
by (auto simp add: tcb_st_refs_of_def tcb_bound_refs_def split: thread_state.splits option.splits)
lemma cancel_badged_sends_invs[wp]:
"\<lbrace>invs\<rbrace> cancel_badged_sends epptr badge \<lbrace>\<lambda>rv. invs\<rbrace>"
apply (simp add: cancel_badged_sends_def)
apply (rule hoare_seq_ext [OF _ get_simple_ko_sp])
apply (case_tac ep; simp)
apply wpsimp
apply (simp add: invs_def valid_state_def valid_pspace_def)
apply (wpsimp wp: valid_irq_node_typ valid_ioports_lift)
apply (simp add: fun_upd_def[symmetric] ep_redux_simps ep_at_def2[symmetric, simplified]
cong: list.case_cong)
apply (rule hoare_strengthen_post,
rule cancel_badged_sends_filterM_helper[where epptr=epptr])
apply (auto intro:obj_at_weakenE)[1]
apply (wpsimp wp: valid_irq_node_typ set_endpoint_ep_at valid_ioports_lift)
apply (clarsimp simp: valid_ep_def conj_comms)
apply (subst obj_at_weakenE, simp, fastforce)
apply (clarsimp simp: is_ep_def)
apply (frule(1) sym_refs_ko_atD, clarsimp)
apply (frule(1) if_live_then_nonz_capD, (clarsimp simp: live_def)+)
apply (erule(1) obj_at_valid_objsE)
apply (clarsimp simp: valid_obj_def valid_ep_def st_tcb_at_refs_of_rev)
apply (simp add: fun_upd_idem obj_at_def is_ep_def | subst fun_upd_def[symmetric])+
apply (clarsimp, drule(1) bspec)
apply (drule st_tcb_at_state_refs_ofD)
apply (clarsimp simp only: cancel_badged_sends_invs_helper Un_iff, clarsimp)
apply (simp add: set_eq_subset)
apply wpsimp
done
(* FIXME rule_format? *)
lemma real_cte_emptyable_strg:
"real_cte_at p s \<longrightarrow> emptyable p s"
by (clarsimp simp: emptyable_def obj_at_def is_tcb is_cap_table)
end
|
{-# LANGUAGE FlexibleContexts #-}
module WeightDecay
( input
, testdata
, traindata
, testY
, testX
, trainLinRegUnkownTarget
, trainAndTestWithData
, trainAndTestWithRegularization
, makeTransformedMatrix
, listToTuple'
) where
import Data.Maybe
import LinearRegression
import Network.HTTP
import NonlinearTransform
import Numeric.LinearAlgebra.Data
import Numeric.LinearAlgebra.HMatrix as HMatrix
--------------------------------------------------------------
-- Solutions to homework 6 of "Learning from data" question 5-6
--------------------------------------------------------------
input http = do
response <- simpleHTTP $ getRequest http
let body = fmap rspBody response
let rows = fmap (map (map readDouble . words) . lines) body
return rows
readDouble :: String -> Double
readDouble = read
listToTuple' :: [a] -> Maybe (a, a)
listToTuple' (x:y:_) = Just (x, y)
listToTuple' _ = Nothing
-- | downloads the test data
testdata = input "http://work.caltech.edu/data/out.dta"
-- | downloads the training data
traindata = input "http://work.caltech.edu/data/in.dta"
testY = fmap (fmap makeVectorY) testdata
where
makeVectorY lists = vector $ map last lists
testX = makeDataMatrix testdata
trainY = fmap (fmap makeVectorY) traindata
where
makeVectorY lists = vector $ map last lists
trainX = makeDataMatrix traindata
makeLabelVector inputdata = fmap (fmap makeVectorY) inputdata
where
makeVectorY lists = vector $ map last lists
makeDataMatrix inputdata = fmap (fmap makeMatrixX) inputdata
where
makeMatrixX lists = matrix 3 $ concatMap xvectors lists
xvectors list = 1 : init list
makeTransformedMatrix2 inputdata = fmap (fmap makeMatrixX) inputdata
where
makeMatrixX lists =
createTransformedX2 $ map (fromMaybe (0, 0) . listToTuple') lists
makeTransformedMatrix k inputdata = fmap (fmap makeMatrixX) inputdata
where
makeMatrixX lists =
createTransformedXk k $ map (fromMaybe (0, 0) . listToTuple') lists
trainXTransformed2 = makeTransformedMatrix2 traindata
testXTransformed2 = makeTransformedMatrix2 testdata
-- | Trains linear Regression from the data on the course website
trainLinRegUnkownTarget = do
matrixXTrain <- trainXTransformed2
vectorYTrain <- trainY
let weights = linearRegressionWeight <$> matrixXTrain <*> vectorYTrain
let inSampleError =
linRegClassificationError <$> matrixXTrain <*> vectorYTrain <*> weights
return (weights, inSampleError)
-- | Tests linear Regression from the data on the course website
testLinRegUnkownTarget weights = do
matrixXTest <- testXTransformed2
vectorYTest <- testY
let outOfSampleError =
linRegClassificationError <$> matrixXTest <*> vectorYTest <*> weights
return outOfSampleError
-- | solution for question 2: train and test with given data, with nonlinear transformation but without regularization and print the result
trainAndTestWithData :: IO ()
trainAndTestWithData = do
(weights, inSampleError) <- trainLinRegUnkownTarget
outOfSampleError <- testLinRegUnkownTarget weights
putStr "In Sample Error: "
print inSampleError
putStr "Out of Sample Error: "
print outOfSampleError
-- | returns the weights determined by linear regression on the training data with regularization
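-- (A sketch of the closed form computed here: w = (X'X + lambda*I)^-1 X'y,
-- with lambda the regularization parameter; X is assumed to have full
-- column rank so that the inverse exists.)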
linearRegressionWeightReg :: R -> Matrix R -> Vector R -> Vector R
linearRegressionWeightReg regParam matrixOfInputData vectorOfLabels =
inv
(tr matrixOfInputData HMatrix.<> matrixOfInputData +
    scalar regParam * ident (fst $ size $ tr matrixOfInputData)) HMatrix.#>
(tr matrixOfInputData #> vectorOfLabels)
-- | Trains linear Regression from the data on the course website with regularization
trainLinRegWithRegularization regParam = do
matrixXTrain <- trainXTransformed2
vectorYTrain <- trainY
let weights =
linearRegressionWeightReg regParam <$> matrixXTrain <*> vectorYTrain
let inSampleError =
linRegClassificationError <$> matrixXTrain <*> vectorYTrain <*> weights
return (weights, inSampleError)
-- | performs training and testing with the regularization parameter provided as an input and prints the in-sample and out-of-sample error
trainAndTestWithRegularization :: R -> IO ()
trainAndTestWithRegularization regParam = do
(weights, inSampleError) <- trainLinRegWithRegularization regParam
outOfSampleError <- testLinRegUnkownTarget weights
putStr "In Sample Error: "
print inSampleError
putStr "Out of Sample Error: "
print outOfSampleError
|
function res = isfield(this, varargin)
% Returns true if the string fieldname is the name of a field in the
% substructure 'other' in the meeg object 'this'.
% FORMAT res = isfield(this,fieldname)
%
% An overloaded function...
% _______________________________________________________________________
% Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging
% Christophe Phillips
% $Id: isfield.m 2720 2009-02-09 19:50:46Z vladimir $
res = isfield(this.other, varargin{:});
|
State Before: G : Type u_1
inst✝ : Group G
H K : Subgroup G
S T : Set G
f : Quotient (QuotientGroup.rightRel H) → G
hf : ∀ (q : Quotient (QuotientGroup.rightRel H)), Quotient.mk'' (f q) = q
q : Quotient (QuotientGroup.rightRel H)
⊢ ↑(↑(toEquiv (_ : (Set.range fun q => f q) ∈ rightTransversals ↑H)) q) = f q State After: G : Type u_1
inst✝ : Group G
H K : Subgroup G
S T : Set G
f : Quotient (QuotientGroup.rightRel H) → G
hf : ∀ (q : Quotient (QuotientGroup.rightRel H)), Quotient.mk'' (f q) = q
q : Quotient (QuotientGroup.rightRel H)
⊢ ↑(toEquiv (_ : (Set.range fun q => f q) ∈ rightTransversals ↑H)) q =
{ val := f q, property := (_ : ∃ y, (fun q => f q) y = f q) } Tactic: refine' (Subtype.ext_iff.mp _).trans (Subtype.coe_mk (f q) ⟨q, rfl⟩) State Before: G : Type u_1
inst✝ : Group G
H K : Subgroup G
S T : Set G
f : Quotient (QuotientGroup.rightRel H) → G
hf : ∀ (q : Quotient (QuotientGroup.rightRel H)), Quotient.mk'' (f q) = q
q : Quotient (QuotientGroup.rightRel H)
⊢ ↑(toEquiv (_ : (Set.range fun q => f q) ∈ rightTransversals ↑H)) q =
{ val := f q, property := (_ : ∃ y, (fun q => f q) y = f q) } State After: no goals Tactic: exact (toEquiv (range_mem_rightTransversals hf)).apply_eq_iff_eq_symm_apply.mpr (hf q).symm
|
context("Does a string have ANSI style?")
op <- options()
on.exit(options(op))
options(crayon.enabled = TRUE)
test_that("has_style works", {
expect_false(has_style("foobar"))
for (st in names(styles)) {
expect_true(has_style(style("foobar", st)))
}
})
context("Strip style from string")
test_that("strip_style works", {
expect_equal("", strip_style(""))
expect_equal("foobar", strip_style("foobar"))
expect_equal("foobar", strip_style(red$underline$bold("foobar")))
for (st in names(styles)) {
expect_equal("foobar", strip_style(style("foobar", st)))
}
})
|
[STATEMENT]
lemma analz_Decrypt' [dest]:
"[| Crypt K X \<in> analz H; Key K \<in> analz H |] ==> X \<in> analz H"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>Crypt K X \<in> analz H; Key K \<in> analz H\<rbrakk> \<Longrightarrow> X \<in> analz H
[PROOF STEP]
by auto
|
-- You can use _ in a binding position in notation.
module WildcardNotation where
data Σ (A : Set) (B : A → Set) : Set where
_,_ : ∀ x → B x → Σ A B
syntax Σ A (λ _ → B) = A × B
swap : ∀ {A B} → A × B → B × A
swap (x , y) = y , x
syntax compose (λ _ → x) (λ _ → y) = x instead-of y
compose : {A B C : Set} → (B → C) → (A → B) → (A → C)
compose f g = λ x → f (g x)
open import Common.Prelude
open import Common.Equality
thm₁ : swap (1 , 2) ≡ (2 , 1)
thm₁ = refl
thm₂ : (a b : Nat) → (5 instead-of a) b ≡ 5
thm₂ a b = refl
|
module Human.IO where
postulate IO : ∀ {a} → Set a → Set a
{-# BUILTIN IO IO #-}
|
Formal statement is: lemma dim_substandard: assumes d: "d \<subseteq> Basis" shows "dim {x::'a::euclidean_space. \<forall>i\<in>Basis. i \<notin> d \<longrightarrow> x\<bullet>i = 0} = card d" (is "dim ?A = _") Informal statement is: The dimension of the subspace of vectors in a Euclidean space whose coordinates vanish on every basis vector outside a subset $d$ of the basis is the cardinality of $d$. |
Require Import Essentials.Notations.
Require Import Essentials.Types.
Require Import Essentials.Facts_Tactics.
Require Import Category.Main.
Require Import Functor.Main.
Require Import Cat.Cat.
Require Import Basic_Cons.Terminal.
Require Import Archetypal.Discr.Discr.
Require Import NatTrans.NatTrans NatTrans.NatIso.
(** The unique functor to the terminal category. *)
Program Definition Functor_To_1_Cat (C' : Category) : (C' –≻ 1)%functor :=
{|
FO := fun x => tt;
FA := fun a b f => tt;
F_id := fun _ => eq_refl;
F_compose := fun _ _ _ _ _ => eq_refl
|}.
(** Terminal category. *)
Program Instance Cat_Term : Terminal Cat :=
{
terminal := 1%category;
t_morph := fun x => Functor_To_1_Cat x
}.
Next Obligation. (* t_morph_unique *)
Proof.
Func_eq_simpl;
FunExt;
match goal with
[|- ?A = ?B] =>
destruct A;
destruct B end;
trivial.
Qed.
(** A functor from terminal category maps all arrows (any arrow is just the identity)
to the identity arrow. *)
Section From_Term_Cat.
Context {C : Category} (F : (1 –≻ C)%functor).
Theorem From_Term_Cat : ∀ h, (F @_a tt tt h)%morphism = id.
Proof.
destruct h.
change tt with (id 1 tt).
apply F_id.
Qed.
End From_Term_Cat.
(** Any two functors from a category to the terminal category are naturally isomorphic. *)
Program Definition Functor_To_1_Cat_Iso
{C : Category}
(F F' : (C –≻ 1)%functor)
: (F ≃ F')%natiso :=
{|
iso_morphism :=
{|
Trans := fun _ => tt
|};
inverse_morphism :=
{|
Trans := fun _ => tt
|}
|}.
|
-- Code modified from Alex Zhukovsky's one from DepTyp telegram chat (https://t.me/c/1062361327/39965)
-- The modification mostly consists of further use of function extensionality.
record State s a where
constructor MkState
runState : s -> (a,s)
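-- Function extensionality is not provable inside Idris, so it is postulated
-- below via believe_me.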
funext : {0 f, g : a -> b} -> ((x : a) -> f x = g x) -> f = g
funext = believe_me
injectiveProjections : (0 ab : (a,b)) -> (fst ab, snd ab) = ab
injectiveProjections {ab=(x,y)} = Refl
stateMap : (a -> b) -> (s -> (a, s)) -> s -> (b, s)
stateMap f = (mapFst f .)
stateMapIdIsId : stateMap Prelude.id rs = rs
stateMapIdIsId = funext \x => rewrite sym $ injectiveProjections $ rs x in Refl
Functor (State s) where
map f (MkState rs) = MkState $ stateMap f rs
interface Functor f => VerifiedFunctor (0 f : Type -> Type) where
functorIdentity : {0 a : Type} -> map {f} {a} Prelude.id = Prelude.id
VerifiedFunctor (State s) where
functorIdentity = funext \(MkState _) => cong MkState stateMapIdIsId
|
/-
Copyright (c) 2021 Johan Commelin. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johan Commelin
-/
import topology.subset_properties
import topology.connected
import topology.algebra.monoid
import tactic.tfae
/-!
# Locally constant functions
This file sets up the theory of locally constant function from a topological space to a type.
## Main definitions and constructions
* `is_locally_constant f` : a map `f : X → Y` where `X` is a topological space is locally
constant if every set in `Y` has an open preimage.
* `locally_constant X Y` : the type of locally constant maps from `X` to `Y`
* `locally_constant.map` : push-forward of locally constant maps
* `locally_constant.comap` : pull-back of locally constant maps
-/
variables {X Y Z α : Type*} [topological_space X]
open set filter
open_locale topological_space
/-- A function between topological spaces is locally constant if the preimage of any set is open. -/
def is_locally_constant (f : X → Y) : Prop := ∀ s : set Y, is_open (f ⁻¹' s)
namespace is_locally_constant
protected lemma tfae (f : X → Y) :
tfae [is_locally_constant f,
∀ x, ∀ᶠ x' in 𝓝 x, f x' = f x,
∀ x, is_open {x' | f x' = f x},
∀ y, is_open (f ⁻¹' {y}),
∀ x, ∃ (U : set X) (hU : is_open U) (hx : x ∈ U), ∀ x' ∈ U, f x' = f x] :=
begin
tfae_have : 1 → 4, from λ h y, h {y},
tfae_have : 4 → 3, from λ h x, h (f x),
tfae_have : 3 → 2, from λ h x, mem_nhds_sets (h x) rfl,
tfae_have : 2 → 5,
{ intros h x,
rcases mem_nhds_sets_iff.1 (h x) with ⟨U, eq, hU, hx⟩,
exact ⟨U, hU, hx, eq⟩ },
tfae_have : 5 → 1,
{ intros h s,
refine is_open_iff_forall_mem_open.2 (λ x hx, _),
rcases h x with ⟨U, hU, hxU, eq⟩,
exact ⟨U, λ x' hx', mem_preimage.2 $ (eq x' hx').symm ▸ hx, hU, hxU⟩ },
tfae_finish
end
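/-- Every function out of a discrete space is locally constant, since every
preimage is open. -/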
@[nontriviality] lemma of_discrete [discrete_topology X] (f : X → Y) :
is_locally_constant f :=
λ s, is_open_discrete _
lemma is_open_fiber {f : X → Y} (hf : is_locally_constant f) (y : Y) :
is_open {x | f x = y} :=
hf {y}
lemma iff_exists_open (f : X → Y) :
is_locally_constant f ↔ ∀ x, ∃ (U : set X) (hU : is_open U) (hx : x ∈ U), ∀ x' ∈ U, f x' = f x :=
(is_locally_constant.tfae f).out 0 4
lemma iff_eventually_eq (f : X → Y) :
is_locally_constant f ↔ ∀ x, ∀ᶠ y in 𝓝 x, f y = f x :=
(is_locally_constant.tfae f).out 0 1
lemma exists_open {f : X → Y} (hf : is_locally_constant f) (x : X) :
∃ (U : set X) (hU : is_open U) (hx : x ∈ U), ∀ x' ∈ U, f x' = f x :=
(iff_exists_open f).1 hf x
protected lemma eventually_eq {f : X → Y} (hf : is_locally_constant f) (x : X) :
∀ᶠ y in 𝓝 x, f y = f x :=
(iff_eventually_eq f).1 hf x
protected lemma continuous [topological_space Y] {f : X → Y} (hf : is_locally_constant f) :
continuous f :=
⟨λ U hU, hf _⟩
lemma iff_continuous {_ : topological_space Y} [discrete_topology Y] (f : X → Y) :
is_locally_constant f ↔ continuous f :=
⟨is_locally_constant.continuous, λ h s, h.is_open_preimage s (is_open_discrete _)⟩
lemma iff_continuous_bot (f : X → Y) :
is_locally_constant f ↔ @continuous X Y _ ⊥ f :=
iff_continuous f
lemma of_constant (f : X → Y) (h : ∀ x y, f x = f y) :
is_locally_constant f :=
(iff_eventually_eq f).2 $ λ x, eventually_of_forall $ λ x', h _ _
lemma const (y : Y) : is_locally_constant (function.const X y) :=
of_constant _ $ λ _ _, rfl
lemma comp {f : X → Y} (hf : is_locally_constant f) (g : Y → Z) :
is_locally_constant (g ∘ f) :=
λ s, by { rw set.preimage_comp, exact hf _ }
lemma prod_mk {Y'} {f : X → Y} {f' : X → Y'} (hf : is_locally_constant f)
(hf' : is_locally_constant f') :
is_locally_constant (λ x, (f x, f' x)) :=
(iff_eventually_eq _).2 $ λ x, (hf.eventually_eq x).mp $ (hf'.eventually_eq x).mono $
λ x' hf' hf, prod.ext hf hf'
lemma comp₂ {Y₁ Y₂ Z : Type*} {f : X → Y₁} {g : X → Y₂}
(hf : is_locally_constant f) (hg : is_locally_constant g) (h : Y₁ → Y₂ → Z) :
is_locally_constant (λ x, h (f x) (g x)) :=
(hf.prod_mk hg).comp (λ x : Y₁ × Y₂, h x.1 x.2)
lemma comp_continuous [topological_space Y] {g : Y → Z} {f : X → Y}
(hg : is_locally_constant g) (hf : continuous f) :
is_locally_constant (g ∘ f) :=
λ s, by { rw set.preimage_comp, exact hf.is_open_preimage _ (hg _) }
/-- A locally constant function is constant on any preconnected set. -/
lemma apply_eq_of_is_preconnected {f : X → Y} (hf : is_locally_constant f)
{s : set X} (hs : is_preconnected s) {x y : X} (hx : x ∈ s) (hy : y ∈ s) :
f x = f y :=
begin
let U := f ⁻¹' {f y},
suffices : x ∉ Uᶜ, from not_not.1 this,
intro hxV,
specialize hs U Uᶜ (hf {f y}) (hf {f y}ᶜ) _ ⟨y, ⟨hy, rfl⟩⟩ ⟨x, ⟨hx, hxV⟩⟩,
{ simp only [union_compl_self, subset_univ] },
{ simpa only [inter_empty, not_nonempty_empty, inter_compl_self] using hs }
end
lemma iff_is_const [preconnected_space X] {f : X → Y} :
is_locally_constant f ↔ ∀ x y, f x = f y :=
⟨λ h x y, h.apply_eq_of_is_preconnected is_preconnected_univ trivial trivial, of_constant _⟩
lemma range_finite [compact_space X] {f : X → Y} (hf : is_locally_constant f) :
(set.range f).finite :=
begin
letI : topological_space Y := ⊥,
haveI : discrete_topology Y := ⟨rfl⟩,
rw @iff_continuous X Y ‹_› ‹_› at hf,
exact finite_of_is_compact_of_discrete _ (compact_range hf)
end
@[to_additive] lemma one [has_one Y] : is_locally_constant (1 : X → Y) := const 1
@[to_additive] lemma inv [has_inv Y] ⦃f : X → Y⦄ (hf : is_locally_constant f) :
is_locally_constant f⁻¹ :=
hf.comp (λ x, x⁻¹)
@[to_additive]
lemma mul [has_mul Y] ⦃f g : X → Y⦄ (hf : is_locally_constant f) (hg : is_locally_constant g) :
is_locally_constant (f * g) :=
hf.comp₂ hg (*)
@[to_additive]
lemma div [has_div Y] ⦃f g : X → Y⦄ (hf : is_locally_constant f) (hg : is_locally_constant g) :
is_locally_constant (f / g) :=
hf.comp₂ hg (/)
end is_locally_constant
/-- A (bundled) locally constant function from a topological space `X` to a type `Y`. -/
structure locally_constant (X Y : Type*) [topological_space X] :=
(to_fun : X → Y)
(is_locally_constant : is_locally_constant to_fun)
namespace locally_constant
instance [inhabited Y] : inhabited (locally_constant X Y) :=
⟨⟨_, is_locally_constant.const (default Y)⟩⟩
instance : has_coe_to_fun (locally_constant X Y) := ⟨_, locally_constant.to_fun⟩
initialize_simps_projections locally_constant (to_fun → apply)
@[simp] lemma to_fun_eq_coe (f : locally_constant X Y) : f.to_fun = f := rfl
@[simp] lemma coe_mk (f : X → Y) (h) : ⇑(⟨f, h⟩ : locally_constant X Y) = f := rfl
theorem congr_fun {f g : locally_constant X Y} (h : f = g) (x : X) : f x = g x :=
congr_arg (λ h : locally_constant X Y, h x) h
theorem congr_arg (f : locally_constant X Y) {x y : X} (h : x = y) : f x = f y :=
congr_arg (λ x : X, f x) h
theorem coe_injective : function.injective (λ (f : locally_constant X Y) (x : X), f x)
| ⟨f, hf⟩ ⟨g, hg⟩ h := have f = g, from h, by subst f
@[simp, norm_cast] theorem coe_inj {f g : locally_constant X Y} : (f : X → Y) = g ↔ f = g :=
coe_injective.eq_iff
@[ext] theorem ext ⦃f g : locally_constant X Y⦄ (h : ∀ x, f x = g x) : f = g :=
coe_injective (funext h)
theorem ext_iff {f g : locally_constant X Y} : f = g ↔ ∀ x, f x = g x :=
⟨λ h x, h ▸ rfl, λ h, ext h⟩
protected lemma continuous [topological_space Y] (f : locally_constant X Y) : continuous f :=
f.is_locally_constant.continuous
/-- The constant locally constant function on `X` with value `y : Y`. -/
def const (X : Type*) {Y : Type*} [topological_space X] (y : Y) :
locally_constant X Y :=
⟨function.const X y, is_locally_constant.const _⟩
lemma range_finite [compact_space X] (f : locally_constant X Y) :
(set.range f).finite :=
f.is_locally_constant.range_finite
lemma apply_eq_of_is_preconnected (f : locally_constant X Y) {s : set X} (hs : is_preconnected s)
{x y : X} (hx : x ∈ s) (hy : y ∈ s) :
f x = f y :=
f.is_locally_constant.apply_eq_of_is_preconnected hs hx hy
lemma apply_eq_of_preconnected_space [preconnected_space X] (f : locally_constant X Y) (x y : X) :
f x = f y :=
f.is_locally_constant.apply_eq_of_is_preconnected is_preconnected_univ trivial trivial
lemma eq_const [preconnected_space X] (f : locally_constant X Y) (x : X) :
f = const X (f x) :=
ext $ λ y, apply_eq_of_preconnected_space f _ _
lemma exists_eq_const [preconnected_space X] [nonempty Y] (f : locally_constant X Y) :
∃ y, f = const X y :=
begin
rcases classical.em (nonempty X) with ⟨⟨x⟩⟩|hX,
{ exact ⟨f x, f.eq_const x⟩ },
{ exact ⟨classical.arbitrary Y, ext $ λ x, (hX ⟨x⟩).elim⟩ }
end
/-- Push forward of locally constant maps under any map, by post-composition. -/
def map (f : Y → Z) : locally_constant X Y → locally_constant X Z :=
λ g, ⟨f ∘ g, λ s, by { rw set.preimage_comp, apply g.is_locally_constant }⟩
@[simp] lemma map_apply (f : Y → Z) (g : locally_constant X Y) : ⇑(map f g) = f ∘ g := rfl
@[simp] lemma map_id : @map X Y Y _ id = id := by { ext, refl }
@[simp] lemma map_comp {Y₁ Y₂ Y₃ : Type*} (g : Y₂ → Y₃) (f : Y₁ → Y₂) :
@map X _ _ _ g ∘ map f = map (g ∘ f) := by { ext, refl }
section comap
open_locale classical
variables [topological_space Y]
/-- Pull back of locally constant maps under any map, by pre-composition.
This definition only makes sense if `f` is continuous,
in which case it sends locally constant functions to their precomposition with `f`.
See also `locally_constant.coe_comap`. -/
noncomputable
def comap (f : X → Y) :
locally_constant Y Z → locally_constant X Z :=
if hf : continuous f
then λ g, ⟨g ∘ f, g.is_locally_constant.comp_continuous hf⟩
else
begin
by_cases H : nonempty X,
{ introsI g, exact const X (g $ f $ classical.arbitrary X) },
{ intro g, refine ⟨λ x, (H ⟨x⟩).elim, _⟩,
intro s, rw is_open_iff_nhds, intro x, exact (H ⟨x⟩).elim }
end
@[simp] lemma coe_comap (f : X → Y) (g : locally_constant Y Z) (hf : continuous f) :
⇑(comap f g) = g ∘ f :=
by { rw [comap, dif_pos hf], refl }
@[simp] lemma comap_id : @comap X X Z _ _ id = id :=
by { ext, simp only [continuous_id, id.def, function.comp.right_id, coe_comap] }
lemma comap_comp [topological_space Z]
(f : X → Y) (g : Y → Z) (hf : continuous f) (hg : continuous g) :
@comap _ _ α _ _ f ∘ comap g = comap (g ∘ f) :=
by { ext, simp only [hf, hg, hg.comp hf, coe_comap] }
lemma comap_const (f : X → Y) (y : Y) (h : ∀ x, f x = y) :
(comap f : locally_constant Y Z → locally_constant X Z) =
λ g, ⟨λ x, g y, is_locally_constant.const _⟩ :=
begin
ext, rw coe_comap,
{ simp only [h, coe_mk, function.comp_app] },
{ rw show f = λ x, y, by ext; apply h,
exact continuous_const }
end
end comap
end locally_constant
|
tabPanel(title = "Sample Clonality",
sidebarLayout(
sidebarPanel(
h3("Graphs:"),
radioButtons("sc", "",
c("Number of Mutations" = "oneC",
"Number of Clones" = "oneE",
"Shannon Diversity" = "twoA",
"Mutations in \n Dominant Clone" = "twoB",
"Dominant Clone Size" = "threeA"
)),
conditionalPanel(
condition = "input.tabselected==1",
selectInput("sampleClonGroups", "Grouping", c("Final_group","Dx","Group"), selected="Final_group")
),
conditionalPanel(
condition = "input.tabselected==2",
splitLayout(
textInput("Group1",label="Group Name",value="DNMT3A_only"),
selectInput("Group1_include", label="Include",
colnames(clone_mutations)[6:33],
multiple = TRUE,
selected=c("DNMT3A")),
selectInput("Group1_exclude", label="Exclude",
colnames(clone_mutations)[6:33],
multiple = TRUE,
selected=c("RAS","FLT3"))),
splitLayout(
textInput("Group2",label=NULL,value="DNMT3A_NRAS"),
selectInput("Group2_include", label=NULL,
colnames(clone_mutations)[6:33],
multiple = TRUE,
selected=c("DNMT3A","NRAS")),
selectInput("Group2_exclude", label=NULL,
colnames(clone_mutations)[6:33],
multiple = TRUE,
selected=c("FLT3"))),
splitLayout(
textInput("Group3",label=NULL,value="DNMT3A_FLT3"),
selectInput("Group3_include", label=NULL,
colnames(clone_mutations)[6:33],
multiple = TRUE,
selected=c("DNMT3A","FLT3")),
selectInput("Group3_exclude", label=NULL,
colnames(clone_mutations)[6:33],
multiple = TRUE,
selected=c("NRAS")))
)),
mainPanel(
tabsetPanel(type = "tabs", id = "tabselected",
tabPanel("Premade", value=1, plotOutput("sampleClonPlotP",width="50%")),
tabPanel("Custom", value=2, plotOutput("sampleClonPlotC",width="50%"))
)
)
)
)
|
/**
*
* @file qwrapper_zgemm_tile.c
*
* PLASMA core_blas quark wrapper
* PLASMA is a software package provided by Univ. of Tennessee,
* Univ. of California Berkeley and Univ. of Colorado Denver
*
* @version 2.6.0
* @author Mark Gates
* @date 2010-11-15
* @precisions normal z -> c d s
*
**/
#include <cblas.h>
#include "common.h"
/***************************************************************************//**
*
* Version of zgemm for tile storage, to avoid dependency problem when
* computations are done within the tile. alpha and beta are passed as
* pointers so they can depend on runtime values.
*
* @param[in] Alock
* Pointer to tile owning submatrix A.
*
* @param[in] Block
* Pointer to tile owning submatrix B.
*
* @param[in] Clock
* Pointer to tile owning submatrix C.
*
**/
void QUARK_CORE_zgemm_tile(Quark *quark, Quark_Task_Flags *task_flags,
PLASMA_enum transA, PLASMA_enum transB,
int m, int n, int k, int nb,
const PLASMA_Complex64_t *alpha, const PLASMA_Complex64_t *A, int lda,
const PLASMA_Complex64_t *B, int ldb,
const PLASMA_Complex64_t *beta, PLASMA_Complex64_t *C, int ldc,
const PLASMA_Complex64_t *Alock,
const PLASMA_Complex64_t *Block,
const PLASMA_Complex64_t *Clock)
{
DAG_CORE_GEMM;
QUARK_Insert_Task(quark, CORE_zgemm_tile_quark, task_flags,
sizeof(PLASMA_enum), &transA, VALUE,
sizeof(PLASMA_enum), &transB, VALUE,
sizeof(int), &m, VALUE,
sizeof(int), &n, VALUE,
sizeof(int), &k, VALUE,
sizeof(PLASMA_Complex64_t), alpha, INPUT,
sizeof(PLASMA_Complex64_t)*nb*nb, A, NODEP, /* input; see Alock */
sizeof(int), &lda, VALUE,
sizeof(PLASMA_Complex64_t)*nb*nb, B, NODEP, /* input; see Block */
sizeof(int), &ldb, VALUE,
sizeof(PLASMA_Complex64_t), beta, INPUT,
sizeof(PLASMA_Complex64_t)*nb*nb, C, NODEP, /* inout; see Clock */
sizeof(int), &ldc, VALUE,
sizeof(PLASMA_Complex64_t)*nb*nb, Alock, INPUT,
sizeof(PLASMA_Complex64_t)*nb, Block, INPUT,
sizeof(PLASMA_Complex64_t)*nb, Clock, INOUT,
0);
}
/***************************************************************************//**
*
**/
#if defined(PLASMA_HAVE_WEAK)
#pragma weak CORE_zgemm_tile_quark = PCORE_zgemm_tile_quark
#define CORE_zgemm_tile_quark PCORE_zgemm_tile_quark
#endif
void CORE_zgemm_tile_quark(Quark *quark)
{
PLASMA_enum transA, transB;
int m, n, k, lda, ldb, ldc;
const PLASMA_Complex64_t *alpha, *beta;
const PLASMA_Complex64_t *A, *B;
PLASMA_Complex64_t *C;
quark_unpack_args_13( quark, transA, transB, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc );
cblas_zgemm(
CblasColMajor,
(CBLAS_TRANSPOSE)transA, (CBLAS_TRANSPOSE)transB,
m, n, k,
CBLAS_SADDR(*alpha), A, lda,
B, ldb,
CBLAS_SADDR(*beta), C, ldc );
}
|
/-
Copyright (c) 2020 Heather Macbeth. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Heather Macbeth
-/
import data.set.intervals.basic
import data.set.function
/-!
# Monotone surjective functions are surjective on intervals
A monotone surjective function sends any interval in the domain onto the interval with corresponding
endpoints in the range. This is expressed in this file using `set.surj_on`, and provided for all
permutations of interval endpoints.
-/
variables {α : Type*} {β : Type*} [linear_order α] [partial_order β] {f : α → β}
open set function order_dual (to_dual)
lemma surj_on_Ioo_of_monotone_surjective
(h_mono : monotone f) (h_surj : function.surjective f) (a b : α) :
surj_on f (Ioo a b) (Ioo (f a) (f b)) :=
begin
intros p hp,
rcases h_surj p with ⟨x, rfl⟩,
refine ⟨x, mem_Ioo.2 _, rfl⟩,
contrapose! hp,
exact λ h, h.2.not_le (h_mono $ hp $ h_mono.reflect_lt h.1)
end
lemma surj_on_Ico_of_monotone_surjective
(h_mono : monotone f) (h_surj : function.surjective f) (a b : α) :
surj_on f (Ico a b) (Ico (f a) (f b)) :=
begin
obtain hab | hab := lt_or_le a b,
{ intros p hp,
rcases eq_left_or_mem_Ioo_of_mem_Ico hp with rfl|hp',
{ exact mem_image_of_mem f (left_mem_Ico.mpr hab) },
{ have := surj_on_Ioo_of_monotone_surjective h_mono h_surj a b hp',
exact image_subset f Ioo_subset_Ico_self this } },
{ rw Ico_eq_empty (h_mono hab).not_lt,
exact surj_on_empty f _ }
end
lemma surj_on_Ioc_of_monotone_surjective
(h_mono : monotone f) (h_surj : function.surjective f) (a b : α) :
surj_on f (Ioc a b) (Ioc (f a) (f b)) :=
by simpa using surj_on_Ico_of_monotone_surjective h_mono.dual h_surj (to_dual b) (to_dual a)
-- to see that the hypothesis `a ≤ b` is necessary, consider a constant function
lemma surj_on_Icc_of_monotone_surjective
(h_mono : monotone f) (h_surj : function.surjective f) {a b : α} (hab : a ≤ b) :
surj_on f (Icc a b) (Icc (f a) (f b)) :=
begin
intros p hp,
rcases eq_endpoints_or_mem_Ioo_of_mem_Icc hp with (rfl|rfl|hp'),
{ exact ⟨a, left_mem_Icc.mpr hab, rfl⟩ },
{ exact ⟨b, right_mem_Icc.mpr hab, rfl⟩ },
{ have := surj_on_Ioo_of_monotone_surjective h_mono h_surj a b hp',
exact image_subset f Ioo_subset_Icc_self this }
end
lemma surj_on_Ioi_of_monotone_surjective
(h_mono : monotone f) (h_surj : function.surjective f) (a : α) :
surj_on f (Ioi a) (Ioi (f a)) :=
begin
rw [← compl_Iic, ← compl_compl (Ioi (f a))],
refine maps_to.surj_on_compl _ h_surj,
exact λ x hx, (h_mono hx).not_lt
end
lemma surj_on_Iio_of_monotone_surjective
(h_mono : monotone f) (h_surj : function.surjective f) (a : α) :
surj_on f (Iio a) (Iio (f a)) :=
@surj_on_Ioi_of_monotone_surjective _ _ _ _ _ h_mono.dual h_surj a
lemma surj_on_Ici_of_monotone_surjective
(h_mono : monotone f) (h_surj : function.surjective f) (a : α) :
surj_on f (Ici a) (Ici (f a)) :=
begin
rw [← Ioi_union_left, ← Ioi_union_left],
exact (surj_on_Ioi_of_monotone_surjective h_mono h_surj a).union_union
(@image_singleton _ _ f a ▸ surj_on_image _ _)
end
lemma surj_on_Iic_of_monotone_surjective
(h_mono : monotone f) (h_surj : function.surjective f) (a : α) :
surj_on f (Iic a) (Iic (f a)) :=
@surj_on_Ici_of_monotone_surjective _ _ _ _ _ h_mono.dual h_surj a
|
// Software License for MTL
//
// Copyright (c) 2007 The Trustees of Indiana University.
// 2008 Dresden University of Technology and the Trustees of Indiana University.
// 2010 SimuNova UG (haftungsbeschränkt), www.simunova.com.
// All rights reserved.
// Authors: Peter Gottschling and Andrew Lumsdaine
//
// This file is part of the Matrix Template Library
//
// See also license.mtl.txt in the distribution.
#include <iostream>
#include <cmath>
#include <boost/numeric/mtl/mtl.hpp>
using namespace std;
#ifdef MTL_WITH_INITLIST
template <typename Vector>
void test(const char* name)
{
const Vector v= {3, 4, 5};
mtl::io::tout << name << ", v: " << v << "\n";
MTL_THROW_IF(v[0] != 3.0, mtl::runtime_error("wrong"));
Vector w;
w= {2, 4, 7};
mtl::io::tout << "w: " << w << "\n";
MTL_THROW_IF(w[0] != 2.0, mtl::runtime_error("wrong"));
}
#else
template <typename Vector>
void test(const char* ) {}
#endif
int main(int , char**)
{
using mtl::vec::parameters;
using namespace mtl;
test<dense_vector<float> >("test float");
test<dense_vector<double> >("test double");
test<dense_vector<float, parameters<row_major> > >("test float in row vector");
return 0;
}
|
% demo.m
close all;
clear;
clc;
%% set parameters
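% k: number of nearest neighbours used by the RCM k-NN classifiers below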
k = 5;
%% load data
load('./dataset/ORL_Face_img_cov.mat');
%% perform RCM k-NN classifier with
% GRCM2 with eigenvalue-based distance
grcm_accuracy = rcm_knn_classifier(TrainSet, TestSet,'GRCM', '2', 'EV', k);
% RCM4 with eigenvalue-based distance
rcm_accuracy = rcm_knn_classifier(TrainSet, TestSet, 'RCM', '4', 'EV', k);
fprintf('\n');
fprintf('# GRCM2 Accuracy = %5.2f\n', grcm_accuracy);
fprintf('# RCM4  Accuracy = %5.2f\n', rcm_accuracy);
|
State Before: case inr
F✝ : Type ?u.203878
X : Type u
Y : Type v
Z : Type w
Z' : Type x
inst✝³ : TopologicalSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : TopologicalSpace Z
inst✝ : TopologicalSpace Z'
f₀ f₁ f₂ : C(X, Y)
F : Homotopy f₀ f₁
G : Homotopy f₁ f₂
x : ↑I × X
h✝ : ¬↑x.fst ≤ 1 / 2
⊢ ↑(↑(extend G) (2 * ↑x.fst - 1)) x.snd = ↑G ({ val := 2 * ↑x.fst - 1, property := (_ : 2 * ↑x.fst - 1 ∈ I) }, x.snd) State After: case inr
F✝ : Type ?u.203878
X : Type u
Y : Type v
Z : Type w
Z' : Type x
inst✝³ : TopologicalSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : TopologicalSpace Z
inst✝ : TopologicalSpace Z'
f₀ f₁ f₂ : C(X, Y)
F : Homotopy f₀ f₁
G : Homotopy f₁ f₂
x : ↑I × X
h✝ : ¬↑x.fst ≤ 1 / 2
⊢ ↑(↑(curry G) { val := 2 * ↑x.fst - 1, property := ?inr.hx }) x.snd =
↑G ({ val := 2 * ↑x.fst - 1, property := (_ : 2 * ↑x.fst - 1 ∈ I) }, x.snd)
case inr.hx
F✝ : Type ?u.203878
X : Type u
Y : Type v
Z : Type w
Z' : Type x
inst✝³ : TopologicalSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : TopologicalSpace Z
inst✝ : TopologicalSpace Z'
f₀ f₁ f₂ : C(X, Y)
F : Homotopy f₀ f₁
G : Homotopy f₁ f₂
x : ↑I × X
h✝ : ¬↑x.fst ≤ 1 / 2
⊢ 2 * ↑x.fst - 1 ∈ Set.Icc 0 1 Tactic: rw [extend, ContinuousMap.coe_IccExtend, Set.IccExtend_of_mem] State Before: case inr
F✝ : Type ?u.203878
X : Type u
Y : Type v
Z : Type w
Z' : Type x
inst✝³ : TopologicalSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : TopologicalSpace Z
inst✝ : TopologicalSpace Z'
f₀ f₁ f₂ : C(X, Y)
F : Homotopy f₀ f₁
G : Homotopy f₁ f₂
x : ↑I × X
h✝ : ¬↑x.fst ≤ 1 / 2
⊢ ↑(↑(curry G) { val := 2 * ↑x.fst - 1, property := ?inr.hx }) x.snd =
↑G ({ val := 2 * ↑x.fst - 1, property := (_ : 2 * ↑x.fst - 1 ∈ I) }, x.snd)
case inr.hx
F✝ : Type ?u.203878
X : Type u
Y : Type v
Z : Type w
Z' : Type x
inst✝³ : TopologicalSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : TopologicalSpace Z
inst✝ : TopologicalSpace Z'
f₀ f₁ f₂ : C(X, Y)
F : Homotopy f₀ f₁
G : Homotopy f₁ f₂
x : ↑I × X
h✝ : ¬↑x.fst ≤ 1 / 2
⊢ 2 * ↑x.fst - 1 ∈ Set.Icc 0 1 State After: no goals Tactic: rfl
|
(******************************************************************************)
(* Project: Isabelle/UTP: Unifying Theories of Programming in Isabelle/HOL *)
(* File: Normalise.thy *)
(* Authors: Frank Zeyda and Simon Foster (University of York, UK) *)
(* Emails: [email protected] and [email protected] *)
(******************************************************************************)
(* LAST REVIEWED: 09 Jun 2022 *)
section \<open>Normalisation\<close>
theory Normalise
imports Main
"HOL-Eisbach.Eisbach"
begin
subsection \<open>Theorem Attribute\<close>
text \<open>Theorems and tactic used for normalisation.\<close>
named_theorems normalise "normalisation theorems"
subsection \<open>Normalisation Tactic\<close>
method normalise = (simp add: normalise)
subsection \<open>Type Class @{text normalise}\<close>
text \<open>
Instantiation of the class @{text normalise} signals that normalisation can
be performed on constructions that involve the underlying type. For now, we
only consider chains of function updates. Importantly, the type must provide
a linear order to ensure that normalisation does not loop.
\<close>
class normalise = linorder
begin
theorem fun_upd_normalise [normalise]:
fixes x :: "'a"
fixes y :: "'a"
shows "x < y \<Longrightarrow> f(y := a, x := b) = f(x := b, y := a)"
by (simp add: fun_upd_twist)
end
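text \<open>
  To illustrate, for a type of class @{text normalise} with elements
  @{text "x < y"}, the method @{text normalise} rewrites the update chain
  @{text "f(y := a, x := b)"} into @{text "f(x := b, y := a)"}, so that
  updates end up ordered by their keys.
\<close>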
text \<open>
The tactic below is not very efficient as using backtracking. In practice,
it is slightly quicker to use plain simplification instead of substitution.
Both approaches, however, suffer from the same drawback that dead ends may
need to be explored. A better solution would implement some form of guarded
rewriting.
\<close>
method fun_upd_normalise_tac =
(subst fun_upd_normalise, (simp; fail))+
end |
module like
use params
use utils1
implicit none
contains
!=======================================================================
subroutine slikelihood(Cube,slhood)
implicit none
double precision Cube(nest_nPar),slhood
double precision temp(sdim),dist,loclik
integer i,j
double precision TwoPi
    TwoPi=6.283185307179586d0
slhood=-huge(1.d0)*epsilon(1.d0)
!rescaling the parameters in unit hypercube according to the prior
do i=1,sdim
temp(i)=(spriorran(i,2)-spriorran(i,1))*Cube(i)+spriorran(i,1)
end do
Cube(1:sdim)=temp(1:sdim)
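    !each mode i contributes a Gaussian shell of radius sr(i) and width sw(i)
    !centred at sc(i,1:sdim); the modes are combined in log space via logSumExp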
do i=1,sModes
dist=(sqrt(sum((temp(1:sdim)-sc(i,1:sdim))**2.))-sr(i))**2
loclik=-dist/(2.*(sw(i)**2.))-log(TwoPi*sw(i)**2)/2.
slhood=logSumExp(slhood,loclik)
end do
end subroutine slikelihood
!=======================================================================
end module like
|
subroutine qng(f,a,b,epsabs,epsrel,result,abserr,neval,ier)
c***begin prologue qng
c***date written 800101 (yymmdd)
c***revision date 830518 (yymmdd)
c***category no. h2a1a1
c***keywords automatic integrator, smooth integrand,
c non-adaptive, gauss-kronrod(patterson)
c***author piessens,robert,appl. math. & progr. div. - k.u.leuven
c de doncker,elise,appl math & progr. div. - k.u.leuven
c kahaner,david,nbs - modified (2/82)
c***purpose the routine calculates an approximation result to a
c given definite integral i = integral of f over (a,b),
c hopefully satisfying following claim for accuracy
c abs(i-result).le.max(epsabs,epsrel*abs(i)).
c***description
c
c non-adaptive integration
c standard fortran subroutine
c real version
c
c f - real version
c function subprogram defining the integrand function
c f(x). the actual name for f needs to be declared
c e x t e r n a l in the driver program.
c
c a - real version
c lower limit of integration
c
c b - real version
c upper limit of integration
c
c epsabs - real
c absolute accuracy requested
c epsrel - real
c relative accuracy requested
c if epsabs.le.0
c and epsrel.lt.max(50*rel.mach.acc.,0.5d-28),
c the routine will end with ier = 6.
c
c on return
c result - real
c approximation to the integral i
c result is obtained by applying the 21-point
c gauss-kronrod rule (res21) obtained by optimal
c addition of abscissae to the 10-point gauss rule
c (res10), or by applying the 43-point rule (res43)
c obtained by optimal addition of abscissae to the
c 21-point gauss-kronrod rule, or by applying the
c 87-point rule (res87) obtained by optimal addition
c of abscissae to the 43-point rule.
c
c abserr - real
c estimate of the modulus of the absolute error,
c which should equal or exceed abs(i-result)
c
c neval - integer
c number of integrand evaluations
c
c ier - ier = 0 normal and reliable termination of the
c routine. it is assumed that the requested
c accuracy has been achieved.
c ier.gt.0 abnormal termination of the routine. it is
c assumed that the requested accuracy has
c not been achieved.
c error messages
c ier = 1 the maximum number of steps has been
c executed. the integral is probably too
c difficult to be calculated by dqng.
c = 6 the input is invalid, because
c epsabs.le.0 and
c epsrel.lt.max(50*rel.mach.acc.,0.5d-28).
c result, abserr and neval are set to zero.
c
c***references (none)
c***routines called r1mach,xerror
c***end prologue qng
c
real a,absc,abserr,b,centr,dhlgth,epmach,epsabs,epsrel,f,fcentr,
* fval,fval1,fval2,fv1,fv2,fv3,fv4,hlgth,result,res10,res21,res43,
* res87,resabs,resasc,reskh,r1mach,savfun,uflow,w10,w21a,w43a,
* w43b,w87a,w87b,x1,x2,x3,x4
integer ier,ipx,k,l,neval
external f
c
dimension fv1(5),fv2(5),fv3(5),fv4(5),x1(5),x2(5),x3(11),x4(22),
* w10(5),w21a(5),w21b(6),w43a(10),w43b(12),w87a(21),w87b(23),
* savfun(21)
c
c the following data statements contain the
c abscissae and weights of the integration rules used.
c
c x1 abscissae common to the 10-, 21-, 43-
c and 87-point rule
c x2 abscissae common to the 21-, 43- and
c 87-point rule
c x3 abscissae common to the 43- and 87-point
c rule
c x4 abscissae of the 87-point rule
c w10 weights of the 10-point formula
c w21a weights of the 21-point formula for
c abscissae x1
c w21b weights of the 21-point formula for
c abscissae x2
c w43a weights of the 43-point formula for
c abscissae x1, x3
c w43b weights of the 43-point formula for
c abscissae x3
c w87a weights of the 87-point formula for
c abscissae x1, x2, x3
c w87b weights of the 87-point formula for
c abscissae x4
c
data x1(1),x1(2),x1(3),x1(4),x1(5)/
* 0.9739065285171717e+00, 0.8650633666889845e+00,
* 0.6794095682990244e+00, 0.4333953941292472e+00,
* 0.1488743389816312e+00/
data x2(1),x2(2),x2(3),x2(4),x2(5)/
* 0.9956571630258081e+00, 0.9301574913557082e+00,
* 0.7808177265864169e+00, 0.5627571346686047e+00,
* 0.2943928627014602e+00/
data x3(1),x3(2),x3(3),x3(4),x3(5),x3(6),x3(7),x3(8),
* x3(9),x3(10),x3(11)/
* 0.9993333609019321e+00, 0.9874334029080889e+00,
* 0.9548079348142663e+00, 0.9001486957483283e+00,
* 0.8251983149831142e+00, 0.7321483889893050e+00,
* 0.6228479705377252e+00, 0.4994795740710565e+00,
* 0.3649016613465808e+00, 0.2222549197766013e+00,
* 0.7465061746138332e-01/
data x4(1),x4(2),x4(3),x4(4),x4(5),x4(6),x4(7),x4(8),x4(9),
* x4(10),x4(11),x4(12),x4(13),x4(14),x4(15),x4(16),x4(17),x4(18),
* x4(19),x4(20),x4(21),x4(22)/ 0.9999029772627292e+00,
* 0.9979898959866787e+00, 0.9921754978606872e+00,
* 0.9813581635727128e+00, 0.9650576238583846e+00,
* 0.9431676131336706e+00, 0.9158064146855072e+00,
* 0.8832216577713165e+00, 0.8457107484624157e+00,
* 0.8035576580352310e+00, 0.7570057306854956e+00,
* 0.7062732097873218e+00, 0.6515894665011779e+00,
* 0.5932233740579611e+00, 0.5314936059708319e+00,
* 0.4667636230420228e+00, 0.3994248478592188e+00,
* 0.3298748771061883e+00, 0.2585035592021616e+00,
* 0.1856953965683467e+00, 0.1118422131799075e+00,
* 0.3735212339461987e-01/
data w10(1),w10(2),w10(3),w10(4),w10(5)/
* 0.6667134430868814e-01, 0.1494513491505806e+00,
* 0.2190863625159820e+00, 0.2692667193099964e+00,
* 0.2955242247147529e+00/
data w21a(1),w21a(2),w21a(3),w21a(4),w21a(5)/
* 0.3255816230796473e-01, 0.7503967481091995e-01,
* 0.1093871588022976e+00, 0.1347092173114733e+00,
* 0.1477391049013385e+00/
data w21b(1),w21b(2),w21b(3),w21b(4),w21b(5),w21b(6)/
* 0.1169463886737187e-01, 0.5475589657435200e-01,
* 0.9312545458369761e-01, 0.1234919762620659e+00,
* 0.1427759385770601e+00, 0.1494455540029169e+00/
data w43a(1),w43a(2),w43a(3),w43a(4),w43a(5),w43a(6),w43a(7),
* w43a(8),w43a(9),w43a(10)/ 0.1629673428966656e-01,
* 0.3752287612086950e-01, 0.5469490205825544e-01,
* 0.6735541460947809e-01, 0.7387019963239395e-01,
* 0.5768556059769796e-02, 0.2737189059324884e-01,
* 0.4656082691042883e-01, 0.6174499520144256e-01,
* 0.7138726726869340e-01/
data w43b(1),w43b(2),w43b(3),w43b(4),w43b(5),w43b(6),
* w43b(7),w43b(8),w43b(9),w43b(10),w43b(11),w43b(12)/
* 0.1844477640212414e-02, 0.1079868958589165e-01,
* 0.2189536386779543e-01, 0.3259746397534569e-01,
* 0.4216313793519181e-01, 0.5074193960018458e-01,
* 0.5837939554261925e-01, 0.6474640495144589e-01,
* 0.6956619791235648e-01, 0.7282444147183321e-01,
* 0.7450775101417512e-01, 0.7472214751740301e-01/
data w87a(1),w87a(2),w87a(3),w87a(4),w87a(5),w87a(6),
* w87a(7),w87a(8),w87a(9),w87a(10),w87a(11),w87a(12),
* w87a(13),w87a(14),w87a(15),w87a(16),w87a(17),w87a(18),
* w87a(19),w87a(20),w87a(21)/
* 0.8148377384149173e-02, 0.1876143820156282e-01,
* 0.2734745105005229e-01, 0.3367770731163793e-01,
* 0.3693509982042791e-01, 0.2884872430211531e-02,
* 0.1368594602271270e-01, 0.2328041350288831e-01,
* 0.3087249761171336e-01, 0.3569363363941877e-01,
* 0.9152833452022414e-03, 0.5399280219300471e-02,
* 0.1094767960111893e-01, 0.1629873169678734e-01,
* 0.2108156888920384e-01, 0.2537096976925383e-01,
* 0.2918969775647575e-01, 0.3237320246720279e-01,
* 0.3478309895036514e-01, 0.3641222073135179e-01,
* 0.3725387550304771e-01/
data w87b(1),w87b(2),w87b(3),w87b(4),w87b(5),w87b(6),w87b(7),
* w87b(8),w87b(9),w87b(10),w87b(11),w87b(12),w87b(13),w87b(14),
* w87b(15),w87b(16),w87b(17),w87b(18),w87b(19),w87b(20),
* w87b(21),w87b(22),w87b(23)/ 0.2741455637620724e-03,
* 0.1807124155057943e-02, 0.4096869282759165e-02,
* 0.6758290051847379e-02, 0.9549957672201647e-02,
* 0.1232944765224485e-01, 0.1501044734638895e-01,
* 0.1754896798624319e-01, 0.1993803778644089e-01,
* 0.2219493596101229e-01, 0.2433914712600081e-01,
* 0.2637450541483921e-01, 0.2828691078877120e-01,
* 0.3005258112809270e-01, 0.3164675137143993e-01,
* 0.3305041341997850e-01, 0.3425509970422606e-01,
* 0.3526241266015668e-01, 0.3607698962288870e-01,
* 0.3669860449845609e-01, 0.3712054926983258e-01,
* 0.3733422875193504e-01, 0.3736107376267902e-01/
c
c list of major variables
c -----------------------
c
c centr - mid point of the integration interval
c hlgth - half-length of the integration interval
c fcentr - function value at mid point
c absc - abscissa
c fval - function value
c savfun - array of function values which
c have already been computed
c res10 - 10-point gauss result
c res21 - 21-point kronrod result
c res43 - 43-point result
c res87 - 87-point result
c resabs - approximation to the integral of abs(f)
c resasc - approximation to the integral of abs(f-i/(b-a))
c
c machine dependent constants
c ---------------------------
c
c epmach is the largest relative spacing.
c uflow is the smallest positive magnitude.
c
c***first executable statement qng
epmach = r1mach(4)
uflow = r1mach(1)
c
c test on validity of parameters
c ------------------------------
c
result = 0.0e+00
abserr = 0.0e+00
neval = 0
ier = 6
if(epsabs.le.0.0e+00.and.epsrel.lt.amax1(0.5e-14,0.5e+02*epmach))
* go to 80
hlgth = 0.5e+00*(b-a)
dhlgth = abs(hlgth)
centr = 0.5e+00*(b+a)
fcentr = f(centr)
neval = 21
ier = 1
c
c compute the integral using the 10- and 21-point formula.
c
do 70 l = 1,3
go to (5,25,45),l
5 res10 = 0.0e+00
res21 = w21b(6)*fcentr
resabs = w21b(6)*abs(fcentr)
do 10 k=1,5
absc = hlgth*x1(k)
fval1 = f(centr+absc)
fval2 = f(centr-absc)
fval = fval1+fval2
res10 = res10+w10(k)*fval
res21 = res21+w21a(k)*fval
resabs = resabs+w21a(k)*(abs(fval1)+abs(fval2))
savfun(k) = fval
fv1(k) = fval1
fv2(k) = fval2
10 continue
ipx = 5
do 15 k=1,5
ipx = ipx+1
absc = hlgth*x2(k)
fval1 = f(centr+absc)
fval2 = f(centr-absc)
fval = fval1+fval2
res21 = res21+w21b(k)*fval
resabs = resabs+w21b(k)*(abs(fval1)+abs(fval2))
savfun(ipx) = fval
fv3(k) = fval1
fv4(k) = fval2
15 continue
c
c test for convergence.
c
result = res21*hlgth
resabs = resabs*dhlgth
reskh = 0.5e+00*res21
resasc = w21b(6)*abs(fcentr-reskh)
do 20 k = 1,5
resasc = resasc+w21a(k)*(abs(fv1(k)-reskh)+abs(fv2(k)-reskh))
* +w21b(k)*(abs(fv3(k)-reskh)+abs(fv4(k)-reskh))
20 continue
abserr = abs((res21-res10)*hlgth)
resasc = resasc*dhlgth
go to 65
c
c compute the integral using the 43-point formula.
c
25 res43 = w43b(12)*fcentr
neval = 43
do 30 k=1,10
res43 = res43+savfun(k)*w43a(k)
30 continue
do 40 k=1,11
ipx = ipx+1
absc = hlgth*x3(k)
fval = f(absc+centr)+f(centr-absc)
res43 = res43+fval*w43b(k)
savfun(ipx) = fval
40 continue
c
c test for convergence.
c
result = res43*hlgth
abserr = abs((res43-res21)*hlgth)
go to 65
c
c compute the integral using the 87-point formula.
c
45 res87 = w87b(23)*fcentr
neval = 87
do 50 k=1,21
res87 = res87+savfun(k)*w87a(k)
50 continue
do 60 k=1,22
absc = hlgth*x4(k)
res87 = res87+w87b(k)*(f(absc+centr)+f(centr-absc))
60 continue
result = res87*hlgth
abserr = abs((res87-res43)*hlgth)
65 if(resasc.ne.0.0e+00.and.abserr.ne.0.0e+00)
* abserr = resasc*amin1(0.1e+01,
* (0.2e+03*abserr/resasc)**1.5e+00)
if (resabs.gt.uflow/(0.5e+02*epmach)) abserr = amax1
* ((epmach*0.5e+02)*resabs,abserr)
if (abserr.le.amax1(epsabs,epsrel*abs(result))) ier = 0
c ***jump out of do-loop
if (ier.eq.0) go to 999
70 continue
80 call xerror(26habnormal return from qng ,26,ier,0)
999 return
end
|
import heterocl as hcl
import numpy as np
import time
from gemm_main import gemm
def gemm_hbm(m=1024, n=1024, k=1024, dtype=hcl.UInt(32)):
matrix_1 = hcl.placeholder((m, k), dtype=dtype, name="matrix_1")
matrix_2 = hcl.placeholder((k, n), dtype=dtype, name="matrix_2")
def kernel(matrix_1, matrix_2):
r = hcl.reduce_axis(0, k, 'k')
out_matrix = hcl.compute((m, n),
lambda x, y: hcl.sum(matrix_1[x, r] * matrix_2[r, y],
axis=r, dtype=dtype), dtype=dtype,
name="out_matrix")
return out_matrix
s = hcl.create_schedule([matrix_1, matrix_2], kernel)
config = {
"host" : hcl.dev.cpu("intel", "e5"),
"xcel" : [
hcl.dev.fpga("xilinx", "xcvu19p")
]
}
target = hcl.platform.custom(config)
target.config(compile="vitis", mode="hw_exe", backend="vhls")
# block tiling and reorder
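    # split each output loop by block_size and reorder into a tiled loop
    # nest; the unrolls below then flatten the small intra-tile loops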
out_matrix = kernel.out_matrix
block_size = 4
y0, y1 = s[out_matrix].split(out_matrix.axis[0], factor=block_size)
x0, x1 = s[out_matrix].split(out_matrix.axis[1], factor=block_size)
s[out_matrix].reorder(y0, x0, y1, x1)
s[out_matrix].unroll(out_matrix.axis[1])
s[out_matrix].unroll(out_matrix.axis[0])
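    # place each operand in its own HBM bank so the three ports do not
    # contend for a single memory channel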
s.to(matrix_1, target.xcel.hbm[0])
s.to(matrix_2, target.xcel.hbm[1])
s.to(kernel.out_matrix, target.host.hbm[2])
f = hcl.build(s, target=target)
np_1 = np.random.randint(10, size=(m, k))
np_2 = np.random.randint(10, size=(k, n))
np_3 = np.matmul(np_1, np_2)
hcl_m1 = hcl.asarray(np_1, dtype=dtype)
hcl_m2 = hcl.asarray(np_2, dtype=dtype)
hcl_m3 = hcl.asarray(np.zeros((m, n)), dtype=dtype)
f(hcl_m1, hcl_m2, hcl_m3)
print(hcl_m3.asnumpy())
def time_gemm(dtype, m=1024, n=1024, k=1024, target=None):
hcl.init(dtype)
f = gemm(m, n, k, dtype, target)
np_1 = np.random.randint(10, size=(m, k))
np_2 = np.random.randint(10, size=(k, n))
np_3 = np.matmul(np_1, np_2)
hcl_m1 = hcl.asarray(np_1, dtype=dtype)
hcl_m2 = hcl.asarray(np_2, dtype=dtype)
hcl_m3 = hcl.asarray(np.zeros((m, n)), dtype=dtype)
f(hcl_m1, hcl_m2, hcl_m3)
begin = time.time()
for i in range(10):
f(hcl_m1, hcl_m2, hcl_m3)
end = time.time()
print("dtype is: ", dtype)
print("average of 10 runs takes: {} sec".format((end - begin) / 10))
np.testing.assert_allclose(hcl_m3.asnumpy(), np_3, rtol=1e-03)
###############################################################################
# Test the algorithm with different data types
dtypes = [hcl.Int(32), hcl.Float(), hcl.Fixed(32, 16)]
# for dtype in dtypes:
# time_gemm(hcl.Float(), 10, 10, 10, 'sdaccel')
gemm_hbm()
time_gemm(hcl.Float())
|
# -*- coding: utf-8 -*-
"""
Python module for generating fake total emission in a magnitude band along a sightline.
This uses the Arepo/Illustris output GFM_Photometrics to get photometric band data,
which may or may not be accurate.
"""
from __future__ import print_function
import math
import os.path as path
import shutil
import h5py
import numpy as np
from . import spectra as ss

def maginJy(mag, band):
    """Convert a magnitude to flux in Jansky, according to wikipedia's table"""
    bandfluxes = {'U':1810, 'B':4260, 'V':3640, 'K':670, 'g':3730, 'r':4490, 'i':4760, 'z':4810}
    return 10**(mag/(-2.5))*bandfluxes[band]

def apparentflux(DL):
    """Convert flux from absolute magnitudes (flux at 10 pc distance) to apparent flux in Jy.
    DL is luminosity distance in Mpc"""
    return (10/(DL*1e6))**2

def distance(arcsec, redshift, hubble, OmegaM):
    """Find the size of something in comoving kpc/h from the size on the sky in arcseconds."""
    #First arcsec to radians:
    #2 pi radians -> degrees -> arcminute -> arcsecond
    rad = 2*math.pi/360./60./60. * arcsec
    #Then to physical kpc
    atime = 1./(1+redshift)
    (_, DA, _) = calculator(hubble*100, OmegaM, redshift)
    size = DA * rad * 1000
    #Comoving kpc/h
    size = size / (atime/hubble)
    return size

class EmissionSpectra(ss.Spectra):
    """Class to compute the emission from stars in B band around the DLA spectrum"""
    stellar = {}

    def _read_stellar_data(self, fn, band, hhmult=10.):
        """Read the particle data for a single interpolation"""
        bands = {'U':0, 'B':1, 'V':2, 'K':3, 'g':4, 'r':5, 'i':6, 'z':7}
        nband = bands[band]
        pos = self.snapshot_set.get_data(4, "Position", segment=fn).astype(np.float32)
        #Set each stellar radius to the pixel size
        hh = hhmult*np.ones(np.shape(pos)[0], dtype=np.float32)
        #Find particles we care about
        ind = self.particles_near_lines(pos, hh, self.axis, self.cofm)
        #Do nothing if there aren't any, and return a suitably shaped zero array
        if np.size(ind) == 0:
            raise ValueError("No stars")
        pos = pos[ind,:]
        hh = hh[ind]
        #Find the magnitude of stars in this band
        emflux = maginJy(self.snapshot_set.get_data(4, "GFM_StellarPhotometrics", segment=fn).astype(np.float32)[ind][:,nband], band)
        fluxx = np.array([np.sum(emflux[self.particles_near_lines(pos, hh, np.array([ax,]), np.array([cofm,]))]) for (ax, cofm) in zip(self.axis, self.cofm)])
        return fluxx

    def get_emflux(self, band, pixelsz=1):
        """
        Get the density weighted flux in each pixel for a given species.
        band: rest-frame optical band observed in
        pixelsz: Angular size of the pixels in arcseconds
        """
        dist = distance(pixelsz, 1./self.atime-1, self.hubble, self.OmegaM)
        try:
            self._really_load_array((band, dist), self.stellar, "stellar")
            emflux = self.stellar[(band, dist)]
        except KeyError:
            emflux = np.zeros(self.NumLos, dtype=np.float32)
            for fn in self.snapshot_set.get_n_segments():
                try:
                    emflux += self._read_stellar_data(fn, band, dist)
                except ValueError:
                    pass
            self.stellar[(band, dist)] = emflux
        (_, _, DL) = calculator(self.hubble*100, self.OmegaM, 1./self.atime-1)
        emflux *= apparentflux(DL)
        return emflux

    def save_file(self):
        """
        Saves spectra to a file, because they are slow to generate.
        File is by default $snap_dir/snapdir_$snapnum/spectra.hdf5.
        """
        #We should make sure we have loaded all lazy-loaded things first.
        self._load_all_multihash(self.stellar, "stellar")
        self._load_all_multihash(self.tau_obs, "tau_obs")
        self._load_all_multihash(self.tau, "tau")
        self._load_all_multihash(self.colden, "colden")
        try:
            self._load_all_multihash(self.colden, "velocity")
        except IOError:
            pass
        try:
            if path.exists(self.savefile):
                shutil.move(self.savefile, self.savefile+".backup")
            f = h5py.File(self.savefile, 'w')
        except IOError:
            try:
                f = h5py.File(self.savefile, 'w')
            except IOError:
                raise IOError("Could not open " + self.savefile + " for writing")
        grp_grid = f.create_group("stellar")
        self._save_multihash(self.stellar, grp_grid)
        self._save_file(f)

def calculator(H0, Omega_M, zz):
    """Compute luminosity distance for a given cosmology. Assumes flatness.
    Freely adapted from James Schombert's python version of Ned Wright's cosmology calculator.
    Inputs:
        H0 - Hubble constant in km/s/Mpc
        Omega_M - Omega_matter
        zz - redshift to compute distances to
    Returns:
        (comoving distance, angular size distance, luminosity distance) (all in physical Mpc)"""
    light = 299792.458  # speed of light in km/sec
    h = H0/100.
    WR = 4.165E-5/(h*h)  # includes 3 massless neutrino species, T0 = 2.72528
    #Assume flat
    WV = 1 - Omega_M - WR
    #scale factor to compute distance to
    az = 1.0/(1.+zz)
    n = 1000  # number of points in integrals
    # integrate over a = 1/(1+z) from az to 1 on a log-spaced grid (trapezoidal rule)
    a = np.logspace(np.log10(az), 0, n)
    a2H = a*a*np.sqrt(Omega_M/a**3 + WR/(a**4) + WV)
    #Comoving distance
    DCMR = np.trapz(1./a2H, a)
    #In Mpc
    DC_Mpc = (light/H0) * DCMR
    #Angular size distance in Mpc
    DA_Mpc = (light/H0)*az*DCMR
    #Luminosity distance in Mpc
    DL_Mpc = DA_Mpc/(az*az)
    return (DC_Mpc, DA_Mpc, DL_Mpc)
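
if __name__ == "__main__":
    # Minimal usage sketch with illustrative numbers (not from any paper).
    # Run as "python -m <package>.<thismodule>", since the module uses a
    # relative import. Computes distances for a flat cosmology with
    # H0 = 70 km/s/Mpc, Omega_M = 0.3 at z = 2, and the apparent flux of
    # an absolute V-band magnitude of 0 placed at that luminosity distance.
    (DC, DA, DL) = calculator(70., 0.3, 2.)
    print("D_C = %.1f Mpc, D_A = %.1f Mpc, D_L = %.1f Mpc" % (DC, DA, DL))
    print("Apparent flux of M_V = 0: %.3g Jy" % (maginJy(0., 'V')*apparentflux(DL)))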
|
R version 3.3.2 (2016-10-31) -- "Sincere Pumpkin Patch"
Copyright (C) 2016 The R Foundation for Statistical Computing
Platform: x86_64-w64-mingw32/x64 (64-bit)
R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.
R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.
Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for an HTML browser interface to help.
Type 'q()' to quit R.
[Workspace loaded from ~/.RData]
install.packages("gdata")
install.packages("evir")
library(gdata)
library(evir)
StormDamageData <- read.xls("d:/publicdatamay2007.xls", sheet = 1)
head(StormDamageData)
tail(StormDamageData)
dim(StormDamageData)
ChangeFormat <- function(x){
x=as.character(x)
for(i in 1:10){x=sub(",","",as.character(x))}
return(as.numeric(x))}
base <- StormDamageData[,1:4]
base$Base.Economic.Damage <- Vectorize(ChangeFormat)(StormDamageData$Base.Economic.Damage)
base$Normalized.PL05 <- Vectorize(ChangeFormat)(StormDamageData$Normalized.PL05)
base$Normalized.CL05 <- Vectorize(ChangeFormat)(StormDamageData$Normalized.CL05)
head(base)
plot(base$Normalized.PL05/1e9,type="h",ylim=c(0,155), main = "207 Hurricanes, Normalized Costs: 1900 - 2005", xlab = "Index of Loss", ylab = "Normalized Costs", col = "red")
TestBase <- table(base$Year)
TestBase
years <- as.numeric(names(TestBase))
years
frequency <- as.numeric(TestBase)
frequency
years0frequency <- (1900:2005)[which(!(1900:2005)%in%years)]
years0frequency
StormDamageData <- data.frame(years=c(years, years0frequency), frequency=c(frequency, rep(0,length(years0frequency))))
head(StormDamageData)
plot(years, frequency, type="h", main = "Frequency of Hurricanes: 1900 - 2005", xlab = "Time (Years)", ylab = "Annual Frequency", col = "red")
mean(StormDamageData$frequency)
LinearTrend <- glm(frequency~years, data = StormDamageData, family=poisson(link="identity"), start=lm(frequency~years, data = StormDamageData)$coefficients)
LinearTrend
ExpTrend <- glm(frequency~years, data=StormDamageData, family = poisson(link="log"))
ExpTrend
plot(years, frequency, type='h', ylim=c(0,6), main = "No. of Major Hurricanes Predicted for 2014", xlim=c(1900,2020))
cpred1 <- predict(ExpTrend, newdata = data.frame(years=1890:2030), type="response")
cpred1
lines(1890:2030,cpred1,col="blue")
cpred0 <- predict(LinearTrend, newdata=data.frame(years=1890:2030), type="response")
cpred0
lines(1890:2030, cpred0, col="red")
abline(h = mean(StormDamageData$frequency), col="black")
predictions <- cbind(constant = mean(StormDamageData$frequency), linear= cpred0[126], exponential=cpred1[126])
predictions
points(rep((1890:2030)[126],3), predictions, col=c("black","red","blue"), pch=19)
hill(base$Normalized.PL05)
threshold <- .5
gpd.PL <- gpd(base$Normalized.PL05/1e9/20, threshold)$par.ests
mean(base$Normalized.CL05/1e9/20 >.5)
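# ExpectedValue below gives the expected loss in the layer [yinf, ysup] under the
# fitted GPD severity: the integral of (x - yinf) against the GPD density up to
# ysup, plus the probability mass above ysup valued at the full layer width.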
ExpectedValue <- function(yinf,ysup,xi,beta){
as.numeric(integrate(function(x) (x-yinf) * dgpd(x,xi,mu=threshold,beta),
lower=yinf,upper=ysup)$value +
(1-pgpd(ysup,xi,mu=threshold,beta))*(ysup-yinf))
}
predictions[1]
mean(base$Normalized.PL05/1e9/20 > .5)
ExpectedValue(2,6,gpd.PL[1],gpd.PL[2])*1e3
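# Final figure: expected annual loss via a frequency-severity decomposition --
# predicted 2014 frequency x P(normalized loss > threshold) x expected severity
# in the [2, 6] layer, rescaled by 1e3.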
predictions[1] * mean(base$Normalized.PL05/1e9/20 >.5) * ExpectedValue(2, 6, gpd.PL[1], gpd.PL[2]) * 1e3
|
lemma filterlim_of_real_at_infinity [tendsto_intros]: "filterlim (of_real :: real \<Rightarrow> 'a :: real_normed_algebra_1) at_infinity at_top"
|
[STATEMENT]
lemma tree_cross_acyclic:
"acyclic (tree_edges s \<union> cross_edges s)" (is "acyclic ?E")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. acyclic (tree_edges s \<union> cross_edges s)
[PROOF STEP]
proof (rule ccontr)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> acyclic (tree_edges s \<union> cross_edges s) \<Longrightarrow> False
[PROOF STEP]
{
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> acyclic (tree_edges s \<union> cross_edges s) \<Longrightarrow> False
[PROOF STEP]
fix u v
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> acyclic (tree_edges s \<union> cross_edges s) \<Longrightarrow> False
[PROOF STEP]
assume *: "u \<in> dom (finished s)" and "(u,v) \<in> ?E\<^sup>+"
[PROOF STATE]
proof (state)
this:
u \<in> dom (finished s)
(u, v) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+
goal (1 subgoal):
1. \<not> acyclic (tree_edges s \<union> cross_edges s) \<Longrightarrow> False
[PROOF STEP]
from this(2)
[PROOF STATE]
proof (chain)
picking this:
(u, v) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+
[PROOF STEP]
have "\<phi> s v < \<phi> s u \<and> v \<in> dom (finished s)"
[PROOF STATE]
proof (prove)
using this:
(u, v) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+
goal (1 subgoal):
1. \<phi> s v < \<phi> s u \<and> v \<in> dom (finished s)
[PROOF STEP]
proof induct
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>y. (u, y) \<in> tree_edges s \<union> cross_edges s \<Longrightarrow> \<phi> s y < \<phi> s u \<and> y \<in> dom (finished s)
2. \<And>y z. \<lbrakk>(u, y) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+; (y, z) \<in> tree_edges s \<union> cross_edges s; \<phi> s y < \<phi> s u \<and> y \<in> dom (finished s)\<rbrakk> \<Longrightarrow> \<phi> s z < \<phi> s u \<and> z \<in> dom (finished s)
[PROOF STEP]
case base
[PROOF STATE]
proof (state)
this:
(u, y_) \<in> tree_edges s \<union> cross_edges s
goal (2 subgoals):
1. \<And>y. (u, y) \<in> tree_edges s \<union> cross_edges s \<Longrightarrow> \<phi> s y < \<phi> s u \<and> y \<in> dom (finished s)
2. \<And>y z. \<lbrakk>(u, y) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+; (y, z) \<in> tree_edges s \<union> cross_edges s; \<phi> s y < \<phi> s u \<and> y \<in> dom (finished s)\<rbrakk> \<Longrightarrow> \<phi> s z < \<phi> s u \<and> z \<in> dom (finished s)
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
(u, y_) \<in> tree_edges s \<union> cross_edges s
goal (1 subgoal):
1. \<phi> s y_ < \<phi> s u \<and> y_ \<in> dom (finished s)
[PROOF STEP]
by (metis Un_iff * cross_edges_finished_decr cross_edges_target_finished tree_edge_impl_parenthesis)
[PROOF STATE]
proof (state)
this:
\<phi> s y_ < \<phi> s u \<and> y_ \<in> dom (finished s)
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(u, y) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+; (y, z) \<in> tree_edges s \<union> cross_edges s; \<phi> s y < \<phi> s u \<and> y \<in> dom (finished s)\<rbrakk> \<Longrightarrow> \<phi> s z < \<phi> s u \<and> z \<in> dom (finished s)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(u, y) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+; (y, z) \<in> tree_edges s \<union> cross_edges s; \<phi> s y < \<phi> s u \<and> y \<in> dom (finished s)\<rbrakk> \<Longrightarrow> \<phi> s z < \<phi> s u \<and> z \<in> dom (finished s)
[PROOF STEP]
case (step v w)
[PROOF STATE]
proof (state)
this:
(u, v) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+
(v, w) \<in> tree_edges s \<union> cross_edges s
\<phi> s v < \<phi> s u \<and> v \<in> dom (finished s)
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(u, y) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+; (y, z) \<in> tree_edges s \<union> cross_edges s; \<phi> s y < \<phi> s u \<and> y \<in> dom (finished s)\<rbrakk> \<Longrightarrow> \<phi> s z < \<phi> s u \<and> z \<in> dom (finished s)
[PROOF STEP]
hence "\<phi> s w < \<phi> s v \<and> w \<in> dom (finished s)"
[PROOF STATE]
proof (prove)
using this:
(u, v) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+
(v, w) \<in> tree_edges s \<union> cross_edges s
\<phi> s v < \<phi> s u \<and> v \<in> dom (finished s)
goal (1 subgoal):
1. \<phi> s w < \<phi> s v \<and> w \<in> dom (finished s)
[PROOF STEP]
by (metis Un_iff cross_edges_finished_decr cross_edges_target_finished tree_edge_impl_parenthesis)
[PROOF STATE]
proof (state)
this:
\<phi> s w < \<phi> s v \<and> w \<in> dom (finished s)
goal (1 subgoal):
1. \<And>y z. \<lbrakk>(u, y) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+; (y, z) \<in> tree_edges s \<union> cross_edges s; \<phi> s y < \<phi> s u \<and> y \<in> dom (finished s)\<rbrakk> \<Longrightarrow> \<phi> s z < \<phi> s u \<and> z \<in> dom (finished s)
[PROOF STEP]
with step
[PROOF STATE]
proof (chain)
picking this:
(u, v) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+
(v, w) \<in> tree_edges s \<union> cross_edges s
\<phi> s v < \<phi> s u \<and> v \<in> dom (finished s)
\<phi> s w < \<phi> s v \<and> w \<in> dom (finished s)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
(u, v) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+
(v, w) \<in> tree_edges s \<union> cross_edges s
\<phi> s v < \<phi> s u \<and> v \<in> dom (finished s)
\<phi> s w < \<phi> s v \<and> w \<in> dom (finished s)
goal (1 subgoal):
1. \<phi> s w < \<phi> s u \<and> w \<in> dom (finished s)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<phi> s w < \<phi> s u \<and> w \<in> dom (finished s)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<phi> s v < \<phi> s u \<and> v \<in> dom (finished s)
goal (1 subgoal):
1. \<not> acyclic (tree_edges s \<union> cross_edges s) \<Longrightarrow> False
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
\<lbrakk>?u9 \<in> dom (finished s); (?u9, ?v9) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+\<rbrakk> \<Longrightarrow> \<phi> s ?v9 < \<phi> s ?u9 \<and> ?v9 \<in> dom (finished s)
goal (1 subgoal):
1. \<not> acyclic (tree_edges s \<union> cross_edges s) \<Longrightarrow> False
[PROOF STEP]
note aux = this
[PROOF STATE]
proof (state)
this:
\<lbrakk>?u9 \<in> dom (finished s); (?u9, ?v9) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+\<rbrakk> \<Longrightarrow> \<phi> s ?v9 < \<phi> s ?u9 \<and> ?v9 \<in> dom (finished s)
goal (1 subgoal):
1. \<not> acyclic (tree_edges s \<union> cross_edges s) \<Longrightarrow> False
[PROOF STEP]
assume "\<not> acyclic ?E"
[PROOF STATE]
proof (state)
this:
\<not> acyclic (tree_edges s \<union> cross_edges s)
goal (1 subgoal):
1. \<not> acyclic (tree_edges s \<union> cross_edges s) \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<not> acyclic (tree_edges s \<union> cross_edges s)
[PROOF STEP]
obtain u where path: "(u,u) \<in> ?E\<^sup>+"
[PROOF STATE]
proof (prove)
using this:
\<not> acyclic (tree_edges s \<union> cross_edges s)
goal (1 subgoal):
1. (\<And>u. (u, u) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+ \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (auto simp add: acyclic_def)
[PROOF STATE]
proof (state)
this:
(u, u) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+
goal (1 subgoal):
1. \<not> acyclic (tree_edges s \<union> cross_edges s) \<Longrightarrow> False
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. False
[PROOF STEP]
proof cases
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. ?P \<Longrightarrow> False
2. \<not> ?P \<Longrightarrow> False
[PROOF STEP]
assume "u \<in> dom (finished s)"
[PROOF STATE]
proof (state)
this:
u \<in> dom (finished s)
goal (2 subgoals):
1. ?P \<Longrightarrow> False
2. \<not> ?P \<Longrightarrow> False
[PROOF STEP]
with aux path
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>?u9 \<in> dom (finished s); (?u9, ?v9) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+\<rbrakk> \<Longrightarrow> \<phi> s ?v9 < \<phi> s ?u9 \<and> ?v9 \<in> dom (finished s)
(u, u) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+
u \<in> dom (finished s)
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?u9 \<in> dom (finished s); (?u9, ?v9) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+\<rbrakk> \<Longrightarrow> \<phi> s ?v9 < \<phi> s ?u9 \<and> ?v9 \<in> dom (finished s)
(u, u) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+
u \<in> dom (finished s)
goal (1 subgoal):
1. False
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
False
goal (1 subgoal):
1. u \<notin> dom (finished s) \<Longrightarrow> False
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. u \<notin> dom (finished s) \<Longrightarrow> False
[PROOF STEP]
assume *: "u \<notin> dom (finished s)"
[PROOF STATE]
proof (state)
this:
u \<notin> dom (finished s)
goal (1 subgoal):
1. u \<notin> dom (finished s) \<Longrightarrow> False
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
u \<notin> dom (finished s)
goal (1 subgoal):
1. u \<notin> dom (finished s) \<Longrightarrow> False
[PROOF STEP]
from no_loop_in_tree
[PROOF STATE]
proof (chain)
picking this:
(?v, ?v) \<notin> (tree_edges s)\<^sup>+
[PROOF STEP]
have "(u,u) \<notin> (tree_edges s)\<^sup>+"
[PROOF STATE]
proof (prove)
using this:
(?v, ?v) \<notin> (tree_edges s)\<^sup>+
goal (1 subgoal):
1. (u, u) \<notin> (tree_edges s)\<^sup>+
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
(u, u) \<notin> (tree_edges s)\<^sup>+
goal (1 subgoal):
1. u \<notin> dom (finished s) \<Longrightarrow> False
[PROOF STEP]
with trancl_union_outside[OF path]
[PROOF STATE]
proof (chain)
picking this:
(u, u) \<notin> (tree_edges s)\<^sup>+ \<Longrightarrow> \<exists>x y. (u, x) \<in> (tree_edges s \<union> cross_edges s)\<^sup>* \<and> (x, y) \<in> cross_edges s \<and> (y, u) \<in> (tree_edges s \<union> cross_edges s)\<^sup>*
(u, u) \<notin> (tree_edges s)\<^sup>+
[PROOF STEP]
obtain x y where "(u,x) \<in> ?E\<^sup>*" "(x,y) \<in> cross_edges s" "(y,u) \<in> ?E\<^sup>*"
[PROOF STATE]
proof (prove)
using this:
(u, u) \<notin> (tree_edges s)\<^sup>+ \<Longrightarrow> \<exists>x y. (u, x) \<in> (tree_edges s \<union> cross_edges s)\<^sup>* \<and> (x, y) \<in> cross_edges s \<and> (y, u) \<in> (tree_edges s \<union> cross_edges s)\<^sup>*
(u, u) \<notin> (tree_edges s)\<^sup>+
goal (1 subgoal):
1. (\<And>x y. \<lbrakk>(u, x) \<in> (tree_edges s \<union> cross_edges s)\<^sup>*; (x, y) \<in> cross_edges s; (y, u) \<in> (tree_edges s \<union> cross_edges s)\<^sup>*\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(u, x) \<in> (tree_edges s \<union> cross_edges s)\<^sup>*
(x, y) \<in> cross_edges s
(y, u) \<in> (tree_edges s \<union> cross_edges s)\<^sup>*
goal (1 subgoal):
1. u \<notin> dom (finished s) \<Longrightarrow> False
[PROOF STEP]
with cross_edges_target_finished
[PROOF STATE]
proof (chain)
picking this:
(?u, ?v) \<in> cross_edges s \<Longrightarrow> ?v \<in> dom (finished s)
(u, x) \<in> (tree_edges s \<union> cross_edges s)\<^sup>*
(x, y) \<in> cross_edges s
(y, u) \<in> (tree_edges s \<union> cross_edges s)\<^sup>*
[PROOF STEP]
have "y \<in> dom (finished s)"
[PROOF STATE]
proof (prove)
using this:
(?u, ?v) \<in> cross_edges s \<Longrightarrow> ?v \<in> dom (finished s)
(u, x) \<in> (tree_edges s \<union> cross_edges s)\<^sup>*
(x, y) \<in> cross_edges s
(y, u) \<in> (tree_edges s \<union> cross_edges s)\<^sup>*
goal (1 subgoal):
1. y \<in> dom (finished s)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
y \<in> dom (finished s)
goal (1 subgoal):
1. u \<notin> dom (finished s) \<Longrightarrow> False
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
y \<in> dom (finished s)
goal (1 subgoal):
1. u \<notin> dom (finished s) \<Longrightarrow> False
[PROOF STEP]
with * \<open>(y,u) \<in> ?E\<^sup>*\<close>
[PROOF STATE]
proof (chain)
picking this:
u \<notin> dom (finished s)
(y, u) \<in> (tree_edges s \<union> cross_edges s)\<^sup>*
y \<in> dom (finished s)
[PROOF STEP]
have "(y,u) \<in> ?E\<^sup>+"
[PROOF STATE]
proof (prove)
using this:
u \<notin> dom (finished s)
(y, u) \<in> (tree_edges s \<union> cross_edges s)\<^sup>*
y \<in> dom (finished s)
goal (1 subgoal):
1. (y, u) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+
[PROOF STEP]
by (auto simp add: rtrancl_eq_or_trancl)
[PROOF STATE]
proof (state)
this:
(y, u) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+
goal (1 subgoal):
1. u \<notin> dom (finished s) \<Longrightarrow> False
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
u \<notin> dom (finished s)
y \<in> dom (finished s)
(y, u) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
using this:
u \<notin> dom (finished s)
y \<in> dom (finished s)
(y, u) \<in> (tree_edges s \<union> cross_edges s)\<^sup>+
goal (1 subgoal):
1. False
[PROOF STEP]
by (metis aux)
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
|
[STATEMENT]
lemma keysFor_insert_Hash [simp]: "keysFor (insert (Hash X) H) = keysFor H"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. keysFor (insert (Hash X) H) = keysFor H
[PROOF STEP]
by (unfold keysFor_def, auto)
|
module Issue2486 where
open import Common.Prelude
open import Issue2486.Import
open import Issue2486.ImportB
open import Issue2486.HaskellB
f : MyList String → String
f [] = "sdf"
f (x :: _) = x
xs : MyList String
xs = "sdfg" :: []
postulate
toBList : ∀ {A} → MyList A → BList A
fromBList : ∀ {A} → BList A → MyList A
{-# COMPILE GHC toBList = \ _ xs -> xs #-}
{-# COMPILE GHC fromBList = \ _ xs -> xs #-}
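-- The two pragmas above bind toBList/fromBList to the identity on the
-- Haskell side: presumably the compiled representations of MyList and
-- BList coincide, so only the erased type argument needs dropping.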
{-# FOREIGN GHC import qualified MAlonzo.Code.Issue2486.HaskellB as B #-}
data Test : Set where
Con : BBool → Test
{-# COMPILE GHC Test = data B.Test ( B.Con ) #-}
{-
ff : BBool
ff = BTrue
-}
main : IO Unit
main =
putStrLn (f (fromBList (toBList xs)))
|
---
Project for the course in Microeconometrics | Summer 2020, M.Sc. Economics, Bonn University | Julia Wilhelm
# Replication of F. Barrera-Osorio, M. Bertrand, L. L. Linden, F. Perez-Calle (2011) <a class="tocSkip">
---
This notebook contains my replication of the results from the following paper:
> Barrera-Osorio, Felipe, Marianne Bertrand, Leigh L. Linden, and Francisco Perez-Calle (2011). "Improving the Design of Conditional Transfer Programs: Evidence from a Randomized Education Experiment in Colombia." American Economic Journal: Applied Economics, 3 (2): 167-95.
The original paper, as well as the data and code provided by the authors can be accessed [here](https://www.aeaweb.org/articles?id=10.1257/app.3.2.167).
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#1.-Introduction" data-toc-modified-id="1.-Introduction-1">1. Introduction</a></span></li><li><span><a href="#2.-Identification" data-toc-modified-id="2.-Identification-2">2. Identification</a></span></li><li><span><a href="#3.-Empirical-Strategy" data-toc-modified-id="3.-Empirical-Strategy-3">3. Empirical Strategy</a></span></li><li><span><a href="#4.-Replication-of-Barrera-Osorio-et-al.-(2011)" data-toc-modified-id="4.-Replication-of-Barrera-Osorio-et-al.-(2011)-4">4. Replication of Barrera-Osorio et al. (2011)</a></span><ul class="toc-item"><li><span><a href="#4.1.-Data-&-Descriptive-Statistics" data-toc-modified-id="4.1.-Data-&-Descriptive-Statistics-4.1">4.1. Data & Descriptive Statistics</a></span></li><li><span><a href="#4.2.-Baseline-Comparison" data-toc-modified-id="4.2.-Baseline-Comparison-4.2">4.2. Baseline Comparison</a></span></li><li><span><a href="#4.3.-Results" data-toc-modified-id="4.3.-Results-4.3">4.3. Results</a></span><ul class="toc-item"><li><span><a href="#4.3.1.-Attendence" data-toc-modified-id="4.3.1.-Attendence-4.3.1">4.3.1. Attendence</a></span></li><li><span><a href="#4.3.2.-Re-enrollment" data-toc-modified-id="4.3.2.-Re-enrollment-4.3.4">4.3.2. Re-enrollment</a></span></li><li><span><a href="#4.3.3.-Survey-Based-Outcomes-Graduation-and-Tertiary-Enrollment" data-toc-modified-id="4.3.3.-Survey-Based-Outcomes-Graduation-and-Tertiary-Enrollment-4.3.3">4.3.3. Survey-Based Outcomes - Graduation and Tertiary Enrollment</a></span></li><li><span><a href="#4.3.4.-Siblings-Effects" data-toc-modified-id="4.3.4.-Siblings-Effects-4.3.4">4.3.4. Siblings Effects</a></span></li></ul></li></ul></li><li><span><a href="#5.-Critical-Assessment" data-toc-modified-id="5.-Critical-Assessment-5">5. Critical Assessment</a></span><li><span><a href="#6.-Extensions" data-toc-modified-id="6.-Extensions-7">6. Extensions</a></span><ul class="toc-item"> <li><span><a href="#6.1.-Check-for-Balanced-Groups-across-Experiments" data-toc-modified-id="6.1.-Check-for-Balanced-Groups-across-Experiments-6.1">6.1. Check for Balanced Groups across Experiments</a></span></li><li><span><a href="#6.2.-Elimination-of-Back-door-Paths-Controlling-only-for-Locality" data-toc-modified-id="6.2.-Elimination-of-Back-door-Paths-Controlling-only-for-Locality-6.2">6.2. Elimination of Back-door Paths Controlling only for Locality</a></span></ul><li><span><a href="#7.-Conclusion" data-toc-modified-id="7.-Conclusion-7">7. Conclusion</a></span></li><li><span><a href="#8.-References" data-toc-modified-id="8.-References-8">8. References</a></span></li></ul></div>
```python
import pandas as pd
import numpy as np
import pandas.io.formats.style
import statsmodels.api as sm_api
```
```python
from auxiliary.auxiliary_tables import *
```
---
# 1. Introduction
---
Barrera-Osorio et al. (2011) compare three education-based conditional cash transfer designs aimed at incentivizing academic participation. Using data from a pilot study in Bogota, Colombia, they examine the effects of a bi-monthly transfer (basic treatment), a bi-monthly transfer combined with a lump-sum payment at the time students are supposed to re-enroll in school (savings treatment), and a bi-monthly transfer combined with a large payment upon graduation (tertiary treatment). The payments are conditional on the child's school attendance and designed to prevent dropout from secondary schools and to encourage matriculation at tertiary institutions. On the one hand, the savings and tertiary treatments impose more binding short-term liquidity constraints on participating families than the basic treatment; the authors examine whether this decreases monthly school attendance. On the other hand, these two treatments might provide stronger incentives for families to re-enroll their children at school or for graduation.
To estimate and compare the causal impact of the three treatments, Barrera-Osorio et al. (2011) apply a difference model to the data from Bogota. The city's Secretary of Education implemented a one-year pilot study in which treatments were randomly allocated to children in two localities. Randomization at the child level generates variation within schools and families, which allows the authors to assess the comparability of the different groups. Barrera-Osorio et al. (2011) find that all designs significantly increase attendance and that the savings and tertiary treatments increase enrollment rates more strongly than the basic treatment. They conclude that the structure of the intervention can help target resources.
In this notebook, I replicate the results presented in the paper by Barrera-Osorio et al. (2011). Additionally, I critically discuss the quality of the strategy and the results. My analysis supports the findings of Barrera-Osorio et al. (2011).
This notebook is structured as follows. In the next section, I present the identification strategy Barrera-Osorio et al. (2011) use to unravel the causal effects of the conditional cash transfers (section 2). Section 3 briefly discusses the empirical strategy the authors use for estimation. Section 4 shows my replication of the results of the paper, and section 5 is a critical discussion thereof. In section 6 I check the identification assumption across the two experiments and conduct regressions conditioning on one variable that blocks all back-door paths from the causal variable to the outcome variable to identify causal effects. Section 7 presents some conclusions.
---
# 2. Identification
---
Barrera-Osorio et al. (2011) aim to answer the question of how three different education-based conditional cash transfer designs perform in preventing dropout from secondary schools and encouraging matriculation at tertiary institutions.
The different treatments were implemented in two localities in Bogota, San Cristobal and Suba. Eligible children in San Cristobal were randomly assigned to a control group, the basic treatment (bi-monthly transfer), or the savings treatment (bi-monthly transfer combined with a lump-sum payment at the time students are supposed to re-enroll in school). In Suba, eligible children were randomly assigned to the tertiary treatment (bi-monthly transfer combined with a large payment upon graduation) or a control group. Since it is impossible to observe treatment effects at the individual level, researchers estimate average effects using treatment and control groups. For each individual $i$ we can imagine a potential outcome where they are treated, $Y_i(1)$, and where they are not, $Y_i(0)$, but we can never observe both outcomes for the same individual. The random treatment assignment allows the authors to estimate the causal effects of the three treatments using experiments. While the basic and savings treatments can be compared directly, the comparison with the tertiary treatment cannot rely on purely random variation: the tertiary treatment was implemented in another locality, so this comparison occurs across experiments.
Since treatments were assigned randomly within the two localities, potential outcomes are independent of the treatment indicator $D$ and the selection bias is eliminated. The naive estimator, which simply compares the observed average outcomes of the treatment and control groups, then equals the true average treatment effect:
\begin{align*}
E[Y\mid D = 1] - E[Y\mid D = 0] & = E[Y^1\mid D = 1] - E[Y^0\mid D = 0] \\
& =E[Y^1\mid D = 1] - E[Y^0\mid D = 1] + E[Y^0\mid D = 1] - E[Y^0\mid D = 0] \\
& = \underbrace{E[Y^1 - Y^0\mid D = 1]}_{ATT} + \underbrace{E[Y^0\mid D= 1]- E[Y^0 \mid D = 0]}_{\text{Selection bias}} \\
& =E[Y^1 - Y^0\mid D = 1] \\
& =E[Y^1 - Y^0\mid D = 0] \\
& =E[Y^1 - Y^0]
\end{align*}
The authors here rely on the following two assumptions, which hold by construction under randomization. The second eliminates the selection bias, and together they equate the average treatment effect on the treated (ATT) with the unconditional average treatment effect:
\begin{align*}
E[Y^1\mid D = 1] &= E[Y^1\mid D = 0] \\
E[Y^0\mid D = 1] &= E[Y^0\mid D = 0]
\end{align*}
The causal graphs below illustrate the relationship between the treatments $D_B$, $D_S$, $D_T$ and the outcome $Y$ in the two localities. Additionally, there may be observables $W$ and unobservables $U$ that also affect $Y$. Due to random treatment assignment within the two localities, treatment is independent of $W$ and $U$ and there is no back-door path that has to be eliminated.
**San Cristobal:**
- $D_B$: Basic treatment
- $D_S$: Savings treatment
- $Y$: Student outcome
- $U$: Unobservables
- $W$: Observables

**Suba:**
- $D_T$: Tertiary treatment
- $Y$: Student outcome
- $U$: Unobservables
- $W$: Observables
The identification assumption needed to identify the causal effects is that randomization within each locality was successful. Barrera-Osorio et al. (2011) account for this by checking whether treatment assignment created balanced treatment and control groups in terms of household- and individual-level characteristics. This information was collected prior to the randomization, so under successful randomization students in each group should, on average, have similar characteristics. The authors make 60 comparisons and find 7 differences that are statistically significant at the 10 percent level, 5 at the 5 percent level and 2 at the 1 percent level. They conclude that randomization of the treatment assignment was successful.
In order to compare the effects of all three treatments, the authors consider the experiments in the two localities together. The corresponding causal graph has three back-door paths that have to be eliminated: treatment assignment is not completely random across experiments, since it is not random whether a person lives in Suba or San Cristobal, and observable or unobservable factors may affect treatment assignment and the outcome at the same time. The causal graph the authors use looks as follows:
- $D_B$: Basic treatment
- $D_S$: Savings treatment
- $D_T$: Tertiary treatment
- $L$: Dummy for locality of household (Suba or San Cristobal)
- $Y$: Student outcome
- $U$: Unobservables
- $W$: Observables
In order to eliminate the back-door paths, Barrera-Osorio et al. (2011) control for the locality of the households and a large set of observable demographic characteristics. Nevertheless, the authors note that differences between the tertiary treatment and the other treatments could be due to unobserved heterogeneity in treatment effects. I return to this problem below.
---
# 3. Empirical Strategy
---
Barrera-Osorio et al. (2011) examine the impact of the basic, the savings and the tertiary treatment on student outcomes. They use a simple difference model that compares different subsets of the sample without controlling for covariates.
For the basic-savings experiment in San Cristobal the specification takes the following form:
\begin{equation}
y_{ij} = \beta_0 + \beta_B Basic_i + \beta_S Savings_i + \epsilon_{ij}
\end{equation}
For the tertiary experiment in Suba the specification takes the following form:
\begin{equation}
y_{ij} = \beta_0 + \beta_T Tertiary_i + \epsilon_{ij}
\end{equation}
* $y_{ij}$ denotes a particular outcome for child $i$ in school $j$,
* $Basic_i$, $Savings_i$ and $Tertiary_i$ are indicator variables for whether or not the child is in the respective treatment group,
* $\epsilon_{ij}$ is the error term, which is allowed to vary up to the school level (i.e., standard errors are clustered at the school level).
The authors additionally use a difference estimator that controls for socio-demographic and school characteristics.
For the basic-savings experiment the model is specified as follows:
\begin{equation}
y_{ij} = \beta_0 + \beta_B Basic_i + \beta_S Savings_i + \delta X_{ijk} + \phi_{j} + \epsilon_{ij}
\end{equation}
For the tertiary treatment the model is specified as follows:
\begin{equation}
y_{ij} = \beta_0 + \beta_T Tertiary_i + \delta X_{ijk} + \phi_{j} + \epsilon_{ij}
\end{equation}
The variables are defined as before. Additionally,
* $X_{ijk}$ is a vector of socio-demographic controls for child $i$ in school $j$ and family $k$,
* $\phi_{j}$ are school fixed effects.
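
As a minimal sketch of these specifications (this is not the authors' original code; the tables in section 4 are produced by the `create_table*` helpers from the `auxiliary` module, which presumably wrap regressions of this form), the simple difference estimator for the basic-savings experiment can be written with `statsmodels`, using the dataset's column names `T1_treat`, `T2_treat` and `school_code`:

```python
import statsmodels.api as sm_api

def simple_difference(df, outcome):
    """Regress an outcome on the basic (T1) and savings (T2) treatment dummies,
    with standard errors clustered at the school level."""
    d = df[[outcome, 'T1_treat', 'T2_treat', 'school_code']].dropna()
    X = sm_api.add_constant(d[['T1_treat', 'T2_treat']].astype(float))
    return sm_api.OLS(d[outcome].astype(float), X).fit(
        cov_type='cluster', cov_kwds={'groups': d['school_code']})
```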
---
# 4. Replication of Barrera-Osorio et al. (2011)
---
## 4.1. Data & Descriptive Statistics
Barrera-Osorio et al. (2011) restricted their sample of students spread across 251 schools to the 68 schools with the largest number of registered students. Additionally, they restrict the data to students who completed the baseline survey they conducted. For the tertiary experiment they drop students in grades 6-8, since those were not eligible for the program.
```python
data = pd.read_stata('data/Public_Data_AEJApp_2010-0132.dta')
data.index.name = "individual"
# group students by grade for Table 1
data['grade_group'] = 'Grades 6-8'
data.loc[data['grade'] > 8, 'grade_group'] = 'Grades 9-10'
data.loc[data['grade'] > 10, 'grade_group'] = 'Grade 11'
# label the research group each student was assigned to
data['group'] = 'Control'
data.loc[data['T1_treat'] == 1, 'group'] = 'Basic'
data.loc[data['T2_treat'] == 1, 'group'] = 'Savings'
data.loc[data['T3_treat'] == 1, 'group'] = 'Tertiary'
# drop Suba students in grades 6-8, who were not eligible for the tertiary experiment
sample = data.drop(data[(data.suba == 1) & (data.grade < 9)].index)
# integer-code categorical variables and create dummies for the school fixed
# effects and the demographic controls
sample['s_teneviv_int'] = sample['s_teneviv'].cat.codes + 1
sample['s_sexo_int'] = sample['s_sexo'].cat.codes
sample['s_estcivil_int'] = sample['s_estcivil'].cat.codes + 1
sample = sample.join(pd.get_dummies(sample['school_code']))
sample = sample.join(pd.get_dummies(sample['s_teneviv']))
sample = sample.join(pd.get_dummies(sample['s_estcivil']))
sample = sample.join(pd.get_dummies(sample['grade'], prefix='grade'))
sample = sample.join(pd.get_dummies(sample['s_estrato'], prefix='estrato'))
# keep only students who completed the baseline survey
sample_baselinesurvey = sample.drop(sample[sample.bl_observed == 0].index)
```
Table 1 summarizes the distribution of children by grade, gender and experimental group. They end up with a sample of 7158 students.
#### Table 1- Distribution of Subjects by Research Groups
```python
create_table1(sample_baselinesurvey)
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead tr th {
text-align: left;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr>
<th>Experiment</th>
<th colspan="3" halign="left">Basic-Savings</th>
<th colspan="2" halign="left">Tertiary</th>
<th>Total</th>
</tr>
<tr>
<th>Group</th>
<th>Control</th>
<th>Basic</th>
<th>Savings</th>
<th>Control</th>
<th>Tertiary</th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>Grades 6-8</th>
<td>1189</td>
<td>1215</td>
<td>1166</td>
<td>0</td>
<td>0</td>
<td>3570</td>
</tr>
<tr>
<th>Grades 9-10</th>
<td>643</td>
<td>633</td>
<td>586</td>
<td>449</td>
<td>425</td>
<td>2736</td>
</tr>
<tr>
<th>Grade 11</th>
<td>179</td>
<td>188</td>
<td>177</td>
<td>160</td>
<td>148</td>
<td>852</td>
</tr>
<tr>
<th>Female</th>
<td>1047</td>
<td>1022</td>
<td>1000</td>
<td>361</td>
<td>336</td>
<td>3766</td>
</tr>
<tr>
<th>Male</th>
<td>964</td>
<td>1014</td>
<td>929</td>
<td>248</td>
<td>237</td>
<td>3392</td>
</tr>
<tr>
<th>Total</th>
<td>2011</td>
<td>2036</td>
<td>1929</td>
<td>609</td>
<td>573</td>
<td>7158</td>
</tr>
</tbody>
</table>
</div>
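
The counts above come from `create_table1`; as a hedged cross-check, the grade rows can presumably be reproduced directly with a crosstab over the two grouping columns built earlier:

```python
# Grade-by-group counts (the gender rows would be built analogously from s_sexo)
pd.crosstab(sample_baselinesurvey['grade_group'], sample_baselinesurvey['group'], margins=True)
```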
## 4.2. Baseline Comparison
Barrera-Osorio et al. (2011) check whether the randomization was successful and created balanced research groups. To do so, they compare student characteristics across research groups. Table 2 shows the averages of 15 different variables for the control groups in the basic-savings experiment (B-S) and the tertiary experiment (T). The other 4 columns show 60 comparisons between the treatment and control groups. The standard errors are in the row below each difference, labeled "SE". 7 differences are statistically significant at the 10 percent level, 5 at the 5 percent level, and 2 at the 1 percent level. Since the number of statistically significant differences is low, the groups appear balanced, which supports the identification assumption that treatment is randomly assigned within the localities.
#### Table 2- Comparison of Students between Research Groups
```python
sancristobal = sample.drop(sample[sample.suba == 1].index)
suba = sample.drop(sample[sample.suba == 0].index)
create_table2(sancristobal, suba)
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Control average B-S</th>
<th>Basic-Control</th>
<th>Savings-Control</th>
<th>Basic-Savings</th>
<th>Control average T</th>
<th>Tertiary-Control</th>
</tr>
</thead>
<tbody>
<tr>
<th>Possessions</th>
<td>1.90</td>
<td>0.07</td>
<td>0.04</td>
<td>0.03</td>
<td>1.94</td>
<td>-0.05</td>
</tr>
<tr>
<th>Possessions SE</th>
<td>1.1</td>
<td>0.02</td>
<td>0.02</td>
<td>0.02</td>
<td>1.02</td>
<td>0.04</td>
</tr>
<tr>
<th>Utilities</th>
<td>4.65</td>
<td>-0.02</td>
<td>0.06</td>
<td>-0.08</td>
<td>4.85</td>
<td>0.04</td>
</tr>
<tr>
<th>Utilities SE</th>
<td>1.42</td>
<td>0.03</td>
<td>0.03</td>
<td>0.03</td>
<td>1.32</td>
<td>0.04</td>
</tr>
<tr>
<th>Durable Goods</th>
<td>1.37</td>
<td>-0.02</td>
<td>0.01</td>
<td>-0.03</td>
<td>1.63</td>
<td>0.02</td>
</tr>
<tr>
<th>Durable Goods SE</th>
<td>0.89</td>
<td>0.02</td>
<td>0.02</td>
<td>0.02</td>
<td>0.86</td>
<td>0.03</td>
</tr>
<tr>
<th>Physical Infrastructure</th>
<td>11.65</td>
<td>-0.05</td>
<td>0.04</td>
<td>-0.09</td>
<td>12.14</td>
<td>-0.05</td>
</tr>
<tr>
<th>Physical Infrastructure SE</th>
<td>1.75</td>
<td>0.03</td>
<td>0.03</td>
<td>0.04</td>
<td>1.49</td>
<td>0.06</td>
</tr>
<tr>
<th>Age</th>
<td>14.38</td>
<td>0.09</td>
<td>-0.06</td>
<td>0.16</td>
<td>15.67</td>
<td>-0.06</td>
</tr>
<tr>
<th>Age SE</th>
<td>5.3</td>
<td>0.10</td>
<td>0.14</td>
<td>0.17</td>
<td>4.23</td>
<td>0.19</td>
</tr>
<tr>
<th>Gender</th>
<td>0.50</td>
<td>0.00</td>
<td>-0.00</td>
<td>0.01</td>
<td>0.45</td>
<td>-0.01</td>
</tr>
<tr>
<th>Gender SE</th>
<td>0.5</td>
<td>0.01</td>
<td>0.01</td>
<td>0.01</td>
<td>0.5</td>
<td>0.02</td>
</tr>
<tr>
<th>Years of Education</th>
<td>5.61</td>
<td>-0.02</td>
<td>-0.01</td>
<td>-0.02</td>
<td>7.43</td>
<td>-0.05</td>
</tr>
<tr>
<th>Years of Education SE</th>
<td>1.86</td>
<td>0.04</td>
<td>0.05</td>
<td>0.04</td>
<td>1.34</td>
<td>0.05</td>
</tr>
<tr>
<th>Single Head</th>
<td>0.30</td>
<td>0.02</td>
<td>0.01</td>
<td>0.01</td>
<td>0.27</td>
<td>0.01</td>
</tr>
<tr>
<th>Single Head SE</th>
<td>0.46</td>
<td>0.01</td>
<td>0.01</td>
<td>0.01</td>
<td>0.44</td>
<td>0.02</td>
</tr>
<tr>
<th>Age of Head</th>
<td>45.92</td>
<td>-0.07</td>
<td>0.11</td>
<td>-0.19</td>
<td>46.21</td>
<td>0.21</td>
</tr>
<tr>
<th>Age of Head SE</th>
<td>10.27</td>
<td>0.17</td>
<td>0.23</td>
<td>0.21</td>
<td>8.59</td>
<td>0.30</td>
</tr>
<tr>
<th>Years of ed., head</th>
<td>5.65</td>
<td>-0.11</td>
<td>-0.18</td>
<td>0.07</td>
<td>5.94</td>
<td>-0.13</td>
</tr>
<tr>
<th>Years of ed., head SE</th>
<td>2.94</td>
<td>0.08</td>
<td>0.07</td>
<td>0.07</td>
<td>2.94</td>
<td>0.09</td>
</tr>
<tr>
<th>People in Household</th>
<td>5.42</td>
<td>-0.04</td>
<td>-0.03</td>
<td>-0.02</td>
<td>5.16</td>
<td>-0.01</td>
</tr>
<tr>
<th>People in Household SE</th>
<td>2.01</td>
<td>0.05</td>
<td>0.05</td>
<td>0.04</td>
<td>1.78</td>
<td>0.07</td>
</tr>
<tr>
<th>Member under 18</th>
<td>2.57</td>
<td>0.03</td>
<td>0.01</td>
<td>0.01</td>
<td>2.31</td>
<td>0.05</td>
</tr>
<tr>
<th>Member under 18 SE</th>
<td>1.35</td>
<td>0.03</td>
<td>0.03</td>
<td>0.03</td>
<td>1.2</td>
<td>0.06</td>
</tr>
<tr>
<th>Estrato</th>
<td>1.44</td>
<td>-0.01</td>
<td>0.02</td>
<td>-0.03</td>
<td>1.63</td>
<td>-0.01</td>
</tr>
<tr>
<th>Estrato SE</th>
<td>0.83</td>
<td>0.02</td>
<td>0.02</td>
<td>0.02</td>
<td>0.77</td>
<td>0.03</td>
</tr>
<tr>
<th>SISBEN score</th>
<td>11.76</td>
<td>-0.11</td>
<td>-0.02</td>
<td>-0.10</td>
<td>13.44</td>
<td>0.03</td>
</tr>
<tr>
<th>SISBEN score SE</th>
<td>4.64</td>
<td>0.08</td>
<td>0.11</td>
<td>0.10</td>
<td>4.33</td>
<td>0.18</td>
</tr>
<tr>
<th>Household income (1,000 pesos)</th>
<td>366.70</td>
<td>-4.73</td>
<td>-0.35</td>
<td>-4.37</td>
<td>402.03</td>
<td>3.18</td>
</tr>
<tr>
<th>Household income (1,000 pesos) SE</th>
<td>241.01</td>
<td>5.77</td>
<td>6.17</td>
<td>6.64</td>
<td>235.44</td>
<td>7.67</td>
</tr>
</tbody>
</table>
</div>
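
As an illustration of how one entry of this table can be computed (the full table is produced by `create_table2`), a single balance comparison is just a difference in means, i.e. a regression of the characteristic on the treatment dummy. The sketch below uses the hypothetical column name `s_edad` for the student's age; the exact SISBEN variable names live in the auxiliary module:

```python
# Balance check sketch: mean age difference, basic treatment vs. control,
# with school-clustered standard errors ('s_edad' is an assumed column name)
d = sancristobal.loc[sancristobal['group'].isin(['Control', 'Basic']),
                     ['s_edad', 'T1_treat', 'school_code']].dropna()
X = sm_api.add_constant(d['T1_treat'].astype(float))
res = sm_api.OLS(d['s_edad'].astype(float), X).fit(
    cov_type='cluster', cov_kwds={'groups': d['school_code']})
print(res.params['T1_treat'], res.bse['T1_treat'])
```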
## 4.3. Results
### 4.3.1 Attendance
First, the authors analyse the effects of the conditional cash transfers on the school attendance rate. They include only individuals who are enrolled in one of the 68 schools selected for surveying and restrict the sample to students in grades 6-10, since grade 11 students should graduate rather than re-enroll and are therefore excluded from the enrollment estimations. In order to replicate their results I run simple regressions of the school attendance rate on the treatment variables: once without control variables, once with demographic controls, and once with demographic controls and school fixed effects. The first three columns of Table 3 show the results for the basic-savings experiment in San Cristobal, while columns 4 to 6 show the results for the tertiary experiment in Suba. The last column shows results of a regression containing all three treatments, demographic controls and school fixed effects. The estimated treatment effects and their standard errors ("SE") are provided in rows 1-6. The test statistics from comparisons of the relative treatment effects and their p-values are in rows 7-10.
#### Table 3 - Effects on monitored school attendance rates
```python
sancristobal = sample.drop(sample[sample.suba == 1].index)
suba = sample.drop(sample[sample.suba == 0].index)
sancristobal = sancristobal.drop(sancristobal[(sancristobal.survey_selected == 0) | (sancristobal.grade == 11)].index)
suba = suba.drop(suba[(suba.survey_selected == 0) | (suba.grade == 11) | (suba.grade < 9)].index)
sample_survey = sample.drop(sample[(sample.survey_selected == 0) | (sample.grade == 11) | (sample.grade < 9)].index)
create_table34(sancristobal, suba, sample_survey, 'at_msamean')
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Basic-Savings</th>
<th>Basic-Savings with demographics</th>
<th>Basic-Savings with demographics and school fixed effects</th>
<th>Tertiary</th>
<th>Tertiary with demographics</th>
<th>Tertiary with demographics and school fixed effects</th>
<th>Both</th>
</tr>
</thead>
<tbody>
<tr>
<th>Basic treatment</th>
<td>0.033</td>
<td>0.032</td>
<td>0.032</td>
<td></td>
<td></td>
<td></td>
<td>0.025</td>
</tr>
<tr>
<th>Basic treatment SE</th>
<td>0.007</td>
<td>0.008</td>
<td>0.007</td>
<td></td>
<td></td>
<td></td>
<td>0.01</td>
</tr>
<tr>
<th>Savings treatment</th>
<td>0.029</td>
<td>0.027</td>
<td>0.027</td>
<td></td>
<td></td>
<td></td>
<td>0.028</td>
</tr>
<tr>
<th>Savings treatment SE</th>
<td>0.008</td>
<td>0.008</td>
<td>0.007</td>
<td></td>
<td></td>
<td></td>
<td>0.012</td>
</tr>
<tr>
<th>Tertiary treatment</th>
<td></td>
<td></td>
<td></td>
<td>0.052</td>
<td>0.054</td>
<td>0.056</td>
<td>0.055</td>
</tr>
<tr>
<th>Tertiary treatment SE</th>
<td></td>
<td></td>
<td></td>
<td>0.018</td>
<td>0.016</td>
<td>0.02</td>
<td>0.02</td>
</tr>
<tr>
<th>H0: Basic-Savings F-Stat</th>
<td>0.312</td>
<td>0.404</td>
<td>0.481</td>
<td></td>
<td></td>
<td></td>
<td>0.053</td>
</tr>
<tr>
<th>p-value</th>
<td>0.581</td>
<td>0.53</td>
<td>0.494</td>
<td></td>
<td></td>
<td></td>
<td>0.819</td>
</tr>
<tr>
<th>H0: Tertiary-Basic F-Stat</th>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>1.863</td>
</tr>
<tr>
<th>p-value</th>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>0.18</td>
</tr>
<tr>
<th></th>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<th>Observations</th>
<td>5799</td>
<td>5799</td>
<td>5799</td>
<td>930</td>
<td>930</td>
<td>930</td>
<td>2937</td>
</tr>
<tr>
<th>R squared</th>
<td>0.003</td>
<td>0.037</td>
<td>0.089</td>
<td>0.008</td>
<td>0.058</td>
<td>0.269</td>
<td>0.134</td>
</tr>
</tbody>
</table>
</div>
The table shows:
- Basic treatment increases attendance by 3.3 percentage points (significant at the 1 percent level)
- Savings treatment increases attendance by 2.9 percentage points (significant at the 1 percent level)
- Tertiary treatment increases attendance by 5.2 percentage points (significant at the 1 percent level)
- no evidence that the treatments have different effects

My results from the regressions and difference tests are the same as those Barrera-Osorio et al. (2011) estimate in their paper. One can conclude that although the savings and tertiary treatments impose more binding short-term liquidity constraints on families than the basic treatment, there is no evidence of this hurting monthly attendance.
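
The equality tests reported in the table can be reproduced with a linear restriction on the fitted model; a sketch using the `simple_difference` helper from the section 3 sketch:

```python
# H0: basic and savings treatment effects on attendance are equal
res = simple_difference(sancristobal, 'at_msamean')
print(res.f_test('T1_treat = T2_treat'))
```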
### 4.3.2 Re-enrollment
Second, the authors analyse the effects of the conditional cash transfers on re-enrollment. Table 4 is structured like Table 3, with regressions on the observed re-enrollment rate.
#### Table 4 - Effects on administrative enrollment in the following year
```python
sancristobal = sample.drop(sample[(sample.suba == 1) | (sample.grade == 11)].index)
suba = sample.drop(sample[(sample.suba == 0) | (sample.grade == 11) | (sample.grade < 9)].index)
sancristobal = sancristobal[sancristobal['m_enrolled'].notna()]
suba = suba[suba['m_enrolled'].notna()]
sample_grade = sample.drop(sample[(sample.grade == 11) | (sample.grade < 9)].index)
sample_grade = sample_grade[sample_grade['m_enrolled'].notna()]
create_table34(sancristobal, suba, sample_grade, 'm_enrolled')
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Basic-Savings</th>
<th>Basic-Savings with demographics</th>
<th>Basic-Savings with demographics and school fixed effects</th>
<th>Tertiary</th>
<th>Tertiary with demographics</th>
<th>Tertiary with demographics and school fixed effects</th>
<th>Both</th>
</tr>
</thead>
<tbody>
<tr>
<th>Basic treatment</th>
<td>0.017</td>
<td>0.016</td>
<td>0.011</td>
<td></td>
<td></td>
<td></td>
<td>0</td>
</tr>
<tr>
<th>Basic treatment SE</th>
<td>0.009</td>
<td>0.008</td>
<td>0.01</td>
<td></td>
<td></td>
<td></td>
<td>0.016</td>
</tr>
<tr>
<th>Savings treatment</th>
<td>0.045</td>
<td>0.046</td>
<td>0.04</td>
<td></td>
<td></td>
<td></td>
<td>0.03</td>
</tr>
<tr>
<th>Savings treatment SE</th>
<td>0.016</td>
<td>0.015</td>
<td>0.011</td>
<td></td>
<td></td>
<td></td>
<td>0.017</td>
</tr>
<tr>
<th>Tertiary treatment</th>
<td></td>
<td></td>
<td></td>
<td>0.042</td>
<td>0.039</td>
<td>0.037</td>
<td>0.042</td>
</tr>
<tr>
<th>Tertiary treatment SE</th>
<td></td>
<td></td>
<td></td>
<td>0.022</td>
<td>0.021</td>
<td>0.02</td>
<td>0.019</td>
</tr>
<tr>
<th>H0: Basic-Savings F-Stat</th>
<td>3.99</td>
<td>3.941</td>
<td>5.519</td>
<td></td>
<td></td>
<td></td>
<td>2.271</td>
</tr>
<tr>
<th>p-value</th>
<td>0.048</td>
<td>0.049</td>
<td>0.02</td>
<td></td>
<td></td>
<td></td>
<td>0.133</td>
</tr>
<tr>
<th>H0: Tertiary-Basic F-Stat</th>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>2.228</td>
</tr>
<tr>
<th>p-value</th>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>0.137</td>
</tr>
<tr>
<th></th>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<th>Observations</th>
<td>8980</td>
<td>8980</td>
<td>8980</td>
<td>1735</td>
<td>1735</td>
<td>1735</td>
<td>4775</td>
</tr>
<tr>
<th>R squared</th>
<td>0.002</td>
<td>0.047</td>
<td>0.172</td>
<td>0.002</td>
<td>0.079</td>
<td>0.259</td>
<td>0.208</td>
</tr>
</tbody>
</table>
</div>
The table shows:
- Basic treatment increases re-enrollment by 1.7 percentage points (significant at the 10 percent level)
- Savings treatment increases re-enrollment by 4.5 percentage points (significant at the 1 percent level)
- Tertiary treatment increases re-enrollment by 4.2 percentage points (significant at the 10 percent level)
- the difference in magnitude of the basic and savings treatment effects is statistically significant at the 5 percent level
- no evidence that the tertiary and the basic treatment effects are different

Again, my results from the regressions are the same as those Barrera-Osorio et al. (2011) estimate in their paper. One can conclude that the savings treatment, which consists of a bi-monthly transfer combined with a lump-sum payment at the time students are supposed to re-enroll in school, is more effective in increasing re-enrollment than the basic treatment.
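
The statistically significant basic-savings gap can again be checked with a linear restriction, using the enrollment sample defined above:

```python
# H0: basic and savings treatment effects on re-enrollment are equal
res = simple_difference(sancristobal, 'm_enrolled')
print(res.f_test('T1_treat = T2_treat'))
```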
### 4.3.3 Survey-Based Outcomes - Graduation and Tertiary Enrollment
Barrera-Osorio et al. (2011) use data from a follow-up survey conducted after the treatments were implemented to analyse the effects of each treatment on self-reported graduation and tertiary enrollment for students who were in grade 11. The sample is restricted to those students for whom follow-up survey data is available. In order to replicate the results from Barrera-Osorio et al. (2011), I run regressions of the binary indicator for graduation and of the binary indicator for tertiary enrollment on the treatment variables with demographic controls and school fixed effects. The first three columns of Table 5 show the effects on graduation in the San Cristobal sample, the Suba sample, and both together. Columns 4 to 6 show the effects on tertiary enrollment, again in the San Cristobal sample, the Suba sample, and both together. The estimated treatment effects and their standard errors ("SE") are provided in rows 1-6, and the test statistics from comparisons of the relative treatment effects and their p-values are in rows 7-10.
#### Table 5 - Effects on graduation and tertiary enrollment for students in grade 11
```python
create_table5(sample)
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Graduation Basic-Savings</th>
<th>Graduation Tertiary</th>
<th>Graduation Both</th>
<th>Tertiary enrollment Basic-Savings</th>
<th>Tertiary enrollment Tertiary</th>
<th>Tertiary enrollment Both</th>
</tr>
</thead>
<tbody>
<tr>
<th>Basic treatment</th>
<td>0.039</td>
<td></td>
<td>0.036</td>
<td>0.043</td>
<td></td>
<td>0.048</td>
</tr>
<tr>
<th>Basic treatment SE</th>
<td>0.042</td>
<td></td>
<td>0.042</td>
<td>0.036</td>
<td></td>
<td>0.033</td>
</tr>
<tr>
<th>Savings treatment</th>
<td>0.04</td>
<td></td>
<td>0.039</td>
<td>0.094</td>
<td></td>
<td>0.094</td>
</tr>
<tr>
<th>Savings treatment SE</th>
<td>0.033</td>
<td></td>
<td>0.03</td>
<td>0.034</td>
<td></td>
<td>0.033</td>
</tr>
<tr>
<th>Tertiary treatment</th>
<td></td>
<td>0.047</td>
<td>0.044</td>
<td></td>
<td>0.489</td>
<td>0.487</td>
</tr>
<tr>
<th>Tertiary treatment SE</th>
<td></td>
<td>0.037</td>
<td>0.031</td>
<td></td>
<td>0.04</td>
<td>0.041</td>
</tr>
<tr>
<th>H0: Basic-Savings F-Stat</th>
<td>0</td>
<td></td>
<td>0.006</td>
<td>1.769</td>
<td></td>
<td>1.542</td>
</tr>
<tr>
<th>p-value</th>
<td>0.991</td>
<td></td>
<td>0.94</td>
<td>0.199</td>
<td></td>
<td>0.223</td>
</tr>
<tr>
<th>H0: Tertiary-Basic F-Stat</th>
<td></td>
<td></td>
<td>0.022</td>
<td></td>
<td></td>
<td>75.558</td>
</tr>
<tr>
<th>p-value</th>
<td></td>
<td></td>
<td>0.882</td>
<td></td>
<td></td>
<td>0</td>
</tr>
<tr>
<th></th>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<th>Observations</th>
<td>529</td>
<td>297</td>
<td>826</td>
<td>513</td>
<td>290</td>
<td>803</td>
</tr>
<tr>
<th>R squared</th>
<td>0.103</td>
<td>0.15</td>
<td>0.098</td>
<td>0.085</td>
<td>0.373</td>
<td>0.208</td>
</tr>
</tbody>
</table>
</div>
The table shows:
- None of the estimated effects on graduation are statistically significant
- Savings treatment increases tertiary enrollment by 9.4 percentage points (statistically significant at the 5 percent level)
- Tertiary treatment increases tertiary enrollment by 48.9 percentage points (statistically significant at the 1 percent level)
- The effect of the tertiary treatment is statistically significantly different from the effect of the basic treatment (p-value < 0.0001)

These estimates are of the same size as those provided by Barrera-Osorio et al. (2011). They mention that the tertiary enrollment findings might not be credible due to the extremely large estimate for the tertiary treatment group. One explanation for this result could be that students in the tertiary treatment group lie in the follow-up survey about being enrolled in tertiary institutions. However, self-reported graduation rates seem to match the estimates based on the administrative enrollment data and verified attendance data.
### 4.3.4 Siblings Effects
Barrera-Osorio et al. (2011) analyse the effect of the treatments on school attendance and re-enrollment for siblings. They use the intra-family variation in treatment assignment to provide a reduced-form test of whether receiving the transfer changes the allocation of opportunities within the household. In order to extract causal effects they restrict their sample to the subset of siblings who were also registered, since otherwise systematic differences between families who registered one child and families who registered two might bias the results. They focus on families who registered two children and keep only those for which administrative enrollment data is available. They pool the treatments due to the small sample size. Columns 1 and 2 of Table 6 contain comparisons of untreated children with and without treated siblings. Columns 3 and 4 show comparisons only for girls and columns 5 and 6 for boys.
#### Table 6 - Effects of treatment on siblings using monitored and administrative participation, households with two registered children
```python
create_table6(data)
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Attendance</th>
<th>Enrollment</th>
<th>Attendance Female</th>
<th>Enrollment Female</th>
<th>Attendance Male</th>
<th>Enrollment Male</th>
</tr>
</thead>
<tbody>
<tr>
<th>Sibling is treated?</th>
<td>-0.03</td>
<td>-0.071</td>
<td>-0.053</td>
<td>-0.114</td>
<td>-0.029</td>
<td>-0.04</td>
</tr>
<tr>
<th>Sibling is treated? SE</th>
<td>0.015</td>
<td>0.026</td>
<td>0.021</td>
<td>0.053</td>
<td>0.032</td>
<td>0.04</td>
</tr>
<tr>
<th></th>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<th>Observations</th>
<td>690</td>
<td>668</td>
<td>352</td>
<td>340</td>
<td>338</td>
<td>328</td>
</tr>
<tr>
<th>R squared</th>
<td>0.278</td>
<td>0.137</td>
<td>0.332</td>
<td>0.23</td>
<td>0.383</td>
<td>0.234</td>
</tr>
</tbody>
</table>
</div>
The table shows:
- Attendance of untreated children with treated siblings is 3 percentage points lower than for untreated children whose siblings are also untreated (statistically significant at the 10 percent level)
- Enrollment of untreated children with treated siblings is 7.1 percentage points lower than for untreated children whose siblings are also untreated (statistically significant at the 1 percent level)
- The effect is qualitatively similar for both genders
- The effect is stronger for girls, statistically significant at the 5 percent level for attendance and at the 10 percent level for enrollment

The results in columns 1, 2, 3 and 5 are the same as those provided by Barrera-Osorio et al. (2011). The estimates of the effect on enrollment for females and males differ slightly from those in the paper, but qualitatively the effects are the same. The table suggests that the additional household resources generated by the program are not used to invest in the education of the untreated children. Instead, families with a treated child seem to take educational input away from the untreated child. The authors conclude that eligibility rules that cut across children may increase inequality in educational attainment within the household.
---
# 5. Critical Assessment
---
The following section discusses the quality of the strategy and estimations provided by Barrera-Osorio et al. (2011).
First, one strength of their analysis is that they use administrative data for attendance and enrollment. This implies a high quality of data, since it is not systematically affected by individuals lying or misreporting, as survey data might be.
Second, they check whether treatments are assigned randomly within the localities. Comparing characteristics between control and treatment groups, they conclude that the groups are balanced. Furthermore, the fact that regression estimates do not change much when demographics and school fixed effects are included implies that randomization was successful.
Third, in order to rule out spillover effects of the treatment between children through peer networks, Barrera-Osorio et al. (2011) check that children in the treatment and the control group have similar networks. Therefore, any indirect treatment effect would be equally distributed across the groups.
Fourth, the authors make sure that there is no self-selection into treatment. Only children from families who had lived in the localities prior to 2004 were eligible to register for the program, which rules out that families moved to take advantage of the treatments.
Fifth, they mention that the SISBEN data, which they use for background characteristics, may understate assets and income, since the surveyed families knew that they were being scored on a poverty index. However, given the timing and purpose of the survey, the bias due to this Hawthorne effect is not correlated with the differences investigated in the paper.
Sixth, Barrera-Osorio et al. (2011) check whether their baseline and follow-up surveys induce a bias due to attrition. They conclude that attrition occurs similarly across treatment and control groups, which implies that the results should not be biased.
One problem with their analysis is that the tertiary treatment ends up being more generous than the basic and savings treatments. This makes it difficult to compare the treatments, and the estimated effects of the tertiary treatment might be biased upwards relative to the effects of the other two.
Next, they cannot rely on random treatment assignment for comparisons between the treatments in the two experiments. This might bias the results if there are systematic differences in characteristics that affect both treatment assignment and the outcome variable. One indication of this is that the difference in magnitude between the basic and savings treatment effects on re-enrollment is statistically significant in the regression where I only consider the experiment in San Cristobal, but not in the regression where I include both locations.
Another problem is that the administrative enrollment data could not be matched to 9.3% of the students in the San Cristobal experiment and 8.5% of those in Suba. If the children for whom the data could not be matched have characteristics that systematically affect their enrollment rate, this creates a bias.
The results for the effects on graduation and tertiary enrollment rely on survey-based data. Since individuals might lie or misreport information, these results could be biased.
Lastly, the sample size for the tertiary treatment is not very large, which reduces the reliability of the results and is therefore another weakness of the paper.
---
# 6. Extensions
---
## 6.1. Check for Balanced Groups across Experiments
As mentioned above, to compare the effects of all three treatments while considering the experiments in the two localities together, three back-door paths in the causal graph have to be eliminated. Treatment assignment is not completely random, since it is not random whether a person lives in Suba or San Cristobal. Observable or unobservable factors may affect where a family lives and are therefore correlated with both treatment assignment and the outcome. This can be seen when comparing the characteristics of the three treatment groups to each other and to the group of untreated individuals in both localities together. Table 7 shows these comparisons. The first column shows the average for all untreated children in the sample; the following columns present the differences between the corresponding groups. For each mean and difference, standard errors are provided below, denoted by 'SE'.
#### Table 7 - Comparisons of Students across Experiments
```python
create_table7(sample)
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Control average</th>
<th>Basic-Control</th>
<th>Savings-Control</th>
      <th>Tertiary-Control</th>
<th>Basic-Savings</th>
<th>Basic-Tertiary</th>
<th>Savings-Tertiary</th>
</tr>
</thead>
<tbody>
<tr>
<th>Possessions</th>
<td>1.91</td>
<td>0.06</td>
<td>0.02</td>
<td>-0.01</td>
<td>0.03</td>
<td>0.07</td>
<td>0.04</td>
</tr>
<tr>
<th>Possessions SE</th>
<td>1.08</td>
<td>0.02</td>
<td>0.02</td>
<td>0.04</td>
<td>0.02</td>
<td>0.05</td>
<td>0.05</td>
</tr>
<tr>
<th>Utilities</th>
<td>4.70</td>
<td>-0.07</td>
<td>0.01</td>
<td>0.19</td>
<td>-0.08</td>
<td>-0.25</td>
<td>-0.18</td>
</tr>
<tr>
<th>Utilities SE</th>
<td>1.39</td>
<td>0.03</td>
<td>0.03</td>
<td>0.06</td>
<td>0.03</td>
<td>0.08</td>
<td>0.07</td>
</tr>
<tr>
<th>Durable Goods</th>
<td>1.44</td>
<td>-0.09</td>
<td>-0.06</td>
<td>0.21</td>
<td>-0.03</td>
<td>-0.30</td>
<td>-0.27</td>
</tr>
<tr>
<th>Durable Goods SE</th>
<td>0.89</td>
<td>0.02</td>
<td>0.02</td>
<td>0.03</td>
<td>0.02</td>
<td>0.04</td>
<td>0.04</td>
</tr>
<tr>
<th>Physical Infrastructure</th>
<td>11.78</td>
<td>-0.18</td>
<td>-0.09</td>
<td>0.31</td>
<td>-0.09</td>
<td>-0.48</td>
<td>-0.40</td>
</tr>
<tr>
<th>Physical Infrastructure SE</th>
<td>1.70</td>
<td>0.04</td>
<td>0.03</td>
<td>0.06</td>
<td>0.04</td>
<td>0.07</td>
<td>0.06</td>
</tr>
<tr>
<th>Age</th>
<td>14.71</td>
<td>-0.24</td>
<td>-0.39</td>
<td>0.90</td>
<td>0.16</td>
<td>-1.13</td>
<td>-1.29</td>
</tr>
<tr>
<th>Age SE</th>
<td>5.08</td>
<td>0.13</td>
<td>0.14</td>
<td>0.20</td>
<td>0.17</td>
<td>0.25</td>
<td>0.21</td>
</tr>
<tr>
<th>Gender</th>
<td>0.48</td>
<td>0.01</td>
<td>0.01</td>
<td>-0.04</td>
<td>0.01</td>
<td>0.05</td>
<td>0.04</td>
</tr>
<tr>
<th>Gender SE</th>
<td>0.50</td>
<td>0.01</td>
<td>0.01</td>
<td>0.02</td>
<td>0.01</td>
<td>0.02</td>
<td>0.02</td>
</tr>
<tr>
<th>Years of Education</th>
<td>6.08</td>
<td>-0.49</td>
<td>-0.47</td>
<td>1.30</td>
<td>-0.02</td>
<td>-1.79</td>
<td>-1.77</td>
</tr>
<tr>
<th>Years of Education SE</th>
<td>1.91</td>
<td>0.10</td>
<td>0.10</td>
<td>0.12</td>
<td>0.04</td>
<td>0.13</td>
<td>0.13</td>
</tr>
<tr>
<th>Single Head</th>
<td>0.29</td>
<td>0.02</td>
<td>0.02</td>
<td>-0.01</td>
<td>0.01</td>
<td>0.04</td>
<td>0.03</td>
</tr>
<tr>
<th>Single Head SE</th>
<td>0.45</td>
<td>0.01</td>
<td>0.01</td>
<td>0.01</td>
<td>0.01</td>
<td>0.02</td>
<td>0.02</td>
</tr>
<tr>
<th>Age of Head</th>
<td>45.99</td>
<td>-0.15</td>
<td>0.04</td>
<td>0.43</td>
<td>-0.19</td>
<td>-0.58</td>
<td>-0.39</td>
</tr>
<tr>
<th>Age of Head SE</th>
<td>9.87</td>
<td>0.17</td>
<td>0.21</td>
<td>0.27</td>
<td>0.21</td>
<td>0.31</td>
<td>0.30</td>
</tr>
<tr>
<th>Years of ed., head</th>
<td>5.73</td>
<td>-0.18</td>
<td>-0.25</td>
<td>0.08</td>
<td>0.07</td>
<td>-0.26</td>
<td>-0.33</td>
</tr>
<tr>
<th>Years of ed., head SE</th>
<td>2.94</td>
<td>0.08</td>
<td>0.07</td>
<td>0.10</td>
<td>0.07</td>
<td>0.12</td>
<td>0.11</td>
</tr>
<tr>
<th>People in Household</th>
<td>5.35</td>
<td>0.02</td>
<td>0.04</td>
<td>-0.20</td>
<td>-0.02</td>
<td>0.22</td>
<td>0.24</td>
</tr>
<tr>
<th>People in Household SE</th>
<td>1.95</td>
<td>0.05</td>
<td>0.05</td>
<td>0.07</td>
<td>0.04</td>
<td>0.08</td>
<td>0.07</td>
</tr>
<tr>
<th>Member under 18</th>
<td>2.50</td>
<td>0.09</td>
<td>0.08</td>
<td>-0.14</td>
<td>0.01</td>
<td>0.24</td>
<td>0.22</td>
</tr>
<tr>
<th>Member under 18 SE</th>
<td>1.32</td>
<td>0.03</td>
<td>0.03</td>
<td>0.05</td>
<td>0.03</td>
<td>0.06</td>
<td>0.05</td>
</tr>
<tr>
<th>Estrato</th>
<td>1.49</td>
<td>-0.06</td>
<td>-0.03</td>
<td>0.13</td>
<td>-0.03</td>
<td>-0.19</td>
<td>-0.16</td>
</tr>
<tr>
<th>Estrato SE</th>
<td>0.82</td>
<td>0.02</td>
<td>0.02</td>
<td>0.04</td>
<td>0.02</td>
<td>0.05</td>
<td>0.05</td>
</tr>
<tr>
<th>SISBEN score</th>
<td>12.19</td>
<td>-0.55</td>
<td>-0.45</td>
<td>1.28</td>
<td>-0.10</td>
<td>-1.83</td>
<td>-1.73</td>
</tr>
<tr>
<th>SISBEN score SE</th>
<td>4.62</td>
<td>0.12</td>
<td>0.13</td>
<td>0.23</td>
<td>0.10</td>
<td>0.26</td>
<td>0.25</td>
</tr>
<tr>
<th>Household income (1,000 pesos)</th>
<td>375.73</td>
<td>-13.76</td>
<td>-9.39</td>
<td>29.47</td>
<td>-4.37</td>
<td>-43.23</td>
<td>-38.86</td>
</tr>
<tr>
<th>Household income (1,000 pesos) SE</th>
<td>240.07</td>
<td>5.85</td>
<td>6.10</td>
<td>7.15</td>
<td>6.64</td>
<td>8.41</td>
<td>8.71</td>
</tr>
</tbody>
</table>
</div>
The table provides 90 differences: 45 are statistically significant at the 1 percent level, 57 at the 5 percent level and 59 at the 10 percent level. Between the basic and the savings treatment group only two differences are statistically significant at the 5 percent level (one of them also at the 1 percent level), which we have already seen in table 2. Of the remaining 75 differences, 57 are statistically significant at the 10 percent level, 55 at the 5 percent level and 44 at the 1 percent level. One can conclude that the treatment and control groups in both localities together are not similar with respect to these characteristics. This suggests that simple estimates from regressing outcome variables on the treatments might be biased and that the back-door paths have to be eliminated.
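As a sketch, the pairwise comparisons in Table 7 can be computed as mean differences with Welch t-test standard errors; the group labels and covariate names below are assumptions, not the actual interface of `create_table7`:
```python
import itertools
from scipy import stats

def balance_comparisons(df, covariates, group_col="treatment"):
    """Mean differences, SEs and p-values between all pairs of groups.
    Sketch: df[group_col] is assumed to label the four groups (control,
    basic, savings, tertiary); returns {(g1, g2, var): (diff, se, pval)}."""
    out = {}
    for g1, g2 in itertools.combinations(df[group_col].unique(), 2):
        for var in covariates:
            a = df.loc[df[group_col] == g1, var].dropna()
            b = df.loc[df[group_col] == g2, var].dropna()
            _, pval = stats.ttest_ind(a, b, equal_var=False)  # Welch t-test
            diff = a.mean() - b.mean()
            se = (a.var(ddof=1) / len(a) + b.var(ddof=1) / len(b)) ** 0.5
            out[(g1, g2, var)] = (diff, se, pval)
    return out
```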
## 6.2. Elimination of Back-door Paths Controlling only for Locality
In order to eliminate the back-door paths, Barrera-Osorio et al. (2011) control for the locality of the households and a large set of observable demographic characteristics. However, all three back-door paths should already be eliminated by simply controlling for the locality in which the family lives, since within the localities treatment assignment is random. In the following, I estimate the effects of all three treatments together on attendance and enrollment rates, controlling only for the locality. Table 8 shows the results of this regression. I compare my results with those presented in the last column of table 3 and table 4, respectively.
#### Table 8 - Effects on Attendance and Re-enrollment only Controlling for the Locality
```python
create_table8(sample)
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
      <th>Attendance</th>
<th>Enrollment</th>
</tr>
</thead>
<tbody>
<tr>
<th>Basic treatment</th>
<td>0.023</td>
<td>0.004</td>
</tr>
<tr>
<th>Basic treatment SE</th>
<td>0.011</td>
<td>0.014</td>
</tr>
<tr>
<th>Savings treatment</th>
<td>0.028</td>
<td>0.024</td>
</tr>
<tr>
<th>Savings treatment SE</th>
<td>0.011</td>
<td>0.019</td>
</tr>
<tr>
<th>Tertiary treatment</th>
<td>0.052</td>
<td>0.042</td>
</tr>
<tr>
<th>Tertiary treatment SE</th>
<td>0.017</td>
<td>0.022</td>
</tr>
<tr>
<th>H0: Basic-Savings F-Stat</th>
<td>0.089</td>
<td>0.932</td>
</tr>
<tr>
<th>p-value</th>
<td>0.767</td>
<td>0.335</td>
</tr>
<tr>
<th>H0: Tertiary-Basic F-Stat</th>
<td>1.965</td>
<td>1.978</td>
</tr>
<tr>
<th>p-value</th>
<td>0.168</td>
<td>0.161</td>
</tr>
<tr>
<th></th>
<td></td>
<td></td>
</tr>
<tr>
<th>Observations</th>
<td>2937</td>
<td>4775</td>
</tr>
<tr>
<th>R squared</th>
<td>0.004</td>
<td>0.002</td>
</tr>
</tbody>
</table>
</div>
These estimates are very similar to those presented in the last column of table 3 and table 4, respectively. For example, the tertiary treatment is here estimated to increase the attendance rate by 5.2 percentage points, while table 3 shows an effect of 5.5 percentage points. The largest difference occurs for the effect of the savings treatment on re-enrollment: an increase of 2.4 percentage points here, compared to 3 percentage points when demographics and school fixed effects are controlled for. However, the results are statistically significant at the same levels as in tables 3 and 4. The difference tests of the treatment effects diverge more strongly, though in neither case are they statistically significant. One can conclude that including demographic characteristics and school fixed effects in addition to the locality does not change the results of the regression with all three treatments. This is explained by the fact that the back-door paths are already blocked by adding the locality to the regression.
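A minimal version of this specification regresses the outcome on the three treatment dummies plus a locality indicator; the column names (`basic`, `savings`, `tertiary`, `suba`) are assumptions for illustration:
```python
import statsmodels.formula.api as smf

def locality_only_regression(df, outcome="enrollment"):
    """Sketch of the Table 8 specification: treatment effects with the
    locality dummy as the only control and robust (HC1) standard errors."""
    res = smf.ols(f"{outcome} ~ basic + savings + tertiary + suba",
                  data=df).fit(cov_type="HC1")
    # F-test for equality of the basic and savings coefficients,
    # mirroring the 'H0: Basic-Savings' row of Table 8.
    print(res.f_test("basic = savings"))
    return res
```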
---
# 7. Conclusion
---
The results in this notebook support the findings reported by Barrera-Osorio et al. (2011). I reproduced the results precisely for almost all tables; only for the third and sixth columns of table 6 could I merely produce similar results. In addition to replicating the main results, I critically discuss the quality of their strategy and estimations and evaluate the robustness of their results. Comparing mean characteristics between the research groups across the two experiments, I find that the groups differ. This indicates that the identification assumption of randomly assigned treatments is violated and that estimates from the regressions including all three treatments might be biased. Barrera-Osorio et al. (2011) include demographic controls and school fixed effects in the regression in order to rule out potential biases. However, evaluating the causal graph provided in section 2, one can conclude that including the locality in the regression equation eliminates all three back-door paths. Running this regression, I find results similar to those of Barrera-Osorio et al. (2011). Still, the fact that the estimated effects of the basic and savings treatments change considerably depending on whether the tertiary treatment is included in the regression indicates a bias. Further analysis of the difference between the basic and savings treatments and the tertiary treatment might help to provide more precise policy implications.
Additionally, the study focuses only on the effects of the conditional cash transfers on the children included in the experiment in the two localities. It might be the case that children who registered and are eligible for the experiment achieve higher outcomes than those who did not register or are not eligible. This raises the question of whether the results are externally valid. Further research may address this issue.
Overall, the findings from Barrera-Osorio et al. (2011) offer credible results and policy implications regarding the design of education-based conditional cash transfers.
---
# 8. References
---
* **Barrera-Osorio, Felipe, Marianne Bertrand, Leigh L. Linden, and Francisco Perez-Calle (2011)**. "Improving the Design of Conditional Transfer Programs: Evidence from a Randomized Education Experiment in Colombia." *American Economic Journal: Applied Economics*, 3 (2): 167-95.
|
/-
Copyright (c) 2021 Alena Gusakov, Bhavik Mehta, Kyle Miller. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Alena Gusakov, Bhavik Mehta, Kyle Miller
! This file was ported from Lean 3 source module combinatorics.hall.finite
! leanprover-community/mathlib commit 63f84d91dd847f50bae04a01071f3a5491934e36
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Data.Fintype.Basic
import Mathbin.Data.Set.Finite
/-!
# Hall's Marriage Theorem for finite index types
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This module proves the basic form of Hall's theorem.
In contrast to the theorem described in `combinatorics.hall.basic`, this
version requires that the indexed family `t : ι → finset α` have `ι` be finite.
The `combinatorics.hall.basic` module applies a compactness argument to this version
to remove the `finite` constraint on `ι`.
The modules are split like this since the generalized statement
depends on the topology and category theory libraries, but the finite
case in this module has few dependencies.
A description of this formalization is in [Gusakov2021].
## Main statements
* `finset.all_card_le_bUnion_card_iff_exists_injective'` is Hall's theorem with
a finite index set. This is elsewhere generalized to
`finset.all_card_le_bUnion_card_iff_exists_injective`.
## Tags
Hall's Marriage Theorem, indexed families
-/
open Finset
universe u v
namespace HallMarriageTheorem
variable {ι : Type u} {α : Type v} [DecidableEq α] {t : ι → Finset α}
section Fintype
variable [Fintype ι]
#print HallMarriageTheorem.hall_cond_of_erase /-
theorem hall_cond_of_erase {x : ι} (a : α)
(ha : ∀ s : Finset ι, s.Nonempty → s ≠ univ → s.card < (s.bunionᵢ t).card)
(s' : Finset { x' : ι | x' ≠ x }) : s'.card ≤ (s'.bunionᵢ fun x' => (t x').eraseₓ a).card :=
by
haveI := Classical.decEq ι
specialize ha (s'.image coe)
rw [nonempty.image_iff, Finset.card_image_of_injective s' Subtype.coe_injective] at ha
by_cases he : s'.nonempty
· have ha' : s'.card < (s'.bUnion fun x => t x).card :=
by
convert ha he fun h => by simpa [← h] using mem_univ x using 2
ext x
simp only [mem_image, mem_bUnion, exists_prop, SetCoe.exists, exists_and_right,
exists_eq_right, Subtype.coe_mk]
rw [← erase_bUnion]
by_cases hb : a ∈ s'.bUnion fun x => t x
· rw [card_erase_of_mem hb]
exact Nat.le_pred_of_lt ha'
· rw [erase_eq_of_not_mem hb]
exact Nat.le_of_lt ha'
· rw [nonempty_iff_ne_empty, Classical.not_not] at he
subst s'
simp
#align hall_marriage_theorem.hall_cond_of_erase HallMarriageTheorem.hall_cond_of_erase
-/
#print HallMarriageTheorem.hall_hard_inductive_step_A /-
/-- First case of the inductive step: assuming that
`∀ (s : finset ι), s.nonempty → s ≠ univ → s.card < (s.bUnion t).card`
and that the statement of **Hall's Marriage Theorem** is true for all
`ι'` of cardinality ≤ `n`, then it is true for `ι` of cardinality `n + 1`.
-/
theorem hall_hard_inductive_step_A {n : ℕ} (hn : Fintype.card ι = n + 1)
(ht : ∀ s : Finset ι, s.card ≤ (s.bunionᵢ t).card)
(ih :
∀ {ι' : Type u} [Fintype ι'] (t' : ι' → Finset α),
Fintype.card ι' ≤ n →
(∀ s' : Finset ι', s'.card ≤ (s'.bunionᵢ t').card) →
∃ f : ι' → α, Function.Injective f ∧ ∀ x, f x ∈ t' x)
(ha : ∀ s : Finset ι, s.Nonempty → s ≠ univ → s.card < (s.bunionᵢ t).card) :
∃ f : ι → α, Function.Injective f ∧ ∀ x, f x ∈ t x :=
by
haveI : Nonempty ι := fintype.card_pos_iff.mp (hn.symm ▸ Nat.succ_pos _)
haveI := Classical.decEq ι
-- Choose an arbitrary element `x : ι` and `y : t x`.
let x := Classical.arbitrary ι
have tx_ne : (t x).Nonempty := by
rw [← Finset.card_pos]
calc
0 < 1 := Nat.one_pos
_ ≤ (Finset.bunionᵢ {x} t).card := (ht {x})
_ = (t x).card := by rw [Finset.singleton_bunionᵢ]
choose y hy using tx_ne
-- Restrict to everything except `x` and `y`.
let ι' := { x' : ι | x' ≠ x }
let t' : ι' → Finset α := fun x' => (t x').eraseₓ y
have card_ι' : Fintype.card ι' = n :=
calc
Fintype.card ι' = Fintype.card ι - 1 := Set.card_ne_eq _
_ = n := by rw [hn, Nat.add_succ_sub_one, add_zero]
rcases ih t' card_ι'.le (hall_cond_of_erase y ha) with ⟨f', hfinj, hfr⟩
-- Extend the resulting function.
refine' ⟨fun z => if h : z = x then y else f' ⟨z, h⟩, _, _⟩
· rintro z₁ z₂
have key : ∀ {x}, y ≠ f' x := by
intro x h
simpa [← h] using hfr x
by_cases h₁ : z₁ = x <;> by_cases h₂ : z₂ = x <;> simp [h₁, h₂, hfinj.eq_iff, key, key.symm]
· intro z
split_ifs with hz
· rwa [hz]
· specialize hfr ⟨z, hz⟩
rw [mem_erase] at hfr
exact hfr.2
#align hall_marriage_theorem.hall_hard_inductive_step_A HallMarriageTheorem.hall_hard_inductive_step_A
-/
#print HallMarriageTheorem.hall_cond_of_restrict /-
theorem hall_cond_of_restrict {ι : Type u} {t : ι → Finset α} {s : Finset ι}
(ht : ∀ s : Finset ι, s.card ≤ (s.bunionᵢ t).card) (s' : Finset (s : Set ι)) :
s'.card ≤ (s'.bunionᵢ fun a' => t a').card := by
classical
rw [← card_image_of_injective s' Subtype.coe_injective]
convert ht (s'.image coe) using 1
apply congr_arg
ext y
simp
#align hall_marriage_theorem.hall_cond_of_restrict HallMarriageTheorem.hall_cond_of_restrict
-/
/- warning: hall_marriage_theorem.hall_cond_of_compl -> HallMarriageTheorem.hall_cond_of_compl is a dubious translation:
lean 3 declaration is
forall {α : Type.{u2}} [_inst_1 : DecidableEq.{succ u2} α] {ι : Type.{u1}} {t : ι -> (Finset.{u2} α)} {s : Finset.{u1} ι}, (Eq.{1} Nat (Finset.card.{u1} ι s) (Finset.card.{u2} α (Finset.bunionᵢ.{u1, u2} ι α (fun (a : α) (b : α) => _inst_1 a b) s t))) -> (forall (s : Finset.{u1} ι), LE.le.{0} Nat Nat.hasLe (Finset.card.{u1} ι s) (Finset.card.{u2} α (Finset.bunionᵢ.{u1, u2} ι α (fun (a : α) (b : α) => _inst_1 a b) s t))) -> (forall (s' : Finset.{u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} ι) Type.{u1} (Set.hasCoeToSort.{u1} ι) (HasCompl.compl.{u1} (Set.{u1} ι) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} ι) (Set.booleanAlgebra.{u1} ι)) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} ι) (Set.{u1} ι) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} ι) (Set.{u1} ι) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} ι) (Set.{u1} ι) (Finset.Set.hasCoeT.{u1} ι))) s)))), LE.le.{0} Nat Nat.hasLe (Finset.card.{u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} ι) Type.{u1} (Set.hasCoeToSort.{u1} ι) (HasCompl.compl.{u1} (Set.{u1} ι) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} ι) (Set.booleanAlgebra.{u1} ι)) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} ι) (Set.{u1} ι) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} ι) (Set.{u1} ι) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} ι) (Set.{u1} ι) (Finset.Set.hasCoeT.{u1} ι))) s))) s') (Finset.card.{u2} α (Finset.bunionᵢ.{u1, u2} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} ι) Type.{u1} (Set.hasCoeToSort.{u1} ι) (HasCompl.compl.{u1} (Set.{u1} ι) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} ι) (Set.booleanAlgebra.{u1} ι)) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} ι) (Set.{u1} ι) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} ι) (Set.{u1} ι) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} ι) (Set.{u1} ι) (Finset.Set.hasCoeT.{u1} ι))) s))) α (fun (a : α) (b : α) => _inst_1 a b) s' (fun (x' : coeSort.{succ u1, succ (succ u1)} (Set.{u1} ι) Type.{u1} (Set.hasCoeToSort.{u1} ι) (HasCompl.compl.{u1} (Set.{u1} ι) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} ι) (Set.booleanAlgebra.{u1} ι)) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} ι) (Set.{u1} ι) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} ι) (Set.{u1} ι) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} ι) (Set.{u1} ι) (Finset.Set.hasCoeT.{u1} ι))) s))) => SDiff.sdiff.{u2} (Finset.{u2} α) (Finset.hasSdiff.{u2} α (fun (a : α) (b : α) => _inst_1 a b)) (t ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (coeSort.{succ u1, succ (succ u1)} (Set.{u1} ι) Type.{u1} (Set.hasCoeToSort.{u1} ι) (HasCompl.compl.{u1} (Set.{u1} ι) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} ι) (Set.booleanAlgebra.{u1} ι)) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} ι) (Set.{u1} ι) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} ι) (Set.{u1} ι) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} ι) (Set.{u1} ι) (Finset.Set.hasCoeT.{u1} ι))) s))) ι (HasLiftT.mk.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} ι) Type.{u1} (Set.hasCoeToSort.{u1} ι) (HasCompl.compl.{u1} (Set.{u1} ι) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} ι) (Set.booleanAlgebra.{u1} ι)) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} ι) (Set.{u1} ι) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} ι) (Set.{u1} ι) (CoeTCₓ.coe.{succ u1, 
succ u1} (Finset.{u1} ι) (Set.{u1} ι) (Finset.Set.hasCoeT.{u1} ι))) s))) ι (CoeTCₓ.coe.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} ι) Type.{u1} (Set.hasCoeToSort.{u1} ι) (HasCompl.compl.{u1} (Set.{u1} ι) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} ι) (Set.booleanAlgebra.{u1} ι)) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} ι) (Set.{u1} ι) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} ι) (Set.{u1} ι) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} ι) (Set.{u1} ι) (Finset.Set.hasCoeT.{u1} ι))) s))) ι (coeBase.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} ι) Type.{u1} (Set.hasCoeToSort.{u1} ι) (HasCompl.compl.{u1} (Set.{u1} ι) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} ι) (Set.booleanAlgebra.{u1} ι)) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} ι) (Set.{u1} ι) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} ι) (Set.{u1} ι) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} ι) (Set.{u1} ι) (Finset.Set.hasCoeT.{u1} ι))) s))) ι (coeSubtype.{succ u1} ι (fun (x : ι) => Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) x (HasCompl.compl.{u1} (Set.{u1} ι) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} ι) (Set.booleanAlgebra.{u1} ι)) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} ι) (Set.{u1} ι) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} ι) (Set.{u1} ι) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} ι) (Set.{u1} ι) (Finset.Set.hasCoeT.{u1} ι))) s))))))) x')) (Finset.bunionᵢ.{u1, u2} ι α (fun (a : α) (b : α) => _inst_1 a b) s t)))))
but is expected to have type
forall {α : Type.{u2}} [_inst_1 : DecidableEq.{succ u2} α] {ι : Type.{u1}} {t : ι -> (Finset.{u2} α)} {s : Finset.{u1} ι}, (Eq.{1} Nat (Finset.card.{u1} ι s) (Finset.card.{u2} α (Finset.bunionᵢ.{u1, u2} ι α (fun (a : α) (b : α) => _inst_1 a b) s t))) -> (forall (s : Finset.{u1} ι), LE.le.{0} Nat instLENat (Finset.card.{u1} ι s) (Finset.card.{u2} α (Finset.bunionᵢ.{u1, u2} ι α (fun (a : α) (b : α) => _inst_1 a b) s t))) -> (forall (s' : Finset.{u1} (Set.Elem.{u1} ι (HasCompl.compl.{u1} (Set.{u1} ι) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} ι) (Set.instBooleanAlgebraSet.{u1} ι)) (Finset.toSet.{u1} ι s)))), LE.le.{0} Nat instLENat (Finset.card.{u1} (Set.Elem.{u1} ι (HasCompl.compl.{u1} (Set.{u1} ι) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} ι) (Set.instBooleanAlgebraSet.{u1} ι)) (Finset.toSet.{u1} ι s))) s') (Finset.card.{u2} α (Finset.bunionᵢ.{u1, u2} (Set.Elem.{u1} ι (HasCompl.compl.{u1} (Set.{u1} ι) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} ι) (Set.instBooleanAlgebraSet.{u1} ι)) (Finset.toSet.{u1} ι s))) α (fun (a : α) (b : α) => _inst_1 a b) s' (fun (x' : Set.Elem.{u1} ι (HasCompl.compl.{u1} (Set.{u1} ι) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} ι) (Set.instBooleanAlgebraSet.{u1} ι)) (Finset.toSet.{u1} ι s))) => SDiff.sdiff.{u2} (Finset.{u2} α) (Finset.instSDiffFinset.{u2} α (fun (a : α) (b : α) => _inst_1 a b)) (t (Subtype.val.{succ u1} ι (fun (x : ι) => Membership.mem.{u1, u1} ι (Set.{u1} ι) (Set.instMembershipSet.{u1} ι) x (HasCompl.compl.{u1} (Set.{u1} ι) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} ι) (Set.instBooleanAlgebraSet.{u1} ι)) (Finset.toSet.{u1} ι s))) x')) (Finset.bunionᵢ.{u1, u2} ι α (fun (a : α) (b : α) => _inst_1 a b) s t)))))
Case conversion may be inaccurate. Consider using '#align hall_marriage_theorem.hall_cond_of_compl HallMarriageTheorem.hall_cond_of_complₓ'. -/
theorem hall_cond_of_compl {ι : Type u} {t : ι → Finset α} {s : Finset ι}
(hus : s.card = (s.bunionᵢ t).card) (ht : ∀ s : Finset ι, s.card ≤ (s.bunionᵢ t).card)
(s' : Finset (sᶜ : Set ι)) : s'.card ≤ (s'.bunionᵢ fun x' => t x' \ s.bunionᵢ t).card :=
by
haveI := Classical.decEq ι
have disj : Disjoint s (s'.image coe) :=
by
simp only [disjoint_left, not_exists, mem_image, exists_prop, SetCoe.exists, exists_and_right,
exists_eq_right, Subtype.coe_mk]
intro x hx hc h
exact absurd hx hc
have : s'.card = (s ∪ s'.image coe).card - s.card := by
simp [disj, card_image_of_injective _ Subtype.coe_injective]
rw [this, hus]
refine' (tsub_le_tsub_right (ht _) _).trans _
rw [← card_sdiff]
· refine' (card_le_of_subset _).trans le_rfl
intro t
simp only [mem_bUnion, mem_sdiff, not_exists, mem_image, and_imp, mem_union, exists_and_right,
exists_imp]
rintro x (hx | ⟨x', hx', rfl⟩) rat hs
· exact (hs x hx Rat).elim
· exact ⟨⟨x', hx', Rat⟩, hs⟩
· apply bUnion_subset_bUnion_of_subset_left
apply subset_union_left
#align hall_marriage_theorem.hall_cond_of_compl HallMarriageTheorem.hall_cond_of_compl
#print HallMarriageTheorem.hall_hard_inductive_step_B /-
/-- Second case of the inductive step: assuming that
`∃ (s : finset ι), s ≠ univ → s.card = (s.bUnion t).card`
and that the statement of **Hall's Marriage Theorem** is true for all
`ι'` of cardinality ≤ `n`, then it is true for `ι` of cardinality `n + 1`.
-/
theorem hall_hard_inductive_step_B {n : ℕ} (hn : Fintype.card ι = n + 1)
(ht : ∀ s : Finset ι, s.card ≤ (s.bunionᵢ t).card)
(ih :
∀ {ι' : Type u} [Fintype ι'] (t' : ι' → Finset α),
Fintype.card ι' ≤ n →
(∀ s' : Finset ι', s'.card ≤ (s'.bunionᵢ t').card) →
∃ f : ι' → α, Function.Injective f ∧ ∀ x, f x ∈ t' x)
(s : Finset ι) (hs : s.Nonempty) (hns : s ≠ univ) (hus : s.card = (s.bunionᵢ t).card) :
∃ f : ι → α, Function.Injective f ∧ ∀ x, f x ∈ t x :=
by
haveI := Classical.decEq ι
-- Restrict to `s`
let t' : s → Finset α := fun x' => t x'
rw [Nat.add_one] at hn
have card_ι'_le : Fintype.card s ≤ n :=
by
apply Nat.le_of_lt_succ
calc
Fintype.card s = s.card := Fintype.card_coe _
_ < Fintype.card ι := ((card_lt_iff_ne_univ _).mpr hns)
_ = n.succ := hn
rcases ih t' card_ι'_le (hall_cond_of_restrict ht) with ⟨f', hf', hsf'⟩
-- Restrict to `sᶜ` in the domain and `(s.bUnion t)ᶜ` in the codomain.
set ι'' := (s : Set ι)ᶜ with ι''_def
let t'' : ι'' → Finset α := fun a'' => t a'' \ s.bUnion t
have card_ι''_le : Fintype.card ι'' ≤ n :=
by
simp_rw [← Nat.lt_succ_iff, ← hn, ι'', ← Finset.coe_compl, coe_sort_coe]
rwa [Fintype.card_coe, card_compl_lt_iff_nonempty]
rcases ih t'' card_ι''_le (hall_cond_of_compl hus ht) with ⟨f'', hf'', hsf''⟩
-- Put them together
have f'_mem_bUnion : ∀ {x'} (hx' : x' ∈ s), f' ⟨x', hx'⟩ ∈ s.bUnion t :=
by
intro x' hx'
rw [mem_bUnion]
exact ⟨x', hx', hsf' _⟩
have f''_not_mem_bUnion : ∀ {x''} (hx'' : ¬x'' ∈ s), ¬f'' ⟨x'', hx''⟩ ∈ s.bUnion t :=
by
intro x'' hx''
have h := hsf'' ⟨x'', hx''⟩
rw [mem_sdiff] at h
exact h.2
have im_disj : ∀ (x' x'' : ι) (hx' : x' ∈ s) (hx'' : ¬x'' ∈ s), f' ⟨x', hx'⟩ ≠ f'' ⟨x'', hx''⟩ :=
by
intro _ _ hx' hx'' h
apply f''_not_mem_bUnion hx''
rw [← h]
apply f'_mem_bUnion
refine' ⟨fun x => if h : x ∈ s then f' ⟨x, h⟩ else f'' ⟨x, h⟩, _, _⟩
· exact hf'.dite _ hf'' im_disj
· intro x
split_ifs with h
· exact hsf' ⟨x, h⟩
· exact sdiff_subset _ _ (hsf'' ⟨x, h⟩)
#align hall_marriage_theorem.hall_hard_inductive_step_B HallMarriageTheorem.hall_hard_inductive_step_B
-/
end Fintype
variable [Finite ι]
#print HallMarriageTheorem.hall_hard_inductive /-
/-- Here we combine the two inductive steps into a full strong induction proof,
completing the proof the harder direction of **Hall's Marriage Theorem**.
-/
theorem hall_hard_inductive (ht : ∀ s : Finset ι, s.card ≤ (s.bunionᵢ t).card) :
∃ f : ι → α, Function.Injective f ∧ ∀ x, f x ∈ t x :=
by
cases nonempty_fintype ι
induction' hn : Fintype.card ι using Nat.strong_induction_on with n ih generalizing ι
rcases n with (_ | _)
· rw [Fintype.card_eq_zero_iff] at hn
exact ⟨isEmptyElim, isEmptyElim, isEmptyElim⟩
· have ih' :
∀ (ι' : Type u) [Fintype ι'] (t' : ι' → Finset α),
Fintype.card ι' ≤ n →
(∀ s' : Finset ι', s'.card ≤ (s'.bunionᵢ t').card) →
∃ f : ι' → α, Function.Injective f ∧ ∀ x, f x ∈ t' x :=
by
intro ι' _ _ hι' ht'
exact ih _ (Nat.lt_succ_of_le hι') ht' _ rfl
by_cases h : ∀ s : Finset ι, s.Nonempty → s ≠ univ → s.card < (s.bunionᵢ t).card
· exact hall_hard_inductive_step_A hn ht ih' h
· push_neg at h
rcases h with ⟨s, sne, snu, sle⟩
exact hall_hard_inductive_step_B hn ht ih' s sne snu (Nat.le_antisymm (ht _) sle)
#align hall_marriage_theorem.hall_hard_inductive HallMarriageTheorem.hall_hard_inductive
-/
end HallMarriageTheorem
/- warning: finset.all_card_le_bUnion_card_iff_exists_injective' -> Finset.all_card_le_bunionᵢ_card_iff_existsInjective' is a dubious translation:
lean 3 declaration is
forall {ι : Type.{u1}} {α : Type.{u2}} [_inst_1 : Finite.{succ u1} ι] [_inst_2 : DecidableEq.{succ u2} α] (t : ι -> (Finset.{u2} α)), Iff (forall (s : Finset.{u1} ι), LE.le.{0} Nat Nat.hasLe (Finset.card.{u1} ι s) (Finset.card.{u2} α (Finset.bunionᵢ.{u1, u2} ι α (fun (a : α) (b : α) => _inst_2 a b) s t))) (Exists.{max (succ u1) (succ u2)} (ι -> α) (fun (f : ι -> α) => And (Function.Injective.{succ u1, succ u2} ι α f) (forall (x : ι), Membership.Mem.{u2, u2} α (Finset.{u2} α) (Finset.hasMem.{u2} α) (f x) (t x))))
but is expected to have type
forall {ι : Type.{u2}} {α : Type.{u1}} [_inst_1 : Finite.{succ u2} ι] [_inst_2 : DecidableEq.{succ u1} α] (t : ι -> (Finset.{u1} α)), Iff (forall (s : Finset.{u2} ι), LE.le.{0} Nat instLENat (Finset.card.{u2} ι s) (Finset.card.{u1} α (Finset.bunionᵢ.{u2, u1} ι α (fun (a : α) (b : α) => _inst_2 a b) s t))) (Exists.{max (succ u2) (succ u1)} (ι -> α) (fun (f : ι -> α) => And (Function.Injective.{succ u2, succ u1} ι α f) (forall (x : ι), Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) (f x) (t x))))
Case conversion may be inaccurate. Consider using '#align finset.all_card_le_bUnion_card_iff_exists_injective' Finset.all_card_le_bunionᵢ_card_iff_existsInjective'ₓ'. -/
/-- This is the version of **Hall's Marriage Theorem** in terms of indexed
families of finite sets `t : ι → finset α` with `ι` finite.
It states that there is a set of distinct representatives if and only
if every union of `k` of the sets has at least `k` elements.
See `finset.all_card_le_bUnion_card_iff_exists_injective` for a version
where the `finite ι` constraint is removed.
-/
theorem Finset.all_card_le_bunionᵢ_card_iff_existsInjective' {ι α : Type _} [Finite ι]
[DecidableEq α] (t : ι → Finset α) :
(∀ s : Finset ι, s.card ≤ (s.bunionᵢ t).card) ↔
∃ f : ι → α, Function.Injective f ∧ ∀ x, f x ∈ t x :=
by
constructor
· exact HallMarriageTheorem.hall_hard_inductive
· rintro ⟨f, hf₁, hf₂⟩ s
rw [← card_image_of_injective s hf₁]
apply card_le_of_subset
intro
rw [mem_image, mem_bUnion]
rintro ⟨x, hx, rfl⟩
exact ⟨x, hx, hf₂ x⟩
#align finset.all_card_le_bUnion_card_iff_exists_injective' Finset.all_card_le_bunionᵢ_card_iff_existsInjective'
|
(* Title: Presburger-Automata/Exec.thy
Author: Stefan Berghofer, TU Muenchen, 2008-2009
*)
theory Exec
imports Presburger_Automata "~~/src/HOL/Library/Efficient_Nat"
begin
declare gen_dfs_simps [code]
lemma [code_unfold]: "op \<longrightarrow> = (\<lambda>P. op \<or> (\<not> P))"
by (simp add: fun_eq_iff)
lemma "\<forall>x. (\<exists>xa. int xa - int x = 5) \<or> (\<forall>xa xb. \<not> 6 \<le> int xb \<longrightarrow> int xb + (6 * int xa - int x) = 0 \<longrightarrow> int xa = 1)"
proof -
have "?thesis = eval_pf (Forall (Exist (Or (Eq [1, -1] 5)
(Forall (Forall (Imp (Neg (Le [-1, 0] -6)) (Imp (Eq [1, 6, 0, -1] 0) (Eq [0, 1] 1)))))))) []"
(is "_ = eval_pf ?P []")
by simp
also have "\<dots> = dfa_accepts (dfa_of_pf 0 ?P) []"
by (simp add: dfa_of_pf_correctness del: dfa_of_pf.simps)
also have "\<dots>" by eval
finally show ?thesis .
qed
lemma "\<forall>x xa xb. \<not> 2 \<le> int xb \<longrightarrow> int xb + (2 * int xa - int x) = 1 \<longrightarrow>
(\<forall>xb xc. \<not> 2 \<le> int xc \<longrightarrow> int xc + (2 * int xb - int x) = 0 \<longrightarrow> (\<exists>xa. 2 * int xa = int x) \<longrightarrow> xb = xa)"
proof -
have "?thesis = eval_pf (Forall (Forall (Forall (Imp (Neg (Le [-1] -2))
(Imp (Eq [1, 2, -1] 1) (Forall (Forall (Imp (Neg (Le [-1] -2))
(Imp (Eq [1, 2, 0, 0, -1] 0) (Imp (Exist (Eq [2, 0, 0, 0, 0, -1] 0)) (Eq [0, 1, 0, -1] 0))))))))))) []"
(is "_ = eval_pf ?P []")
by simp
also have "\<dots> = dfa_accepts (dfa_of_pf 0 ?P) []"
by (simp add: dfa_of_pf_correctness del: dfa_of_pf.simps)
also have "\<dots>" by eval
finally show ?thesis .
qed
definition "mk_dfa = dfa_of_pf 0"
definition "stamp = Forall (Imp (Le [-1] -8) (Exist (Exist (Eq [5, 3, -1] 0))))"
definition "stamp_false = Forall (Imp (Le [-1] -7) (Exist (Exist (Eq [5, 3, -1] 0))))"
definition "example = Forall (Exist (Or (Eq [1, -1] 5)
(Forall (Forall (Imp (Neg (Le [-1, 0] -6)) (Imp (Eq [1, 6, 0, -1] 0) (Eq [0, 1] 1)))))))"
definition "example2 = Forall (Forall (Forall (Imp (Neg (Le [-1] -2))
(Imp (Eq [1, 2, -1] 1) (Forall (Forall (Imp (Neg (Le [-1] -2))
(Imp (Eq [1, 2, 0, 0, -1] 0) (Imp (Exist (Eq [2, 0, 0, 0, 0, -1] 0)) (Eq [0, 1, 0, -1] 0))))))))))"
definition "example2_false = Forall (Forall (Forall (Imp (Neg (Le [-1] -2))
(Imp (Eq [1, 2, -1] 1) (Forall (Forall (Imp (Neg (Le [-1] -2))
(Imp (Eq [1, 2, 0, 0, -1] 0) (Imp (Exist (Eq [3, 0, 0, 0, 0, -1] 0)) (Eq [0, 1, 0, -1] 0))))))))))"
definition "harrison1 = Exist (Forall (Imp (Le [-1, 1] 0) (Exist (Exist
(And (Le [0, -1] 0) (And (Le [-1] 0) (Eq [8, 3, -1] 0)))))))"
definition "harrison2 = Exist (Forall (Imp (Le [-1, 1] 0) (Exist (Exist
(And (Le [0, -1] 0) (And (Le [-1] 0) (Eq [8, 7, -1] 0)))))))"
value "mk_dfa stamp"
value "min_dfa (mk_dfa stamp)"
value "mk_dfa stamp_false"
value "mk_dfa example"
value "mk_dfa example2"
value "mk_dfa example2_false"
value "mk_dfa harrison1"
value "mk_dfa harrison2"
end
|
#pragma once
#include "errors.h"
#include "usageinfoformat.h"
#include "detail/config.h"
#include "detail/configmacro.h"
#include "detail/configaccess.h"
#include "detail/flag.h"
#include <gsl/gsl>
#include <iostream>
#include <ostream>
#include <optional>
#include <map>
#include <utility>
namespace cmdlime{
enum class ErrorOutputMode{
STDOUT,
STDERR
};
template<typename TConfig>
class ConfigReader{
struct CommandHelpFlag{
bool value = false;
std::string usageInfo;
};
public:
ConfigReader(TConfig& cfg,
std::string programName,
const UsageInfoFormat& usageInfoFormat = {},
ErrorOutputMode errorOutputMode = ErrorOutputMode::STDERR)
: cfg_(cfg)
, programName_(std::move(programName))
, usageInfoFormat_(usageInfoFormat)
, errorOutput_(errorOutputMode == ErrorOutputMode::STDERR ? std::cerr : std::cout)
{}
int exitCode() const
{
return exitCode_;
}
bool read(const std::vector<std::string>& cmdLine)
{
addExitFlags();
if(!processCommandLine(cmdLine))
return exitOnError(-1);
if (processFlagsAndExit())
return exitOnFlag();
return success();
}
bool readCommandLine(int argc, char** argv)
{
auto cmdLine = std::vector<std::string>(argv + 1, argv + argc);
return read(cmdLine);
}
private:
void addExitFlags()
{
using NameProvider = typename detail::Format<detail::ConfigAccess<TConfig>::format()>::nameProvider;
auto helpFlag = std::make_unique<detail::Flag>(NameProvider::name("help"),
std::string{},
[this]()->bool&{return help_;},
detail::Flag::Type::Exit);
helpFlag->info().addDescription("show usage info and exit");
detail::ConfigAccess<TConfig>{cfg_}.addFlag(std::move(helpFlag));
if (!cfg_.versionInfo().empty()){
auto versionFlag = std::make_unique<detail::Flag>(NameProvider::name("version"),
std::string{},
[this]()->bool&{return version_;},
detail::Flag::Type::Exit);
versionFlag->info().addDescription("show version info and exit");
detail::ConfigAccess<TConfig>{cfg_}.addFlag(std::move(versionFlag));
}
detail::ConfigAccess<TConfig>(cfg_).addHelpFlagToCommands(programName_);
}
bool processCommandLine(const std::vector<std::string>& cmdLine)
{
try{
cfg_.read(cmdLine);
}
catch(const CommandError& e){
errorOutput_ << "Command '" + e.commandName() + "' error: " << e.what() << "\n";
std::cout << e.commandUsageInfo() << std::endl;
return false;
}
catch(const Error& e){
errorOutput_ << e.what() << "\n";
std::cout << cfg_.usageInfo(programName_) << std::endl;
return false;
}
return true;
}
bool processFlagsAndExit()
{
if (help_){
std::cout << cfg_.usageInfoDetailed(programName_, usageInfoFormat_) << std::endl;
return true;
}
if (version_){
std::cout << cfg_.versionInfo() << std::endl;
return true;
}
for (auto command : detail::ConfigAccess<TConfig>{cfg_}.commandList())
if (checkCommandHelpFlag(command))
return true;
return false;
}
bool checkCommandHelpFlag(gsl::not_null<detail::ICommand*> command)
{
if (command->isHelpFlagSet()){
std::cout << command->usageInfoDetailed() << std::endl;
return true;
}
for (auto childCommand : command->commandList())
if (checkCommandHelpFlag(childCommand))
return true;
return false;
}
bool exitOnFlag()
{
exitCode_ = 0;
return false;
}
bool exitOnError(int errorCode)
{
exitCode_ = errorCode;
return false;
}
bool success()
{
exitCode_ = 0;
return true;
}
private:
TConfig& cfg_;
std::string programName_;
UsageInfoFormat usageInfoFormat_;
std::map<int, CommandHelpFlag> commandHelpFlags_;
std::ostream& errorOutput_;
int exitCode_ = 0;
bool help_ = false;
bool version_ = false;
};
}
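// Usage sketch: assumes a config type `MyCfg` declared elsewhere with the
// cmdlime config macros; only the ConfigReader API defined above (read,
// readCommandLine, exitCode) is exercised here.
//
//   int main(int argc, char** argv)
//   {
//       MyCfg cfg;
//       cmdlime::ConfigReader<MyCfg> reader{cfg, "myprog"};
//       if (!reader.readCommandLine(argc, argv))
//           return reader.exitCode(); // parse error, --help or --version
//       // ... use the populated cfg ...
//       return 0;
//   }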
|
-- Andreas, 2017-07-13, issue #2645
-- Agda accepted this definition:
record S : Set₁ where
postulate
field
f : Set -- Now: error raised here
s : Set → S
s A = record {f = A }
|
Module Day.
Inductive day : Type :=
| monday : day
| tuesday : day
| wednesday : day
| thursday : day
| friday : day
| saturday : day
| sunday : day.
Definition next_day (d : day) : day :=
match d with
| monday => tuesday
| tuesday => wednesday
| wednesday => thursday
| thursday => friday
| friday => saturday
| saturday => sunday
| sunday => monday
end.
Example test_next_day : (next_day (next_day saturday)) = monday.
Proof. simpl. trivial. Qed.
End Day.
Require Extraction.
Extraction "day.ml" Day.
|
## to be sourced from main analysis script
ggp_theme_default <- theme(
## panel parameters
panel.border=element_blank(),
panel.background=element_rect(fill="white"),
panel.grid.major=element_line(color="white"),
panel.grid.minor=element_line(color="white"),
## plot parameters
plot.background=element_rect(fill="white"),
plot.margin=margin(t=0.5, r=0.5, b=0.5, l=0.5, unit="lines"),
plot.title=element_text(size=12, face="plain", hjust=0.5),
plot.subtitle=element_text(size=10, face="plain", hjust=0.5),
## axis parameters
axis.line.x.bottom=element_line(),
axis.line.y.left=element_line(),
axis.title.x=element_text(size=10, face="plain"),
axis.title.y=element_text(size=10, face="plain"),
axis.text.x=element_text(size=9, face="plain"),
axis.text.y=element_text(size=9, face="plain"),
# axis.ticks.x=element_blank(),
# axis.ticks.y=element_blank(),
## legend parameters
legend.key=element_blank(),
legend.key.size=unit(0.3, "cm"),
legend.key.width=unit(0.3, "cm"),
legend.position="none",
legend.title=element_blank(),
legend.text=element_text(size=10, margin=margin(t=0.2)),
legend.spacing.x=unit(0.2, "cm"),
legend.spacing.y=unit(0.2, "cm"),
## facet parameters
strip.background=element_rect(fill="black"),
strip.text=element_text(colour="white")
)
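## Usage sketch: assumes ggplot2 is attached and `df` has columns x and y.
## ggplot(df, aes(x = x, y = y)) +
##   geom_point() +
##   ggp_theme_default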
|
#include "zdk/zero.h"
//
// $Id$
//
// -------------------------------------------------------------------------
// This file is part of ZeroBugs, Copyright (c) 2010 Cristian L. Vlasceanu
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// -------------------------------------------------------------------------
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <pwd.h>
#include <stdio.h> // for FILE, needed by readline
#include <signal.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h> // getpid()
#endif
#include <sys/stat.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#ifdef __linux__
#include <execinfo.h>
#endif
#ifdef USE_GNU_READLINE
#include <readline/history.h>
#include <readline/readline.h> // GNU readline
#endif
#include <fstream> // ofstream
#include <iomanip>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <boost/tokenizer.hpp>
#include "dharma/canonical_path.h"
#include "dharma/directory.h"
#include "dharma/environ.h"
#include "dharma/exec_arg.h"
#include "dharma/process_name.h"
#include "dharma/sigutil.h"
#include "dharma/symbol_util.h"
#include "dharma/syscall_wrap.h"
#include "dharma/system_error.h"
#include "generic/auto_file.h"
#include "generic/state_saver.h"
#include "generic/temporary.h"
#include "interp/variant_impl.h"
#include "zdk/breakpoint_util.h"
#include "zdk/check_ptr.h"
#include "zdk/expr.h"
#include "zdk/history.h"
#include "zdk/fheap.h"
#include "zdk/shared_string_impl.h"
#include "zdk/signal_policy.h"
#include "zdk/thread_util.h"
#include "zdk/types.h"
#include "zdk/utility.h"
#include "zdk/variant_util.h"
#include "zdk/zobject_scope.h"
#include "readline/read_line.h"
#include "readline/terminal.h"
#include "debugger_shell.h"
#include "internal_cmd.h"
#include "source.h"
#include "sysid.h"
#include "thread.h"
#include "version.h"
using namespace std;
#if defined(USE_GNU_READLINE) && (RL_READLINE_VERSION < 0x0403)
#define rl_compentry_func_t Function
#endif
size_t max_array_range();
static const char banner[] = "Zero (v%d.%d.%d " __DATE__ " " __TIME__
")\nThe Linux Application Debugger (%s %s/%s) %s@%s\n%s";
static void auto_complete_breakpoints(const char*, vector<string>&);
static void auto_complete_debug_sym(const char*, vector<string>&);
static void auto_complete_disabled_breakpoints(const char*, vector<string>&);
static void auto_complete_enabled_breakpoints(const char*, vector<string>&);
static void auto_complete_core(const char*, vector<string>&);
static void auto_complete_fname(const char*, vector<string>&);
static void auto_complete_pid(const char*, vector<string>&);
static void auto_complete_sym(const char*, vector<string>&);
static void auto_complete_show(const char*, vector<string>&);
static void auto_complete_sig(const char*, vector<string>&);
static void auto_complete_thread(const char*, vector<string>&);
DebuggerShell* DebuggerShell::theDebugger_ = 0;
Command DebuggerShell::cmd_[] =
{
{ "@", &DebuggerShell::cmd_line, 0,
"Print current source file name and line number\n"
},
{ "%r", &DebuggerShell::cmd_reg, 0,
" <N>: print value of N-th general purpose CPU register\n"
},
{ "addmod", &DebuggerShell::cmd_addmod, auto_complete_fname,
" [module-name]: add a module (shared object or executable)\n"
"to the internally-managed list of modules needed by the "
"program being debugged\n"
},
{ "attach", &DebuggerShell::cmd_attach, auto_complete_pid,
" <pid>: attach debugger to a process. You can type\n"
"'attach <TAB>' for a list of all the running process ids."
},
{ "break", &DebuggerShell::cmd_break, auto_complete_sym,
" <addr> | <function-name> | <filename>:<line-number>\n"
"set a breakpoint at given address, function, or source line."
},
{ "bt", &DebuggerShell::cmd_where, 0,
" [depth]: print backtrace, optional argument controls the depth of\n"
"the backtrace (how many steps back to show); same as 'where'"
},
{ "clear", &DebuggerShell::cmd_clear, auto_complete_breakpoints,
" [addr...]: clear breakpoint(s) at given addresses."
},
{ "close", &DebuggerShell::cmd_open, 0, "stop redirecting output."
},
{ "continue", &DebuggerShell::cmd_continue, 0, ": resume program execution" },
#ifdef DEBUG_OBJECT_LEAKS
{ "count_objects", &DebuggerShell::cmd_count_objects, 0, 0 },
#endif
{ "detach", &DebuggerShell::cmd_detach, 0,
": detach from debugged process."
},
{ "disable", &DebuggerShell::cmd_enable, auto_complete_enabled_breakpoints,
" [addr [addr]]: Disable a list of breakpoints, specified by their addresses.\n"
},
{ "disassemble", &DebuggerShell::cmd_disassemble, 0,
"[at <addr>] [how_many]: Disassembles debugged program starting\n"
"at given address, in the given thread, or at current instruction pointer\n"
"if no address specified."
},
{ "down", &DebuggerShell::cmd_navigate_stack, 0, "move down the stack trace."
},
{ "dump", &DebuggerShell::cmd_dump, 0,
"begin_addr [end_addr]: dumps memory contents to console. If end_addr is not\n"
"specifed, a default chunk of 64 bytes, starting at begin_addr, is dumped. "
"If end_addr\nis lower than begin_addr, then the range is reversed.\n"
},
{ "enable", &DebuggerShell::cmd_enable, auto_complete_disabled_breakpoints,
" [addr [addr]]: Enable a list of breakpoints, specified by their addresses.\n"
},
{ "exec", &DebuggerShell::cmd_exec, auto_complete_fname,
"[progname] [arg [arg]]: Start a program and attach to it.\n"
"If the debugger is currently attached to another program,\n"
"it automatically detaches before attaching to the new one.\n\n"
"NOTE: arguments that follow the program name, and do not start with a dash,\n"
"are passed as command line arguments to the debugged program. If an argument\n"
"contains spaces, then you need to put double quotes around it.\n"
"Arguments starting with a dash are passed to the debugger engine and its\n"
"plug-ins. If you need to pass command line arguments starting with a dash to\n"
"the debugged program, prefix them with a standalone, double dash.\n"
"For example: zero foo -v --main passes -v and --main to the debugger engine,\n"
"while: zero foo -- -v --main passes -v and --main to the debugged program foo.\n"
},
{ "eval", &DebuggerShell::cmd_eval, auto_complete_debug_sym,
" [expr [expr]]: evaluate expressions. NOTE: This command\n"
"can be used to modify variables inside the debugged program.\n"
"Examples: eval i=42\n"
" eval i++\n"
" eval x*=3.14159"
},
{ "find", &DebuggerShell::cmd_find, 0,
"addr pattern: scan memory page for a pattern of bytes; "
"the pattern can be specified\n"
"as a string formatted according to the following grammar:\n"
"PATTERN := BYTELIST\n"
"BYTELIST := BYTE | BYTELIST BYTE\n"
"BYTE := 'CHAR' | HEXNUM | WILDCARD\n"
"CHAR := a-z | A-Z\n"
"HEXNUM := HEXDIGIT HEXDIGIT\n"
"HEXDIGIT := 0-9 | a-f | A-F\n"
"WILDCARD := ?\n"
"Empty spaces that are not enclosed in single-quotes are ignored\n"
},
{ "frame", &DebuggerShell::cmd_frame, 0,
" <number>: Select a given stack frame and make it current. "
"Subsequent\n"
"commands will be executed in the context of this frame."
},
{ "help", &DebuggerShell::cmd_help, auto_complete_command },
{ "handle", &DebuggerShell::cmd_handle, auto_complete_sig,
" [stop][nostop][pass][nopass|ignore] <signum>: change signal handling.\n"
"Tell the debugger to stop or not when signal occurs, and whether to pass\n"
"the signal to the debugged program or to ignore it. If ignored, the debugged\n"
"program will not see the signal -- note that this may break the program if\n"
"it relies on a signal notification (such as SIGPOLL, for e.g.)."
},
{ "instruction", &DebuggerShell::cmd_step, 0,
": execute one machine instruction (single-step program)."
},
{ "loadcore", &DebuggerShell::cmd_loadcore,
auto_complete_core, "load a core file in the debugger"
},
{ "lookup", &DebuggerShell::cmd_lookup, auto_complete_sym,
" [name [name]] [/c]: lookup symbols. If /c option is given (count),\n"
"the command prints just the number of symbols that strictly match the names.\n"
"Otherwise, each matching symbol is printed.\n"
},
{ "open", &DebuggerShell::cmd_open, auto_complete_fname,
" <filename>: open specified filename and redirect all subsequent\n"
"console output to it, until a 'close' command is invoked.\n"
"This may be useful when dumping large stack traces and symbol tables\n"
},
{ "next", &DebuggerShell::cmd_next, 0,
": step program until the next line is reached,\n"
"without diving into function calls."
},
{ "print", &DebuggerShell::cmd_print, auto_complete_debug_sym,
" [name [name] ]: print symbols in current scope; symbol names can be\n"
"specified as arguments to this command; if no symbol name is given,\n"
"all symbols in scope are printed."
},
{ "line", &DebuggerShell::cmd_line, 0,
"Print current line number\n"
},
{ "list", &DebuggerShell::cmd_list, auto_complete_fname,
" [<] [line-number] [how-many] [filename]: lists source file.\n"
"'<' starts the listing at the beginning of the "
"current function.\n"
"'list 0' resets listing to the line number that\n"
"corresponds to the current instruction pointer."
},
{ "quit", &DebuggerShell::cmd_quit },
{ "restart", &DebuggerShell::cmd_restart, 0,
": restart the current program\n"
},
{ "ret", &DebuggerShell::cmd_return, 0,
": step program until the current function returns\n"
},
{ "run", &DebuggerShell::cmd_exec, auto_complete_fname,
"[progname] [arg [arg]]: Start a program and attach to it.\n"
"If the debugger is currently attached to another program,\n"
"it automatically detaches before attaching to the new one.\n\n"
"NOTE: This command behaves like exec, with the difference that the\n"
"program name and all of its command line arguments are expanded\n"
"according to shell rules. For example, run a.out 'ls'\n"
"will execute the ls command, and pass its tokenized output as command\n"
"line arguments to a.out\n"
},
{ "set_next", &DebuggerShell::cmd_set_next, 0,
"<file>:<line>|<addr>>\n"
"Set the line or address to be executed next, by forcing the\n"
"instruction pointer (aka program counter) to the specified value.\n"
},
{ "symtab", &DebuggerShell::cmd_symtab, 0,
" [/a] dump all symbol tables that are currently loaded\n"
"sorted by demangled name (the default) or by address if\n"
"/a or /addr is specified."
},
/* undocumented: "show regs /all" attempts to display FPU and XMM registers
in addition to the general regs (the implementation needs reviewing and testing;
"show threads /count" displays the number of threads */
{ "show", &DebuggerShell::cmd_show, auto_complete_show,
" break | modules | regs | signals | threads\n"
" list breakpoints/modules/registers/signal policies/threads"
},
{ "step", &DebuggerShell::cmd_step, 0,
": execute on source line (i.e. step program until a different source line\n"
"is reached). If a function call is encountered, dive into the function.\n"
},
{ "thread", &DebuggerShell::cmd_switch_thread, auto_complete_thread,
" <number>: switch view to the specified thread."
"Subsequent commands (bt, where, frame, etc.) will happen in the\n"
"context of the new thread."
},
{ "up", &DebuggerShell::cmd_navigate_stack, 0, "move up the stack trace."
},
{ "watch", &DebuggerShell::cmd_watch, auto_complete_sym,
"<variable-name> [/w |/rw]: monitor memory accesses to a program variable.\n"
"When an access occurs, the program will break in the\n"
"debugger, and user-interactive mode will be entered.\n"
"The /w flag tells the engine to break whenever the variable is being written.\n"
"If no mode is specified, then /w will be assumed by default.\n"
"The /rw (read-write) flags instruct the engine to stop on all accesses.\n"
"Please NOTE that the debugger engine is using hardware debug registers for\n"
"monitoring variables. On the x86 family, there can be as much as 4 (four)\n"
"hardware breakpoints active at a given time."
},
{ "whatis", &DebuggerShell::cmd_eval, auto_complete_debug_sym,
" <symbol>|<expression>|<constant>: evaluate the given argument, which may\n"
" be a variable's name, an expression, or a constant, and print its type.\n"
},
{ "where", &DebuggerShell::cmd_where, 0,
" [depth]: print backtrace, optional argument controls the depth of\n"
"the backtrace (how many steps back to show); same as 'bt'"
},
{ "yield", &DebuggerShell::cmd_yield, 0, ""
},
};
#define ELEM_COUNT(x) sizeof(x)/sizeof((x)[0])
static const char prompt[] = "zero> ";
namespace DebugSymbolHelpers
{
class ZDK_LOCAL Base : public DebugSymbolEvents
{
bool is_expanding(DebugSymbol*) const { return false; }
void symbol_change(DebugSymbol*, DebugSymbol*) { }
protected:
BEGIN_INTERFACE_MAP(Base)
INTERFACE_ENTRY(DebugSymbolEvents)
END_INTERFACE_MAP()
};
/**
* Print debug symbols to console
*/
class ZDK_LOCAL Print : public Base
{
int numericBase_;
bool notify(DebugSymbol* sym)
{
if (sym)
{
DebuggerShell::instance().print_debug_symbol(
sym, index_++, depth_, numericBase_);
}
return true;
}
int numeric_base(const DebugSymbol*) const
{
return numericBase_;
}
public:
size_t index_;
size_t depth_;
public:
Print() : numericBase_(10), index_(0), depth_(1) {}
void set_numeric_base(int base) { numericBase_ = base; }
};
/**
* Populate a vector of strings with the debug symbols
* that match a string typed by the used.
*/
class ZDK_LOCAL AutoComplete : public Base
{
const char* token_;
size_t len_;
vector<string>& matches_;
public:
AutoComplete(const char* token, vector<string>& matches)
: token_(token), len_(0), matches_(matches)
{
if (token_) { len_ = strlen(token_); }
}
bool notify(DebugSymbol* sym)
{
assert(sym);
if (strncmp(token_, sym->name()->c_str(), len_) == 0)
{
matches_.push_back(sym->name()->c_str());
}
return true;
}
int numeric_base(const DebugSymbol*) const
{
return 0;
}
};
}
namespace
{
/**
* Helper callback used by auto-complete; I cannot pass
* a vector directly to DebuggerCommand::auto_complete,
* because of interface rules.
*/
class ZDK_LOCAL StringEnum : public EnumCallback<const char*>
{
public:
explicit StringEnum(vector<string>& vs) : vs_(vs)
{
vs_.clear();
}
void notify(const char* s)
{
assert(s);
vs_.push_back(s);
}
private:
StringEnum(const StringEnum&);
StringEnum& operator=(const StringEnum&);
vector<string>& vs_;
};
}
/**
* Fill out MATCHES with all shell commands that match TEXT
*/
void DebuggerShell::auto_complete_command
(
const char* text,
vector<string>& matches
)
{
assert(text);
assert(matches.empty());
if (matches.empty())
{
const size_t len = strlen(text);
const CommandList& commands = instance().commands_;
CommandList::const_iterator i = commands.begin();
for (; i != commands.end(); ++i)
{
if (strncmp((*i)->name(), text, len) == 0)
{
matches.push_back((*i)->name());
}
}
}
}
/**
* Helper for the 'attach' command: find the program ids
* that we can attach to, and that are a potential match
* (either by PID or by the corresponding program name).
*/
static void
auto_complete_pid(const char* text, vector<string>& matches)
{
class ZDK_LOCAL HelperCallback : public EnumCallback<const Runnable*>
{
const char* text_; // input
size_t len_;
vector<string>& matches_; // output
public:
HelperCallback(const char* text, vector<string>& matches)
: text_(text)
, len_(strlen(text))
, matches_(matches)
{}
void notify(const Runnable* task)
{
ostringstream outs;
outs << task->pid() << " (" << task->name() << ")";
if (passwd* pwd = getpwuid( task->ruid() ))
{
outs << " [" << pwd->pw_name << "]";
}
if (strncmp(outs.str().c_str(), text_, len_) == 0
|| strncmp(task->name(), text_, len_) == 0)
{
matches_.push_back(outs.str());
}
}
};
assert(text);
HelperCallback cb(text, matches);
DebuggerShell::instance().enum_user_tasks(&cb);
}
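// For illustration: a task with pid 1234, named "bash", owned by "joe"
// is offered as the candidate string "1234 (bash) [joe]", and is kept if
// the typed prefix matches either that string or the bare name "bash".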
/**
* Match symbols from the currently debugged program against a
* given string (the current word typed in the shell) and fill
* out possible matches
*/
static void auto_complete_sym(const char* s, vector<string>& matches)
{
assert(s);
assert(matches.empty());
const size_t len = strlen(s);
const TargetManager& targets = DebuggerShell::instance();
Lock<Mutex> lock(targets.mutex());
TargetManager::const_iterator t = targets.begin(lock);
const TargetManager::const_iterator end = targets.end(lock);
for (; t != end; ++t)
{
SymbolEnum symEnum;
(*t)->symbols()->enum_symbols(0, &symEnum);
SymbolEnum::const_iterator i(symEnum.begin());
for (; i != symEnum.end(); ++i)
{
const char* name = (*i)->name()->c_str();
if (*s == 0)
{
matches.push_back(name);
}
else
{
const string symName((*i)->demangled_name(false)->c_str());
if (strncmp(symName.c_str(), s, len) == 0)
{
matches.push_back(symName);
}
}
}
}
}
static void
auto_complete_debug_sym(const char* s, vector<string>& matches)
{
Debugger& debugger = DebuggerShell::instance();
if (Thread* thread = debugger.get_thread(DEFAULT_THREAD))
{
DebugSymbolHelpers::AutoComplete expand(s, matches);
debugger.enum_variables(thread, "", 0, &expand, LOOKUP_MODULE, true);
}
//auto_complete_sym(s, matches);
}
/**
* Constructs a list of files in a given directory (path);
* fills out the files vector.
*/
static void list_dir(
const char* path,
vector<string>& files,
const string& pattern)
{
struct stat st = { };
Directory dir(path, (pattern + '*').c_str());
Directory::const_iterator i = dir.begin();
for (; i != dir.end(); ++i)
{
string tmp = *i;
if (stat(tmp.c_str(), &st) == 0 && S_ISDIR(st.st_mode))
{
tmp += '/';
}
files.push_back(tmp);
}
}
static void
auto_complete_fname(const char* fname, vector<string>& matches)
{
assert(matches.empty());
assert(fname);
string path(fname);
string pattern;
const size_t n = path.rfind('/');
if (n != string::npos)
{
pattern = path.substr(n + 1);
path.erase(n);
}
try
{
list_dir(path.c_str(), matches, pattern);
}
catch (...)
{
// make sure we don't throw if path is not a directory
}
if (matches.empty())
{
pattern = fname;
list_dir(".", matches, pattern);
}
}
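// e.g. completing "src/ma" lists directory entries matching "src/ma*"
// (directories get a trailing '/'); if nothing matches as a path, "ma*"
// is retried in the current directory.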
static void
auto_complete_core(const char* fname, vector<string>& matches)
{
if (*fname == 0)
{
fname = "core";
}
auto_complete_fname(fname, matches);
}
namespace
{
enum EnumBP
{
ENUM_BP_ALL,
ENUM_BP_DISABLED,
ENUM_BP_ENABLED,
};
/**
* helper for auto_complete_breakpoints,
* auto_complete_disabled_breakpoints, and
* auto_complete_enabled_breakpoints
*/
class ZDK_LOCAL BreakPointEnum
: public EnumCallback<volatile BreakPoint*>
{
private:
const char* tok_;
size_t len_;
vector<string>& matches_;
EnumBP type_;
public:
BreakPointEnum(const char* tok, vector<string>& m, EnumBP type)
: tok_(tok)
, len_(strlen(tok))
, matches_(m)
, type_(type)
{ }
void notify(volatile BreakPoint* bpnt)
{
assert(bpnt);
if (bpnt->enum_actions("USER") == 0)
{
return;
}
if (type_ == ENUM_BP_DISABLED)
{
if (!has_disabled_actions(*bpnt))
{
return;
}
}
else if (type_ == ENUM_BP_ENABLED)
{
// note: no extra filtering is done here -- any breakpoint
// with USER actions is offered as an "enabled" candidate
}
ostringstream os;
os << hex << showbase << CHKPTR(bpnt)->addr();
if (len_ == 0)
{
matches_.push_back(os.str());
}
else if (strncmp(tok_, os.str().c_str(), len_) == 0)
{
matches_.push_back(os.str());
}
}
};
} // namespace
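// The candidates produced above are breakpoint addresses formatted in hex
// with a "0x" prefix (e.g. 134512640 becomes "0x8048000"), so the prefix
// typed by the user is matched against that textual form.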
static void
auto_complete_breakpoints(const char* tok, vector<string>& matches)
{
assert(tok);
BreakPointManager* mgr = DebuggerShell::instance().breakpoint_manager();
if (mgr)
{
BreakPointEnum helper(tok, matches, ENUM_BP_ALL);
mgr->enum_breakpoints(&helper);
}
}
static void
auto_complete_disabled_breakpoints(const char* tok, vector<string>& matches)
{
assert(tok);
BreakPointManager* mgr = DebuggerShell::instance().breakpoint_manager();
if (mgr)
{
BreakPointEnum helper(tok, matches, ENUM_BP_DISABLED);
mgr->enum_breakpoints(&helper);
}
}
static void
auto_complete_enabled_breakpoints(const char* tok, vector<string>& matches)
{
assert(tok);
BreakPointManager* mgr = DebuggerShell::instance().breakpoint_manager();
if (mgr)
{
BreakPointEnum helper(tok, matches, ENUM_BP_ENABLED);
mgr->enum_breakpoints(&helper);
}
}
static void
auto_complete_thread(const char* tok, vector<string>& matches)
{
const size_t len = tok ? strlen(tok) : 0;
size_t numThreads = 0;
const TargetManager& targets = DebuggerShell::instance();
Lock<Mutex> lock(targets.mutex());
TargetManager::const_iterator t = targets.begin(lock);
const TargetManager::const_iterator end = targets.end(lock);
for (; t != end; ++t)
{
numThreads += (*t)->enum_threads();
}
for (size_t n = 0; n != numThreads; ++n)
{
ostringstream buf;
buf << n;
if (strncmp(buf.str().c_str(), tok, len) == 0)
{
matches.push_back(buf.str());
}
}
}
/**
* Some commands expect the thread to be non-NULL, and active;
* check the conditions and throw exception if not satisfied.
*/
static void check_thread(Thread* thread)
{
if (thread == 0)
{
throw runtime_error("No thread");
}
if (thread_finished(*thread))
{
throw runtime_error("Thread has exited");
}
assert(thread_is_attached(*thread));
}
/**
* Get the current program counter
*/
static addr_t frame_program_count(Thread& thread)
{
assert(thread.stack_trace());
if (const Frame* frame = thread_current_frame(&thread))
{
return frame->program_count();
}
else
{
return thread.program_count();
}
}
DebuggerShell& DebuggerShell::instance()
{
assert(theDebugger_);
return *theDebugger_;
}
void DebuggerShell::handle_signal(int signum, siginfo_t* info, void*)
{
#ifdef DEBUG
if (info)
{
fprintf(stderr, "%s: signal %d, sender=%d instance=%p\n", __func__, signum, info->si_pid, theDebugger_);
}
#endif
if (theDebugger_)
{
theDebugger_->handle_signal_impl(signum);
}
else
{
Log::close();
_exit(signum);
}
}
static const pthread_t __main__ = pthread_self();
static Mutex sigmutex;
static void bug_report()
{
#ifdef __linux__
static char buf[1024];
static void* trace[256];
int nframes = backtrace(trace, 256);
snprintf(buf, sizeof buf, "backtrace-%d.zero", getpid());
auto_file f1(fopen(buf, "w"));
auto_file f2(fopen("version.zero", "w"));
int r;
if (f1 && f2)
{
for (int i = 0; i < nframes; ++i)
{
fprintf(f1.get(), "%p\n", trace[i]);
}
fprintf(f2.get(), "%s\n", VERSION);
f1.reset();
f2.reset();
static const char cmd[] =
#if DEBUG
"(cat /proc/%d/maps && echo \"%s\") > maps.zero";
#else
"(cat /proc/%d/maps && echo \"%s-release\") > maps.zero";
#endif
snprintf(buf, sizeof buf - 1, cmd, getpid(), SYSID);
r = system(buf);
}
else
{
fprintf(stderr, "\nPlease send a BUG REPORT to [email protected]\n\n");
fprintf(stderr, "build=%s\n", VERSION);
fprintf(stderr, "--- backtrace ---\n");
for (int i = 0; i < nframes; ++i)
{
fprintf(stderr, "%p\n", trace[i]);
}
snprintf(buf, sizeof buf, "cat /proc/%d/maps", getpid());
r = system(buf);
}
(void)r;
#endif
}
void DebuggerShell::handle_signal_impl(int signo)
{
static int interruptCount = 0;
static time_t lastInterrupt = time(0);
const time_t now = time(0);
assert(signo);
Lock<Mutex> lock(sigmutex);
BlockSignalsInScope block(signo);
try
{
pthread_t self = pthread_self();
switch (signo)
{
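// a single Ctrl-C (SIGINT) interrupts the debuggee; three interrupts
// arriving less than a second apart force the debugger itself to exit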
case SIGINT:
if (difftime(now, lastInterrupt) > 1)
{
interruptCount = 0;
}
else if (++interruptCount >= 3)
{
Log::close();
_exit(1);
}
lastInterrupt = now;
set_signaled(true);
if (!has_corefile())
{
stop();
}
break;
case SIGALRM:
set_signaled(true);
if (!has_corefile())
{
stop();
}
break;
default:
// restore default handling for this signal so
// that we don't loop forever
signal(signo, SIG_DFL);
if (self == __main__)
{
save_properties();
// make sure we're detached from the debugged program
// so that we don't leave it in an unstable state (with
// breakpoints set, etc.)
detach();
}
fprintf(stderr, "INTERNAL ERROR: signal=%d\n", signo);
bug_report();
// commit suicide, so that we can take a post-mortem
// look at the core file and see what went wrong
kill(0, signo);
}
}
catch (const exception& e)
{
fprintf(stderr, "INTERNAL ERROR in handling signal=%d: %s\n",
signo, e.what());
}
catch (...)
{
fprintf(stderr,
"INTERNAL ERROR: unknown exception in handling signal=%d\n",
signo);
}
}
DebuggerShell::DebuggerShell()
: disasmAddr_(0)
, resume_(false)
, current_(0)
, promptLoopActive_(false)
, disasmLineCount_(0)
{
assert(theDebugger_ == 0);
tcsetpgrp(0, getpgrp());
#ifdef USE_GNU_READLINE
// initialize readline()
// rl_catch_signals = 0; // do not use readline sig handlers
rl_basic_word_break_characters = " \"\\''$><=;|&{(";
// hook my auto-complete into readline
rl_completion_entry_function = (rl_compentry_func_t*)(auto_complete);
#endif
// initialize commands
commands_.reserve(ELEM_COUNT(cmd_));
for (size_t i(0); i != ELEM_COUNT(cmd_); ++i)
{
RefPtr<DebuggerCommand> ptr(new InternalCommand(cmd_[i]));
commands_.push_back(ptr);
}
// initialize signals
struct sigaction sa;
memset(&sa, 0, sizeof sa);
sa.sa_sigaction = handle_signal;
sa.sa_flags = SA_SIGINFO;
sigaction(SIGINT, &sa, 0);
sigaction(SIGSEGV, &sa, 0);
sigaction(SIGTERM, &sa, 0);
sigaction(SIGALRM, &sa, 0);
if (env::get_bool("ZERO_CATCH_SIGABRT", true))
{
sigaction(SIGABRT, &sa, 0);
}
theDebugger_ = this;
}
DebuggerShell::~DebuggerShell() throw()
{
assert(theDebugger_ == this);
theDebugger_ = 0;
}
void DebuggerShell::print_banner() const
{
struct utsname sysinfo;
uname(&sysinfo);
char buf[1024] = { 0 };
snprintf(buf, sizeof(buf),
banner, ENGINE_MAJOR, ENGINE_MINOR, ENGINE_REVISION,
sysinfo.sysname, sysinfo.release, sysinfo.machine,
USER, HOSTNAME, copyright());
cout << buf << endl;
}
void DebuggerShell::print_help(ostream& outs) const
{
outs << "Usage: zero [pid | progname] [-option ...]" << endl;
outs << "Options:" << endl;
outs << " -v|--verbose increase verbosity level" << endl;
outs << " -h|--help print this help" << endl;
outs << " --ui-disable run in text (console) mode" << endl;
outs << endl;
outs << "For help on interactive commands, type 'help' at ";
outs << "the command the prompt." << endl;
outs << "In GUI mode, check out the online help" << endl;
}
void DebuggerShell::set_current_thread(Thread* thread)
{
current_ = thread;
listing_.reset();
}
void DebuggerShell::resume(bool flag)
{
resume_ = flag;
}
bool DebuggerShell::is_resumed() const
{
return resume_;
}
void DebuggerShell::on_idle()
{
#ifdef DEBUG
print_counted_objects(__PRETTY_FUNCTION__);
#endif
prompt_user(0);
}
void DebuggerShell::begin_interactive_mode (
Thread* thread,
EventType eventType,
Symbol* sym)
{
DebuggerEngine::begin_interactive_mode(thread, eventType, sym);
prompt_user(thread, eventType);
}
/**
* Called by the DebuggerBase when the debugged program
* receives a signal, or when a thread exits.
*/
void DebuggerShell::on_event(Thread& thread)
{
DebuggerEngine::on_event(thread);
}
/**
* Helper wrapper around readline
*/
static string read_line(const char* prompt, bool addToHistory = true)
{
static bool firstTime = true;
if (firstTime)
{
firstTime = false;
cout << "Type 'help' for a list of commands, ";
cout << "<tab> to auto-complete.\n";
}
sigset_t signals;
sigemptyset(&signals);
sigaddset(&signals, SIGINT);
// BlockSignalsInScope block(signals);
string result;
#ifdef USE_GNU_READLINE
// Create a static instance of a class to
// cleanup terminal settings after readline.
static struct Cleanup
{
Cleanup() {}
~Cleanup() throw()
{
rl_cleanup_after_signal();
}
} __cleanup__;
// GNU readline is not const-correct, we need to const_cast
if (char* line = readline(const_cast<char*>(prompt)))
{
result = line;
free(line);
}
if (addToHistory && !result.empty())
{
add_history(const_cast<char*>(result.c_str()));
}
#else
static ReadLine rl;
rl.set_prompt(prompt);
rl.set_auto_complete_func(DebuggerShell::auto_complete_impl);
rl.read(result);
if (addToHistory && !result.empty())
{
rl.add_history_entry(result);
}
#endif // !USE_GNU_READLINE
// trim trailing spaces
while (size_t n = result.size())
{
if (result[--n] == ' ')
{
result.resize(n);
}
else
{
break;
}
}
return result;
}
bool ZDK_EXPORT ptrace_error(int err)
{
string msg("ptrace: ");
msg += strerror(err);
msg += " [C]ontinue or Abort? ";
string resp = read_line(msg.c_str(), false);
if (resp == "a" || resp == "A" || resp == "abort")
{
abort();
}
return false;
}
/**
* Determine the event type based upon thread's state
*/
static EventType guess_event_type(Thread* thread)
{
EventType event = E_PROMPT;
if (thread)
{
event = thread_finished(*thread)
? E_THREAD_FINISHED : E_THREAD_STOPPED;
}
return event;
}
void DebuggerShell::print_return_value(const RefPtr<Thread>& thread)
{
assert(thread);
RefPtr<DebugSymbol> ret = thread->func_return_value();
if (ret)
{
assert(ret->is_return_value());
print_debug_symbol(ret.get(), 0, 1, 0);
}
}
void DebuggerShell::show_listing(const RefPtr<Thread>& thread)
{
vector<string> tmp;
tmp.push_back("list");
tmp.push_back("0");
cmd_list(thread.get(), tmp);
print_return_value(thread);
}
/**
* Prompt the user for a command; if a plugin is interested
* in handling the event, then the engine doesn't show the prompt,
* assuming that the plugin will prompt the user instead.
* This mechanism allows plugins to implement custom command line
* interface, or graphical user interfaces.
* @note the first plugin that responds TRUE to the publish_event()
* call will grab exclusive control (i.e. there can be only one active
* prompt at a given time).
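*
* A hypothetical console session (the address is illustrative):
*
*   zero> break main
*   Breakpoint set at: 0x8048430 main
*   zero> continue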
*/
void DebuggerShell::prompt_user(RefPtr<Thread> thread, EventType event)
{
// guard against multiple re-entry
assert(!promptLoopActive_);
Temporary<bool> setFlag(promptLoopActive_, true);
if (event == E_NONE)
{
event = guess_event_type(thread.get());
}
current_ = thread;
// Prompt loop: read commands from console and execute them;
// stay in the loop if command returns false, break otherwise.
for (resume_ = false, disasmAddr_ = 0; !is_quitting();)
{
try
{
// publish event to all plugins
const bool noPrompt = publish_event(thread.get(), event);
set_silent(noPrompt);
// if publish_event() returns false it means that no plugin
// was interested in grabbing the event: we show the prompt,
// read a line of input and process the command.
if (!noPrompt && !is_quitting() && !resume_)
{
if (thread.get() && event != E_PROMPT)
{
show_listing(thread);
}
const string cmdline = read_line(prompt);
if (cmdline.empty())
{
event = E_PROMPT; // keep prompting
continue;
}
addr_t tmpAddr = disasmAddr_;
resume_ = dispatch_command(thread.get(), cmdline);
// disasmAddr_ is used for resuming disassembly --
// if it has not changed, reset it so that we keep
// listing at the current instruction pointer
// Q: should "help" be an exception to this?
if (disasmAddr_ == tmpAddr)
{
disasmAddr_ = 0;
}
}
if (resume_)
{
dbgout(0) << __func__ << ": resuming event loop" << endl;
listing_.reset();
break;
}
// we are staying in the loop; the event that gets
// passed to plugins in the next iteration is merely
// a prompt rather than some thread event
event = E_PROMPT;
if (current_.get() && current_ != thread)
{
thread = current_;
assert(thread);
}
}
catch (const exception& e)
{
critical_error(thread.get(), e.what());
event = E_PROMPT;
if (signaled())
{
break;
}
quit();
}
if (!this->is_attached()) // detached from program?
{
current_ = thread = 0;
break;
}
else if (!thread || thread_finished(*thread))
{
thread = get_thread(DEFAULT_THREAD);
if (thread.get())
{
event = guess_event_type(thread.get());
cout << "[" << thread->lwpid();
cout << "]: is now current" << endl;
}
}
} // end prompt loop
}
/**
* Lookup command by name. The search is linear; the set of commands
* is not very large, so it shouldn't be a problem
*/
DebuggerCommand* DebuggerShell::lookup_command(const string& name)
{
DebuggerCommand* result = 0;
CommandList::iterator i = commands_.begin();
for (; i != commands_.end(); ++i)
{
if ((*i)->name() == name)
{
result = (*i).operator->();
break;
}
}
return result;
}
void DebuggerShell::auto_complete_impl (
const char* text,
const string& line,
vector<string>& matches)
{
typedef boost::char_separator<char> Delim;
typedef boost::tokenizer<Delim> Tokenizer;
Tokenizer tok(line, Delim(" "));
string firstWord;
if (tok.begin() != tok.end())
{
firstWord = *tok.begin();
}
DebuggerCommand* cptr = instance().lookup_command(firstWord);
if (cptr == 0)
{
auto_complete_command(text, matches);
}
else
{
StringEnum stringEnum(matches);
cptr->auto_complete(text, &stringEnum);
}
}
#ifdef USE_GNU_READLINE
char* DebuggerShell::auto_complete(const char* text, size_t n)
{
assert(text);
static vector<string> matches;
try
{
// readline is a C library:
// this function must not throw C++ exceptions
if (n == 0)
{
assert(rl_line_buffer);
// initialize the list of possible matches
auto_complete_impl(text, rl_line_buffer, matches);
}
if (matches.size() > n)
{
return strdup(matches[n].c_str());
}
matches.clear();
}
catch (const exception& e)
{
cerr << endl << __func__ << ": " << e.what() << endl;
}
return 0;
}
#endif
bool DebuggerShell::command(const char* cmd, Thread* thread)
{
bool result = false;
if (cmd)
{
result = dispatch_command(thread, cmd);
}
return result;
}
bool DebuggerShell::dispatch_command( Thread* thread, const string& cmd)
{
assert(!cmd.empty());
// break the line into a vector of tokens
//boost::char_separator<char> delim(" ");
typedef boost::escaped_list_separator<char> Delim;
typedef boost::tokenizer<Delim> Tokenizer;
Tokenizer tok(cmd, Delim('\\', ' ', '\"'));
vector<string> argv;
Tokenizer::const_iterator it = tok.begin();
for (; it != tok.end(); ++it)
{
if (!(*it).empty())
{
argv.push_back(*it);
}
}
// lookup the first token in the list of supported commands
if (argv.empty())
{
return false; // the line was nothing but whitespace
}
DebuggerCommand* cptr = lookup_command(argv[0]);
bool result = false;
if (cptr == 0)
{
argv.insert(argv.begin(), "eval");
cptr = lookup_command(argv[0]);
}
assert(cptr);
vector<const char*> args(argv.size() + 1);
transform(argv.begin(), argv.end(), args.begin(),
mem_fun_ref(&string::c_str));
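// args was sized with one extra slot; it stays value-initialized to NULL,
// presumably serving as an end-of-argv marker for DebuggerCommand::execute()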
assert(!args.empty());
try
{
result = cptr->execute(thread, &args[0]);
}
catch (const exception& e)
{
cout << e.what() << endl;
}
// hack: typing "list" consecutively should continue
// listing from where we left, other commands should
// reset the listing to start at the line corresponding
// to the current instruction pointer
// Q: should "help" be an exception to this?
if ((argv[0] != "list") && listing_.get())
{
listing_->set_current_line(0);
}
return result;
}
static void sym_to_addr(const RefPtr<Symbol>& sym, vector<addr_t>& addr, bool bpoint)
{
ZObjectScope scope;
if (!bpoint || sym->table(&scope)->addr())
{
addr.push_back(sym->addr());
}
else
{
sym->set_deferred_breakpoint(BreakPoint::GLOBAL);
}
}
/**
* Helper function. Whenever there is an ambiguity regarding the symbol
* to which a command should apply, prompt the user with the possible
* matches.
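*
* For example, "break foo" with two overloads of foo might display
* (addresses and file names are illustrative):
*
*   which foo?
*    [0] <escape>
*    [1] foo(int) 0x08048510 a.cpp
*    [2] foo(double) 0x08048550 a.cpp
*    [3] <all>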
*/
static vector<addr_t> prompt_to_select_overload(
const SymbolEnum& symbols,
bool breakpoint,
bool multipleSelectionOk)
{
assert(!symbols.empty());
vector<addr_t> addresses;
RefPtr<Symbol> sym = *symbols.begin();
for (;;)
{
if (symbols.size() == 1)
{
sym_to_addr(sym, addresses, breakpoint);
break;
}
// Display a list of possible selections
cout << "which " << sym->demangled_name(false)->c_str();
cout << '?' << endl;
cout << " [0] <escape>" << endl;
for (size_t n(0); n < symbols.size(); ++n)
{
const RefPtr<Symbol>& s = symbols[n];
cout << " [" << n + 1 << "] "
<< s->demangled_name()
<< " 0x" << setw(sizeof(addr_t) * 2) << setfill('0')
<< hex << s->addr() << dec
<< ' ' << s->file()->c_str()
<< endl;
}
if (multipleSelectionOk)
{
cout << " [" << symbols.size() + 1 << "] <all>" << endl;
}
string line = read_line("Enter selection: ", false);
// no history ^
if (line.empty() || !isdigit(line[0]))
{
continue;
}
size_t i = strtoul(line.c_str(), 0, 0);
if (i == 0)
{
break;
}
else if ((--i) > symbols.size()
|| (i == symbols.size() && !multipleSelectionOk))
{
cout << "invalid entry, ";
continue; // keep prompting for selection
}
else if (i == symbols.size())
{
// include all symbols in the result
for (size_t n(0); n != i; ++n)
{
sym_to_addr(symbols[n], addresses, breakpoint);
}
}
else
{
sym_to_addr(symbols[i], addresses, breakpoint);
}
break;
}
return addresses;
}
namespace
{
class ZDK_LOCAL AddrCollector : public EnumCallback2<SymbolTable*, addr_t>
{
public:
explicit AddrCollector(vector<addr_t>& addrs) : addrs_(addrs)
{}
void notify(SymbolTable*, addr_t addr) { addrs_.push_back(addr); }
private:
vector<addr_t>& addrs_;
};
}
/**
* A helper routine for reading addresses from the user's
* command. Addresses can be specified as decimal or hex
* numbers, or by symbol (function) name. The thread's symbol
* table is used to look up the symbol names.
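*
* Accepted forms include, for example:
*   0x8048000   -- an explicit (hex) address
*   main        -- a function name (may prompt among overloads)
*   foo.cpp:42  -- file:line, resolved via Debugger::line_to_addr()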
*/
static vector<addr_t> strings_to_addrs(
const Thread& thread,
const vector<string>& argv,
const SourceListing* listing,
bool breakpoint = false,
bool multipleOk = true)
{
assert(argv.size() > 1);
vector<addr_t> result; // return vector of addresses
bool matchAll = false;
vector<string>::const_iterator i = argv.begin();
for (++i; i != argv.end(); ++i)
{
if (*i == "/all") // hack for break /all
{
matchAll = true;
continue;
}
// check for file:line
const size_t ncol = (*i).find(':');
if ((ncol != string::npos) && ((*i)[ncol + 1] != ':'))
{
const size_t line = strtoul(i->c_str() + ncol + 1, 0, 0);
RefPtr<SharedString> file(shared_string(i->c_str(), ncol));
if (!file->length() && listing)
{
file = shared_string(listing->name());
}
AddrCollector collector(result);
Debugger* debugger = CHKPTR(thread.debugger());
if (!debugger->line_to_addr(file.get(), line, &collector, &thread))
{
cout << "Line not found: " << file.get();
cout << ':' << line << endl;
}
continue;
}
// try reading the address as a number
istringstream is(*i);
is.unsetf(ios::basefield);
addr_t addr = 0;
is >> addr;
if (addr)
{
result.push_back(addr);
}
else
{
// try it as a symbol name; enum symbols taking unmapped tables
// into account (i.e. looking up shared objects that might be
// loaded later)
SymbolEnum syms;
thread.symbols()->enum_symbols(
i->c_str(), &syms,
(SymbolTable::LKUP_DYNAMIC | SymbolTable::LKUP_UNMAPPED));
if (syms.empty())
{
cout << "Function not found: " << *i << endl;
}
else if (matchAll)
{
SymbolEnum::const_iterator i = syms.begin();
for (; i != syms.end(); ++i)
{
result.push_back((*i)->addr());
}
}
else
{
vector<addr_t> tmp =
prompt_to_select_overload(syms, breakpoint, multipleOk);
result.insert(result.end(), tmp.begin(), tmp.end());
}
}
}
return result;
}
/**
* add module -- useful for shared objects that are explicitly loaded with
* the dlopen() system call. Because the linkage is not implicit, such modules
* (or shared objects) are not listed in the DT_NEEDED section of the executable.
*/
bool DebuggerShell::cmd_addmod(Thread* thread, const vector<string>& argv)
{
check_thread(thread);
assert(argv.size() >= 1);
vector<string>::const_iterator i = argv.begin();
for (++i; i != argv.end(); ++i)
{
CHKPTR(thread->symbols())->add_module((*i).c_str());
}
return false;
}
/**
* Attach the debugger to a running program
*/
bool DebuggerShell::cmd_attach(Thread*, const vector<string>& argv)
{
assert(argv.size() >= 1);
for (size_t i = 1; i != argv.size(); ++i)
{
pid_t pid = strtol(argv[i].c_str(), 0, 0);
if (pid == 0) continue;
if (pid == getpid())
{
cout << "The Buddha says: Do not attach to self." << endl;
}
else
{
attach(pid);
}
}
return false;
}
/**
* Insert breakpoint at given address(es). Address can be given
* explicitly (in hex) or as a function name.
*/
bool DebuggerShell::cmd_break(Thread* thread, const vector<string>& argv)
{
if (argv.size() < 2)
{
cout << argv[0] << ": arguments missing" << endl;
return false;
}
const bool matchAll = (argv[1] == "/all");
check_thread(thread);
vector<addr_t> addresses =
strings_to_addrs(*thread, argv, listing_.get(), true, matchAll);
vector<addr_t>::const_iterator i = addresses.begin();
for (; i != addresses.end(); ++i)
{
if (set_user_breakpoint(get_runnable(thread), *i))
{
assert(thread->symbols());
RefPtr<Symbol> sym(thread->symbols()->lookup_symbol(*i));
if (sym.get())
{
StateSaver<ios, ios::fmtflags> save(cout);
cout << "Breakpoint set at: 0x" << hex << sym->addr();
cout << ' ' << sym->demangled_name()->c_str() << endl;
}
}
else
{
cout << "There already is a breakpoint at: 0x";
cout << hex << *i << dec << endl;
}
}
return false;
}
/**
* Remove a user breakpoint
*/
bool DebuggerShell::cmd_clear(Thread* thread, const vector<string>& argv)
{
check_thread(thread);
for (size_t i = 1; i < argv.size(); ++i)
{
const addr_t addr = strtoul(argv[i].c_str(), 0, 0);
// NOTE at this time, this shell assumes that USER breakpoints are global;
// per-thread USER breakpoints will be supported in the future
if (remove_user_breakpoint(0, 0, addr))
{
cout << "breakpoint cleared: ";
}
else
{
cout << "No user breakpoint at: ";
}
cout << hex << addr << dec << endl;
}
return false;
}
bool DebuggerShell::cmd_open(Thread*, const vector<string>& argv)
{
if (argv[0] == "close")
{
outputRedirect_.reset();
}
else if (argv.size() != 2)
{
cout << "Incorrect number of arguments, 1 expected\n";
}
else
{
for (int flags = O_RDWR | O_CREAT | O_EXCL;;)
{
bool fileExists = false;
try
{
int fd = sys::open(argv[1].c_str(), flags);
outputRedirect_.reset(new Redirect(1, fd, true));
break;
}
catch (const SystemError& e)
{
if (e.error() == EEXIST)
{
fileExists = true;
}
else
{
cout << e.what() << endl;
break;
}
}
if (fileExists)
{
string resp = read_line("File exists. Overwrite? [y/N]", false);
if (resp == "y" || resp == "Y")
{
flags = O_RDWR | O_TRUNC;
}
else
{
break;
}
}
}
}
return false; // stay in the command loop
}
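// Usage sketch: "open out.txt" redirects console output (stdout) to out.txt,
// prompting before overwriting an existing file; "close" is routed to this
// same handler and removes the redirection.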
/**
* Resume debugged program. If a timer is specified, the debugged
* program will be interrupted after the given number of milliseconds
* elapses.
*/
bool DebuggerShell::cmd_continue(Thread* thread, const vector<string>& argv)
{
check_thread(thread);
for (size_t i = 1; i < argv.size(); ++i)
{
if (argv[i] == "timer" && ++i < argv.size())
{
// milliseconds
long msec = strtol(argv[i].c_str(), 0, 0);
struct itimerval itv;
memset(&itv, 0, sizeof itv);
itv.it_value.tv_sec = msec / 1000;
itv.it_value.tv_usec = (msec * 1000) % 1000000;
setitimer(ITIMER_REAL, &itv, NULL);
}
else
{
cout << "Unknown argument: " << argv[i] << endl;
return false;
}
}
return true;
}
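// For example, "continue timer 500" resumes the debuggee and arms a
// one-shot real-time timer: the resulting SIGALRM (see handle_signal_impl)
// interrupts the program again after roughly half a second.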
/**
* Detach debugger from debuggee. If the debugged program was
* started (exec-ed) by the debugger, it will be killed
*/
bool DebuggerShell::cmd_detach(Thread*, const vector<string>& argv)
{
if (!is_attached())
{
cout << "Not attached." << endl; // Buddha
}
else
{
bool confirmed = false;
for (size_t i = 1; i < argv.size(); ++i)
{
if (argv[i] == "/y")
{
confirmed = true;
}
}
if (!confirmed)
{
// prompt user for confirmation
string resp = read_line("Stop debugging? (y/n)", false);
confirmed = (resp == "y" || resp == "Y" || resp == "yes");
}
if (confirmed)
{
detach();
}
else
{
return false;
}
}
return true;
}
/**
* Notification from the disassembler plugin.
* @return true to keep disassembling, false to stop.
*/
bool DebuggerShell::notify(addr_t, const char* text, size_t)
{
if (disasmLineCount_++ >= Term::screen_height(STDOUT_FILENO))
{
string resp = read_line("Continue or [Q]uit? ", false);
if (resp == "q" || resp == "Q" || resp == "quit")
{
return false;
}
disasmLineCount_ = 0;
}
cout << text << endl;
return true;
}
bool DebuggerShell::tabstops(size_t* first, size_t* second) const
{
if (first)
{
*first = 2 * sizeof(addr_t) + 2;
}
if (second)
{
*second = 2 * sizeof(addr_t) + 32;
}
return true;
}
/**
* Disassemble debugged program starting at current address.
* Keep track of the address from one command to another so
* that the next disassemble command resumes from where we
* left off (provided that no event happened since the last disasm)
*/
bool DebuggerShell::cmd_disassemble(Thread* thread, const vector<string>& args)
{
check_thread(thread);
size_t howMany = 32;
bool interleaved = true;
vector<string>::const_iterator i(args.begin());
// if the command name was not "disassemble" it means that
// we got here as a failover from other command (probably a
// "list" command which did not have source code available);
// in this case, the command arguments do not apply
//
if (*i == "disassemble")
{
for (++i; i != args.end(); ++i)
{
if (*i == "at")
{
vector<string> argv(i, i + 2);
// strings_to_addrs skips first arg
vector<addr_t> alist =
strings_to_addrs(*thread, argv, NULL, false, true);
disasmAddr_ = alist.empty() ? 0 : alist.front();
++i;
if (i == args.end())
{
break;
}
}
else if (*i == "/nos") // no source
{
interleaved = false;
}
else
{
if (size_t n = strtoul(i->c_str(), 0, 0))
{
howMany = n;
}
}
}
}
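// Illustrative usage: "disassemble at main 64 /nos" decodes roughly 64
// bytes starting at main, with no interleaved source; a plain "disassemble"
// continues from where the previous listing left off.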
if (disasmAddr_ == 0)
{
disasmAddr_ = frame_program_count(*thread);
}
const SymbolMap* symbols = CHKPTR(thread->symbols());
RefPtr<Symbol> sym(symbols->lookup_symbol(disasmAddr_));
if (sym)
{
ZObjectScope scope;
if (const SymbolTable* table = sym->table(&scope))
{
dbgout(0) << table->filename()
<< " base=0x"
<< hex << table->adjustment()
<< " addr=" << disasmAddr_
<< endl;
// round up to machine words
const size_t nwords =
(howMany + sizeof(word_t) - 1) / sizeof(word_t);
vector<uint8_t> bytes(nwords * sizeof(word_t));
try
{
word_t* buf = (word_t*) &bytes[0];
thread->read_code(sym->addr(), buf, nwords);
}
catch (const exception& e)
{
cerr << __func__ << ": " << e.what() << endl;
}
disasmLineCount_ = 0;
size_t n = disassemble( thread,
sym.get(),
howMany,
interleaved,
&bytes[0],
this);
disasmAddr_ += n;
}
}
return false;
}
/**
* Enable or disable existing breakpoints
*/
bool DebuggerShell::cmd_enable(Thread*, const vector<string>& argv)
{
assert(!argv.empty());
vector<string>::const_iterator i = argv.begin();
const bool enable = (*i == "enable");
for (++i; i != argv.end(); ++i)
{
istringstream is(*i);
is.unsetf(ios::basefield); // accept "0x..." (as produced by auto-complete)
addr_t addr = 0;
is >> addr;
if (enable)
{
enable_user_breakpoint_actions(*this, addr);
}
else
{
disable_user_breakpoint_actions(*this, addr);
}
}
return false;
}
/**
* Execute (start) a program and attach to it. If currently
* attached to another program, detaches automatically
*/
bool DebuggerShell::cmd_exec(Thread*, const vector<string>& argv)
{
if (argv.size() > 1)
{
ExecArg args(argv.begin() + 1, argv.end());
const bool shellExpandArgs = (argv[0] == "run");
exec(args, shellExpandArgs, NULL);
}
return true;
}
void DebuggerShell::select_frame(Thread& thread, int n)
{
if (StackTrace* trace = thread.stack_trace())
{
if (n >= 0 && trace->size())
{
if (Frame* frame = trace->selection())
{
if (frame->index() == static_cast<size_t>(n))
{
return;
}
}
trace->select_frame(n);
// frame changed, reset address and listing
disasmAddr_ = 0;
listing_.reset();
}
if (trace->size())
{
if (Frame* frame = trace->selection())
{
cout << *frame << endl;
vector<string> argv;
argv.push_back("select_frame");
cmd_line(&thread, argv);
}
}
}
}
/**
* Select a given stack frame and make it current. Subsequent
* commands will be executed in the context of this frame
*/
bool DebuggerShell::cmd_frame(Thread* thread, const vector<string>& argv)
{
check_thread(thread);
if (argv.size() <= 1)
{
select_frame(*thread, -1);
}
else if (argv.size() > 2)
{
cout << argv[0] << ": too many arguments" << endl;
}
else if (argv[1] == "signal")
{
// locate the signal frame
if (StackTrace* stack = thread->stack_trace())
{
for (size_t i = 0; i != stack->size(); ++i)
{
if (stack->frame(i)->is_signal_handler())
{
select_frame(*thread, i);
return false;
}
}
}
cout << "could not locate signal frame" << endl;
}
else
{
long f = strtol(argv[1].c_str(), 0, 0);
select_frame(*thread, f);
}
return false; // do not resume execution
}
bool DebuggerShell::cmd_navigate_stack(Thread* thread, const vector<string>& argv)
{
check_thread(thread);
if (argv.size() > 1)
{
cout << argv[0] << ": too many arguments" << endl;
}
else if (StackTrace* trace = thread->stack_trace())
{
if (trace->size())
{
if (Frame* frame = trace->selection())
{
size_t index = frame->index();
if (argv[0] == "down")
{
if (index)
--index;
}
else if (argv[0] == "up")
{
++index;
}
select_frame(*thread, index);
}
}
}
return false;
}
/**
* Display help; if a command is given in the arguments, show
* the help string that corresponds to that command
*/
bool DebuggerShell::cmd_help(Thread*, const vector<string>& argv)
{
if (argv.size() == 1)
{
cout << "Supported commands:" << endl;
vector<string> cmds;
cmds.reserve(commands_.size());
CommandList::const_iterator i = commands_.begin();
for (; i != commands_.end(); ++i)
{
//cout << " " << (*i)->name() << endl;
cmds.push_back((*i)->name());
}
display_strings(cmds, stdout);
cout << "Try 'help <command>' for more detailed info" << endl;
}
else
{
assert(argv.size() > 1);
vector<string>::const_iterator i = argv.begin();
for (++i; i != argv.end(); ++i)
{
DebuggerCommand* cptr = lookup_command(*i);
if (cptr == 0)
{
cout << "No such command: " << *i << endl;
}
else
{
cout << cptr->name();
if (const char* help = cptr->help())
{
if (*help != ' ' && *help != ':')
{
cout << ": ";
}
cout << help << endl;
}
else
{
cout << " -- no help available";
}
cout << endl;
}
}
}
return false;
}
/* auto-completes signal names and handling-policy keywords */
static void auto_complete_sig(const char* text, vector<string>& matches)
{
assert(text);
const size_t len = strlen(text);
static const char* args[] =
{ "pass", "nopass", "ignore", "stop", "nostop" };
for (size_t i = 0; i != ELEM_COUNT(args); ++i)
{
if (strncmp(text, args[i], len) == 0)
{
matches.push_back(args[i]);
}
}
const char* const* name = sig_name_list();
for (; name && *name; ++name)
{
if (strncmp(text, *name, len) == 0)
{
matches.push_back(*name);
}
}
}
/**
* Change the policy for handling a given signal
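*
* e.g. "handle SIGINT nostop pass" lets SIGINT through to the debuggee
* without stopping in the debugger (keyword list: see auto_complete_sig)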
*/
bool DebuggerShell::cmd_handle(Thread*, const vector<string>& argv)
{
assert(argv.size() >= 1);
int passFlag = -1;
int stopFlag = -1;
unsigned sig = 0;
for (size_t i = 1; i != argv.size(); ++i)
{
const string& arg = argv[i];
if (arg == "stop")
{
stopFlag = 1;
}
else if (arg == "nostop")
{
stopFlag = 0;
}
else if (arg == "pass")
{
passFlag = 1;
}
else if (arg == "nopass" || arg == "ignore")
{
passFlag = 0;
}
else
{
sig = sig_from_name(arg);
}
}
if (sig == 0)
{
cout << "Signal not specified" << endl;
}
else if (sig >= NSIG)
{
cout << "Invalid argument: " << sig << endl;
}
else
{
if (passFlag != -1)
{
signal_policy(sig)->set_pass(passFlag);
}
if (stopFlag != -1)
{
signal_policy(sig)->set_stop(stopFlag);
}
cout << "STOP\tPASS\tDESCRIPTION" << endl;
cout << "----------------------------------------------";
cout << endl;
cout << *signal_policy(sig) << "\t" << sig_description(sig);
cout << endl;
}
return false;
}
/* load a core file */
bool DebuggerShell::cmd_loadcore(Thread*, const vector<string>& argv)
{
if (argv.size() > 1)
{
const char* prog = (argv.size() > 2) ? argv[2].c_str() : 0;
load_core(argv[1].c_str(), prog);
return true;
}
cout << "Core file not specified.\n";
return false;
}
static bool sym_is_pointer(DebugSymbol* sym, bool* isString = 0)
{
assert(sym);
PointerType* ptr = interface_cast<PointerType*>(sym->type());
if (ptr)
{
if (isString)
{
*isString = ptr->is_cstring();
}
return true;
}
return false;
}
void DebuggerShell::print_debug_symbol(
DebugSymbol* sym,
size_t index,
size_t depth,
int base,
bool printSymName)
{
if (!sym)
{
cout << "<null symbol>" << endl;
return;
}
DebugSymbolHelpers::Print print;
print.set_numeric_base(base);
CHKPTR(sym)->read(&print);
if ((sym->depth() > 1) && (index != 0))
{
cout << ", ";
}
if (printSymName)
{
cout << CHKPTR(sym)->name();
}
bool isLargeArray = false;
if (ArrayType* aType = interface_cast<ArrayType*>(sym->type()))
{
isLargeArray = aType->elem_count() > max_array_range();
cout << '[' << aType->elem_count() << ']';
}
if (sym->enum_children(0) && (!sym_is_pointer(sym) || sym->depth() < 2))
{
DebugSymbolHelpers::Print print;
print.set_numeric_base(base);
if (printSymName && CHKPTR(sym->name())->length())
{
cout << '=';
}
bool isString = false;
if (sym_is_pointer(sym, &isString))
{
RefPtr<SharedString> value = sym->value();
if (value)
{
if (isString)
{
cout << value->c_str();
}
else
{
cout << '[' << value->c_str() << "]=";
}
}
else
{
cout << "[<null>]";
}
}
if (!isString)
{
size_t index = 0;
swap(index, print.index_);
cout << '{'; ++print.depth_;
if (isLargeArray)
{
cout << "...";
}
else
{
sym->enum_children(&print);
}
cout << '}';
--print.depth_;
print.index_ = index;
}
}
else
{
if (sym->value())
{
if (printSymName && CHKPTR(sym->name())->length())
{
cout << '=';
}
RefPtr<SharedString> value = sym->value();
if (value)
{
cout << value->c_str();
}
else
{
cout << "<null>";
}
}
}
if (depth == 1)
{
cout << endl;
}
}
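// Rendering sketch (hypothetical values): an aggregate prints as
// "pt={x=1, y=2}", an array as "buf[256]={...}" when it exceeds
// max_array_range(), and a char* as the string it points to.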
/**
* Print symbols in current scope; symbol names can be specified as
* arguments to this command; if no symbol name is given, all symbols
* are printed
*/
bool DebuggerShell::cmd_print(Thread* thread, const vector<string>& argv)
{
DebugSymbolHelpers::Print print;
check_thread(thread);
assert(thread->stack_trace());
assert(thread->stack_trace()->selection());
static const LookupScope scope = LOOKUP_MODULE;
if (argv.size() == 1)
{
enum_variables(thread, "", 0, &print, scope, true);
}
else
{
static bool enumFuncs = false;
for (size_t i = 1; i != argv.size(); ++i)
{
if (argv[i] == "/x")
{
print.set_numeric_base(16);
if (i == 1 && argv.size() == 2)
{
enum_variables(thread, "", 0, &print, scope, enumFuncs);
}
}
else if (argv[i] == "/ret")
{
print_return_value(thread);
}
else if (!enum_variables(
thread, argv[i].c_str(), 0, &print, scope, enumFuncs))
{
cout << "Symbol '" << argv[i] << "' not found.\n";
}
}
}
return false;
}
namespace
{
/**
* helper for cmd_reg, see below
*/
class ZDK_LOCAL RegOut : public EnumCallback<Register*>
{
size_t i_;
const size_t n_;
public:
explicit RegOut(size_t n) : i_(0), n_(n) { }
void notify(Register* r)
{
if (i_++ == n_)
{
if (Variant* v = r->value())
{
variant_print(cout, *v, 16);
cout << endl;
}
}
}
};
}
bool DebuggerShell::cmd_reg(Thread* thread, const vector<string>& argv)
{
assert(argv[0] == "%r");
if (argv.size() != 2)
{
cout << "Incorrect number of arguments, 1 expected\n";
}
else
{
check_thread(thread);
const size_t n = strtoul(argv[1].c_str(), 0, 0);
RegOut regOut(n);
thread->enum_cpu_regs(&regOut);
}
return false;
}
namespace
{
/**
* Helper for DebuggerShell::cmd_eval, for async notifications
*/
class ZDK_LOCAL ShellExprEvents : public SubjectImpl<ExprEvents>
{
int base_;
bool whatis_; // print the type rather than the value?
addr_t addr_;
ShellExprEvents(int base, bool whatis)
: base_(base), whatis_(whatis), addr_(0)
{}
ShellExprEvents(const ShellExprEvents& other)
: base_(other.base_)
, whatis_(other.whatis_)
, addr_(other.addr_)
{ }
public:
static RefPtr<ExprEvents> create(int base, bool whatis)
{
return new ShellExprEvents(base, whatis);
}
////////////////////////////////////////////////////////
virtual ~ShellExprEvents() throw()
{ }
////////////////////////////////////////////////////////
bool on_done(Variant* v, bool*, DebugSymbolEvents*)
{
if (v)
{
StateSaver<ios, ios::fmtflags> save(cout);
if (base_ == 16)
{
cout << hex << showbase;
}
else if (base_ == 8)
{
cout << oct << showbase;
}
if (DebugSymbol* sym = v->debug_symbol())
{
if (whatis_)
{
cout << CHKPTR(sym->type_name()) << endl;
}
else
{
DebuggerShell& shell = DebuggerShell::instance();
shell.print_debug_symbol(sym, 0, 1, base_, false);
}
}
else
{
if (whatis_)
{
cout << variant_type(v->type_tag());
}
else
{
variant_print(cout, *v);
}
cout << endl;
}
}
return true;
}
////////////////////////////////////////////////////////
void on_error(const char* errMsg)
{
cout << errMsg << endl;
}
////////////////////////////////////////////////////////
void on_warning(const char* errMsg)
{
cout << errMsg << endl;
}
/**
* Handle events that occur while interpreting an
* expression
*/
bool on_event(Thread* thread, addr_t addr)
{
const int sig = thread->signal();
if (addr_ && sig && (addr != addr_))
{
ostringstream msg;
msg << sig_name(thread->signal());
msg << " occurred at " << hex << showbase << addr;
msg <<" while interpreting expression in lwpid=";
msg << dec << thread->lwpid();
on_error(msg.str().c_str());
if (sig == SIGTRAP)
{
return false;
}
else
{
thread->set_signal(0);
Runnable& task = interface_cast<Runnable&>(*thread);
task.set_program_count(addr_);
}
}
return true;
}
////////////////////////////////////////////////////////
void on_call(addr_t addr, Symbol* symbol)
{
if (symbol)
{
addr_ = addr; // entering function call at addr_
}
else
{
addr_ = 0; // function call returning
}
}
ExprEvents* clone() const
{
return new ShellExprEvents(*this);
}
};
}
/**
* Evaluate a C++ expression.
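*
* e.g. "eval /x count + 4" prints the result in hex; "whatis count"
* is routed here as well and prints the type rather than the value.
* Unrecognized commands also fall back to eval (see dispatch_command).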
*/
bool DebuggerShell::cmd_eval(Thread* thread, const vector<string>& argv)
{
check_thread(thread);
int base = 0; // numeric base
string expr;
assert(argv.size() >= 1);
for (size_t i = 1; i != argv.size(); ++i)
{
if (i == 1 && argv[i] == "/x")
{
base = 16;
continue;
}
else
{
expr += argv[i];
expr += ' ';
}
}
const bool whatis = (argv[0] == "whatis");
RefPtr<ExprEvents> events = ShellExprEvents::create(base, whatis);
return !evaluate(expr.c_str(), thread, 0, events.get(), base);
}
/**
* dump memory
*/
bool DebuggerShell::cmd_dump(Thread* thread, const vector<string>& argv)
{
check_thread(thread);
addr_t beginAddr = 0;
addr_t endAddr = 0;
vector<string>::const_iterator i = argv.begin();
for (++i; i != argv.end(); ++i)
{
if (beginAddr == 0)
{
beginAddr = strtoul((*i).c_str(), 0, 0);
}
else if (endAddr == 0)
{
endAddr = strtoul((*i).c_str(), 0, 0);
}
else
{
cout << "Address range already specified\n";
}
}
if (beginAddr && (endAddr == 0))
{
endAddr = beginAddr + 64;
}
cout << hex << beginAddr << "-" << endAddr << dec << endl;
if (beginAddr > endAddr)
{
string resp = read_line("Invalid range, ok to swap? (y/n)", false);
if (resp == "y" || resp == "Y" || resp == "yes")
{
std::swap(beginAddr, endAddr);
}
}
if (beginAddr <= endAddr)
{
dump(cout, *thread, beginAddr, endAddr);
}
return false;
}
void DebuggerShell::dump(
ostream& outs,
Thread& thread,
addr_t beginAddr,
addr_t endAddr)
{
assert(endAddr >= beginAddr);
const bool isStdout = (&outs == &cout);
StateSaver<ios, ios::fmtflags> saveState(outs);
if (endAddr > beginAddr)
{
const size_t nbytes = (endAddr - beginAddr);
size_t nwords = (nbytes + sizeof(word_t) - 1) / sizeof (word_t);
vector<word_t> buf(nwords);
size_t wordsRead = 0;
thread.read_data(beginAddr, &buf[0], nwords, &wordsRead);
const size_t cols = isStdout
? (Term::screen_width(STDOUT_FILENO)
- sizeof(addr_t) * 2 - 5) / 4
: 16;
for (size_t n = 0, rows = 0; n < nbytes; ++rows)
{
if (isStdout && (rows + 1 >= Term::screen_height(STDOUT_FILENO)))
{
string resp = read_line("Continue or [Q]uit? ", false);
if (resp == "q" || resp == "Q" || resp == "quit")
{
break;
}
rows = 0;
}
outs << hex << setw(sizeof(addr_t) * 2);
outs << setfill('0') << beginAddr + n << ": ";
{
Temporary<size_t> save(n);
for (size_t j = 0; (j != cols); ++j, ++n)
{
if (n < nbytes)
{
const int b =
((const unsigned char*)&buf[0])[n];
outs << setw(2) << setfill('0') << b << ' ';
}
else
{
outs << " ";
}
}
}
outs << " ";
for (size_t j = 0; (j != cols) && (n < nbytes); ++j, ++n)
{
const int b = ((const char*)&buf[0])[n];
outs << (isprint(b) ? (char)b : '.');
}
outs << endl;
}
}
}
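// Output sketch (illustrative): each row shows the address, the bytes in
// hex, then the same bytes rendered as printable ASCII, e.g.:
//   08049000: 48 65 6c 6c 6f 00   Hello.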
/**
* Find bytes in memory
*/
bool DebuggerShell::cmd_find(Thread* thread, const vector<string>& argv)
{
check_thread(thread);
string bytes;
addr_t addr = 0;
size_t len = 0;
vector<string>::const_iterator i = argv.begin();
for (++i; i != argv.end(); ++i)
{
if (addr == 0)
{
addr = strtoul((*i).c_str(), 0, 0);
}
else
{
if (!bytes.empty())
{
bytes += ' ';
}
bytes += *i;
}
}
dbgout(0) << "search pattern=\"" << bytes << "\"" << endl;
if (addr == 0)
{
cout << "No memory address specified.\n";
}
else if (bytes.empty())
{
cout << "No byte pattern specified.\n";
}
else if (thread_page_find(*thread, addr, bytes.c_str(), &len, &addr))
{
dump(cout, *thread, addr, addr + len);
}
else
{
cout << "Byte pattern not found.\n";
}
return false;
}
/**
* print current line info
*/
bool DebuggerShell::cmd_line(Thread* thread, const vector<string>& argv)
{
if (!listing_.get())
{
cmd_list(thread, argv);
}
else
{
if (argv[0] != "line")
{
cout << listing_->name() << ':';
}
cout << listing_->symbol_line() << endl;
}
return false;
}
/**
* List source file, or disassemble if source is not available
*/
bool DebuggerShell::cmd_list(Thread* thread, const vector<string>& args)
{
string filename;
size_t line = 0;
size_t symbolLine = 0;
size_t howManyLines = 0;
bool lineSpecified = false;
bool startAtFun = false;
// inspect command arguments
vector<string>::const_iterator i = args.begin();
for (++i; i != args.end(); ++i)
{
assert(!(*i).empty());
if (isdigit((*i)[0]))
{
const size_t n = strtoul((*i).c_str(), 0, 0);
if (line)
{
howManyLines = n;
}
else
{
line = n;
if (n)
{
lineSpecified = true;
}
else //if (listing_.get())
{
//listing_->set_symbol_line(0);
listing_.reset();
}
}
}
else if (*i == "<")
{
startAtFun = true;
}
else
{
filename = canonical_path(i->c_str());
}
}
if (filename.empty() && !startAtFun)
{
if (listing_.get())
{
filename = listing_->name();
if (line == 0)
{
line = listing_->symbol_line();
}
}
}
if (filename.empty())
{
check_thread(thread);
if (const Frame* frame = thread_current_frame(thread))
{
ZObjectScope scope;
// the current function in scope
RefPtr<Symbol> func = frame->function();
if (!func || !func->line() || !func->table(&scope))
{
listing_.reset();
// no source file available, fallback to disassembly
return cmd_disassemble(thread, args);
}
if (startAtFun)
{
addr_t addr = func->addr() - func->offset();
SymbolTable* table = CHKPTR(func->table(&scope));
if (RefPtr<Symbol> s = table->lookup_symbol(addr))
{
func = s;
lineSpecified = true;
}
}
filename = CHKPTR(func->file())->c_str();
symbolLine = func->line();
if (!lineSpecified)
{
line = symbolLine;
}
}
}
if ((listing_.get() == 0) || (listing_->name() != filename))
{
listing_.reset(new SourceListing(filename));
}
if (listing_->empty())
{
return cmd_disassemble(thread, args);
}
// start listing from specified line
if (lineSpecified || listing_->current_line() == 0)
{
listing_->set_current_line(line);
}
if (symbolLine)
{
listing_->set_symbol_line(symbolLine);
}
if (howManyLines == 0)
{
howManyLines = 20;
}
const size_t next = listing_->list(cout, howManyLines);
listing_->set_current_line(next);
return false; // do not resume execution
}
bool DebuggerShell::cmd_lookup(Thread* thread, const vector<string>& args)
{
check_thread(thread);
vector<string>::const_iterator i = args.begin();
bool verbose = true;
SymbolEnum symEnum;
for (++i; i != args.end(); ++i)
{
if (*i == "/c")
{
verbose = false;
}
else if ((*i)[0] == '/')
{
cout << "invalid option: " << *i << endl;
}
else
{
SymbolMap* symMap = CHKPTR(thread->symbols());
symMap->enum_symbols((*i).c_str(), &symEnum);
}
}
if (verbose)
{
SymbolEnum::const_iterator i = symEnum.begin();
for (; i != symEnum.end(); ++i)
{
cout << *i << endl;
}
}
else
{
cout << symEnum.size() << endl;
}
return false;
}
/**
* Execute a source line, not diving into function calls
*/
bool DebuggerShell::cmd_next(Thread* thread, const vector<string>& argv)
{
check_thread(thread);
const addr_t addr = frame_program_count(*thread);
if (argv.size() > 1)
{
cout << argv[0] << ": too many arguments" << endl;
}
return next(thread, addr, 0);
}
bool DebuggerShell::cmd_quit(Thread*, const vector<string>&)
{
try
{
quit();
}
catch (const exception& e)
{
cerr << __func__ << ": " << e.what() << endl;
_exit(1);
}
return true; // keep compiler happy
}
/**
* STEP debugged program until the current function returns
*/
bool DebuggerShell::cmd_return(Thread* thread, const vector<string>&)
{
check_thread(thread);
get_runnable(thread)->step_until_current_func_returns();
return true; // exit the prompt_user() loop
}
namespace
{
ostream& operator<<(ostream& outs, WatchType type)
{
switch(type)
{
case WATCH_WRITE: return outs << "write";
case WATCH_READ_WRITE: return outs << "read-write";
case WATCH_VALUE: assert(false);
}
return outs;
}
}
/**
* Manipulate watchpoints -- a watchpoint is similar to a breakpoint,
* except that it is activated when the program accesses a variable,
* rather than when it fetches an instruction.
*/
bool DebuggerShell::cmd_watch(Thread* thread, const vector<string>& argv)
{
check_thread(thread); // needs a valid thread
WatchType type = WATCH_WRITE;
string variable;
vector<string>::const_iterator i = argv.begin();
for (++i; i != argv.end(); ++i)
{
if (*i == "/rw")
{
type = WATCH_READ_WRITE;
}
else if (*i == "/r")
{
type = WATCH_WRITE;
}
else if ((*i)[0] == '/')
{
cout << "invalid option: " << *i << endl;
}
else
{
if (!variable.empty())
{
cout << "*** Warning: variable already specified, overriding\n";
}
variable = *i;
}
}
if (variable.empty())
{
cout << "Variable not specified. You are a bozo.\n";
}
else
{
DebugSymbolEnum debugSyms;
if (enum_variables( thread,
variable.c_str(),
0,
&debugSyms,
LOOKUP_MODULE,
false))
{
DebugSymbolEnum::const_iterator i = debugSyms.begin();
for (; i != debugSyms.end(); ++i)
{
dbgout(0) << (*i)->name() << "=" << (void*)(*i)->addr()
<< endl;
if (!set_watchpoint(get_runnable(thread), type, true, (*i)->addr()))
{
cout << "error setting watchpoint at: ";
cout << (*i)->name() << endl;
}
}
}
else
{
cout << "No such symbol: " << variable << endl;
}
}
return false;
}
/**
* Show stack trace
*/
bool DebuggerShell::cmd_where(Thread* thread, const vector<string>& argv)
{
check_thread(thread);
size_t maxDepth = UINT_MAX;
bool currentFunction = false;
if (argv.size() > 2)
{
cout << "Extra arguments ignored" << endl;
}
else if (argv.size() > 1)
{
if (argv[1] == "/func")
{
currentFunction = true;
maxDepth = 1;
}
else if (size_t n = strtoul(argv[1].c_str(), 0, 10))
{
maxDepth = n;
}
}
RefPtr<SymbolMap> syms = thread->symbols();
RefPtr<StackTrace> trace = thread->stack_trace(maxDepth);
// number of lines in the output, plus room for prompt
size_t height = 1;
const size_t w = Term::screen_width(STDOUT_FILENO);
// screen_width should never return 0, by contract
assert(w);
maxDepth = min(maxDepth, trace->size());
for (size_t i = 0; i != maxDepth; ++i)
{
Frame* frame = trace->frame(i);
assert(frame);
ostringstream buf;
if (currentFunction)
{
RefPtr<Symbol> func = frame->function();
buf << func->demangled_name();
}
else if (frame->is_signal_handler())
{
buf << " #" << i << " 0x" << hex;
buf << setw(2 * sizeof(addr_t)) << setfill('0');
buf << frame->program_count() << dec;
buf << " (SIGNAL HANDLER)";
}
else
{
RefPtr<Symbol> func = frame->function();
buf << " #" << i << ' ';
print_symbol(buf, frame->program_count(), func);
}
// compute the height (i.e. the number of lines on
// the screen needed to print this frame)
const size_t h = (buf.str().length() + w - 1) / w;
cout << buf.str() << endl;
height += h;
if (height >= Term::screen_height(STDOUT_FILENO))
{
string resp = read_line("Continue or [Q]uit? ", false);
if (resp == "q" || resp == "Q" || resp == "quit")
{
break;
}
height = 1; // new screen
}
}
cout << endl;
return false;
}
////////////////////////////////////////////////////////////////
//
// For automated tests only -- don't read too much into it ;)
//
bool DebuggerShell::cmd_yield(Thread* thread, const vector<string>& argv)
{
long seconds = 0;
for (size_t i = 0; i != argv.size(); ++i)
{
if (argv[i][0] == '-')
{
// option-style arguments are accepted but ignored
}
else
{
seconds = strtol(argv[i].c_str(), 0, 0);
}
}
alarm( seconds );
return true;
}
////////////////////////////////////////////////////////////////
static void auto_complete_show(const char* text, vector<string>& matches)
{
assert(text);
const size_t len = strlen(text);
static const char* args[] =
{
"breakpoints",
"files",
"modules",
"regs",
"signals",
"status",
"threads"
};
for (size_t i = 0; i != ELEM_COUNT(args); ++i)
{
if (strncmp(text, args[i], len) == 0)
{
matches.push_back(args[i]);
}
}
}
bool DebuggerShell::cmd_set_next(Thread* thread, const vector<string>& argv)
{
if (argv.size() < 2)
{
cout << argv[0] << ": arguments missing" << endl;
return false;
}
check_thread(thread);
if (Runnable* task = interface_cast<Runnable*>(thread))
{
vector<addr_t> addr =
strings_to_addrs(*thread, argv, listing_.get());
if (addr.size() > 1)
{
cout << argv[0] << ": too many arguments\n";
}
if (!addr.empty())
{
task->set_program_count(addr.front());
show_listing(thread);
}
}
else
{
cout << "the operation cannot be performed on core files\n";
}
return false;
}
/**
* Step one line
*/
bool DebuggerShell::cmd_step(Thread* thread, const vector<string>& argv)
{
check_thread(thread);
bool machineInstrLevel = (argv.front() == "instruction");
const addr_t addr = frame_program_count(*thread);
if (argv.size() > 1)
{
cout << argv[0] << ": too many arguments" << endl;
}
step(thread, addr, machineInstrLevel);
return true; // resume debuggee
}
/**
* Dump symbol tables
*/
bool DebuggerShell::cmd_symtab(Thread* thread, const vector<string>& args)
{
struct ZDK_LOCAL Dumper : public EnumCallback<SymbolTable*>
, public EnumCallback<Symbol*>
{
SymbolTable::LookupMode mode_;
// sort by demangled names by default
Dumper() : mode_(SymbolTable::LKUP_DEMANGLED) { }
void notify(SymbolTable* table)
{
StateSaver<ios, ios::fmtflags> saveState(cout);
for ( ; table; table = table->next())
{
if (table->size())
{
cout << "***** " << table->filename();
cout << " " << table->name() << " *****\n";
table->enum_symbols(NULL, this, mode_);
cout << endl;
}
}
}
void notify(Symbol* symbol)
{
if (symbol)
{
cout << hex << setw(2 * sizeof(addr_t)) << symbol->addr();
cout << ": " << symbol->demangled_name() << endl;
}
}
} dumper;
check_thread(thread);
vector<string>::const_iterator i = args.begin();
for (++i; i != args.end(); ++i)
{
if ((*i == "/a") || (*i == "/addr"))
{
dumper.mode_ = SymbolTable::LKUP_SYMBOL;
}
}
thread->symbols()->enum_symbol_tables(&dumper);
return false;
}
namespace
{
/**
* Function object for use with Thread::enum_user_regs.
* Prints the register to an output stream.
*/
class ZDK_LOCAL RegisterPrinter
: public EnumCallback<Register*>
, public EnumCallback3<const char*, reg_t, reg_t>
{
ostream& outs_;
public:
explicit RegisterPrinter(ostream& outs) : outs_(outs)
{ }
void notify(Register* reg)
{
if (reg)
{
StateSaver<ios, ios::fmtflags> save(outs_);
outs_ << reg->name() << "\t\t";
outs_ << hex << showbase;
RefPtr<Variant> v = reg->value();
if (v)
{
variant_print(outs_, *v) << endl;
}
if (reg->enum_fields(this))
{
outs_ << endl;
}
}
}
void notify(const char* name, reg_t value, reg_t)
{
outs_ << ' ' << name << '=' << value;
}
};
}
namespace // Helper Callbacks
{
class ZDK_LOCAL ModuleEnumerator : public EnumCallback<Module*>
{
bool unloaded_;
public:
explicit ModuleEnumerator(bool unloaded = false)
: unloaded_(unloaded) { }
void notify(Module* module)
{
if (module)
{
pid_t pid = 0;
if (SymbolTable* symtab = module->symbol_table_list())
{
// todo: should also check for symtab->adjustment()?
if (unloaded_ && (symtab->addr() || symtab->upper()))
{
return;
}
ZObjectScope scope;
if (Process* proc = symtab->process(&scope))
{
pid = proc->pid();
}
}
StateSaver<ios, ios::fmtflags> save(cout);
cout << "[" << setw(7) << setfill(' ') << pid << "] ";
cout << hex << setfill('0');
cout << setw(sizeof(addr_t) * 2);
cout << module->addr() << '-';
cout << setw(sizeof(addr_t) * 2);
cout << module->upper();
cout << ": " << module->name() << endl;
}
}
};
class ZDK_LOCAL FileEnumerator : public EnumCallback2<int, const char*>
{
void notify(int fd, const char* filename)
{
cout << setw(5) << setfill(' ');
cout << fd << ' ' << filename << endl;
}
};
class ZDK_LOCAL ThreadPrinter : public EnumCallback<Thread*>
{
ostream& out_;
Thread* current_;
size_t count_;
public:
ThreadPrinter(ostream& out, Thread* current)
: out_(out), current_(current), count_(0)
{ }
void notify(Thread* thread)
{
out_ << setw(5) << count_++ << ' ' << *thread;
if (thread->single_step_mode())
{
out_ << " single-stepping";
}
if (thread == current_)
{
out_ << " <";
}
out_ << endl;
}
};
} // namespace
bool DebuggerShell::cmd_show(Thread* thp, const vector<string>& argv)
{
if (argv.size() < 2)
{
cout << "Missing argument." << endl;
}
#ifdef DEBUG
else if (argv[1] == "heaps")
{
cout << "heap<16>: used: " << Fheap<16>::used_bytes();
cout << " avail: " << Fheap<16>::free_bytes() << endl;
}
#endif
else if (argv[1] == "signals")
{
unsigned int n = 32;
if ((argv.size() > 2) && (argv[2] == "/all"))
{
n = NSIG;
}
cout << "STOP\tPASS\tDESCRIPTION" << endl;
cout << "-----------------------------------------------\n";
for (unsigned i = 0; i != n; ++i)
{
cout << *signal_policy(i) << "\t" << sig_description(i)
<< endl;
}
}
else if (argv[1] == "threads")
{
if ((argv.size() > 2) && (argv[2] == "/count"))
{
cout << enum_threads(NULL) << endl;
}
else
{
ostringstream buf;
ThreadPrinter callback(buf, thp);
size_t count = enum_threads(&callback);
if (count)
{
cout << " PID S PPID GID ID\n";
cout << "--------------------------------------\n";
cout << buf.str();
}
}
}
else if (argv[1] == "breakpoints")
{
pid_t pid = 0;
if (argv.size() > 2)
{
pid = strtoul(argv[2].c_str(), 0, 0);
}
print_breakpoints(cout, pid);
}
// we need a running thread for breakpoints and regs
else if (thp == 0)
{
cout << "No current thread" << endl;
}
else if (argv[1] == "stat")
{
print_event_info(cout, *thp);
}
else if (argv[1] == "regs")
{
RegisterPrinter regprn(cout);
if (argv.size() > 2 && argv[2] == "/all")
{
thp->enum_cpu_regs(&regprn);
}
else if (argv.size() > 2 && argv[2] == "/debug")
{
interface_cast<ThreadImpl&>(*thp).dump_debug_regs(cout);
}
else
{
thp->enum_user_regs(&regprn);
}
cout << endl;
}
else if (argv[1] == "modules")
{
bool unloaded = false;
if (argv.size() > 2 && argv[2] == "/unloaded")
{
unloaded = true;
}
ModuleEnumerator modEnum(unloaded);
enum_modules(&modEnum);
}
else if (argv[1] == "files")
{
FileEnumerator fileEnum;
if (Runnable* task = interface_cast<Runnable*>(thp))
{
task->enum_open_files(&fileEnum);
}
}
else if (argv[1] == "status")
{
if (SharedString* descr = thread_get_event_description(*thp))
{
cout << descr->c_str() << endl;
}
else
{
cout << sig_description(thp->signal());
}
}
else
{
cout << "Unknown argument: " << argv[1] << endl;
vector<string> args;
args.push_back("help");
args.push_back("show");
cmd_help(thp, args);
}
return false;
}
bool DebuggerShell::cmd_switch_thread(Thread*, const vector<string>& argv)
{
if (argv.size() < 2)
{
cout << argv[0] << ": arguments missing" << endl;
}
else if (argv.size() > 2)
{
cout << argv[0] << ": too many arguments" << endl;
}
else
{
size_t n = strtoul(argv[1].c_str(), 0, 0);
// first, try it as a lwpid
if (Thread* thread = get_thread(n))
{
set_current_thread(thread);
}
else
{
// helper callback, get the n-th thread
class Callback : public EnumCallback<Thread*>
{
size_t n_, count_;
public:
RefPtr<Thread> thread_;
explicit Callback(size_t n) : n_(n), count_(0) { }
virtual ~Callback() { }
void notify(Thread* thread)
{
if (count_++ == n_)
{
thread_ = thread;
}
}
};
Callback callback(n);
const TargetManager& targets = DebuggerShell::instance();
Lock<Mutex> lock(targets.mutex());
const TargetManager::const_iterator end = targets.end(lock);
for (TargetManager::const_iterator t = targets.begin(lock); t != end; ++t)
{
(*t)->enum_threads(&callback);
}
if (!callback.thread_)
{
cout << argv[0] << ": no such thread: " << n << endl;
}
else
{
set_current_thread(callback.thread_.get());
}
}
}
return false; // do not resume execution
}
#ifdef DEBUG_OBJECT_LEAKS
bool DebuggerShell::cmd_count_objects(Thread*, const vector<string>&)
{
print_counted_objects(__func__);
return false;
}
#endif
/**
* Overrides PluginManager, looks for plugins that implement the
* DebuggerCommand interface (i.e. plugins that may extend the set
* of DebuggerShell commands).
*/
bool DebuggerShell::on_interface(
DynamicLibPtr lib,
uuidref_t iid,
Unknown2*& component)
{
try
{
if (!DebuggerEngine::on_interface(lib, iid, component))
{
return false;
}
if (DebuggerCommand* cmd = interface_cast<DebuggerCommand*>(component))
{
if (cmd->name())
{
commands_.push_back(cmd);
}
}
}
catch (const exception& e)
{
cerr << "DebuggerShell::on_interface: " << e.what() << endl;
return false;
}
return true;
}
void DebuggerShell::add_command(DebuggerCommand* cmd)
{
commands_.push_back(cmd);
DebuggerEngine::add_command(cmd);
}
/**
* Restart last process with the same command line args
* and environment variables
*/
bool DebuggerShell::cmd_restart(Thread* thread, const vector<string>& argv)
{
const char* cmd = NULL;
const char* const* env = NULL;
if (thread)
{
RefPtr<Process> proc = thread->process();
if (proc)
{
env = proc->environment();
if (SharedString* commandLine = proc->command_line())
{
cmd = commandLine->c_str();
}
}
if (!cmd)
{
cmd = thread->filename();
}
}
else if (const HistoryEntry* entry = get_most_recent_history_entry())
{
env = entry->environ();
if (entry->is_live())
{
cmd = entry->command_line();
}
if (!cmd)
{
cmd = entry->name();
}
}
else
{
cout << "could not get most recent target\n";
}
if (cmd)
{
exec(cmd, false, env);
}
return true;
}
// vim: tabstop=4:softtabstop=4:expandtab:shiftwidth=4
|
State Before: α : Type u_2
ι : Sort u_3
β : Type u_1
S : ι → Set α
f : (i : ι) → ↑(S i) → β
hf :
∀ (i j : ι) (x : α) (hxi : x ∈ S i) (hxj : x ∈ S j),
f i { val := x, property := hxi } = f j { val := x, property := hxj }
T : Set α
hT : T ⊆ iUnion S
hT' : T = iUnion S
t : Set β
⊢ iUnionLift S f hf T hT ⁻¹' t = inclusion hT ⁻¹' ⋃ (i : ι), inclusion (_ : S i ⊆ ⋃ (i : ι), S i) '' (f i ⁻¹' t) State After: case h
α : Type u_2
ι : Sort u_3
β : Type u_1
S : ι → Set α
f : (i : ι) → ↑(S i) → β
hf :
∀ (i j : ι) (x : α) (hxi : x ∈ S i) (hxj : x ∈ S j),
f i { val := x, property := hxi } = f j { val := x, property := hxj }
T : Set α
hT : T ⊆ iUnion S
hT' : T = iUnion S
t : Set β
x : ↑T
⊢ x ∈ iUnionLift S f hf T hT ⁻¹' t ↔ x ∈ inclusion hT ⁻¹' ⋃ (i : ι), inclusion (_ : S i ⊆ ⋃ (i : ι), S i) '' (f i ⁻¹' t) Tactic: ext x State Before: case h
α : Type u_2
ι : Sort u_3
β : Type u_1
S : ι → Set α
f : (i : ι) → ↑(S i) → β
hf :
∀ (i j : ι) (x : α) (hxi : x ∈ S i) (hxj : x ∈ S j),
f i { val := x, property := hxi } = f j { val := x, property := hxj }
T : Set α
hT : T ⊆ iUnion S
hT' : T = iUnion S
t : Set β
x : ↑T
⊢ x ∈ iUnionLift S f hf T hT ⁻¹' t ↔ x ∈ inclusion hT ⁻¹' ⋃ (i : ι), inclusion (_ : S i ⊆ ⋃ (i : ι), S i) '' (f i ⁻¹' t) State After: case h
α : Type u_2
ι : Sort u_3
β : Type u_1
S : ι → Set α
f : (i : ι) → ↑(S i) → β
hf :
∀ (i j : ι) (x : α) (hxi : x ∈ S i) (hxj : x ∈ S j),
f i { val := x, property := hxi } = f j { val := x, property := hxj }
T : Set α
hT : T ⊆ iUnion S
hT' : T = iUnion S
t : Set β
x : ↑T
⊢ iUnionLift S f hf T hT x ∈ t ↔ ∃ i x_1, f i x_1 ∈ t ∧ inclusion (_ : S i ⊆ ⋃ (i : ι), S i) x_1 = inclusion hT x Tactic: simp only [mem_preimage, mem_iUnion, mem_image] State Before: case h
α : Type u_2
ι : Sort u_3
β : Type u_1
S : ι → Set α
f : (i : ι) → ↑(S i) → β
hf :
∀ (i j : ι) (x : α) (hxi : x ∈ S i) (hxj : x ∈ S j),
f i { val := x, property := hxi } = f j { val := x, property := hxj }
T : Set α
hT : T ⊆ iUnion S
hT' : T = iUnion S
t : Set β
x : ↑T
⊢ iUnionLift S f hf T hT x ∈ t ↔ ∃ i x_1, f i x_1 ∈ t ∧ inclusion (_ : S i ⊆ ⋃ (i : ι), S i) x_1 = inclusion hT x State After: case h.mp
α : Type u_2
ι : Sort u_3
β : Type u_1
S : ι → Set α
f : (i : ι) → ↑(S i) → β
hf :
∀ (i j : ι) (x : α) (hxi : x ∈ S i) (hxj : x ∈ S j),
f i { val := x, property := hxi } = f j { val := x, property := hxj }
T : Set α
hT : T ⊆ iUnion S
hT' : T = iUnion S
t : Set β
x : ↑T
⊢ iUnionLift S f hf T hT x ∈ t → ∃ i x_1, f i x_1 ∈ t ∧ inclusion (_ : S i ⊆ ⋃ (i : ι), S i) x_1 = inclusion hT x
case h.mpr
α : Type u_2
ι : Sort u_3
β : Type u_1
S : ι → Set α
f : (i : ι) → ↑(S i) → β
hf :
∀ (i j : ι) (x : α) (hxi : x ∈ S i) (hxj : x ∈ S j),
f i { val := x, property := hxi } = f j { val := x, property := hxj }
T : Set α
hT : T ⊆ iUnion S
hT' : T = iUnion S
t : Set β
x : ↑T
⊢ (∃ i x_1, f i x_1 ∈ t ∧ inclusion (_ : S i ⊆ ⋃ (i : ι), S i) x_1 = inclusion hT x) → iUnionLift S f hf T hT x ∈ t Tactic: constructor State Before: case h.mp
α : Type u_2
ι : Sort u_3
β : Type u_1
S : ι → Set α
f : (i : ι) → ↑(S i) → β
hf :
∀ (i j : ι) (x : α) (hxi : x ∈ S i) (hxj : x ∈ S j),
f i { val := x, property := hxi } = f j { val := x, property := hxj }
T : Set α
hT : T ⊆ iUnion S
hT' : T = iUnion S
t : Set β
x : ↑T
⊢ iUnionLift S f hf T hT x ∈ t → ∃ i x_1, f i x_1 ∈ t ∧ inclusion (_ : S i ⊆ ⋃ (i : ι), S i) x_1 = inclusion hT x State After: case h.mp.intro
α : Type u_2
ι : Sort u_3
β : Type u_1
S : ι → Set α
f : (i : ι) → ↑(S i) → β
hf :
∀ (i j : ι) (x : α) (hxi : x ∈ S i) (hxj : x ∈ S j),
f i { val := x, property := hxi } = f j { val := x, property := hxj }
T : Set α
hT : T ⊆ iUnion S
hT' : T = iUnion S
t : Set β
x : ↑T
i : ι
hi : ↑x ∈ S i
⊢ iUnionLift S f hf T hT x ∈ t → ∃ i x_1, f i x_1 ∈ t ∧ inclusion (_ : S i ⊆ ⋃ (i : ι), S i) x_1 = inclusion hT x Tactic: rcases mem_iUnion.1 (hT x.prop) with ⟨i, hi⟩ State Before: case h.mp.intro
α : Type u_2
ι : Sort u_3
β : Type u_1
S : ι → Set α
f : (i : ι) → ↑(S i) → β
hf :
∀ (i j : ι) (x : α) (hxi : x ∈ S i) (hxj : x ∈ S j),
f i { val := x, property := hxi } = f j { val := x, property := hxj }
T : Set α
hT : T ⊆ iUnion S
hT' : T = iUnion S
t : Set β
x : ↑T
i : ι
hi : ↑x ∈ S i
⊢ iUnionLift S f hf T hT x ∈ t → ∃ i x_1, f i x_1 ∈ t ∧ inclusion (_ : S i ⊆ ⋃ (i : ι), S i) x_1 = inclusion hT x State After: case h.mp.intro
α : Type u_2
ι : Sort u_3
β : Type u_1
S : ι → Set α
f : (i : ι) → ↑(S i) → β
hf :
∀ (i j : ι) (x : α) (hxi : x ∈ S i) (hxj : x ∈ S j),
f i { val := x, property := hxi } = f j { val := x, property := hxj }
T : Set α
hT : T ⊆ iUnion S
hT' : T = iUnion S
t : Set β
x : ↑T
i : ι
hi : ↑x ∈ S i
h : iUnionLift S f hf T hT x ∈ t
⊢ f i { val := ↑x, property := hi } ∈ t Tactic: refine fun h => ⟨i, ⟨x, hi⟩, ?_, rfl⟩ State Before: case h.mp.intro
α : Type u_2
ι : Sort u_3
β : Type u_1
S : ι → Set α
f : (i : ι) → ↑(S i) → β
hf :
∀ (i j : ι) (x : α) (hxi : x ∈ S i) (hxj : x ∈ S j),
f i { val := x, property := hxi } = f j { val := x, property := hxj }
T : Set α
hT : T ⊆ iUnion S
hT' : T = iUnion S
t : Set β
x : ↑T
i : ι
hi : ↑x ∈ S i
h : iUnionLift S f hf T hT x ∈ t
⊢ f i { val := ↑x, property := hi } ∈ t State After: no goals Tactic: rwa [iUnionLift_of_mem x hi] at h State Before: case h.mpr
α : Type u_2
ι : Sort u_3
β : Type u_1
S : ι → Set α
f : (i : ι) → ↑(S i) → β
hf :
∀ (i j : ι) (x : α) (hxi : x ∈ S i) (hxj : x ∈ S j),
f i { val := x, property := hxi } = f j { val := x, property := hxj }
T : Set α
hT : T ⊆ iUnion S
hT' : T = iUnion S
t : Set β
x : ↑T
⊢ (∃ i x_1, f i x_1 ∈ t ∧ inclusion (_ : S i ⊆ ⋃ (i : ι), S i) x_1 = inclusion hT x) → iUnionLift S f hf T hT x ∈ t State After: case h.mpr.intro.intro.mk.intro
α : Type u_2
ι : Sort u_3
β : Type u_1
S : ι → Set α
f : (i : ι) → ↑(S i) → β
hf :
∀ (i j : ι) (x : α) (hxi : x ∈ S i) (hxj : x ∈ S j),
f i { val := x, property := hxi } = f j { val := x, property := hxj }
T : Set α
hT : T ⊆ iUnion S
hT' : T = iUnion S
t : Set β
x : ↑T
i : ι
y : α
hi : y ∈ S i
h : f i { val := y, property := hi } ∈ t
hxy : inclusion (_ : S i ⊆ ⋃ (i : ι), S i) { val := y, property := hi } = inclusion hT x
⊢ iUnionLift S f hf T hT x ∈ t Tactic: rintro ⟨i, ⟨y, hi⟩, h, hxy⟩ State Before: case h.mpr.intro.intro.mk.intro
α : Type u_2
ι : Sort u_3
β : Type u_1
S : ι → Set α
f : (i : ι) → ↑(S i) → β
hf :
∀ (i j : ι) (x : α) (hxi : x ∈ S i) (hxj : x ∈ S j),
f i { val := x, property := hxi } = f j { val := x, property := hxj }
T : Set α
hT : T ⊆ iUnion S
hT' : T = iUnion S
t : Set β
x : ↑T
i : ι
y : α
hi : y ∈ S i
h : f i { val := y, property := hi } ∈ t
hxy : inclusion (_ : S i ⊆ ⋃ (i : ι), S i) { val := y, property := hi } = inclusion hT x
⊢ iUnionLift S f hf T hT x ∈ t State After: case h.mpr.intro.intro.mk.intro
α : Type u_2
ι : Sort u_3
β : Type u_1
S : ι → Set α
f : (i : ι) → ↑(S i) → β
hf :
∀ (i j : ι) (x : α) (hxi : x ∈ S i) (hxj : x ∈ S j),
f i { val := x, property := hxi } = f j { val := x, property := hxj }
T : Set α
hT : T ⊆ iUnion S
hT' : T = iUnion S
t : Set β
x : ↑T
i : ι
hi : ↑x ∈ S i
h : f i { val := ↑x, property := hi } ∈ t
hxy : inclusion (_ : S i ⊆ ⋃ (i : ι), S i) { val := ↑x, property := hi } = inclusion hT x
⊢ iUnionLift S f hf T hT x ∈ t Tactic: obtain rfl : y = x := congr_arg Subtype.val hxy State Before: case h.mpr.intro.intro.mk.intro
α : Type u_2
ι : Sort u_3
β : Type u_1
S : ι → Set α
f : (i : ι) → ↑(S i) → β
hf :
∀ (i j : ι) (x : α) (hxi : x ∈ S i) (hxj : x ∈ S j),
f i { val := x, property := hxi } = f j { val := x, property := hxj }
T : Set α
hT : T ⊆ iUnion S
hT' : T = iUnion S
t : Set β
x : ↑T
i : ι
hi : ↑x ∈ S i
h : f i { val := ↑x, property := hi } ∈ t
hxy : inclusion (_ : S i ⊆ ⋃ (i : ι), S i) { val := ↑x, property := hi } = inclusion hT x
⊢ iUnionLift S f hf T hT x ∈ t State After: no goals Tactic: rwa [iUnionLift_of_mem x hi] |
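Read end to end, the tactic steps traced above assemble into a single proof script. The following is a hedged reconstruction, not part of the trace itself: the lemma name preimage_iUnionLift, the `open Set` context, the explicit `subset_iUnion S i` proof for the inferred inclusion, and the focusing bullets are all assumptions added for readability.
open Set
-- Reconstructed from the proof states above; name and signature are assumed.
theorem preimage_iUnionLift {α : Type*} {ι : Sort*} {β : Type*}
    (S : ι → Set α) (f : (i : ι) → S i → β)
    (hf : ∀ (i j : ι) (x : α) (hxi : x ∈ S i) (hxj : x ∈ S j),
      f i ⟨x, hxi⟩ = f j ⟨x, hxj⟩)
    (T : Set α) (hT : T ⊆ iUnion S) (hT' : T = iUnion S) (t : Set β) :
    iUnionLift S f hf T hT ⁻¹' t =
      inclusion hT ⁻¹' (⋃ i, inclusion (subset_iUnion S i) '' (f i ⁻¹' t)) := by
  ext x
  simp only [mem_preimage, mem_iUnion, mem_image]
  constructor
  · -- forward direction: pick some S i containing ↑x and rewrite the lift there
    rcases mem_iUnion.1 (hT x.prop) with ⟨i, hi⟩
    refine fun h => ⟨i, ⟨x, hi⟩, ?_, rfl⟩
    rwa [iUnionLift_of_mem x hi] at h
  · -- reverse direction: the witness must project to ↑x, then rewrite back
    rintro ⟨i, ⟨y, hi⟩, h, hxy⟩
    obtain rfl : y = x := congr_arg Subtype.val hxy
    rwa [iUnionLift_of_mem x hi]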
Monument 12 is a sculpture carved in the round, representing Ruler 2. It dates to AD 672.
|
(* Title: Multisets_Extras
Author: Chelsea Edmonds
*)
section \<open>Miscellaneous Helper Functions on Sets and Multisets\<close>
theory Multisets_Extras imports Main "HOL-Library.Multiset" "Card_Partitions.Set_Partition"
"Nested_Multisets_Ordinals.Multiset_More" "HOL-Library.Disjoint_Sets"
begin
subsection \<open>Set Theory Extras\<close>
text \<open>A number of extra helper lemmas for reasoning on (finite) sets, as required for
Design Theory proofs.\<close>
lemma card_Pow_filter_one:
assumes "finite A"
shows "card {x \<in> Pow A . card x = 1} = card (A)"
using assms
proof (induct rule: finite_induct)
case empty
then show ?case by auto
next
case (insert x F)
have "Pow (insert x F) = Pow F \<union> insert x ` Pow F"
by (simp add: Pow_insert)
then have split: "{y \<in> Pow (insert x F) . card y = 1} =
{y \<in> (Pow F) . card y = 1} \<union> {y \<in> (insert x ` Pow F) . card y = 1}"
by blast
have "\<And> y . y \<in> (insert x ` Pow F) \<Longrightarrow> finite y"
using finite_subset insert.hyps(1) by fastforce
then have single: "\<And> y . y \<in> (insert x ` Pow F) \<Longrightarrow> card y = 1 \<Longrightarrow> y = {x}"
by (metis card_1_singletonE empty_iff image_iff insertCI insertE)
then have "card {y \<in> (insert x ` Pow F) . card y = 1} = 1"
using empty_iff imageI is_singletonI is_singletonI' is_singleton_altdef (* LONG *)
by (metis (full_types, lifting) Collect_empty_eq_bot Pow_bottom bot_empty_eq mem_Collect_eq)
then have " {y \<in> (insert x ` Pow F) . card y = 1} = {{x}}"
using single card_1_singletonE card_eq_0_iff
by (smt empty_Collect_eq mem_Collect_eq singletonD zero_neq_one)
then have split2:"{y \<in> Pow (insert x F) . card y = 1} = {y \<in> (Pow F) . card y = 1} \<union> {{x}}"
using split by simp
then show ?case
proof (cases "x \<in> F")
case True
then show ?thesis using insert.hyps(2) by auto
next
case False
then have "{y \<in> (Pow F) . card y = 1} \<inter> {{x}} = {}" by blast
then have fact:"card {y \<in> Pow (insert x F) . card y = 1} =
card {y \<in> (Pow F) . card y = 1} + card {{x}}"
using split2 card_Un_disjoint insert.hyps(1) by auto
have "card (insert x F) = card F + 1"
using False card_insert_disjoint by (metis Suc_eq_plus1 insert.hyps(1))
then show ?thesis using fact insert.hyps(3) by auto
qed
qed
lemma elem_exists_non_empty_set:
assumes "card A > 0"
obtains x where "x \<in> A"
using assms card_gt_0_iff by fastforce
lemma set_self_img_compr: "{a | a . a \<in> A} = A"
by blast
lemma card_subset_not_gt_card: "finite A \<Longrightarrow> card ps > card A \<Longrightarrow> \<not> (ps \<subseteq> A)"
using card_mono leD by auto
lemma card_inter_lt_single: "finite A \<Longrightarrow> finite B \<Longrightarrow> card (A \<inter> B) \<le> card A"
by (simp add: card_mono)
lemma set_diff_non_empty_not_subset:
assumes "A \<subseteq> (B - C)"
assumes "C \<noteq> {}"
assumes "A \<noteq> {}"
assumes "B \<noteq> {}"
shows " \<not> (A \<subseteq> C)"
proof (rule ccontr)
assume " \<not> \<not> (A \<subseteq> C)"
then have a: "\<And> x . x \<in> A \<Longrightarrow> x \<in> C" by blast
thus False using a assms by blast
qed
lemma set_card_diff_ge_zero: "finite A \<Longrightarrow> finite B \<Longrightarrow> A \<noteq> B \<Longrightarrow> card A = card B \<Longrightarrow>
card (A - B) > 0"
by (meson Diff_eq_empty_iff card_0_eq card_subset_eq finite_Diff neq0_conv)
lemma set_filter_diff: "{a \<in> A . P a } - {x} = {a \<in> (A - {x}) . (P a )}"
by (auto)
lemma set_filter_diff_card: "card ({a \<in> A . P a } - {x}) = card {a \<in> (A - {x}) . (P a )}"
by (simp add: set_filter_diff)
lemma obtain_subset_with_card_int_n:
assumes "(n ::int) \<le> of_nat (card S)"
assumes "(n ::int) \<ge> 0"
obtains T where "T \<subseteq> S" "of_nat (card T) = (n ::int)" "finite T"
using obtain_subset_with_card_n assms
by (metis nonneg_int_cases of_nat_le_iff)
lemma transform_filter_img_empty_rm:
assumes "\<And> g . g \<in> G \<Longrightarrow> g \<noteq> {}"
shows "{g - {x} | g. g \<in> G \<and> g \<noteq> {x}} = {g - {x} | g. g \<in> G } - {{}}"
proof -
let ?f = "\<lambda> g . g - {x}"
have "\<And> g . g \<in> G \<Longrightarrow> g \<noteq> {x} \<longleftrightarrow> ?f g \<noteq> {}" using assms
by (metis Diff_cancel Diff_empty Diff_insert0 insert_Diff)
thus ?thesis by auto
qed
lemma bij_betw_inter_subsets: "bij_betw f A B \<Longrightarrow> a1 \<subseteq> A \<Longrightarrow> a2 \<subseteq> A
\<Longrightarrow> f ` (a1 \<inter> a2) = (f ` a1) \<inter> (f ` a2)"
by (meson bij_betw_imp_inj_on inj_on_image_Int)
text\<open>Partition related set theory lemmas\<close>
lemma partition_on_remove_pt:
assumes "partition_on A G"
shows "partition_on (A - {x}) {g - {x} | g. g \<in> G \<and> g \<noteq> {x}}"
proof (intro partition_onI)
show "\<And>p. p \<in> {g - {x} |g. g \<in> G \<and> g \<noteq> {x}} \<Longrightarrow> p \<noteq> {}"
using assms partition_onD3 subset_singletonD by force
let ?f = "(\<lambda> g . g - {x})"
have un_img: "\<Union>({?f g | g. g \<in> G }) = ?f (\<Union> G)" by blast
have empty: "\<Union> {g - {x} |g. g \<in> G \<and> g \<noteq> {x}} = \<Union>({g - {x} | g. g \<in> G } - {{}})"
by blast
then have "\<Union>({g - {x} | g. g \<in> G } - {{}}) = \<Union>({g - {x} | g. g \<in> G })" by blast
then show " \<Union> {g - {x} |g. g \<in> G \<and> g \<noteq> {x}} = A - {x}" using partition_onD1 assms un_img
by (metis empty)
then show "\<And>p p'.
p \<in> {g - {x} |g. g \<in> G \<and> g \<noteq> {x}} \<Longrightarrow>
p' \<in> {g - {x} |g. g \<in> G \<and> g \<noteq> {x}} \<Longrightarrow> p \<noteq> p' \<Longrightarrow> p \<inter> p' = {}"
proof -
fix p1 p2
assume p1: "p1 \<in> {g - {x} |g. g \<in> G \<and> g \<noteq> {x}}"
and p2: "p2 \<in> {g - {x} |g. g \<in> G \<and> g \<noteq> {x}}"
and ne: "p1 \<noteq> p2"
obtain p1' p2' where orig1: "p1 = p1' - {x}" and orig2: "p2 = p2' - {x}"
and origne: "p1' \<noteq> p2'" and ne1: "p1' \<noteq> {x}" and ne2:"p2' \<noteq> {x}" and ing1: "p1' \<in> G"
and ing2: "p2' \<in> G"
using p1 p2 using mem_Collect_eq ne by blast
then have "p1' \<inter> p2' = {}" using assms partition_onD2 ing1 ing2 origne disjointD by blast
thus "p1 \<inter> p2 = {}" using orig1 orig2 by blast
qed
qed
lemma partition_on_cart_prod:
assumes "card I > 0"
assumes "A \<noteq> {}"
assumes "G \<noteq> {}"
assumes "partition_on A G"
shows "partition_on (A \<times> I) {g \<times> I |g. g \<in> G}"
proof (intro partition_onI)
show "\<And>p. p \<in> {g \<times> I |g. g \<in> G} \<Longrightarrow> p \<noteq> {}"
using assms(1) assms(4) partition_onD3 by fastforce
show "\<Union> {g \<times> I |g. g \<in> G} = A \<times> I"
by (metis Setcompr_eq_image Sigma_Union assms(4) partition_onD1)
show "\<And>p p'. p \<in> {g \<times> I |g. g \<in> G} \<Longrightarrow> p' \<in> {g \<times> I |g. g \<in> G} \<Longrightarrow> p \<noteq> p' \<Longrightarrow> p \<inter> p' = {}"
by (smt (verit, best) Sigma_Int_distrib1 Sigma_empty1 assms(4) mem_Collect_eq partition_onE)
qed
subsection \<open>Multiset Helpers\<close>
text \<open> Generic Size, count and card helpers \<close>
lemma count_size_set_repr: "size {# x \<in># A . x = g#} = count A g"
by (simp add: filter_eq_replicate_mset)
lemma mset_nempty_set_nempty: "A \<noteq> {#} \<longleftrightarrow> (set_mset A) \<noteq> {}"
by simp
lemma mset_size_ne0_set_card: "size A > 0 \<Longrightarrow> card (set_mset A) > 0"
using mset_nempty_set_nempty by fastforce
lemma set_count_size_min: "count A a \<ge> n \<Longrightarrow> size A \<ge> n"
by (metis (full_types) count_le_replicate_mset_subset_eq size_mset_mono size_replicate_mset)
lemma card_size_filter_eq: "finite A \<Longrightarrow> card {a \<in> A . P a} = size {#a \<in># mset_set A . P a#}"
by simp
lemma size_multiset_set_mset_const_count:
assumes "card (set_mset A) = ca"
assumes "\<And>p. p \<in># A \<Longrightarrow> count A p = ca2"
shows "size A = (ca * ca2)"
proof -
have "size A = (\<Sum> p \<in> (set_mset A) . count A p)" using size_multiset_overloaded_eq by auto
then have "size A = (\<Sum> p \<in> (set_mset A) . ca2)" using assms by simp
thus ?thesis using assms(1) by auto
qed
lemma size_multiset_int_count:
assumes "of_nat (card (set_mset A)) = (ca :: int)"
assumes "\<And>p. p \<in># A \<Longrightarrow> of_nat (count A p) = (ca2 :: int)"
shows "of_nat (size A) = ((ca :: int) * ca2)"
proof -
have "size A = (\<Sum> p \<in> (set_mset A) . count A p)" using size_multiset_overloaded_eq by auto
then have "of_nat (size A) = (\<Sum> p \<in> (set_mset A) . ca2)" using assms by simp
thus ?thesis using assms(1) by auto
qed
lemma mset_union_size: "size (A \<union># B) = size (A) + size (B - A)"
by (simp add: sup_subset_mset_def)
lemma mset_union_size_inter: "size (A \<union># B) = size (A) + size B - size (A \<inter># B)"
by (metis diff_add_inverse2 size_Un_Int)
text \<open>Lemmas for repeat\_mset \<close>
lemma repeat_mset_size [simp]: "size (repeat_mset n A) = n * size A"
by (induction n) auto
lemma repeat_mset_subset_in:
assumes "\<And> a . a \<in># A \<Longrightarrow> a \<subseteq> B"
assumes "X \<in># repeat_mset n A"
assumes "x \<in> X"
shows " x \<in> B"
using assms by (induction n) auto
lemma repeat_mset_not_empty: "n > 0 \<Longrightarrow> A \<noteq> {#} \<Longrightarrow> repeat_mset n A \<noteq> {#}"
by (induction n) auto
lemma elem_in_repeat_in_original: "a \<in># repeat_mset n A \<Longrightarrow> a \<in># A"
by (metis count_inI count_repeat_mset in_countE mult.commute mult_zero_left nat.distinct(1))
lemma elem_in_original_in_repeat: "n > 0 \<Longrightarrow> a \<in># A \<Longrightarrow> a \<in># repeat_mset n A"
by (metis (full_types) Suc_pred repeat_mset.simps(2) union_iff)
text \<open>Lemmas on image and filter for multisets \<close>
lemma multiset_add_filter_size: "size {# a \<in># (A1 + A2) . P a #} = size {# a \<in># A1 . P a #} +
size {# a \<in># A2 . P a #}"
by simp
lemma size_filter_neg: "size {#a \<in># A . P a #} = size A - size {# a \<in># A . \<not> P a #}"
using size_filter_mset_lesseq size_union union_filter_mset_complement
by (metis ordered_cancel_comm_monoid_diff_class.le_imp_diff_is_add)
lemma filter_filter_mset_cond_simp:
assumes "\<And> a . P a \<Longrightarrow> Q a"
shows "filter_mset P A = filter_mset P (filter_mset Q A)"
proof -
have "filter_mset P (filter_mset Q A) = filter_mset (\<lambda> a. Q a \<and> P a) A"
by (simp add: filter_filter_mset)
thus ?thesis using assms
by (metis (mono_tags, lifting) filter_mset_cong)
qed
lemma filter_filter_mset_ss_member: "filter_mset (\<lambda> a . {x, y} \<subseteq> a) A =
filter_mset (\<lambda> a . {x, y} \<subseteq> a) (filter_mset (\<lambda> a . x \<in> a) A)"
proof -
have filter: "filter_mset (\<lambda> a . {x, y} \<subseteq> a) (filter_mset (\<lambda> a . x \<in> a) A) =
filter_mset (\<lambda> a . x \<in> a \<and> {x, y} \<subseteq> a) A" by (simp add: filter_filter_mset)
have "\<And> a. {x, y} \<subseteq> a \<Longrightarrow> x \<in> a" by simp
thus ?thesis using filter by auto
qed
lemma multiset_image_do_nothing: "(\<And> x .x \<in># A \<Longrightarrow> f x = x) \<Longrightarrow> image_mset f A = A"
by (induct A) auto
lemma set_mset_filter: "set_mset {# f a . a \<in># A #} = {f a | a. a \<in># A}"
by (simp add: Setcompr_eq_image)
lemma mset_exists_imply: "x \<in># {# f a . a \<in># A #} \<Longrightarrow> \<exists> y \<in># A . x = f y"
by auto
lemma filter_mset_image_mset:
"filter_mset P (image_mset f A) = image_mset f (filter_mset (\<lambda>x. P (f x)) A)"
by (induction A) auto
lemma mset_bunion_filter: "{# a \<in># A . P a \<or> Q a #} = {# a \<in># A . P a #} \<union># {# a \<in># A . Q a #}"
by (rule multiset_eqI) simp
lemma mset_inter_filter: "{# a \<in># A . P a \<and> Q a #} = {# a \<in># A . P a #} \<inter># {# a \<in># A . Q a #}"
by (rule multiset_eqI) simp
lemma image_image_mset: "image_mset (\<lambda> x . f x) (image_mset (\<lambda> y . g y) A) =
image_mset (\<lambda> x. f (g x)) A"
by simp
text \<open> Big Union over multiset helpers \<close>
lemma mset_big_union_obtain:
assumes "x \<in># \<Sum>\<^sub># A"
obtains a where "a \<in># A" and "x \<in># a"
using assms by blast
lemma size_big_union_sum: "size (\<Sum>\<^sub># (M :: 'a multiset multiset)) = (\<Sum>x \<in>#M . size x)"
by (induct M) auto
text \<open> Cartesian Product on Multisets \<close>
lemma size_cartesian_product_singleton [simp]: "size ({#a#} \<times># B) = size B"
by (simp add: Times_mset_single_left)
lemma size_cartesian_product_singleton_right [simp]: "size (A \<times># {#b#}) = size A"
by (simp add: Times_mset_single_right)
lemma size_cartesian_product_empty [simp]: "size (A \<times># {#}) = 0"
by simp
lemma size_add_elem_step_eq:
assumes "size (A \<times># B) = size A * size B"
shows "size (add_mset x A \<times># B) = size (add_mset x A) * size B"
proof -
have "(add_mset x A \<times># B) = A \<times># B + {#x#} \<times># B"
by (metis Sigma_mset_plus_distrib1 add_mset_add_single)
then have "size (add_mset x A \<times># B) = size (A \<times># B) + size B" by auto
also have "... = size A * size B + size B"
by (simp add: assms)
finally have "size (add_mset x A \<times># B) = (size A + 1) * size B"
by auto
thus ?thesis by simp
qed
lemma size_cartesian_product: "size (A \<times># B) = size A * size B"
by (induct A) (simp_all add: size_add_elem_step_eq)
lemma cart_prod_distinct_mset:
assumes assm1: "distinct_mset A"
assumes assm2: "distinct_mset B"
shows "distinct_mset (A \<times># B)"
unfolding distinct_mset_count_less_1
proof (rule allI)
fix x
have count_mult: "count (A \<times># B) x = count A (fst x) * count B (snd x)"
using count_Sigma_mset by (metis prod.exhaust_sel)
then have "count A (fst x) * count B (snd x) \<le> 1" using assm1 assm2
unfolding distinct_mset_count_less_1 using mult_le_one by blast
thus "count (A \<times># B) x \<le> 1" using count_mult by simp
qed
lemma cart_product_single_intersect: "x1 \<noteq> x2 \<Longrightarrow> ({#x1#} \<times># A) \<inter># ({#x2#} \<times># B) = {#}"
using multiset_inter_single by fastforce
lemma size_union_distinct_cart_prod: "x1 \<noteq> x2 \<Longrightarrow> size (({#x1#} \<times># A) \<union># ({#x2#} \<times># B)) =
size ({#x1#} \<times># A) + size ({#x2#} \<times># B)"
by (simp add: cart_product_single_intersect size_Un_disjoint)
lemma size_Union_distinct_cart_prod: "distinct_mset M \<Longrightarrow>
size (\<Sum>p\<in>#M. ({#p#} \<times># B)) = size (M) * size (B)"
by (induction M) auto
lemma size_Union_distinct_cart_prod_filter: "distinct_mset M \<Longrightarrow>
(\<And> p . p \<in># M \<Longrightarrow> size ({# b \<in># B . P p b #}) = c) \<Longrightarrow>
size (\<Sum>p\<in>#M. ({#p#} \<times># {# b \<in># B . P p b #})) = size (M) * c"
by (induction M) auto
lemma size_Union_distinct_cart_prod_filter2: "distinct_mset V \<Longrightarrow>
(\<And> b . b \<in># B \<Longrightarrow> size ({# v \<in># V . P v b #}) = c) \<Longrightarrow>
size (\<Sum>b\<in>#B. ( {# v \<in># V . P v b #} \<times># {#b#})) = size (B) * c"
by (induction B) auto
lemma cart_product_add_1: "(add_mset a A) \<times># B = ({#a#} \<times># B) + (A \<times># B)"
by (metis Sigma_mset_plus_distrib1 add_mset_add_single union_commute)
lemma cart_product_add_1_filter: "{#m \<in># ((add_mset a M) \<times># N) . P m #} =
{#m \<in># (M \<times># N) . P m #} + {#m \<in># ({#a#} \<times># N) . P m #}"
unfolding add_mset_add_single [of a M] Sigma_mset_plus_distrib1
by (simp add: Times_mset_single_left)
lemma cart_product_add_1_filter2: "{#m \<in># (M \<times># (add_mset b N)) . P m #} =
{#m \<in># (M \<times># N) . P m #} + {#m \<in># (M \<times># {#b#}) . P m #}"
unfolding add_mset_add_single [of b N] Sigma_mset_plus_distrib1
by (metis Times_insert_left Times_mset_single_right add_mset_add_single filter_union_mset)
lemma cart_prod_singleton_right_gen:
assumes "\<And> x . x \<in># (A \<times># {#b#}) \<Longrightarrow> P x \<longleftrightarrow> Q (fst x)"
shows "{#x \<in># (A \<times># {#b#}). P x#} = {# a \<in># A . Q a#} \<times># {#b#}"
using assms
proof (induction A)
case empty
then show ?case by simp
next
case (add x A)
have "add_mset x A \<times># {#b#} = add_mset (x, b) (A \<times># {#b#})"
by (simp add: Times_mset_single_right)
then have lhs: "filter_mset P (add_mset x A \<times># {#b#}) = filter_mset P (A \<times># {#b#}) +
filter_mset P {#(x, b)#}" by simp
have rhs: "filter_mset Q (add_mset x A) \<times># {#b#} = filter_mset Q A \<times># {#b#} +
filter_mset Q {#x#} \<times># {#b#}"
by (metis Sigma_mset_plus_distrib1 add_mset_add_single filter_union_mset)
have "filter_mset P {#(x, b)#} = filter_mset Q {#x#} \<times># {#b#}"
using add.prems by fastforce
then show ?case using lhs rhs add.IH add.prems by force
qed
lemma cart_prod_singleton_left_gen:
assumes "\<And> x . x \<in># ({#a#} \<times># B) \<Longrightarrow> P x \<longleftrightarrow> Q (snd x)"
shows "{#x \<in># ({#a#} \<times># B). P x#} = {#a#} \<times># {#b \<in># B . Q b#}"
using assms
proof (induction B)
case empty
then show ?case by simp
next
case (add x B)
have lhs: "filter_mset P ({#a#} \<times># add_mset x B) = filter_mset P ({#a#} \<times># B) +
filter_mset P {#(a, x)#}"
by (simp add: cart_product_add_1_filter2)
have rhs: "{#a#} \<times># filter_mset Q (add_mset x B) = {#a#} \<times># filter_mset Q B +
{#a#} \<times># filter_mset Q {#x#}"
using add_mset_add_single filter_union_mset by (metis Times_mset_single_left image_mset_union)
have "filter_mset P {#(a, x)#} = {#a#} \<times># filter_mset Q {#x#}"
using add.prems by fastforce
then show ?case using lhs rhs add.IH add.prems by force
qed
lemma cart_product_singleton_left: "{#m \<in># ({#a#} \<times># N) . fst m \<in> snd m #} =
({#a#} \<times># {# n \<in># N . a \<in> n #})" (is "?A = ?B")
proof -
have stmt: "\<And>m. m \<in># ({#a#} \<times># N) \<Longrightarrow> fst m \<in> snd m \<longleftrightarrow> a \<in> snd m"
by (simp add: mem_Times_mset_iff)
thus ?thesis by (metis (no_types, lifting) Sigma_mset_cong stmt cart_prod_singleton_left_gen)
qed
lemma cart_product_singleton_right: "{#m \<in># (N \<times># {#b#}) . fst m \<in> snd m #} =
({# n \<in># N . n \<in> b #} \<times># {# b #})" (is "?A = ?B")
proof -
have stmt: "\<And>m. m \<in># (N \<times># {#b#}) \<Longrightarrow> fst m \<in> snd m \<longleftrightarrow> fst m \<in>b"
by (simp add: mem_Times_mset_iff)
thus ?thesis by (metis (no_types, lifting) Sigma_mset_cong stmt cart_prod_singleton_right_gen)
qed
lemma cart_product_add_1_filter_eq: "{#m \<in># ((add_mset a M) \<times># N) . (fst m \<in> snd m) #} =
{#m \<in># (M \<times># N) . (fst m \<in> snd m) #} + ({#a#} \<times># {# n \<in># N . a \<in> n #})"
unfolding add_mset_add_single [of a M] Sigma_mset_plus_distrib1
using cart_product_singleton_left cart_product_add_1_filter by fastforce
lemma cart_product_add_1_filter_eq_mirror: "{#m \<in># M \<times># (add_mset b N) . (fst m \<in> snd m) #} =
{#m \<in># (M \<times># N) . (fst m \<in> snd m) #} + ({# n \<in># M . n \<in> b #} \<times># {#b#})"
unfolding add_mset_add_single [of b N] Sigma_mset_plus_distrib1 (* longish *)
by (metis (no_types) add_mset_add_single cart_product_add_1_filter2 cart_product_singleton_right)
lemma set_break_down_left:
shows "{# m \<in># (M \<times># N) . (fst m) \<in> (snd m) #} = (\<Sum>m\<in>#M. ({#m#} \<times># {#n \<in># N. m \<in> n#}))"
by (induction M) (auto simp add: cart_product_add_1_filter_eq)
lemma set_break_down_right:
shows "{# x \<in># M \<times># N . (fst x) \<in> (snd x) #} = (\<Sum>n\<in>#N. ({#m \<in># M. m \<in> n#} \<times># {#n#}))"
by (induction N) (auto simp add: cart_product_add_1_filter_eq_mirror)
text \<open>Reasoning on sums of elements over multisets \<close>
lemma sum_over_fun_eq:
assumes "\<And> x . x \<in># A \<Longrightarrow> f x = g x"
shows "(\<Sum>x \<in># A . f(x)) = (\<Sum> x \<in># A . g (x))"
using assms by auto
lemma sum_mset_add_diff:
fixes x:: 'a and f g :: "'a \<Rightarrow> nat"
assumes "\<And>x . x \<in># A \<Longrightarrow> f x \<ge> g x"
shows "(\<Sum> x \<in># A. f x - g x) = (\<Sum> x \<in># A . f x) - (\<Sum> x \<in># A . g x)"
using assms apply (induction A)
by (simp_all add: sum_mset_mono)
lemma sum_mset_add_diff_int:
fixes x:: 'a and f g :: "'a \<Rightarrow> int"
shows "(\<Sum> x \<in># A. f x - g x) = (\<Sum> x \<in># A . f x) - (\<Sum> x \<in># A . g x)"
by (induction A) (simp_all add: sum_mset_mono)
context ordered_semiring
begin
lemma sum_mset_ge0:"(\<And> x . f x \<ge> 0) \<Longrightarrow> (\<Sum> x \<in># A. f x ) \<ge> 0"
proof (induction A)
case empty
then show ?case by simp
next
case (add x A)
then have hyp2: "0 \<le> sum_mset (image_mset f A)" by blast
then have " sum_mset (image_mset f (add_mset x A)) = sum_mset (image_mset f A) + f x"
by (simp add: add_commute)
then show ?case
by (simp add: add.IH add.prems)
qed
lemma sum_order_add_mset: "(\<And> x . f x \<ge> 0) \<Longrightarrow> (\<Sum> x \<in># A. f x ) \<le> (\<Sum> x \<in># add_mset a A. f x )"
by (simp add: local.add_increasing)
lemma sum_mset_0_left: "(\<And> x . f x \<ge> 0) \<Longrightarrow> (\<Sum> x \<in># A. f x ) = 0 \<Longrightarrow> (\<forall> x \<in># A .f x = 0)"
apply (induction A)
apply auto
using local.add_nonneg_eq_0_iff sum_mset_ge0 apply blast
by (metis local.antisym local.sum_mset.insert sum_mset_ge0 sum_order_add_mset)
lemma sum_mset_0_iff_ge_0:
assumes "(\<And> x . f x \<ge> 0)"
shows "(\<Sum> x \<in># A. f x ) = 0 \<longleftrightarrow> (\<forall> x \<in> set_mset A .f x = 0)"
using sum_mset_0_left assms by auto
end
lemma mset_set_size_card_count: "(\<Sum>x \<in># A. x) = (\<Sum>x \<in> set_mset A . x * (count A x))"
proof (induction A)
case empty
then show ?case by simp
next
case (add y A)
have lhs: "(\<Sum>x\<in>#add_mset y A. x) = (\<Sum>x\<in># A. x) + y" by simp
have rhs: "(\<Sum>x\<in>set_mset (add_mset y A). x * count (add_mset y A) x) =
(\<Sum>x\<in>(insert y (set_mset A)) . x * count (add_mset y A) x)"
by simp
then show ?case
proof (cases "y \<in># A")
case True
have x_val: "\<And> x . x \<in> (insert y (set_mset A)) \<Longrightarrow> x \<noteq> y \<Longrightarrow>
x* count (add_mset y A) x = x * (count A x)"
by auto
have y_count: "count (add_mset y A) y = 1 + count A y"
using True count_inI by fastforce
then have "(\<Sum>x\<in>set_mset (add_mset y A). x * count (add_mset y A) x) =
(y * (count (add_mset y A) y)) + (\<Sum>x\<in>(set_mset A) - {y}. x * count A x)"
using x_val finite_set_mset sum.cong sum.insert rhs
by (smt DiffD1 Diff_insert_absorb insert_absorb mk_disjoint_insert sum.insert_remove)
then have s1: "(\<Sum>x\<in>set_mset (add_mset y A). x * count (add_mset y A) x) =
y + y * (count A y) + (\<Sum>x\<in>(set_mset A) - {y}. x * count A x)"
using y_count by simp
then have "(\<Sum>x\<in>set_mset (add_mset y A). x * count (add_mset y A) x) =
y + (\<Sum>x\<in>insert y ((set_mset A) - {y} ) . x * count A x)"
by (simp add: sum.insert_remove)
then have "(\<Sum>x\<in>set_mset (add_mset y A). x * count (add_mset y A) x) =
y + (\<Sum>x\<in>(set_mset A) . x * count A x)"
by (simp add: True insert_absorb)
then show ?thesis using lhs add.IH
by linarith
next
case False
have x_val: "\<And> x . x \<in> set_mset A \<Longrightarrow> x* count (add_mset y A) x = x * (count A x)"
using False by auto
have y_count: "count (add_mset y A) y = 1" using False count_inI by fastforce
have lhs: "(\<Sum>x\<in>#add_mset y A. x) = (\<Sum>x\<in># A. x) + y" by simp
have "(\<Sum>x\<in>set_mset (add_mset y A). x * count (add_mset y A) x) =
(y * count (add_mset y A) y) + (\<Sum>x\<in>set_mset A. x * count A x)"
using x_val rhs by (metis (no_types, lifting) False finite_set_mset sum.cong sum.insert)
then have "(\<Sum>x\<in>set_mset (add_mset y A). x * count (add_mset y A) x) =
y + (\<Sum>x\<in>set_mset A. x * count A x)"
using y_count by simp
then show ?thesis using lhs add.IH by linarith
qed
qed
subsection \<open> Partitions on Multisets \<close>
text \<open>A partition of a multiset A is a multiset of multisets P, where the sum over P equals A
and the empty multiset is not in the partition. This is based on the set partition definition.
We note that, unlike for set partitions, there is no requirement for the elements of the
multisets to be distinct, due to the definition of union on multisets
\cite{benderPartitionsMultisets1974}.\<close>
lemma mset_size_partition_dep: "size {# a \<in># A . P a \<or> Q a #} =
size {# a \<in># A . P a #} + size {# a \<in># A . Q a #} - size {# a \<in># A . P a \<and> Q a #}"
by (simp add: mset_bunion_filter mset_inter_filter mset_union_size_inter)
definition partition_on_mset :: "'a multiset \<Rightarrow> 'a multiset multiset \<Rightarrow> bool" where
"partition_on_mset A P \<longleftrightarrow> \<Sum>\<^sub>#P = A \<and> {#} \<notin># P"
lemma partition_on_msetI [intro]: "\<Sum>\<^sub>#P = A \<Longrightarrow> {#} \<notin># P \<Longrightarrow> partition_on_mset A P"
by (simp add: partition_on_mset_def)
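text \<open>A small sanity-check instance (added for illustration; it is not part of the
original development and assumes the multiset operations involved are executable, so
that eval can decide the goal):\<close>
lemma "partition_on_mset {#1::nat, 1, 2#} {#{#1::nat, 1#}, {#2#}#}"
by eval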
lemma partition_on_msetD1: "partition_on_mset A P \<Longrightarrow> \<Sum>\<^sub>#P = A"
by (simp add: partition_on_mset_def)
lemma partition_on_msetD2: "partition_on_mset A P \<Longrightarrow> {#} \<notin># P"
by (simp add: partition_on_mset_def)
lemma partition_on_mset_empty: "partition_on_mset {#} P \<longleftrightarrow> P = {#}"
unfolding partition_on_mset_def
using multiset_nonemptyE by fastforce
lemma partition_on_mset_all: "A \<noteq> {#} \<Longrightarrow> partition_on_mset A {#A #}"
by (simp add: partition_on_mset_def)
lemma partition_on_mset_singletons: "partition_on_mset A (image_mset (\<lambda> x . {#x#}) A)"
by (auto simp: partition_on_mset_def)
lemma partition_on_mset_not_empty: "A \<noteq> {#} \<Longrightarrow> partition_on_mset A P \<Longrightarrow> P \<noteq> {#}"
by (auto simp: partition_on_mset_def)
lemma partition_on_msetI2: "\<Sum>\<^sub>#P = A \<Longrightarrow> (\<And> p . p \<in># P \<Longrightarrow> p \<noteq> {#}) \<Longrightarrow> partition_on_mset A P"
by (auto simp: partition_on_mset_def)
lemma partition_on_mset_elems: "partition_on_mset A P \<Longrightarrow> p1 \<in># P \<Longrightarrow> x \<in># p1 \<Longrightarrow> x \<in># A"
by (auto simp: partition_on_mset_def)
lemma partition_on_mset_sum_size_eq: "partition_on_mset A P \<Longrightarrow> (\<Sum>x \<in># P. size x) = size A"
by (metis partition_on_msetD1 size_big_union_sum)
lemma partition_on_mset_card: assumes "partition_on_mset A P" shows " size P \<le> size A"
proof (rule ccontr)
assume "\<not> size P \<le> size A"
then have a: "size P > size A" by simp
have "\<And> x . x \<in># P \<Longrightarrow> size x > 0" using partition_on_msetD2
using assms nonempty_has_size by auto
then have " (\<Sum>x \<in># P. size x) \<ge> size P"
by (metis leI less_one not_less_zero size_eq_sum_mset sum_mset_mono)
thus False using a partition_on_mset_sum_size_eq
using assms by fastforce
qed
lemma partition_on_mset_count_eq: "partition_on_mset A P \<Longrightarrow> a \<in># A \<Longrightarrow>
(\<Sum>x \<in># P. count x a) = count A a"
by (metis count_sum_mset partition_on_msetD1)
lemma partition_on_mset_subsets: "partition_on_mset A P \<Longrightarrow> x \<in># P \<Longrightarrow> x \<subseteq># A"
by (auto simp add: partition_on_mset_def)
lemma partition_on_mset_distinct:
assumes "partition_on_mset A P"
assumes "distinct_mset A"
shows "distinct_mset P"
proof (rule ccontr)
assume "\<not> distinct_mset P"
then obtain p1 where count: "count P p1 \<ge> 2"
by (metis Suc_1 distinct_mset_count_less_1 less_Suc_eq_le not_less_eq)
then have cge: "\<And> x . x \<in># p1 \<Longrightarrow> (\<Sum>p \<in># P. count p x ) \<ge> 2"
by (smt count_greater_eq_one_iff count_sum_mset_if_1_0 dual_order.trans sum_mset_mono zero_le)
have elem_in: "\<And> x . x \<in># p1 \<Longrightarrow> x \<in># A" using partition_on_mset_elems
by (metis count assms(1) count_eq_zero_iff not_numeral_le_zero)
have "\<And> x . x \<in># A \<Longrightarrow> count A x = 1" using assms
by (simp add: distinct_mset_def)
thus False
using assms partition_on_mset_count_eq cge elem_in count_inI local.count multiset_nonemptyE
by (metis (mono_tags) not_numeral_le_zero numeral_One numeral_le_iff partition_on_mset_def semiring_norm(69))
qed
lemma partition_on_mset_distinct_disjoint:
assumes "partition_on_mset A P"
assumes "distinct_mset A"
assumes "p1 \<in># P"
assumes "p2 \<in># P - {#p1#}"
shows "p1 \<inter># p2 = {#}"
using Diff_eq_empty_iff_mset assms diff_add_zero distinct_mset_add multiset_inter_assoc sum_mset.remove
by (smt partition_on_msetD1 subset_mset.inf.absorb_iff2 subset_mset.le_add_same_cancel1 subset_mset.le_iff_inf)
lemma partition_on_mset_diff:
assumes "partition_on_mset A P"
assumes "Q \<subseteq>#P"
shows "partition_on_mset (A - \<Sum>\<^sub>#Q) (P - Q)"
using assms partition_on_mset_def
by (smt diff_union_cancelL subset_mset.add_diff_inverse sum_mset.union union_iff)
lemma sigma_over_set_partition_count:
assumes "finite A"
assumes "partition_on A P"
assumes "x \<in># \<Sum>\<^sub># (mset_set (mset_set ` P))"
shows "count (\<Sum>\<^sub># (mset_set (mset_set ` P))) x = 1"
proof -
have disj: "disjoint P" using assms partition_onD2 by auto
then obtain p where pin: "p \<in># mset_set (mset_set ` P)" and xin: "x \<in># p"
using assms by blast
then have "count (mset_set (mset_set ` P)) p = 1"
by (meson count_eq_zero_iff count_mset_set')
then have filter: "\<And> p' . p' \<in># ((mset_set (mset_set` P)) - {#p#}) \<Longrightarrow> p \<noteq> p'"
using count_eq_zero_iff count_single by fastforce
have zero: "\<And> p'. p' \<in># mset_set (mset_set ` P) \<Longrightarrow> p' \<noteq> p \<Longrightarrow> count p' x = 0"
proof (rule ccontr)
fix p'
assume assm: "p' \<in># mset_set (mset_set ` P)" and ne: "p' \<noteq> p" and n0: "count p' x \<noteq> 0"
then have xin2: "x \<in># p'" by auto
obtain p1 p2 where p1in: "p1 \<in> P" and p2in: "p2 \<in> P" and p1eq: "mset_set p1 = p"
and p2eq: "mset_set p2 = p'" using assm assms(1) assms(2) pin
by (metis (no_types, lifting) elem_mset_set finite_elements finite_imageI image_iff)
have origne: "p1 \<noteq> p2" using ne p1eq p2eq by auto
have "p1 = p2" using partition_onD4 xin xin2
by (metis assms(2) count_eq_zero_iff count_mset_set' p1eq p1in p2eq p2in)
then show False using origne by simp
qed
have one: "count p x = 1" using pin xin assms count_eq_zero_iff count_greater_eq_one_iff
by (metis count_mset_set(3) count_mset_set_le_one image_iff le_antisym)
then have "count (\<Sum>\<^sub># (mset_set (mset_set ` P))) x =
(\<Sum>p' \<in># (mset_set (mset_set ` P)) . count p' x)"
using count_sum_mset by auto
also have "... = (count p x) + (\<Sum>p' \<in># ((mset_set (mset_set ` P)) - {#p#}) . count p' x)"
by (metis (mono_tags, lifting) insert_DiffM pin sum_mset.insert)
also have "... = 1 + (\<Sum>p' \<in># ((mset_set (mset_set ` P)) - {#p#}) . count p' x)"
using one by presburger
finally have "count (\<Sum>\<^sub># (mset_set (mset_set ` P))) x =
1 + (\<Sum>p' \<in># ((mset_set (mset_set ` P)) - {#p#}) . 0)"
using zero filter by (metis (mono_tags, lifting) in_diffD sum_over_fun_eq)
then show "count (\<Sum>\<^sub># (mset_set (mset_set ` P))) x = 1" by simp
qed
lemma partition_on_mset_set:
assumes "finite A"
assumes "partition_on A P"
shows "partition_on_mset (mset_set A) (mset_set (image (\<lambda> x. mset_set x) P))"
proof (intro partition_on_msetI)
have partd1: "\<Union>P = A" using assms partition_onD1 by auto
have imp: "\<And>x. x \<in># \<Sum>\<^sub># (mset_set (mset_set ` P)) \<Longrightarrow> x \<in># mset_set A"
proof -
fix x
assume "x \<in># \<Sum>\<^sub># (mset_set (mset_set ` P))"
then obtain p where "p \<in> (mset_set ` P)" and xin: "x \<in># p"
by (metis elem_mset_set equals0D infinite_set_mset_mset_set mset_big_union_obtain)
then have "set_mset p \<in> P"
by (metis empty_iff finite_set_mset_mset_set image_iff infinite_set_mset_mset_set)
then show "x \<in># mset_set A"
using partd1 xin assms(1) by auto
qed
have imp2: "\<And>x . x \<in># mset_set A \<Longrightarrow> x \<in># \<Sum>\<^sub># (mset_set (mset_set ` P))"
proof -
fix x
assume "x \<in># mset_set A"
then have "x \<in> A" by (simp add: assms(1))
then obtain p where "p \<in> P" and "x \<in> p" using assms(2) using partd1 by blast
then obtain p' where "p' \<in> (mset_set ` P)" and "p' = mset_set p" by blast
thus "x \<in># \<Sum>\<^sub># (mset_set (mset_set ` P))" using assms \<open>p \<in> P\<close> \<open>x \<in> p\<close> finite_elements partd1
by (metis Sup_upper finite_imageI finite_set_mset_mset_set in_Union_mset_iff rev_finite_subset)
qed
have a1: "\<And> x . x \<in># mset_set A \<Longrightarrow> count (mset_set A) x = 1"
using assms(1) by fastforce
then show "\<Sum>\<^sub># (mset_set (mset_set ` P)) = mset_set A" using imp imp2 a1
by (metis assms(1) assms(2) count_eq_zero_iff multiset_eqI sigma_over_set_partition_count)
have "\<And> p. p \<in> P \<Longrightarrow> p \<noteq> {} " using assms partition_onD3 by auto
then have "\<And> p. p \<in> P \<Longrightarrow> mset_set p \<noteq> {#}" using mset_set_empty_iff
by (metis Union_upper assms(1) partd1 rev_finite_subset)
then show "{#} \<notin># mset_set (mset_set ` P)"
by (metis elem_mset_set equals0D image_iff infinite_set_mset_mset_set)
qed
lemma partition_on_mset_distinct_inter:
assumes "partition_on_mset A P"
assumes "distinct_mset A"
assumes "p1 \<in># P" and "p2 \<in># P" and "p1 \<noteq> p2"
shows "p1 \<inter># p2 = {#}"
by (metis assms in_remove1_mset_neq partition_on_mset_distinct_disjoint)
lemma partition_on_set_mset_distinct:
assumes "partition_on_mset A P"
assumes "distinct_mset A"
assumes "p \<in># image_mset set_mset P"
assumes "p' \<in># image_mset set_mset P"
assumes "p \<noteq> p'"
shows "p \<inter> p' = {}"
proof -
obtain p1 where p1in: "p1 \<in># P" and p1eq: "set_mset p1 = p" using assms(3)
by blast
obtain p2 where p2in: "p2 \<in># P" and p2eq: "set_mset p2 = p'" using assms(4) by blast
have "distinct_mset P" using assms partition_on_mset_distinct by blast
then have "p1 \<noteq> p2" using assms using p1eq p2eq by fastforce
then have "p1 \<inter># p2 = {#}" using partition_on_mset_distinct_inter
using assms(1) assms(2) p1in p2in by auto
thus ?thesis using p1eq p2eq
by (metis set_mset_empty set_mset_inter)
qed
lemma partition_on_set_mset:
assumes "partition_on_mset A P"
assumes "distinct_mset A"
shows "partition_on (set_mset A) (set_mset (image_mset set_mset P))"
proof (intro partition_onI)
show "\<And>p. p \<in># image_mset set_mset P \<Longrightarrow> p \<noteq> {}"
using assms(1) partition_on_msetD2 by fastforce
next
have "\<And> x . x \<in> set_mset A \<Longrightarrow> x \<in> \<Union> (set_mset (image_mset set_mset P))"
by (metis Union_iff assms(1) image_eqI mset_big_union_obtain partition_on_msetD1 set_image_mset)
then show "\<Union> (set_mset (image_mset set_mset P)) = set_mset A"
using set_eqI' partition_on_mset_elems assms by auto
show "\<And>p p'. p \<in># image_mset set_mset P \<Longrightarrow> p' \<in># image_mset set_mset P \<Longrightarrow>
p \<noteq> p' \<Longrightarrow> p \<inter> p' = {}"
using partition_on_set_mset_distinct assms by fastforce
qed
lemma partition_on_mset_eq_imp_eq_carrier:
assumes "partition_on_mset A P"
assumes "partition_on_mset B P"
shows "A = B"
using assms partition_on_msetD1 by auto
lemma partition_on_mset_add_single:
assumes "partition_on_mset A P"
shows "partition_on_mset (add_mset a A) (add_mset {#a#} P)"
using assms by (auto simp: partition_on_mset_def)
lemma partition_on_mset_add_part:
assumes "partition_on_mset A P"
assumes "X \<noteq> {#}"
assumes "A + X = A'"
shows "partition_on_mset A' (add_mset X P)"
using assms by (auto simp: partition_on_mset_def)
lemma partition_on_mset_add:
assumes "partition_on_mset A P"
assumes "X \<in># P"
assumes "add_mset a X = X'"
shows "partition_on_mset (add_mset a A) (add_mset X' (P - {#X#}))"
using add_mset_add_single assms empty_not_add_mset mset_subset_eq_single partition_on_mset_all
by (smt partition_on_mset_def subset_mset.add_diff_inverse sum_mset.add_mset sum_mset.remove union_iff union_mset_add_mset_left)
lemma partition_on_mset_elem_exists_part:
assumes "partition_on_mset A P"
assumes "x \<in># A"
obtains p where "p \<in># P" and "x \<in># p"
using assms in_Union_mset_iff partition_on_msetD2 partition_on_msetI
by (metis partition_on_mset_eq_imp_eq_carrier)
lemma partition_on_mset_combine:
assumes "partition_on_mset A P"
assumes "partition_on_mset B Q"
shows "partition_on_mset (A + B) (P + Q)"
unfolding partition_on_mset_def
using assms partition_on_msetD1 partition_on_msetD2 by auto
lemma partition_on_mset_split:
assumes "partition_on_mset A (P + Q)"
shows "partition_on_mset (\<Sum>\<^sub>#P) P"
using partition_on_mset_def partition_on_msetD2 assms by fastforce
end |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A "grab bag" of relatively small general-purpose utilities that don't have
a clear module/package to live in.
"""
def isiterable(obj):
"""Returns `True` if the given object is iterable."""
try:
iter(obj)
return True
except TypeError:
return False
def indent(s, shift=1, width=4):
"""Indent a block of text. The indentation is applied to each line."""
indented = '\n'.join(' ' * (width * shift) + l if l else ''
for l in s.splitlines())
if s and s[-1] == '\n':
indented += '\n'
return indented
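# Illustrative usage (added example, not part of the original module):
#   indent('alpha\nbeta')  ->  '    alpha\n    beta'
#   isiterable(42)         ->  False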
class NumpyRNGContext:
"""
A context manager (for use with the ``with`` statement) that will seed the
numpy random number generator (RNG) to a specific value, and then restore
the RNG state back to whatever it was before.
This is primarily intended for use in the astropy testing suite, but it
may be useful in ensuring reproducibility of Monte Carlo simulations in a
science context.
Parameters
----------
seed : int
The value to use to seed the numpy RNG
Examples
--------
A typical use case might be::
with NumpyRNGContext(<some seed value you pick>):
from numpy import random
randarr = random.randn(100)
... run your test using `randarr` ...
#Any code using numpy.random at this indent level will act just as it
#would have if it had been before the with statement - e.g. whatever
#the default seed is.
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
from numpy import random
self.startstate = random.get_state()
random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
from numpy import random
random.set_state(self.startstate)
|
function [epo_X, epo_Y] = feature_extraction_FFT(epo)
chan = {'PO3','POz','PO4','O1','Oz','O2'};
epo = proc_selectChannels(epo, chan);
epo = proc_selectIval(epo, [0 4000]); % select the 0-4000 ms interval
dataset = permute(epo.x, [3,1,2]);
[tr, dp, ch] = size(dataset); % tr: trial, dp: time, ch: channel
nominal = [];
for i=1:size(epo.y,2)
nominal(i) = find(epo.y(:,i),1)-1;
end
%% Fast Fourier Transform (FFT)
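% Note (added for clarity): with N samples at epo.fs Hz, FFT bin k corresponds
% to k/T = k*epo.fs/N Hz; dividing by N and doubling converts the raw FFT
% output into a single-sided amplitude spectrum (the scaling applied below).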
X_arr=[]; % make empty array
for k=1:tr % trials
x=squeeze(dataset(k, :,:)); % data
N=length(x); % get the number of points
kv=0:N-1; % create a vector from 0 to N-1
T=N/epo.fs; % get the frequency interval
freq=kv/T; % create the frequency range
X=fft(x)/N*2; % scale to a single-sided amplitude spectrum
cutOff = ceil(N/2); % index of the last positive-frequency bin
% take only the first half of the spectrum
X=abs(X(1:cutOff,:)); % magnitudes up to the cut-off
freq = freq(1:cutOff); % frequencies up to the cut-off
XX = permute(X,[3 1 2]);
X_arr=[X_arr; XX]; % save in array
end
%% frequency band
% f_gt = [11 7 5]; % 5.45, 8.75, 12
f_last = find( freq > 30, 1); % index of the first bin above 30 Hz
X_arr = X_arr(:,1:f_last,:); % keep only components up to ~30 Hz
%% get features
epo_X = permute(X_arr,[2,3,4,1]);
epo_Y = nominal;
|
////////////////////////////////////////////////////////////////
// Copyright 2021 Matt Borland. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt
#ifndef BOOST_MP_ENDIAN_HPP
#define BOOST_MP_ENDIAN_HPP
#include <boost/multiprecision/detail/standalone_config.hpp>
#ifndef BOOST_MP_STANDALONE
# include <boost/predef/other/endian.h>
# define BOOST_MP_ENDIAN_BIG_BYTE BOOST_ENDIAN_BIG_BYTE
# define BOOST_MP_ENDIAN_LITTLE_BYTE BOOST_ENDIAN_LITTLE_BYTE
#elif defined(_WIN32)
# define BOOST_MP_ENDIAN_BIG_BYTE 0
# define BOOST_MP_ENDIAN_LITTLE_BYTE 1
#elif defined(__BYTE_ORDER__)
# define BOOST_MP_ENDIAN_BIG_BYTE (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
# define BOOST_MP_ENDIAN_LITTLE_BYTE (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#else
# error Could not determine endian type. Please disable standalone mode, and file an issue at https://github.com/boostorg/multiprecision
#endif // Determine endianness
static_assert((BOOST_MP_ENDIAN_BIG_BYTE || BOOST_MP_ENDIAN_LITTLE_BYTE)
&& !(BOOST_MP_ENDIAN_BIG_BYTE && BOOST_MP_ENDIAN_LITTLE_BYTE),
"Inconsistent endianness detected. Please disable standalone mode, and file an issue at https://github.com/boostorg/multiprecision");
#endif // BOOST_MP_ENDIAN_HPP
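// Illustrative use (added sketch, not part of the original header): downstream
// code can select a byte-order-specific path at compile time, e.g.
//
// #if BOOST_MP_ENDIAN_LITTLE_BYTE
// // little-endian path: least significant byte first
// #else
// // big-endian path: most significant byte first
// #endif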
|
import category_theory.limits.presheaf
open category_theory
@[protect_proj] structure system : Type 1 :=
(A B C D : Type)
[category_A : category.{0} A]
[category_B : category.{0} B]
[category_C : category.{0} C]
[category_D : category.{0} D]
(AB : A ⥤ B) (BC : B ⥤ C)
(BD : B ⥤ D) (DC : D ⥤ C)
[full : full (AB ⋙ BD)]
attribute [instance] system.category_A system.category_B
system.category_C system.category_D
system.full --system.faithful
namespace system
variables (S : system)
def correct : Prop :=
∀ (X Y : S.A) (f : S.AB.obj X ⟶ S.AB.obj Y),
(S.AB ⋙ S.BC).map ((S.AB ⋙ S.BD).preimage (S.BD.map f)) = S.BC.map f
variables {S}
lemma correct_of_forall_eq
(h : ∀ (X Y : S.A) (f g : S.AB.obj X ⟶ S.AB.obj Y),
(S.BD ⋙ S.DC).map f = (S.BD ⋙ S.DC).map g → S.BC.map f = S.BC.map g) :
correct S :=
begin
intros X Y f,
rw [functor.comp_map],
apply h,
rw [← functor.comp_map, ← nat_iso.cancel_nat_iso_hom_left (S.AB.associator S.BD S.DC),
← (S.AB.associator S.BD S.DC).hom.naturality, functor.comp_map, functor.image_preimage],
simp only [functor.associator_hom_app, category.comp_id, functor.comp_map, category.id_comp],
end
lemma forall_eq_of_correct (h : correct S) [faithful S.DC] :
∀ (X Y : S.A) (f g : S.AB.obj X ⟶ S.AB.obj Y),
(S.BD ⋙ S.DC).map f = (S.BD ⋙ S.DC).map g → S.BC.map f = S.BC.map g :=
begin
intros X Y f g hfg,
dsimp [correct] at h,
rw [← h, ← h _ _ g, S.DC.map_injective hfg]
end
-- Could be replaced with i : S.BD ⋙ S.DC ⟶ S.BC such that
-- it is always epic
lemma correct_of_iso (i : S.BC ≅ S.BD ⋙ S.DC) : correct S :=
correct_of_forall_eq (λ X Y f g h,
by rw [← nat_iso.cancel_nat_iso_inv_left i, ← i.inv.naturality, h, i.inv.naturality])
end system
|
Smith Power Equipment, a division of Smith Mining Equipment (Pty) Ltd., is an import and distribution company representing world-wide leading brands in the specialist machinery, turf maintenance and off-road vehicle market. These products are distributed throughout South Africa by a comprehensive dealer network. Full after sales service, support and parts are provided for all our products.
Smith Turf Equipment was established in 1991 when the company bought the distribution rights in Southern Africa for The Toro Company of Minneapolis (USA). The Toro Company is the world-wide market leader in the supply of golf course maintenance machinery and has been represented in Southern Africa since 1972.
In 1997 the company acquired the distribution rights for the products of Polaris Industries, also of Minneapolis (USA).
The Polaris Division was established to market and support the sales of Polaris quads and personal watercraft. The company expanded the ATV division in 2002 when it acquired the distribution rights of the Linhai Power Machinery Group of Shanghai (China) for Linhai ATVs, scooters and generators. All Linhai products are currently exported throughout the world, including the USA and Europe, and are certified by the EU.
Smith Power Equipment expanded further in 2000 when it acquired the sole distribution rights of the Kubota Corporation of Osaka, Japan, which manufactures the world-wide leading brand of small tractors, diesel engines and mowers.
In 2008 Smith Power Equipment introduced Victory Motorcycles, an offshoot of Polaris Industries, to South Africa. Victory motorcycles, with their powerful 100 and 106 cubic-inch Freedom V-Twin engines, are known for their high level of build quality.
This website was set up by Robert Keir (Marketing Manager) at Smith Power Equipment. Please contact him directly on 011 284 2024 or [email protected]. We are based in Edenvale, Gauteng, South Africa. |
Parent & Me is a wonderful way to spend quality time with your little one on the move. With a variety of options to choose from, each class focuses on a different type of dance, and all include developmental activities with props for ages 1.5-3.
This class serves as a beginning movement class for children. Children participate in movement activities that improve balance, coordination, socialization and motor skills while getting the wiggles out. Set to favorite tunes, children also use props, parachutes, bean bags, balloons and more to play and take part in partner activities with their parent or guardian. Suitable for boys and girls.
Experience different genres of dance including ballet, jazz, hip hop, stretches and creative movement, providing a proper foundation for future dance classes. These basic movements can improve spatial awareness, body control and balance. The class also uses props, thus developing gross motor skills. Suitable for boys and girls.
Fundamentals of ballet and creative movement to fairy tale music with princess tutus, props, story time and princess crafts.
Imaginations soar with props, songs, rhythm, movement and play. Through ballet and jazz basics, children develop gross motor skills and creativity. Ballet shoes required.
Is your little one ready to take a class on their own? Visit our Combo Classes page to see age-appropriate classes for them! |
%% Copyright (C) 2016-2022 Colin B. Macdonald
%%
%% This file is part of OctSymPy.
%%
%% OctSymPy is free software; you can redistribute it and/or modify
%% it under the terms of the GNU General Public License as published
%% by the Free Software Foundation; either version 3 of the License,
%% or (at your option) any later version.
%%
%% This software is distributed in the hope that it will be useful,
%% but WITHOUT ANY WARRANTY; without even the implied warranty
%% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
%% the GNU General Public License for more details.
%%
%% You should have received a copy of the GNU General Public
%% License along with this software; see the file COPYING.
%% If not, see <http://www.gnu.org/licenses/>.
%% -*- texinfo -*-
%% @documentencoding UTF-8
%% @defmethod @@sym acsc (@var{x})
%% Symbolic acsc function.
%%
%% Example:
%% @example
%% @group
%% syms x
%% y = acsc (x)
%% @result{} y = (sym) acsc(x)
%% @end group
%% @end example
%%
%% Note: this file is autogenerated: if you want to edit it, you might
%% want to make changes to 'generate_functions.py' instead.
%%
%% @end defmethod
function y = acsc(x)
if (nargin ~= 1)
print_usage ();
end
y = elementwise_op ('acsc', x);
end
%!error acsc (sym(1), 2)
%!assert (isequaln (acsc (sym(nan)), sym(nan)))
%!shared x, d
%! d = 1;
%! x = sym('1');
%!test
%! f1 = acsc(x);
%! f2 = acsc(d);
%! assert( abs(double(f1) - f2) < 1e-15 )
%!test
%! D = [d d; d d];
%! A = [x x; x x];
%! f1 = acsc(A);
%! f2 = acsc(D);
%! assert( all(all( abs(double(f1) - f2) < 1e-15 )))
%!test
%! % round trip
%! y = sym('y');
%! A = acsc (d);
%! f = acsc (y);
%! h = function_handle (f);
%! B = h (d);
%! assert (A, B, -eps)
|
\documentclass[11pt,article,oneside,a4paper]{memoir}
%% Packages
%% ========
%% many common packages
\input{commonpackages}
%% Some more packages that you may want to use. Have a look at the
%% file, and consult the package docs for each.
\input{extrapackages}
%% Our layout configuration.
\input{layoutsetup}
%% Theorem environments. You will have to adapt this for a German
%% thesis.
\input{theoremsetup}
%% Helpful macros.
\input{macrosetup}
%%page layout settings and listing templates etc.
\input{settings}
\usepackage{listings}
\usepackage{color}
\definecolor{codegreen}{rgb}{0,0.6,0}
\definecolor{codegray}{rgb}{0.5,0.5,0.5}
\definecolor{codepurple}{rgb}{0.58,0,0.82}
\definecolor{backcolour}{rgb}{0.95,0.95,0.92}
\lstdefinestyle{mystyle}{
backgroundcolor=\color{backcolour},
commentstyle=\color{codegreen},
keywordstyle=\color{magenta},
numberstyle=\tiny\color{codegray},
stringstyle=\color{codepurple},
basicstyle=\footnotesize,
breakatwhitespace=false,
breaklines=true,
captionpos=b,
keepspaces=true,
numbers=left,
numbersep=5pt,
showspaces=false,
showstringspaces=false,
showtabs=false,
tabsize=2
}
\lstset{style=mystyle}
\title{Java.Util Cheatsheet}
\author{
Benjamin Ellenberger\\
\vspace{2em}
Github (git/svn) repository page:\\ \url{https://github.com/benelot/Java.util-cheatsheet}\\
Contact \href{mailto:[email protected]}{[email protected]} if you have any questions.}
\thesistype{Preparation for a Coding Interview}
%\advisors{}
\department{Institute of Neuroinformatics}
\date{\today}
\begin{document}
\frontmatter
%% Title page is auto-generated from document information above.
%% DO NOT CHANGE.
\begin{titlingpage}
\calccentering{\unitlength}
\begin{adjustwidth*}{\unitlength-24pt}{-\unitlength-24pt}
\maketitle
\end{adjustwidth*}
\end{titlingpage}
\mainmatter
\newpage
\chapterprecishere{---}
\newpage
%% This change is needed if the article option for the memoir document class
%% is used, in order to count sections (article) as if they were chapters (memoir)
\counterwithout{section}{chapter}
%% Our content
\newpage
\clearpage
\pagenumbering{roman}
\setcounter{tocdepth}{3}
\setcounter{secnumdepth}{2}
\tableofcontents
\clearpage
\pagenumbering{arabic}
\newpage
\section{Data structures}
\todo[inline]{Describe them.}
\subfile{Data-structure-String.tex}
\subfile{Data-structure-ArrayList.tex}
\subfile{Data-structure-Binary-Search-Tree.tex}
\subfile{Data-structure-Map.tex}
\subfile{Data-structure-Unordered-Map.tex}
\subfile{Data-structure-Linked-List.tex}
\subfile{Data-structure-Queue.tex}
\subfile{Data-structure-Set.tex}
\subfile{Data-structure-Unordered-Set.tex}
\subfile{Data-structure-Stack.tex}
\subfile{Data-structure-Vector.tex}
\section{Algorithms}
\todo[inline]{Describe them.}
\subsection{Graph Algorithms}
\subfile{Algorithm-BFS}
\subfile{Algorithm-DFS}
\subfile{Algorithm-Dijkstra}
\subsection{Sorting Algorithms}
\subfile{Algorithm-Heap-Sort}
\subfile{Algorithm-Merge-Sort}
\subfile{Algorithm-Quick-Sort}
\subsection{Searching Algorithms}
\subfile{Algorithm-Binary-Search}
%\subfile{Algorithm-Template.tex}
\section{Concepts}
\todo[inline]{Describe them.}
\subfile{Concept-Binary-Manipulation}
\subfile{Concept-Memory}
\subfile{Concept-Recursion}
\subfile{Concept-Dynamic-Programming}
\subfile{Concept-Big-O-Time-Space}
%\subfile{Concept-Template}
%\pagebreak
%\def\cheatsheet{2014}
%\subfile{15-Cheatsheet2014-BE.tex}
\subfile{Glossary.tex}
\subfile{TODO.tex}
\end{document}
|
Formal statement is: lemma sigma_algebra_sigma_sets: "a \<subseteq> Pow \<Omega> \<Longrightarrow> sigma_algebra \<Omega> (sigma_sets \<Omega> a)" Informal statement is: If $a$ is a subset of the power set of $\Omega$, then the sigma-algebra generated by $a$ is a sigma-algebra. |
/* CirKit: A circuit toolkit
* Copyright (C) 2009-2015 University of Bremen
* Copyright (C) 2015-2017 EPFL
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "write_blif.hpp"
#include <iterator>
#include <regex>
#include <boost/algorithm/string/join.hpp>
#include <boost/assign/std/vector.hpp>
#include <boost/format.hpp>
#include <boost/function.hpp>
#include <boost/iterator/counting_iterator.hpp>
#include <boost/iterator/zip_iterator.hpp>
#include <boost/range/adaptors.hpp>
#include <boost/range/algorithm.hpp>
#include <boost/range/iterator_range.hpp>
#include <boost/tuple/tuple.hpp>
#include <reversible/circuit.hpp>
#include <reversible/target_tags.hpp>
using namespace boost::assign;
namespace cirkit
{
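// Predicate on zipped (name, flag) tuples: selects entries whose flag is
// unset, i.e. primary inputs (no constant) resp. primary outputs (not garbage).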
struct is_primary
{
template<typename T>
bool operator()( const T& tuple ) const
{
return !boost::get<1>( tuple );
}
};
template<int N, typename T>
inline
const typename boost::tuples::element<N,T>::type& getN( const T& t )
{
return boost::tuples::get<N>( t );
}
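// Maps a zipped (name, constant) tuple to the signal name used in the BLIF
// output: primary inputs keep their name (constant inputs too, when
// keep_constant_names is set and the name occurs at most once); otherwise a
// fresh temporary name "<tmp_signal_name><n>" is generated.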
struct signal_name
{
signal_name( unsigned& tmp_signal, const std::string& tmp_signal_name, bool keep_constant_names, const std::vector<std::string>& inputs )
: tmp_signal( tmp_signal ),
tmp_signal_name( tmp_signal_name ),
keep_constant_names( keep_constant_names ),
inputs( inputs ) {}
template<typename T>
std::string operator()( const T& tuple ) const
{
if ( !boost::get<1>( tuple ) || ( keep_constant_names && boost::count( inputs, boost::get<0>( tuple ) ) <= 1u ) )
{
return boost::get<0>( tuple );
}
else
{
return boost::str( boost::format( "%s%d" ) % tmp_signal_name % tmp_signal++ );
}
}
private:
unsigned& tmp_signal;
const std::string& tmp_signal_name;
bool keep_constant_names;
const std::vector<std::string>& inputs;
};
template<typename T>
struct random_access
{
typedef typename T::value_type result_type;
explicit random_access( const T& container ) : container( container ) {}
const typename T::value_type& operator()( typename T::size_type n ) const
{
return container.at( n );
}
private:
const T& container;
};
template<typename T>
random_access<T> make_random_access( const T& container )
{
return random_access<T>( container );
}
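// Convert a tristate cube entry to its BLIF character: '1', '0', or '-' for
// don't-care; the BLIF-MV variant additionally appends a separating space.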
struct to_blif
{
typedef std::string result_type;
std::string operator()( const boost::optional<bool>& vt ) const
{
return vt ? ( *vt ? "1" : "0" ) : "-";
}
};
struct to_blif_mv
{
typedef std::string result_type;
std::string operator()( const boost::optional<bool>& vt ) const
{
return ( vt ? ( *vt ? "1" : "0" ) : "-" ) + std::string( " " );
}
};
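// Substitute bus names (with index) for bused signals, rewrite array-style
// indices "[i]" as "<*i*>", and prepend the given prefix to each name.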
void rephrase_signal_names( std::vector<std::string>& signals, const bus_collection& bus, const std::string& prefix = std::string() )
{
for ( unsigned i = 0u; i < signals.size(); ++i )
{
std::string signalname = bus.has_bus( i ) ? boost::str( boost::format( "%s<%d>" ) % bus.find_bus( i ) % bus.signal_index( i ) ) : signals.at( i );
signalname = std::regex_replace( signalname, std::regex( "\\[(\\d+)\\]" ), std::string( "<*\\1*>" ), std::regex_constants::match_default | std::regex_constants::format_sed );
//if ( bus.has_bus( i ) )
//{
signalname = prefix + signalname;
//}
signals.at( i ) = signalname;
}
}
void write_blif_settings::operator()( const gate& g, truth_table_map& map ) const
{
unsigned num_controls = g.controls().size();
if ( is_toffoli( g ) )
{
std::map<std::vector<boost::optional<bool> >, bool> cubes;
for ( unsigned j = 0; j < num_controls; ++j )
{
std::vector<boost::optional<bool> > cube( num_controls + 1u );
cube.at( 0u ) = true;
cube.at( 1u + j ) = false;
cubes[cube] = true;
}
std::vector<boost::optional<bool> > cube( num_controls + 1u, true );
cube.at( 0u ) = false;
cubes[cube] = true;
map[g.targets().front()] = cubes;
}
else if ( is_fredkin( g ) )
{
std::vector<std::string> not_equal;
not_equal += "01","10";
std::map<std::vector<boost::optional<bool> >, bool> cubes1, cubes2;
for ( unsigned i = 0u; i < 2u; ++i )
{
for ( unsigned j = 0u; j < num_controls; ++j )
{
std::vector<boost::optional<bool> > cube( num_controls + 2u );
cube.at( 0u ) = i != 0u;
cube.at( 1u ) = i == 0u;
cube.at( 2u + j ) = false;
cubes1[cube] = i != 0u;
cubes2[cube] = i == 0u;
}
std::vector<boost::optional<bool> > cube( num_controls + 2u, true );
cube.at( 0u ) = i != 0u;
cube.at( 1u ) = i == 0u;
cubes1[cube] = i == 0u;
cubes2[cube] = i != 0u;
}
std::vector<boost::optional<bool> > cube( num_controls + 2u );
cube.at( 0u ) = cube.at( 1u ) = true;
cubes1[cube] = true;
cubes2[cube] = true;
map[g.targets().at( 0u )] = cubes1;
map[g.targets().at( 1u )] = cubes2;
}
else if ( is_peres( g ) )
{
    // A Peres gate has one control and two targets.
std::map<std::vector<boost::optional<bool> >, bool> cubes1, cubes2;
std::vector<boost::optional<bool> > cube;
cube += false,false,true;
cubes2[cube] = true;
cube.clear();
cube += false,true,false;
cubes2[cube] = true;
cube.clear();
cube += false,true,true;
cubes1[cube] = true;
cube.clear();
cube += true,false,false;
cubes1[cube] = true;
cube.clear();
cube += true,false,true;
cubes1[cube] = cubes2[cube] = true;
cube.clear();
cube += true,true,false;
cubes1[cube] = cubes2[cube] = true;
map[g.targets().at( 0u )] = cubes1;
map[g.targets().at( 1u )] = cubes2;
}
}
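// Write the reversible circuit to os in BLIF format: model header, primary
// inputs/outputs, drivers for constant lines, one .names block per gate target
// (truth tables supplied by the settings functor), and output buffers.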
void write_blif( const circuit& circ, std::ostream& os, const write_blif_settings& settings )
{
std::vector<std::string> signals( circ.lines() );
unsigned tmp_signal = 0;
/* model name */
std::string model_name = circ.circuit_name();
if ( !model_name.size() )
{
model_name = "circuit";
}
os << ".model " << model_name << std::endl;
/* override inputs and outputs with possible bus names */
std::vector<std::string> _inputs( circ.inputs().begin(), circ.inputs().end() );
std::vector<std::string> _outputs( circ.outputs().begin(), circ.outputs().end() );
rephrase_signal_names( _inputs, circ.inputbuses() );
rephrase_signal_names( _inputs, circ.statesignals() );
rephrase_signal_names( _outputs, circ.outputbuses(), settings.output_prefix );
rephrase_signal_names( _outputs, circ.statesignals(), settings.state_prefix );
/* zip inputs and outputs */
std::vector<boost::tuple<std::string, constant> > inputs( circ.lines() );
std::copy( boost::make_zip_iterator( boost::make_tuple( _inputs.begin(), circ.constants().begin() ) ),
boost::make_zip_iterator( boost::make_tuple( _inputs.end(), circ.constants().end() ) ),
inputs.begin() );
typedef boost::tuple<std::string, bool> out_tuple;
std::vector<out_tuple> outputs( circ.lines() );
std::copy( boost::make_zip_iterator( boost::make_tuple( _outputs.begin(), circ.garbage().begin() ) ),
boost::make_zip_iterator( boost::make_tuple( _outputs.end(), circ.garbage().end() ) ),
outputs.begin() );
/* inputs */
os << ".inputs ";
std::transform( boost::make_filter_iterator<is_primary>( inputs.begin(), inputs.end() ),
boost::make_filter_iterator<is_primary>( inputs.end(), inputs.end() ),
std::ostream_iterator<std::string>( os, " " ),
getN<0, boost::tuple<std::string, constant> > );
os << std::endl;
/* outputs */
os << ".outputs ";
std::transform( boost::make_filter_iterator<is_primary>( outputs.begin(), outputs.end() ),
boost::make_filter_iterator<is_primary>( outputs.end(), outputs.end() ),
std::ostream_iterator<std::string>( os, " " ),
getN<0, out_tuple> );
os << std::endl;
std::transform( inputs.begin(), inputs.end(), signals.begin(), signal_name( tmp_signal, settings.tmp_signal_name, settings.keep_constant_names, circ.inputs() ) );
/* constants */
unsigned i = 0u;
for ( const auto& c : circ.constants() )
{
if ( c )
{
os << ".names " << signals.at( i ) << std::endl
<< ".def 0" << std::endl
<< "- " << ( *c ? "1" : "0" ) << std::endl;
}
++i;
}
for ( const auto& g : circ )
{
using boost::adaptors::transformed;
write_blif_settings::truth_table_map ttm;
// calculate truth table map
settings( g, ttm );
// input signature
std::string input_signature =
boost::join( g.targets() | transformed( make_random_access( signals ) ), " " ) + " " +
boost::join( g.controls() | transformed( +[]( variable v ) { return v.line(); } ) | transformed( make_random_access( signals ) ), " " );
for ( const auto& target : g.targets() )
{
// update name
signals.at( target ) = boost::str( boost::format( "%s%d" ) % settings.tmp_signal_name % tmp_signal );
// write signature
os << boost::format( ".names %s %s%d" ) % input_signature % settings.tmp_signal_name % ( tmp_signal ) << std::endl;
++tmp_signal;
os << ".def 0" << std::endl;
// write truth table
for ( const auto& pair : ttm[target] )
{
if ( !pair.second ) continue; // omit 0 outputs
boost::function<std::string(const boost::optional<bool>&)> transformer;
if ( settings.blif_mv )
{
transformer = to_blif_mv();
}
else
{
transformer = to_blif();
}
boost::copy( pair.first | transformed( transformer ), std::ostream_iterator<std::string>( os ) );
if ( !settings.blif_mv )
{
os << ' ';
}
os << '1' << std::endl;
}
}
}
for ( const auto& t : outputs )
{
if ( !boost::get<1>( t ) )
{
os << ".names "
<< signals.at( std::find( _outputs.begin(), _outputs.end(), boost::get<0>( t ) ) - _outputs.begin() )
<< " "
<< boost::get<0>( t ) << std::endl
<< ".def 0" << std::endl
<< "1 1" << std::endl;
}
}
os << ".end" << std::endl;
}
}
// Local Variables:
// c-basic-offset: 2
// eval: (c-set-offset 'substatement-open 0)
// eval: (c-set-offset 'innamespace 0)
// End:
|
[STATEMENT]
lemma finite_range_map_of_map_add:
"finite (range f) \<Longrightarrow> finite (range (f ++ map_of l))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite (range f) \<Longrightarrow> finite (range (f ++ map_of l))
[PROOF STEP]
proof (induct l)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. finite (range f) \<Longrightarrow> finite (range (f ++ map_of []))
2. \<And>a l. \<lbrakk>finite (range f) \<Longrightarrow> finite (range (f ++ map_of l)); finite (range f)\<rbrakk> \<Longrightarrow> finite (range (f ++ map_of (a # l)))
[PROOF STEP]
case (Cons a l)
[PROOF STATE]
proof (state)
this:
finite (range f) \<Longrightarrow> finite (range (f ++ map_of l))
finite (range f)
goal (2 subgoals):
1. finite (range f) \<Longrightarrow> finite (range (f ++ map_of []))
2. \<And>a l. \<lbrakk>finite (range f) \<Longrightarrow> finite (range (f ++ map_of l)); finite (range f)\<rbrakk> \<Longrightarrow> finite (range (f ++ map_of (a # l)))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
finite (range f) \<Longrightarrow> finite (range (f ++ map_of l))
finite (range f)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
finite (range f) \<Longrightarrow> finite (range (f ++ map_of l))
finite (range f)
goal (1 subgoal):
1. finite (range (f ++ map_of (a # l)))
[PROOF STEP]
by (metis finite_range_updI map_add_upd map_of.simps(2))
[PROOF STATE]
proof (state)
this:
finite (range (f ++ map_of (a # l)))
goal (1 subgoal):
1. finite (range f) \<Longrightarrow> finite (range (f ++ map_of []))
[PROOF STEP]
qed auto |
[STATEMENT]
lemma list_of_oalist_update_by_fun_gr [simp, code abstract]:
"list_of_oalist (update_by_fun_gr k f xs) = update_by_fun_gr_raw k f (list_of_oalist xs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. list_of_oalist (update_by_fun_gr k f xs) = update_by_fun_gr_raw k f (list_of_oalist xs)
[PROOF STEP]
unfolding update_by_fun_gr_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. list_of_oalist (oalist_of_list (update_by_fun_gr_raw k f (list_of_oalist xs))) = update_by_fun_gr_raw k f (list_of_oalist xs)
[PROOF STEP]
by (rule list_of_oalist_of_list_id, rule oalist_inv_update_by_fun_gr_raw, fact oalist_inv_list_of_oalist) |
import numpy as np
from arctor.dwt import dwt_chisq
# Compute chi-squared for a given model fitting a data set:
data = np.array([2.0, 0.0, 3.0, -2.0, -1.0, 2.0, 2.0, 0.0])
model = np.ones(8)
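# params: noise parameters forwarded to dwt_chisq (presumably the
# wavelet-noise parameters; an assumption, see the arctor.dwt docs).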
params = np.array([1.0, 0.1, 0.1])
chisq = dwt_chisq(model, data, params)
print(chisq)
# 1693.22308882
# Now, say this is a three-parameter model, with a Gaussian prior
# on the last parameter:
priors = np.array([1.0, 0.2, 0.3])
plow = np.array([0.0, 0.0, 0.1])
pup = np.array([0.0, 0.0, 0.1])
chisq = dwt_chisq(model, data, params, priors, plow, pup)
print(chisq)
|
/-
Copyright (c) 2020 Kenji Nakagawa. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kenji Nakagawa, Anne Baanen, Filippo A. E. Nuccio
-/
import algebra.algebra.subalgebra.pointwise
import algebraic_geometry.prime_spectrum.maximal
import algebraic_geometry.prime_spectrum.noetherian
import order.hom.basic
import ring_theory.dedekind_domain.basic
import ring_theory.fractional_ideal
import ring_theory.principal_ideal_domain
import ring_theory.chain_of_divisors
/-!
# Dedekind domains and ideals
In this file, we show a ring is a Dedekind domain iff all fractional ideals are invertible.
Then we prove some results on the unique factorization monoid structure of the ideals.
## Main definitions
- `is_dedekind_domain_inv` alternatively defines a Dedekind domain as an integral domain where
every nonzero fractional ideal is invertible.
- `is_dedekind_domain_inv_iff` shows that this does not depend on the choice of field of
fractions.
- `is_dedekind_domain.height_one_spectrum` defines the type of nonzero prime ideals of `R`.
## Main results:
- `is_dedekind_domain_iff_is_dedekind_domain_inv`
- `ideal.unique_factorization_monoid`
## Implementation notes
The definitions that involve a field of fractions choose a canonical field of fractions,
but are independent of that choice. The `..._iff` lemmas express this independence.
Often, definitions assume that Dedekind domains are not fields. We found it more practical
to add a `(h : ¬ is_field A)` assumption whenever this is explicitly needed.
## References
* [D. Marcus, *Number Fields*][marcus1977number]
* [J.W.S. Cassels, A. Fröhlich, *Algebraic Number Theory*][cassels1967algebraic]
* [J. Neukirch, *Algebraic Number Theory*][Neukirch1992]
## Tags
dedekind domain, dedekind ring
-/
variables (R A K : Type*) [comm_ring R] [comm_ring A] [field K]
open_locale non_zero_divisors polynomial
variables [is_domain A]
section inverse
namespace fractional_ideal
variables {R₁ : Type*} [comm_ring R₁] [is_domain R₁] [algebra R₁ K] [is_fraction_ring R₁ K]
variables {I J : fractional_ideal R₁⁰ K}
noncomputable instance : has_inv (fractional_ideal R₁⁰ K) := ⟨λ I, 1 / I⟩
lemma inv_eq : I⁻¹ = 1 / I := rfl
lemma inv_zero' : (0 : fractional_ideal R₁⁰ K)⁻¹ = 0 := div_zero
lemma inv_nonzero {J : fractional_ideal R₁⁰ K} (h : J ≠ 0) :
J⁻¹ = ⟨(1 : fractional_ideal R₁⁰ K) / J, fractional_div_of_nonzero h⟩ := div_nonzero _
lemma coe_inv_of_nonzero {J : fractional_ideal R₁⁰ K} (h : J ≠ 0) :
(↑J⁻¹ : submodule R₁ K) = is_localization.coe_submodule K ⊤ / J :=
by { rwa inv_nonzero _, refl, assumption }
variables {K}
lemma mem_inv_iff (hI : I ≠ 0) {x : K} : x ∈ I⁻¹ ↔ ∀ y ∈ I, x * y ∈ (1 : fractional_ideal R₁⁰ K) :=
mem_div_iff_of_nonzero hI
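/-- Inversion is antitone on nonzero fractional ideals: `I ≤ J` implies `J⁻¹ ≤ I⁻¹`. -/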
lemma inv_anti_mono (hI : I ≠ 0) (hJ : J ≠ 0) (hIJ : I ≤ J) : J⁻¹ ≤ I⁻¹ :=
λ x, by { simp only [mem_inv_iff hI, mem_inv_iff hJ], exact λ h y hy, h y (hIJ hy) }
lemma le_self_mul_inv {I : fractional_ideal R₁⁰ K} (hI : I ≤ (1 : fractional_ideal R₁⁰ K)) :
I ≤ I * I⁻¹ :=
le_self_mul_one_div hI
variables (K)
lemma coe_ideal_le_self_mul_inv (I : ideal R₁) : (I : fractional_ideal R₁⁰ K) ≤ I * I⁻¹ :=
le_self_mul_inv coe_ideal_le_one
/-- `I⁻¹` is the inverse of `I` if `I` has an inverse. -/
theorem right_inverse_eq (I J : fractional_ideal R₁⁰ K) (h : I * J = 1) : J = I⁻¹ :=
begin
have hI : I ≠ 0 := ne_zero_of_mul_eq_one I J h,
suffices h' : I * (1 / I) = 1,
{ exact (congr_arg units.inv $
@units.ext _ _ (units.mk_of_mul_eq_one _ _ h) (units.mk_of_mul_eq_one _ _ h') rfl) },
apply le_antisymm,
{ apply mul_le.mpr _,
intros x hx y hy,
rw mul_comm,
exact (mem_div_iff_of_nonzero hI).mp hy x hx },
rw ← h,
apply mul_left_mono I,
apply (le_div_iff_of_nonzero hI).mpr _,
intros y hy x hx,
rw mul_comm,
exact mul_mem_mul hx hy
end
theorem mul_inv_cancel_iff {I : fractional_ideal R₁⁰ K} : I * I⁻¹ = 1 ↔ ∃ J, I * J = 1 :=
⟨λ h, ⟨I⁻¹, h⟩, λ ⟨J, hJ⟩, by rwa ← right_inverse_eq K I J hJ⟩
lemma mul_inv_cancel_iff_is_unit {I : fractional_ideal R₁⁰ K} : I * I⁻¹ = 1 ↔ is_unit I :=
(mul_inv_cancel_iff K).trans is_unit_iff_exists_inv.symm
variables {K' : Type*} [field K'] [algebra R₁ K'] [is_fraction_ring R₁ K']
@[simp] lemma map_inv (I : fractional_ideal R₁⁰ K) (h : K ≃ₐ[R₁] K') :
(I⁻¹).map (h : K →ₐ[R₁] K') = (I.map h)⁻¹ :=
by rw [inv_eq, map_div, map_one, inv_eq]
open submodule submodule.is_principal
@[simp] lemma span_singleton_inv (x : K) : (span_singleton R₁⁰ x)⁻¹ = span_singleton _ x⁻¹ :=
one_div_span_singleton x
@[simp] lemma span_singleton_div_span_singleton (x y : K) :
span_singleton R₁⁰ x / span_singleton R₁⁰ y = span_singleton R₁⁰ (x / y) :=
by rw [div_span_singleton, mul_comm, span_singleton_mul_span_singleton, div_eq_mul_inv]
lemma span_singleton_div_self {x : K} (hx : x ≠ 0) :
span_singleton R₁⁰ x / span_singleton R₁⁰ x = 1 :=
by rw [span_singleton_div_span_singleton, div_self hx, span_singleton_one]
lemma coe_ideal_span_singleton_div_self {x : R₁} (hx : x ≠ 0) :
(ideal.span ({x} : set R₁) : fractional_ideal R₁⁰ K) / ideal.span ({x} : set R₁) = 1 :=
by rw [coe_ideal_span_singleton, span_singleton_div_self K $
(map_ne_zero_iff _ $ no_zero_smul_divisors.algebra_map_injective R₁ K).mpr hx]
lemma span_singleton_mul_inv {x : K} (hx : x ≠ 0) :
span_singleton R₁⁰ x * (span_singleton R₁⁰ x)⁻¹ = 1 :=
by rw [span_singleton_inv, span_singleton_mul_span_singleton, mul_inv_cancel hx, span_singleton_one]
lemma coe_ideal_span_singleton_mul_inv {x : R₁} (hx : x ≠ 0) :
(ideal.span ({x} : set R₁) : fractional_ideal R₁⁰ K) * (ideal.span ({x} : set R₁))⁻¹ = 1 :=
by rw [coe_ideal_span_singleton, span_singleton_mul_inv K $
(map_ne_zero_iff _ $ no_zero_smul_divisors.algebra_map_injective R₁ K).mpr hx]
lemma span_singleton_inv_mul {x : K} (hx : x ≠ 0) :
(span_singleton R₁⁰ x)⁻¹ * span_singleton R₁⁰ x = 1 :=
by rw [mul_comm, span_singleton_mul_inv K hx]
lemma coe_ideal_span_singleton_inv_mul {x : R₁} (hx : x ≠ 0) :
(ideal.span ({x} : set R₁) : fractional_ideal R₁⁰ K)⁻¹ * ideal.span ({x} : set R₁) = 1 :=
by rw [mul_comm, coe_ideal_span_singleton_mul_inv K hx]
lemma mul_generator_self_inv {R₁ : Type*} [comm_ring R₁] [algebra R₁ K] [is_localization R₁⁰ K]
(I : fractional_ideal R₁⁰ K) [submodule.is_principal (I : submodule R₁ K)] (h : I ≠ 0) :
I * span_singleton _ (generator (I : submodule R₁ K))⁻¹ = 1 :=
begin
-- Rewrite only the `I` that appears alone.
conv_lhs { congr, rw eq_span_singleton_of_principal I },
rw [span_singleton_mul_span_singleton, mul_inv_cancel, span_singleton_one],
intro generator_I_eq_zero,
apply h,
rw [eq_span_singleton_of_principal I, generator_I_eq_zero, span_singleton_zero]
end
lemma invertible_of_principal (I : fractional_ideal R₁⁰ K)
[submodule.is_principal (I : submodule R₁ K)] (h : I ≠ 0) : I * I⁻¹ = 1 :=
(mul_div_self_cancel_iff).mpr
⟨span_singleton _ (generator (I : submodule R₁ K))⁻¹, mul_generator_self_inv _ I h⟩
lemma invertible_iff_generator_nonzero (I : fractional_ideal R₁⁰ K)
[submodule.is_principal (I : submodule R₁ K)] :
I * I⁻¹ = 1 ↔ generator (I : submodule R₁ K) ≠ 0 :=
begin
split,
{ intros hI hg,
apply ne_zero_of_mul_eq_one _ _ hI,
rw [eq_span_singleton_of_principal I, hg, span_singleton_zero] },
{ intro hg,
apply invertible_of_principal,
rw [eq_span_singleton_of_principal I],
intro hI,
have := mem_span_singleton_self _ (generator (I : submodule R₁ K)),
rw [hI, mem_zero_iff] at this,
contradiction }
end
lemma is_principal_inv (I : fractional_ideal R₁⁰ K)
[submodule.is_principal (I : submodule R₁ K)] (h : I ≠ 0) :
submodule.is_principal (I⁻¹).1 :=
begin
rw [val_eq_coe, is_principal_iff],
use (generator (I : submodule R₁ K))⁻¹,
have hI : I * span_singleton _ ((generator (I : submodule R₁ K))⁻¹) = 1,
apply mul_generator_self_inv _ I h,
exact (right_inverse_eq _ I (span_singleton _ ((generator (I : submodule R₁ K))⁻¹)) hI).symm
end
noncomputable instance : inv_one_class (fractional_ideal R₁⁰ K) :=
{ inv_one := div_one,
..fractional_ideal.has_one,
..fractional_ideal.has_inv K }
end fractional_ideal
/--
A Dedekind domain is an integral domain such that every fractional ideal has an inverse.
This is equivalent to `is_dedekind_domain`.
In particular we provide a `fractional_ideal.comm_group_with_zero` instance,
assuming `is_dedekind_domain A`, which implies `is_dedekind_domain_inv`. For **integral** ideals,
`is_dedekind_domain`(`_inv`) implies only `ideal.cancel_comm_monoid_with_zero`.
-/
def is_dedekind_domain_inv : Prop :=
∀ I ≠ (⊥ : fractional_ideal A⁰ (fraction_ring A)), I * I⁻¹ = 1
open fractional_ideal
variables {R A K}
lemma is_dedekind_domain_inv_iff [algebra A K] [is_fraction_ring A K] :
is_dedekind_domain_inv A ↔ (∀ I ≠ (⊥ : fractional_ideal A⁰ K), I * I⁻¹ = 1) :=
begin
let h := map_equiv (fraction_ring.alg_equiv A K),
refine h.to_equiv.forall_congr (λ I, _),
rw ← h.to_equiv.apply_eq_iff_eq,
simp [is_dedekind_domain_inv, show ⇑h.to_equiv = h, from rfl],
end
lemma fractional_ideal.adjoin_integral_eq_one_of_is_unit [algebra A K] [is_fraction_ring A K]
(x : K) (hx : is_integral A x) (hI : is_unit (adjoin_integral A⁰ x hx)) :
adjoin_integral A⁰ x hx = 1 :=
begin
set I := adjoin_integral A⁰ x hx,
have mul_self : I * I = I,
{ apply coe_to_submodule_injective, simp },
convert congr_arg (* I⁻¹) mul_self;
simp only [(mul_inv_cancel_iff_is_unit K).mpr hI, mul_assoc, mul_one],
end
namespace is_dedekind_domain_inv
variables [algebra A K] [is_fraction_ring A K] (h : is_dedekind_domain_inv A)
include h
lemma mul_inv_eq_one {I : fractional_ideal A⁰ K} (hI : I ≠ 0) : I * I⁻¹ = 1 :=
is_dedekind_domain_inv_iff.mp h I hI
lemma inv_mul_eq_one {I : fractional_ideal A⁰ K} (hI : I ≠ 0) : I⁻¹ * I = 1 :=
(mul_comm _ _).trans (h.mul_inv_eq_one hI)
protected lemma is_unit {I : fractional_ideal A⁰ K} (hI : I ≠ 0) : is_unit I :=
is_unit_of_mul_eq_one _ _ (h.mul_inv_eq_one hI)
lemma is_noetherian_ring : is_noetherian_ring A :=
begin
refine is_noetherian_ring_iff.mpr ⟨λ (I : ideal A), _⟩,
by_cases hI : I = ⊥,
{ rw hI, apply submodule.fg_bot },
have hI : (I : fractional_ideal A⁰ (fraction_ring A)) ≠ 0 := coe_ideal_ne_zero.mpr hI,
exact I.fg_of_is_unit (is_fraction_ring.injective A (fraction_ring A)) (h.is_unit hI)
end
lemma integrally_closed : is_integrally_closed A :=
begin
-- It suffices to show that for integral `x`,
-- `A[x]` (which is a fractional ideal) is in fact equal to `A`.
refine ⟨λ x hx, _⟩,
rw [← set.mem_range, ← algebra.mem_bot, ← subalgebra.mem_to_submodule, algebra.to_submodule_bot,
← coe_span_singleton A⁰ (1 : fraction_ring A), span_singleton_one,
← fractional_ideal.adjoin_integral_eq_one_of_is_unit x hx (h.is_unit _)],
{ exact mem_adjoin_integral_self A⁰ x hx },
{ exact λ h, one_ne_zero (eq_zero_iff.mp h 1 (subalgebra.one_mem _)) },
end
open ring
lemma dimension_le_one : dimension_le_one A :=
begin
-- We're going to show that `P` is maximal because any (maximal) ideal `M`
-- that is strictly larger would be `⊤`.
rintros P P_ne hP,
refine ideal.is_maximal_def.mpr ⟨hP.ne_top, λ M hM, _⟩,
-- We may assume `P` and `M` (as fractional ideals) are nonzero.
have P'_ne : (P : fractional_ideal A⁰ (fraction_ring A)) ≠ 0 := coe_ideal_ne_zero.mpr P_ne,
have M'_ne : (M : fractional_ideal A⁰ (fraction_ring A)) ≠ 0 :=
coe_ideal_ne_zero.mpr (lt_of_le_of_lt bot_le hM).ne',
-- In particular, we'll show `M⁻¹ * P ≤ P`
suffices : (M⁻¹ * P : fractional_ideal A⁰ (fraction_ring A)) ≤ P,
{ rw [eq_top_iff, ← coe_ideal_le_coe_ideal (fraction_ring A), coe_ideal_top],
calc (1 : fractional_ideal A⁰ (fraction_ring A)) = _ * _ * _ : _
... ≤ _ * _ : mul_right_mono (P⁻¹ * M : fractional_ideal A⁰ (fraction_ring A)) this
... = M : _,
{ rw [mul_assoc, ← mul_assoc ↑P, h.mul_inv_eq_one P'_ne, one_mul, h.inv_mul_eq_one M'_ne] },
{ rw [← mul_assoc ↑P, h.mul_inv_eq_one P'_ne, one_mul] },
{ apply_instance } },
-- Suppose we have `x ∈ M⁻¹ * P`, then in fact `x = algebra_map _ _ y` for some `y`.
intros x hx,
have le_one : (M⁻¹ * P : fractional_ideal A⁰ (fraction_ring A)) ≤ 1,
{ rw [← h.inv_mul_eq_one M'_ne],
exact mul_left_mono _ ((coe_ideal_le_coe_ideal (fraction_ring A)).mpr hM.le) },
obtain ⟨y, hy, rfl⟩ := (mem_coe_ideal _).mp (le_one hx),
-- Since `M` is strictly greater than `P`, let `z ∈ M \ P`.
obtain ⟨z, hzM, hzp⟩ := set_like.exists_of_lt hM,
-- We have `z * y ∈ M * (M⁻¹ * P) = P`.
have zy_mem := mul_mem_mul (mem_coe_ideal_of_mem A⁰ hzM) hx,
rw [← ring_hom.map_mul, ← mul_assoc, h.mul_inv_eq_one M'_ne, one_mul] at zy_mem,
obtain ⟨zy, hzy, zy_eq⟩ := (mem_coe_ideal A⁰).mp zy_mem,
rw is_fraction_ring.injective A (fraction_ring A) zy_eq at hzy,
-- But `P` is a prime ideal, so `z ∉ P` implies `y ∈ P`, as desired.
exact mem_coe_ideal_of_mem A⁰ (or.resolve_left (hP.mem_or_mem hzy) hzp)
end
/-- Showing one side of the equivalence between the definitions
`is_dedekind_domain_inv` and `is_dedekind_domain` of Dedekind domains. -/
theorem is_dedekind_domain : is_dedekind_domain A :=
⟨h.is_noetherian_ring, h.dimension_le_one, h.integrally_closed⟩
end is_dedekind_domain_inv
variables [algebra A K] [is_fraction_ring A K]
/-- Specialization of `exists_prime_spectrum_prod_le_and_ne_bot_of_domain` to Dedekind domains:
Let `I : ideal A` be a nonzero ideal, where `A` is a Dedekind domain that is not a field.
Then `exists_prime_spectrum_prod_le_and_ne_bot_of_domain` states we can find a product of prime
ideals that is contained within `I`. This lemma extends that result by making the product minimal:
let `M` be a maximal ideal that contains `I`, then the product including `M` is contained within `I`
and the product excluding `M` is not contained within `I`. -/
lemma exists_multiset_prod_cons_le_and_prod_not_le [is_dedekind_domain A]
(hNF : ¬ is_field A) {I M : ideal A} (hI0 : I ≠ ⊥) (hIM : I ≤ M) [hM : M.is_maximal] :
∃ (Z : multiset (prime_spectrum A)),
(M ::ₘ (Z.map prime_spectrum.as_ideal)).prod ≤ I ∧
¬ (multiset.prod (Z.map prime_spectrum.as_ideal) ≤ I) :=
begin
  -- Let `Z` be a minimal set of prime ideals such that their product is contained in `I`.
obtain ⟨Z₀, hZ₀⟩ := prime_spectrum.exists_prime_spectrum_prod_le_and_ne_bot_of_domain hNF hI0,
obtain ⟨Z, ⟨hZI, hprodZ⟩, h_eraseZ⟩ := multiset.well_founded_lt.has_min
(λ Z, (Z.map prime_spectrum.as_ideal).prod ≤ I ∧ (Z.map prime_spectrum.as_ideal).prod ≠ ⊥)
⟨Z₀, hZ₀⟩,
have hZM : multiset.prod (Z.map prime_spectrum.as_ideal) ≤ M := le_trans hZI hIM,
have hZ0 : Z ≠ 0, { rintro rfl, simpa [hM.ne_top] using hZM },
obtain ⟨_, hPZ', hPM⟩ := (hM.is_prime.multiset_prod_le (mt multiset.map_eq_zero.mp hZ0)).mp hZM,
-- Then in fact there is a `P ∈ Z` with `P ≤ M`.
obtain ⟨P, hPZ, rfl⟩ := multiset.mem_map.mp hPZ',
classical,
have := multiset.map_erase prime_spectrum.as_ideal prime_spectrum.ext P Z,
obtain ⟨hP0, hZP0⟩ : P.as_ideal ≠ ⊥ ∧ ((Z.erase P).map prime_spectrum.as_ideal).prod ≠ ⊥,
{ rwa [ne.def, ← multiset.cons_erase hPZ', multiset.prod_cons, ideal.mul_eq_bot,
not_or_distrib, ← this] at hprodZ },
-- By maximality of `P` and `M`, we have that `P ≤ M` implies `P = M`.
have hPM' := (is_dedekind_domain.dimension_le_one _ hP0 P.is_prime).eq_of_le hM.ne_top hPM,
substI hPM',
-- By minimality of `Z`, erasing `P` from `Z` is exactly what we need.
refine ⟨Z.erase P, _, _⟩,
{ convert hZI,
rw [this, multiset.cons_erase hPZ'] },
{ refine λ h, h_eraseZ (Z.erase P) ⟨h, _⟩ (multiset.erase_lt.mpr hPZ),
exact hZP0 }
end
namespace fractional_ideal
open ideal
lemma exists_not_mem_one_of_ne_bot [is_dedekind_domain A]
(hNF : ¬ is_field A) {I : ideal A} (hI0 : I ≠ ⊥) (hI1 : I ≠ ⊤) :
∃ x : K, x ∈ (I⁻¹ : fractional_ideal A⁰ K) ∧ x ∉ (1 : fractional_ideal A⁰ K) :=
begin
-- WLOG, let `I` be maximal.
suffices : ∀ {M : ideal A} (hM : M.is_maximal),
∃ x : K, x ∈ (M⁻¹ : fractional_ideal A⁰ K) ∧ x ∉ (1 : fractional_ideal A⁰ K),
{ obtain ⟨M, hM, hIM⟩ : ∃ (M : ideal A), is_maximal M ∧ I ≤ M := ideal.exists_le_maximal I hI1,
resetI,
have hM0 := (M.bot_lt_of_maximal hNF).ne',
obtain ⟨x, hxM, hx1⟩ := this hM,
refine ⟨x, inv_anti_mono _ _ ((coe_ideal_le_coe_ideal _).mpr hIM) hxM, hx1⟩;
rw coe_ideal_ne_zero; assumption },
-- Let `a` be a nonzero element of `M` and `J` the ideal generated by `a`.
intros M hM,
resetI,
obtain ⟨⟨a, haM⟩, ha0⟩ := submodule.nonzero_mem_of_bot_lt (M.bot_lt_of_maximal hNF),
replace ha0 : a ≠ 0 := subtype.coe_injective.ne ha0,
let J : ideal A := ideal.span {a},
have hJ0 : J ≠ ⊥ := mt ideal.span_singleton_eq_bot.mp ha0,
have hJM : J ≤ M := ideal.span_le.mpr (set.singleton_subset_iff.mpr haM),
have hM0 : ⊥ < M := M.bot_lt_of_maximal hNF,
-- Then we can find a product of prime (hence maximal) ideals contained in `J`,
-- such that removing element `M` from the product is not contained in `J`.
obtain ⟨Z, hle, hnle⟩ := exists_multiset_prod_cons_le_and_prod_not_le hNF hJ0 hJM,
-- Choose an element `b` of the product that is not in `J`.
obtain ⟨b, hbZ, hbJ⟩ := set_like.not_le_iff_exists.mp hnle,
have hnz_fa : algebra_map A K a ≠ 0 :=
mt ((injective_iff_map_eq_zero _).mp (is_fraction_ring.injective A K) a) ha0,
have hb0 : algebra_map A K b ≠ 0 :=
mt ((injective_iff_map_eq_zero _).mp (is_fraction_ring.injective A K) b)
(λ h, hbJ $ h.symm ▸ J.zero_mem),
-- Then `b a⁻¹ : K` is in `M⁻¹` but not in `1`.
refine ⟨algebra_map A K b * (algebra_map A K a)⁻¹, (mem_inv_iff _).mpr _, _⟩,
{ exact coe_ideal_ne_zero.mpr hM0.ne' },
{ rintro y₀ hy₀,
obtain ⟨y, h_Iy, rfl⟩ := (mem_coe_ideal _).mp hy₀,
rw [mul_comm, ← mul_assoc, ← ring_hom.map_mul],
have h_yb : y * b ∈ J,
{ apply hle,
rw multiset.prod_cons,
exact submodule.smul_mem_smul h_Iy hbZ },
rw ideal.mem_span_singleton' at h_yb,
rcases h_yb with ⟨c, hc⟩,
rw [← hc, ring_hom.map_mul, mul_assoc, mul_inv_cancel hnz_fa, mul_one],
apply coe_mem_one },
{ refine mt (mem_one_iff _).mp _,
rintros ⟨x', h₂_abs⟩,
rw [← div_eq_mul_inv, eq_div_iff_mul_eq hnz_fa, ← ring_hom.map_mul] at h₂_abs,
have := ideal.mem_span_singleton'.mpr ⟨x', is_fraction_ring.injective A K h₂_abs⟩,
contradiction },
end
lemma one_mem_inv_coe_ideal {I : ideal A} (hI : I ≠ ⊥) :
(1 : K) ∈ (I : fractional_ideal A⁰ K)⁻¹ :=
begin
rw mem_inv_iff (coe_ideal_ne_zero.mpr hI),
intros y hy,
rw one_mul,
exact coe_ideal_le_one hy,
assumption
end
lemma mul_inv_cancel_of_le_one [h : is_dedekind_domain A]
{I : ideal A} (hI0 : I ≠ ⊥) (hI : ((I * I⁻¹)⁻¹ : fractional_ideal A⁰ K) ≤ 1) :
(I * I⁻¹ : fractional_ideal A⁰ K) = 1 :=
begin
-- Handle a few trivial cases.
by_cases hI1 : I = ⊤,
{ rw [hI1, coe_ideal_top, one_mul, inv_one] },
by_cases hNF : is_field A,
{ letI := hNF.to_field, rcases hI1 (I.eq_bot_or_top.resolve_left hI0) },
-- We'll show a contradiction with `exists_not_mem_one_of_ne_bot`:
-- `J⁻¹ = (I * I⁻¹)⁻¹` cannot have an element `x ∉ 1`, so it must equal `1`.
obtain ⟨J, hJ⟩ : ∃ (J : ideal A), (J : fractional_ideal A⁰ K) = I * I⁻¹ :=
le_one_iff_exists_coe_ideal.mp mul_one_div_le_one,
by_cases hJ0 : J = ⊥,
{ subst hJ0,
refine absurd _ hI0,
rw [eq_bot_iff, ← coe_ideal_le_coe_ideal K, hJ],
exact coe_ideal_le_self_mul_inv K I,
apply_instance },
by_cases hJ1 : J = ⊤,
{ rw [← hJ, hJ1, coe_ideal_top] },
obtain ⟨x, hx, hx1⟩ : ∃ (x : K),
x ∈ (J : fractional_ideal A⁰ K)⁻¹ ∧ x ∉ (1 : fractional_ideal A⁰ K) :=
exists_not_mem_one_of_ne_bot hNF hJ0 hJ1,
contrapose! hx1 with h_abs,
rw hJ at hx,
exact hI hx,
end
/-- Nonzero integral ideals in a Dedekind domain are invertible.
We will use this to show that nonzero fractional ideals are invertible,
and finally conclude that fractional ideals in a Dedekind domain form a group with zero.
-/
lemma coe_ideal_mul_inv [h : is_dedekind_domain A] (I : ideal A) (hI0 : I ≠ ⊥) :
(I * I⁻¹ : fractional_ideal A⁰ K) = 1 :=
begin
-- We'll show `1 ≤ J⁻¹ = (I * I⁻¹)⁻¹ ≤ 1`.
apply mul_inv_cancel_of_le_one hI0,
by_cases hJ0 : (I * I⁻¹ : fractional_ideal A⁰ K) = 0,
{ rw [hJ0, inv_zero'], exact zero_le _ },
intros x hx,
-- In particular, we'll show all `x ∈ J⁻¹` are integral.
suffices : x ∈ integral_closure A K,
{ rwa [is_integrally_closed.integral_closure_eq_bot, algebra.mem_bot, set.mem_range,
← mem_one_iff] at this;
assumption },
-- For that, we'll find a subalgebra that is f.g. as a module and contains `x`.
-- `A` is a noetherian ring, so we just need to find a subalgebra between `{x}` and `I⁻¹`.
rw mem_integral_closure_iff_mem_fg,
have x_mul_mem : ∀ b ∈ (I⁻¹ : fractional_ideal A⁰ K), x * b ∈ (I⁻¹ : fractional_ideal A⁰ K),
{ intros b hb,
rw mem_inv_iff at ⊢ hx,
swap, { exact coe_ideal_ne_zero.mpr hI0 },
swap, { exact hJ0 },
simp only [mul_assoc, mul_comm b] at ⊢ hx,
intros y hy,
exact hx _ (mul_mem_mul hy hb) },
-- It turns out the subalgebra consisting of all `p(x)` for `p : A[X]` works.
refine ⟨alg_hom.range (polynomial.aeval x : A[X] →ₐ[A] K),
is_noetherian_submodule.mp (is_noetherian I⁻¹) _ (λ y hy, _),
⟨polynomial.X, polynomial.aeval_X x⟩⟩,
obtain ⟨p, rfl⟩ := (alg_hom.mem_range _).mp hy,
rw polynomial.aeval_eq_sum_range,
refine submodule.sum_mem _ (λ i hi, submodule.smul_mem _ _ _),
clear hi,
induction i with i ih,
{ rw pow_zero, exact one_mem_inv_coe_ideal hI0 },
{ show x ^ i.succ ∈ (I⁻¹ : fractional_ideal A⁰ K),
rw pow_succ, exact x_mul_mem _ ih },
end
/-- Nonzero fractional ideals in a Dedekind domain are units.
This is also available as `_root_.mul_inv_cancel`, using the
`comm_group_with_zero` instance defined below.
-/
protected theorem mul_inv_cancel [is_dedekind_domain A]
{I : fractional_ideal A⁰ K} (hne : I ≠ 0) : I * I⁻¹ = 1 :=
begin
obtain ⟨a, J, ha, hJ⟩ :
∃ (a : A) (aI : ideal A), a ≠ 0 ∧ I = span_singleton A⁰ (algebra_map _ _ a)⁻¹ * aI :=
exists_eq_span_singleton_mul I,
suffices h₂ : I * (span_singleton A⁰ (algebra_map _ _ a) * J⁻¹) = 1,
{ rw mul_inv_cancel_iff,
exact ⟨span_singleton A⁰ (algebra_map _ _ a) * J⁻¹, h₂⟩ },
subst hJ,
rw [mul_assoc, mul_left_comm (J : fractional_ideal A⁰ K), coe_ideal_mul_inv, mul_one,
span_singleton_mul_span_singleton, inv_mul_cancel, span_singleton_one],
{ exact mt ((injective_iff_map_eq_zero (algebra_map A K)).mp
(is_fraction_ring.injective A K) _) ha },
{ exact coe_ideal_ne_zero.mp (right_ne_zero_of_mul hne) }
end
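/-- In a Dedekind domain, multiplication by a nonzero fractional ideal on the right
is order-reflecting: `I * J ≤ I' * J ↔ I ≤ I'`. -/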
lemma mul_right_le_iff [is_dedekind_domain A] {J : fractional_ideal A⁰ K}
(hJ : J ≠ 0) : ∀ {I I'}, I * J ≤ I' * J ↔ I ≤ I' :=
begin
intros I I',
split,
{ intros h, convert mul_right_mono J⁻¹ h;
rw [mul_assoc, fractional_ideal.mul_inv_cancel hJ, mul_one] },
{ exact λ h, mul_right_mono J h }
end
lemma mul_left_le_iff [is_dedekind_domain A] {J : fractional_ideal A⁰ K}
(hJ : J ≠ 0) {I I'} : J * I ≤ J * I' ↔ I ≤ I' :=
by convert mul_right_le_iff hJ using 1; simp only [mul_comm]
lemma mul_right_strict_mono [is_dedekind_domain A] {I : fractional_ideal A⁰ K}
(hI : I ≠ 0) : strict_mono (* I) :=
strict_mono_of_le_iff_le (λ _ _, (mul_right_le_iff hI).symm)
lemma mul_left_strict_mono [is_dedekind_domain A] {I : fractional_ideal A⁰ K}
(hI : I ≠ 0) : strict_mono ((*) I) :=
strict_mono_of_le_iff_le (λ _ _, (mul_left_le_iff hI).symm)
/--
This is also available as `_root_.div_eq_mul_inv`, using the
`comm_group_with_zero` instance defined below.
-/
protected lemma div_eq_mul_inv [is_dedekind_domain A] (I J : fractional_ideal A⁰ K) :
I / J = I * J⁻¹ :=
begin
by_cases hJ : J = 0,
{ rw [hJ, div_zero, inv_zero', mul_zero] },
refine le_antisymm ((mul_right_le_iff hJ).mp _) ((le_div_iff_mul_le hJ).mpr _),
{ rw [mul_assoc, mul_comm J⁻¹, fractional_ideal.mul_inv_cancel hJ, mul_one, mul_le],
intros x hx y hy,
rw [mem_div_iff_of_nonzero hJ] at hx,
exact hx y hy },
rw [mul_assoc, mul_comm J⁻¹, fractional_ideal.mul_inv_cancel hJ, mul_one],
exact le_refl I
end
end fractional_ideal
/-- `is_dedekind_domain` and `is_dedekind_domain_inv` are equivalent ways
to express that an integral domain is a Dedekind domain. -/
theorem is_dedekind_domain_iff_is_dedekind_domain_inv :
is_dedekind_domain A ↔ is_dedekind_domain_inv A :=
⟨λ h I hI, by exactI fractional_ideal.mul_inv_cancel hI, λ h, h.is_dedekind_domain⟩
end inverse
section is_dedekind_domain
variables {R A} [is_dedekind_domain A] [algebra A K] [is_fraction_ring A K]
open fractional_ideal
open ideal
noncomputable instance fractional_ideal.semifield :
semifield (fractional_ideal A⁰ K) :=
{ inv := λ I, I⁻¹,
inv_zero := inv_zero' _,
div := (/),
div_eq_mul_inv := fractional_ideal.div_eq_mul_inv,
mul_inv_cancel := λ I, fractional_ideal.mul_inv_cancel,
.. fractional_ideal.comm_semiring, .. coe_ideal_injective.nontrivial }
/-- Fractional ideals have cancellative multiplication in a Dedekind domain.
Although this instance is a direct consequence of the instance
`fractional_ideal.comm_group_with_zero`, we define this instance to provide
a computable alternative.
-/
instance fractional_ideal.cancel_comm_monoid_with_zero :
cancel_comm_monoid_with_zero (fractional_ideal A⁰ K) :=
{ .. fractional_ideal.comm_semiring, -- Project out the computable fields first.
.. (by apply_instance : cancel_comm_monoid_with_zero (fractional_ideal A⁰ K)) }
instance ideal.cancel_comm_monoid_with_zero :
cancel_comm_monoid_with_zero (ideal A) :=
{ .. ideal.idem_comm_semiring,
.. function.injective.cancel_comm_monoid_with_zero (coe_ideal_hom A⁰ (fraction_ring A))
coe_ideal_injective (ring_hom.map_zero _) (ring_hom.map_one _) (ring_hom.map_mul _)
(ring_hom.map_pow _) }
instance ideal.is_domain :
is_domain (ideal A) :=
{ .. (infer_instance : is_cancel_mul_zero _), .. ideal.nontrivial }
/-- For ideals in a Dedekind domain, to divide is to contain. -/
lemma ideal.dvd_iff_le {I J : ideal A} : (I ∣ J) ↔ J ≤ I :=
⟨ideal.le_of_dvd,
λ h, begin
by_cases hI : I = ⊥,
{ have hJ : J = ⊥, { rwa [hI, ← eq_bot_iff] at h },
rw [hI, hJ] },
have hI' : (I : fractional_ideal A⁰ (fraction_ring A)) ≠ 0 := coe_ideal_ne_zero.mpr hI,
have : (I : fractional_ideal A⁰ (fraction_ring A))⁻¹ * J ≤ 1 := le_trans
(mul_left_mono (↑I)⁻¹ ((coe_ideal_le_coe_ideal _).mpr h))
(le_of_eq (inv_mul_cancel hI')),
obtain ⟨H, hH⟩ := le_one_iff_exists_coe_ideal.mp this,
use H,
refine coe_ideal_injective
(show (J : fractional_ideal A⁰ (fraction_ring A)) = ↑(I * H), from _),
rw [coe_ideal_mul, hH, ← mul_assoc, mul_inv_cancel hI', one_mul]
end⟩
lemma ideal.dvd_not_unit_iff_lt {I J : ideal A} :
dvd_not_unit I J ↔ J < I :=
⟨λ ⟨hI, H, hunit, hmul⟩, lt_of_le_of_ne (ideal.dvd_iff_le.mp ⟨H, hmul⟩)
(mt (λ h, have H = 1, from mul_left_cancel₀ hI (by rw [← hmul, h, mul_one]),
show is_unit H, from this.symm ▸ is_unit_one) hunit),
λ h, dvd_not_unit_of_dvd_of_not_dvd (ideal.dvd_iff_le.mpr (le_of_lt h))
(mt ideal.dvd_iff_le.mp (not_le_of_lt h))⟩
instance : wf_dvd_monoid (ideal A) :=
{ well_founded_dvd_not_unit :=
have well_founded ((>) : ideal A → ideal A → Prop) :=
is_noetherian_iff_well_founded.mp
(is_noetherian_ring_iff.mp is_dedekind_domain.is_noetherian_ring),
by { convert this, ext, rw ideal.dvd_not_unit_iff_lt } }
instance ideal.unique_factorization_monoid :
unique_factorization_monoid (ideal A) :=
{ irreducible_iff_prime := λ P,
⟨λ hirr, ⟨hirr.ne_zero, hirr.not_unit, λ I J, begin
have : P.is_maximal,
{ refine ⟨⟨mt ideal.is_unit_iff.mpr hirr.not_unit, _⟩⟩,
intros J hJ,
obtain ⟨J_ne, H, hunit, P_eq⟩ := ideal.dvd_not_unit_iff_lt.mpr hJ,
exact ideal.is_unit_iff.mp ((hirr.is_unit_or_is_unit P_eq).resolve_right hunit) },
rw [ideal.dvd_iff_le, ideal.dvd_iff_le, ideal.dvd_iff_le,
set_like.le_def, set_like.le_def, set_like.le_def],
contrapose!,
rintros ⟨⟨x, x_mem, x_not_mem⟩, ⟨y, y_mem, y_not_mem⟩⟩,
exact ⟨x * y, ideal.mul_mem_mul x_mem y_mem,
mt this.is_prime.mem_or_mem (not_or x_not_mem y_not_mem)⟩,
end⟩,
prime.irreducible⟩,
.. ideal.wf_dvd_monoid }
instance ideal.normalization_monoid : normalization_monoid (ideal A) :=
normalization_monoid_of_unique_units
@[simp] lemma ideal.dvd_span_singleton {I : ideal A} {x : A} :
I ∣ ideal.span {x} ↔ x ∈ I :=
ideal.dvd_iff_le.trans (ideal.span_le.trans set.singleton_subset_iff)
lemma ideal.is_prime_of_prime {P : ideal A} (h : prime P) : is_prime P :=
begin
refine ⟨_, λ x y hxy, _⟩,
{ unfreezingI { rintro rfl },
rw ← ideal.one_eq_top at h,
exact h.not_unit is_unit_one },
{ simp only [← ideal.dvd_span_singleton, ← ideal.span_singleton_mul_span_singleton] at ⊢ hxy,
exact h.dvd_or_dvd hxy }
end
theorem ideal.prime_of_is_prime {P : ideal A} (hP : P ≠ ⊥) (h : is_prime P) : prime P :=
begin
refine ⟨hP, mt ideal.is_unit_iff.mp h.ne_top, λ I J hIJ, _⟩,
simpa only [ideal.dvd_iff_le] using (h.mul_le.mp (ideal.le_of_dvd hIJ)),
end
/-- In a Dedekind domain, the (nonzero) prime elements of the monoid with zero `ideal A`
are exactly the prime ideals. -/
theorem ideal.prime_iff_is_prime {P : ideal A} (hP : P ≠ ⊥) :
prime P ↔ is_prime P :=
⟨ideal.is_prime_of_prime, ideal.prime_of_is_prime hP⟩
/-- In a Dedekind domain, the prime ideals are the zero ideal together with the prime elements
of the monoid with zero `ideal A`. -/
theorem ideal.is_prime_iff_bot_or_prime {P : ideal A} :
is_prime P ↔ P = ⊥ ∨ prime P :=
⟨λ hp, (eq_or_ne P ⊥).imp_right $ λ hp0, (ideal.prime_of_is_prime hp0 hp),
λ hp, hp.elim (λ h, h.symm ▸ ideal.bot_prime) ideal.is_prime_of_prime⟩
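/-- The powers of a nonzero proper ideal in a Dedekind domain form a strictly
descending chain. -/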
lemma ideal.strict_anti_pow (I : ideal A) (hI0 : I ≠ ⊥) (hI1 : I ≠ ⊤) :
strict_anti ((^) I : ℕ → ideal A) :=
strict_anti_nat_of_succ_lt $ λ e, ideal.dvd_not_unit_iff_lt.mp
⟨pow_ne_zero _ hI0, I, mt is_unit_iff.mp hI1, pow_succ' I e⟩
lemma ideal.pow_lt_self (I : ideal A) (hI0 : I ≠ ⊥) (hI1 : I ≠ ⊤) (e : ℕ) (he : 2 ≤ e) : I^e < I :=
by convert I.strict_anti_pow hI0 hI1 he; rw pow_one
lemma ideal.exists_mem_pow_not_mem_pow_succ (I : ideal A) (hI0 : I ≠ ⊥) (hI1 : I ≠ ⊤) (e : ℕ) :
∃ x ∈ I^e, x ∉ I^(e+1) :=
set_like.exists_of_lt (I.strict_anti_pow hI0 hI1 e.lt_succ_self)
open unique_factorization_monoid
lemma ideal.eq_prime_pow_of_succ_lt_of_le {P I : ideal A} [P_prime : P.is_prime] (hP : P ≠ ⊥)
{i : ℕ} (hlt : P ^ (i + 1) < I) (hle : I ≤ P ^ i) :
I = P ^ i :=
begin
letI := classical.dec_eq (ideal A),
refine le_antisymm hle _,
have P_prime' := ideal.prime_of_is_prime hP P_prime,
have : I ≠ ⊥ := (lt_of_le_of_lt bot_le hlt).ne',
have := pow_ne_zero i hP,
have := pow_ne_zero (i + 1) hP,
rw [← ideal.dvd_not_unit_iff_lt, dvd_not_unit_iff_normalized_factors_lt_normalized_factors,
normalized_factors_pow, normalized_factors_irreducible P_prime'.irreducible,
multiset.nsmul_singleton, multiset.lt_replicate_succ]
at hlt,
rw [← ideal.dvd_iff_le, dvd_iff_normalized_factors_le_normalized_factors, normalized_factors_pow,
normalized_factors_irreducible P_prime'.irreducible, multiset.nsmul_singleton],
all_goals { assumption }
end
lemma ideal.pow_succ_lt_pow {P : ideal A} [P_prime : P.is_prime] (hP : P ≠ ⊥)
(i : ℕ) :
P ^ (i + 1) < P ^ i :=
lt_of_le_of_ne (ideal.pow_le_pow (nat.le_succ _))
(mt (pow_eq_pow_iff hP (mt ideal.is_unit_iff.mp P_prime.ne_top)).mp i.succ_ne_self)
lemma associates.le_singleton_iff (x : A) (n : ℕ) (I : ideal A) :
associates.mk I^n ≤ associates.mk (ideal.span {x}) ↔ x ∈ I^n :=
begin
rw [← associates.dvd_eq_le, ← associates.mk_pow, associates.mk_dvd_mk, ideal.dvd_span_singleton],
end
open fractional_ideal
variables {A K}
/-- Strengthening of `is_localization.exist_integer_multiples`:
Let `J ≠ ⊤` be an ideal in a Dedekind domain `A`, and `f ≠ 0` a finite collection
of elements of `K = Frac(A)`, then we can multiply the elements of `f` by some `a : K`
to find a collection of elements of `A` that is not completely contained in `J`. -/
lemma ideal.exist_integer_multiples_not_mem
{J : ideal A} (hJ : J ≠ ⊤) {ι : Type*} (s : finset ι) (f : ι → K)
{j} (hjs : j ∈ s) (hjf : f j ≠ 0) :
∃ a : K, (∀ i ∈ s, is_localization.is_integer A (a * f i)) ∧
∃ i ∈ s, (a * f i) ∉ (J : fractional_ideal A⁰ K) :=
begin
-- Consider the fractional ideal `I` spanned by the `f`s.
let I : fractional_ideal A⁰ K := span_finset A s f,
have hI0 : I ≠ 0 := span_finset_ne_zero.mpr ⟨j, hjs, hjf⟩,
-- We claim the multiplier `a` we're looking for is in `I⁻¹ \ (J / I)`.
suffices : ↑J / I < I⁻¹,
{ obtain ⟨_, a, hI, hpI⟩ := set_like.lt_iff_le_and_exists.mp this,
rw mem_inv_iff hI0 at hI,
refine ⟨a, λ i hi, _, _⟩,
-- By definition, `a ∈ I⁻¹` multiplies elements of `I` into elements of `1`,
-- in other words, `a * f i` is an integer.
{ exact (mem_one_iff _).mp (hI (f i)
(submodule.subset_span (set.mem_image_of_mem f hi))) },
{ contrapose! hpI,
-- And if all `a`-multiples of `I` are an element of `J`,
-- then `a` is actually an element of `J / I`, contradiction.
refine (mem_div_iff_of_nonzero hI0).mpr (λ y hy, submodule.span_induction hy _ _ _ _),
{ rintros _ ⟨i, hi, rfl⟩, exact hpI i hi },
{ rw mul_zero, exact submodule.zero_mem _ },
{ intros x y hx hy, rw mul_add, exact submodule.add_mem _ hx hy },
{ intros b x hx, rw mul_smul_comm, exact submodule.smul_mem _ b hx } } },
-- To show the inclusion of `J / I` into `I⁻¹ = 1 / I`, note that `J < I`.
calc ↑J / I = ↑J * I⁻¹ : div_eq_mul_inv ↑J I
... < 1 * I⁻¹ : mul_right_strict_mono (inv_ne_zero hI0) _
... = I⁻¹ : one_mul _,
{ rw [← coe_ideal_top],
-- And multiplying by `I⁻¹` is indeed strictly monotone.
exact strict_mono_of_le_iff_le (λ _ _, (coe_ideal_le_coe_ideal K).symm)
(lt_top_iff_ne_top.mpr hJ) },
end
section gcd
namespace ideal
/-! ### GCD and LCM of ideals in a Dedekind domain
We show that the gcd of two ideals in a Dedekind domain is just their supremum,
and the lcm is their infimum, and use this to instantiate `normalized_gcd_monoid (ideal A)`.
-/
@[simp] lemma sup_mul_inf (I J : ideal A) : (I ⊔ J) * (I ⊓ J) = I * J :=
begin
letI := classical.dec_eq (ideal A),
letI := classical.dec_eq (associates (ideal A)),
letI := unique_factorization_monoid.to_normalized_gcd_monoid (ideal A),
have hgcd : gcd I J = I ⊔ J,
{ rw [gcd_eq_normalize _ _, normalize_eq],
{ rw [dvd_iff_le, sup_le_iff, ← dvd_iff_le, ← dvd_iff_le],
exact ⟨gcd_dvd_left _ _, gcd_dvd_right _ _⟩ },
{ rw [dvd_gcd_iff, dvd_iff_le, dvd_iff_le],
simp } },
have hlcm : lcm I J = I ⊓ J,
{ rw [lcm_eq_normalize _ _, normalize_eq],
{ rw [lcm_dvd_iff, dvd_iff_le, dvd_iff_le],
simp },
{ rw [dvd_iff_le, le_inf_iff, ← dvd_iff_le, ← dvd_iff_le],
exact ⟨dvd_lcm_left _ _, dvd_lcm_right _ _⟩ } },
rw [← hgcd, ← hlcm, associated_iff_eq.mp (gcd_mul_lcm _ _)],
apply_instance
end
/-- Ideals in a Dedekind domain have gcd and lcm operators that (trivially) are compatible with
the normalization operator. -/
instance : normalized_gcd_monoid (ideal A) :=
{ gcd := (⊔),
gcd_dvd_left := λ _ _, by simpa only [dvd_iff_le] using le_sup_left,
gcd_dvd_right := λ _ _, by simpa only [dvd_iff_le] using le_sup_right,
dvd_gcd := λ _ _ _, by simpa only [dvd_iff_le] using sup_le,
lcm := (⊓),
lcm_zero_left := λ _, by simp only [zero_eq_bot, bot_inf_eq],
lcm_zero_right := λ _, by simp only [zero_eq_bot, inf_bot_eq],
gcd_mul_lcm := λ _ _, by rw [associated_iff_eq, sup_mul_inf],
normalize_gcd := λ _ _, normalize_eq _,
normalize_lcm := λ _ _, normalize_eq _,
.. ideal.normalization_monoid }
-- In fact, any lawful gcd and lcm would equal sup and inf respectively.
@[simp] lemma gcd_eq_sup (I J : ideal A) : gcd I J = I ⊔ J := rfl
@[simp]
lemma lcm_eq_inf (I J : ideal A) : lcm I J = I ⊓ J := rfl
lemma inf_eq_mul_of_coprime {I J : ideal A} (coprime : I ⊔ J = ⊤) :
I ⊓ J = I * J :=
by rw [← associated_iff_eq.mp (gcd_mul_lcm I J), lcm_eq_inf I J, gcd_eq_sup, coprime, top_mul]
end ideal
end gcd
end is_dedekind_domain
section is_dedekind_domain
variables {T : Type*} [comm_ring T] [is_domain T] [is_dedekind_domain T] {I J : ideal T}
open_locale classical
open multiset unique_factorization_monoid ideal
lemma prod_normalized_factors_eq_self (hI : I ≠ ⊥) : (normalized_factors I).prod = I :=
associated_iff_eq.1 (normalized_factors_prod hI)
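/-- Containment of ideals reverses factor multiplicities: if `I ≤ J`, then each `K`
occurs in the normalized factors of `J` at most as often as in those of `I`. -/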
lemma count_le_of_ideal_ge {I J : ideal T} (h : I ≤ J) (hI : I ≠ ⊥) (K : ideal T) :
count K (normalized_factors J) ≤ count K (normalized_factors I) :=
le_iff_count.1 ((dvd_iff_normalized_factors_le_normalized_factors (ne_bot_of_le_ne_bot hI h) hI).1
(dvd_iff_le.2 h)) _
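/-- The sup of two nonzero ideals is the product of the multiset intersection of
their normalized factors. -/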
lemma sup_eq_prod_inf_factors (hI : I ≠ ⊥) (hJ : J ≠ ⊥) :
I ⊔ J = (normalized_factors I ∩ normalized_factors J).prod :=
begin
have H : normalized_factors (normalized_factors I ∩ normalized_factors J).prod =
normalized_factors I ∩ normalized_factors J,
{ apply normalized_factors_prod_of_prime,
intros p hp,
rw mem_inter at hp,
exact prime_of_normalized_factor p hp.left },
have := (multiset.prod_ne_zero_of_prime (normalized_factors I ∩ normalized_factors J)
(λ _ h, prime_of_normalized_factor _ (multiset.mem_inter.1 h).1)),
apply le_antisymm,
{ rw [sup_le_iff, ← dvd_iff_le, ← dvd_iff_le],
split,
{ rw [dvd_iff_normalized_factors_le_normalized_factors this hI, H],
exact inf_le_left },
{ rw [dvd_iff_normalized_factors_le_normalized_factors this hJ, H],
exact inf_le_right } },
{ rw [← dvd_iff_le, dvd_iff_normalized_factors_le_normalized_factors,
normalized_factors_prod_of_prime, le_iff_count],
{ intro a,
rw multiset.count_inter,
exact le_min (count_le_of_ideal_ge le_sup_left hI a)
(count_le_of_ideal_ge le_sup_right hJ a) },
{ intros p hp,
rw mem_inter at hp,
exact prime_of_normalized_factor p hp.left },
{ exact ne_bot_of_le_ne_bot hI le_sup_left },
{ exact this } },
end
lemma irreducible_pow_sup (hI : I ≠ ⊥) (hJ : irreducible J) (n : ℕ) :
J^n ⊔ I = J^(min ((normalized_factors I).count J) n) :=
by rw [sup_eq_prod_inf_factors (pow_ne_zero n hJ.ne_zero) hI, min_comm,
normalized_factors_of_irreducible_pow hJ, normalize_eq J, replicate_inter, prod_replicate]
lemma irreducible_pow_sup_of_le (hJ : irreducible J) (n : ℕ)
(hn : ↑n ≤ multiplicity J I) : J^n ⊔ I = J^n :=
begin
by_cases hI : I = ⊥,
{ simp [*] at *, },
rw [irreducible_pow_sup hI hJ, min_eq_right],
rwa [multiplicity_eq_count_normalized_factors hJ hI, part_enat.coe_le_coe, normalize_eq J] at hn
end
lemma irreducible_pow_sup_of_ge (hI : I ≠ ⊥) (hJ : irreducible J) (n : ℕ)
(hn : multiplicity J I ≤ n) : J^n ⊔ I = J ^ (multiplicity J I).get (part_enat.dom_of_le_coe hn) :=
begin
rw [irreducible_pow_sup hI hJ, min_eq_left],
congr,
{ rw [← part_enat.coe_inj, part_enat.coe_get, multiplicity_eq_count_normalized_factors hJ hI,
normalize_eq J] },
{ rwa [multiplicity_eq_count_normalized_factors hJ hI, part_enat.coe_le_coe, normalize_eq J]
at hn }
end
end is_dedekind_domain
/-!
### Height one spectrum of a Dedekind domain
If `R` is a Dedekind domain of Krull dimension 1, the maximal ideals of `R` are exactly its nonzero
prime ideals.
We define `height_one_spectrum` and provide lemmas to recover the facts that prime ideals of height
one are prime and irreducible.
-/
namespace is_dedekind_domain
variables [is_domain R] [is_dedekind_domain R]
/-- The height one prime spectrum of a Dedekind domain `R` is the type of nonzero prime ideals of
`R`. Note that this equals the maximal spectrum if `R` has Krull dimension 1. -/
@[ext, nolint has_nonempty_instance unused_arguments]
structure height_one_spectrum :=
(as_ideal : ideal R)
(is_prime : as_ideal.is_prime)
(ne_bot : as_ideal ≠ ⊥)
attribute [instance] height_one_spectrum.is_prime
variables (v : height_one_spectrum R) {R}
namespace height_one_spectrum
instance is_maximal : v.as_ideal.is_maximal := dimension_le_one v.as_ideal v.ne_bot v.is_prime
lemma prime : prime v.as_ideal := ideal.prime_of_is_prime v.ne_bot v.is_prime
lemma irreducible : irreducible v.as_ideal :=
unique_factorization_monoid.irreducible_iff_prime.mpr v.prime
lemma associates_irreducible : _root_.irreducible $ associates.mk v.as_ideal :=
(associates.irreducible_mk _).mpr v.irreducible
/-- An equivalence between the height one and maximal spectra for rings of Krull dimension 1. -/
def equiv_maximal_spectrum (hR : ¬is_field R) : height_one_spectrum R ≃ maximal_spectrum R :=
{ to_fun := λ v, ⟨v.as_ideal, dimension_le_one v.as_ideal v.ne_bot v.is_prime⟩,
inv_fun := λ v,
⟨v.as_ideal, v.is_maximal.is_prime, ring.ne_bot_of_is_maximal_of_not_is_field v.is_maximal hR⟩,
left_inv := λ ⟨_, _, _⟩, rfl,
right_inv := λ ⟨_, _⟩, rfl }
variables (R K)
/-- A Dedekind domain is equal to the intersection of its localizations at all its height one
non-zero prime ideals viewed as subalgebras of its field of fractions. -/
theorem infi_localization_eq_bot [algebra R K] [hK : is_fraction_ring R K] :
(⨅ v : height_one_spectrum R,
localization.subalgebra.of_field K _ v.as_ideal.prime_compl_le_non_zero_divisors) = ⊥ :=
begin
ext x,
rw [algebra.mem_infi],
split,
by_cases hR : is_field R,
{ rcases function.bijective_iff_has_inverse.mp
(is_field.localization_map_bijective (flip non_zero_divisors.ne_zero rfl : 0 ∉ R⁰) hR)
with ⟨algebra_map_inv, _, algebra_map_right_inv⟩,
exact λ _, algebra.mem_bot.mpr ⟨algebra_map_inv x, algebra_map_right_inv x⟩,
exact hK },
all_goals { rw [← maximal_spectrum.infi_localization_eq_bot, algebra.mem_infi] },
{ exact λ hx ⟨v, hv⟩, hx ((equiv_maximal_spectrum hR).symm ⟨v, hv⟩) },
{ exact λ hx ⟨v, hv, hbot⟩, hx ⟨v, dimension_le_one v hbot hv⟩ }
end
end height_one_spectrum
end is_dedekind_domain
section
open ideal
variables {R} {A} [is_dedekind_domain A] {I : ideal R} {J : ideal A}
/-- The map from ideals of `R` dividing `I` to the ideals of `A` dividing `J` induced by
a homomorphism `f : R/I →+* A/J` -/
@[simps]
def ideal_factors_fun_of_quot_hom {f : R ⧸ I →+* A ⧸ J} (hf : function.surjective f) :
{p : ideal R | p ∣ I} →o {p : ideal A | p ∣ J} :=
{ to_fun := λ X, ⟨comap J^.quotient.mk (map f (map I^.quotient.mk X)),
begin
have : (J^.quotient.mk).ker ≤ comap J^.quotient.mk (map f (map I^.quotient.mk X)),
{ exact ker_le_comap J^.quotient.mk },
rw mk_ker at this,
exact dvd_iff_le.mpr this,
end ⟩,
monotone' :=
begin
rintros ⟨X, hX⟩ ⟨Y, hY⟩ h,
rw [← subtype.coe_le_coe, subtype.coe_mk, subtype.coe_mk] at h ⊢,
rw [subtype.coe_mk, comap_le_comap_iff_of_surjective J^.quotient.mk quotient.mk_surjective,
map_le_iff_le_comap, subtype.coe_mk, comap_map_of_surjective _ hf (map I^.quotient.mk Y)],
suffices : map I^.quotient.mk X ≤ map I^.quotient.mk Y,
{ exact le_sup_of_le_left this },
rwa [map_le_iff_le_comap, comap_map_of_surjective I^.quotient.mk quotient.mk_surjective,
← ring_hom.ker_eq_comap_bot, mk_ker, sup_eq_left.mpr $ le_of_dvd hY],
end }
@[simp]
lemma ideal_factors_fun_of_quot_hom_id :
ideal_factors_fun_of_quot_hom (ring_hom.id (A ⧸ J)).is_surjective = order_hom.id :=
order_hom.ext _ _ (funext $ λ X, by simp only [ideal_factors_fun_of_quot_hom, map_id,
order_hom.coe_fun_mk, order_hom.id_coe, id.def, comap_map_of_surjective J^.quotient.mk
quotient.mk_surjective, ← ring_hom.ker_eq_comap_bot J^.quotient.mk, mk_ker, sup_eq_left.mpr
(dvd_iff_le.mp X.prop), subtype.coe_eta] )
variables {B : Type*} [comm_ring B] [is_domain B] [is_dedekind_domain B] {L : ideal B}
lemma ideal_factors_fun_of_quot_hom_comp {f : R ⧸ I →+* A ⧸ J} {g : A ⧸ J →+* B ⧸ L}
(hf : function.surjective f) (hg : function.surjective g) :
(ideal_factors_fun_of_quot_hom hg).comp (ideal_factors_fun_of_quot_hom hf)
= ideal_factors_fun_of_quot_hom (show function.surjective (g.comp f), from hg.comp hf) :=
begin
refine order_hom.ext _ _ (funext $ λ x, _),
rw [ideal_factors_fun_of_quot_hom, ideal_factors_fun_of_quot_hom, order_hom.comp_coe,
order_hom.coe_fun_mk, order_hom.coe_fun_mk, function.comp_app,
ideal_factors_fun_of_quot_hom, order_hom.coe_fun_mk, subtype.mk_eq_mk, subtype.coe_mk,
map_comap_of_surjective J^.quotient.mk quotient.mk_surjective, map_map],
end
variables [is_domain R] [is_dedekind_domain R] (f : R ⧸ I ≃+* A ⧸ J)
/-- The bijection between ideals of `R` dividing `I` and the ideals of `A` dividing `J` induced by
an isomorphism `f : R/I ≅ A/J`. -/
@[simps]
def ideal_factors_equiv_of_quot_equiv : {p : ideal R | p ∣ I} ≃o {p : ideal A | p ∣ J} :=
order_iso.of_hom_inv
(ideal_factors_fun_of_quot_hom (show function.surjective
(f : R ⧸I →+* A ⧸ J), from f.surjective))
(ideal_factors_fun_of_quot_hom (show function.surjective
(f.symm : A ⧸J →+* R ⧸ I), from f.symm.surjective))
(by simp only [← ideal_factors_fun_of_quot_hom_id, order_hom.coe_eq, order_hom.coe_eq,
ideal_factors_fun_of_quot_hom_comp, ← ring_equiv.to_ring_hom_eq_coe,
← ring_equiv.to_ring_hom_eq_coe, ← ring_equiv.to_ring_hom_trans, ring_equiv.symm_trans_self,
ring_equiv.to_ring_hom_refl])
(by simp only [← ideal_factors_fun_of_quot_hom_id, order_hom.coe_eq, order_hom.coe_eq,
ideal_factors_fun_of_quot_hom_comp, ← ring_equiv.to_ring_hom_eq_coe,
← ring_equiv.to_ring_hom_eq_coe, ← ring_equiv.to_ring_hom_trans, ring_equiv.self_trans_symm,
ring_equiv.to_ring_hom_refl])
lemma ideal_factors_equiv_of_quot_equiv_symm :
(ideal_factors_equiv_of_quot_equiv f).symm = ideal_factors_equiv_of_quot_equiv f.symm := rfl
lemma ideal_factors_equiv_of_quot_equiv_is_dvd_iso {L M : ideal R} (hL : L ∣ I) (hM : M ∣ I) :
(ideal_factors_equiv_of_quot_equiv f ⟨L, hL⟩ : ideal A) ∣
ideal_factors_equiv_of_quot_equiv f ⟨M, hM⟩ ↔ L ∣ M :=
begin
suffices : ideal_factors_equiv_of_quot_equiv f ⟨M, hM⟩ ≤
ideal_factors_equiv_of_quot_equiv f ⟨L, hL⟩ ↔ (⟨M, hM⟩ : {p : ideal R | p ∣ I}) ≤ ⟨L, hL⟩,
{ rw [dvd_iff_le, dvd_iff_le, subtype.coe_le_coe, this, subtype.mk_le_mk] },
exact (ideal_factors_equiv_of_quot_equiv f).le_iff_le,
end
open unique_factorization_monoid
variables [decidable_eq (ideal R)] [decidable_eq (ideal A)]
lemma ideal_factors_equiv_of_quot_equiv_mem_normalized_factors_of_mem_normalized_factors
(hJ : J ≠ ⊥) {L : ideal R} (hL : L ∈ normalized_factors I) :
↑(ideal_factors_equiv_of_quot_equiv f
⟨L, dvd_of_mem_normalized_factors hL⟩) ∈ normalized_factors J :=
begin
by_cases hI : I = ⊥,
{ exfalso,
rw [hI, bot_eq_zero, normalized_factors_zero, ← multiset.empty_eq_zero] at hL,
exact hL, },
{ apply mem_normalized_factors_factor_dvd_iso_of_mem_normalized_factors hI hJ hL _,
rintros ⟨l, hl⟩ ⟨l', hl'⟩,
rw [subtype.coe_mk, subtype.coe_mk],
apply ideal_factors_equiv_of_quot_equiv_is_dvd_iso f }
end
/-- The bijection between the sets of normalized factors of I and J induced by a ring
isomorphism `f : R/I ≅ A/J`. -/
@[simps apply]
def normalized_factors_equiv_of_quot_equiv (hI : I ≠ ⊥) (hJ : J ≠ ⊥) :
{L : ideal R | L ∈ normalized_factors I } ≃ {M : ideal A | M ∈ normalized_factors J } :=
{ to_fun := λ j, ⟨ideal_factors_equiv_of_quot_equiv f ⟨↑j, dvd_of_mem_normalized_factors j.prop⟩,
ideal_factors_equiv_of_quot_equiv_mem_normalized_factors_of_mem_normalized_factors f hJ j.prop⟩,
inv_fun := λ j, ⟨(ideal_factors_equiv_of_quot_equiv f).symm
⟨↑j, dvd_of_mem_normalized_factors j.prop⟩, by { rw ideal_factors_equiv_of_quot_equiv_symm,
exact ideal_factors_equiv_of_quot_equiv_mem_normalized_factors_of_mem_normalized_factors
f.symm hI j.prop} ⟩,
left_inv := λ ⟨j, hj⟩, by simp,
right_inv := λ ⟨j, hj⟩, by simp }
@[simp]
lemma normalized_factors_equiv_of_quot_equiv_symm (hI : I ≠ ⊥) (hJ : J ≠ ⊥) :
(normalized_factors_equiv_of_quot_equiv f hI hJ).symm =
normalized_factors_equiv_of_quot_equiv f.symm hJ hI :=
rfl
variable [decidable_rel ((∣) : ideal R → ideal R → Prop)]
variable [decidable_rel ((∣) : ideal A → ideal A → Prop)]
/-- The map `normalized_factors_equiv_of_quot_equiv` preserves multiplicities. -/
lemma normalized_factors_equiv_of_quot_equiv_multiplicity_eq_multiplicity (hI : I ≠ ⊥) (hJ : J ≠ ⊥)
(L : ideal R) (hL : L ∈ normalized_factors I) :
multiplicity ↑(normalized_factors_equiv_of_quot_equiv f hI hJ ⟨L, hL⟩) J = multiplicity L I :=
begin
rw [normalized_factors_equiv_of_quot_equiv, equiv.coe_fn_mk, subtype.coe_mk],
exact multiplicity_factor_dvd_iso_eq_multiplicity_of_mem_normalized_factor hI hJ hL
(λ ⟨l, hl⟩ ⟨l', hl'⟩, ideal_factors_equiv_of_quot_equiv_is_dvd_iso f hl hl'),
end
end
section chinese_remainder
open ideal unique_factorization_monoid
open_locale big_operators
variables {R}
lemma ring.dimension_le_one.prime_le_prime_iff_eq (h : ring.dimension_le_one R)
{P Q : ideal R} [hP : P.is_prime] [hQ : Q.is_prime] (hP0 : P ≠ ⊥) :
P ≤ Q ↔ P = Q :=
⟨(h P hP0 hP).eq_of_le hQ.ne_top, eq.le⟩
lemma ideal.coprime_of_no_prime_ge {I J : ideal R} (h : ∀ P, I ≤ P → J ≤ P → ¬ is_prime P) :
I ⊔ J = ⊤ :=
begin
by_contra hIJ,
obtain ⟨P, hP, hIJ⟩ := ideal.exists_le_maximal _ hIJ,
exact h P (le_trans le_sup_left hIJ) (le_trans le_sup_right hIJ) hP.is_prime
end
section dedekind_domain
variables {R} [is_domain R] [is_dedekind_domain R]
lemma ideal.is_prime.mul_mem_pow (I : ideal R) [hI : I.is_prime] {a b : R} {n : ℕ}
(h : a * b ∈ I^n) : a ∈ I ∨ b ∈ I^n :=
begin
cases n, { simp },
by_cases hI0 : I = ⊥, { simpa [pow_succ, hI0] using h },
simp only [← submodule.span_singleton_le_iff_mem, ideal.submodule_span_eq, ← ideal.dvd_iff_le,
← ideal.span_singleton_mul_span_singleton] at h ⊢,
by_cases ha : I ∣ span {a},
{ exact or.inl ha },
rw mul_comm at h,
exact or.inr (prime.pow_dvd_of_dvd_mul_right ((ideal.prime_iff_is_prime hI0).mpr hI) _ ha h),
end
section
open_locale classical
lemma ideal.count_normalized_factors_eq {p x : ideal R} [hp : p.is_prime] {n : ℕ}
(hle : x ≤ p^n) (hlt : ¬ (x ≤ p^(n+1))) :
(normalized_factors x).count p = n :=
count_normalized_factors_eq'
((ideal.is_prime_iff_bot_or_prime.mp hp).imp_right prime.irreducible)
(by { haveI : unique (ideal R)ˣ := ideal.unique_units, apply normalize_eq })
(by convert ideal.dvd_iff_le.mpr hle) (by convert mt ideal.le_of_dvd hlt)
/- Warning: even though a pure term-mode proof typechecks (the `by convert` can simply be
removed), it's slower to the point of a possible timeout. -/
end
lemma ideal.le_mul_of_no_prime_factors
{I J K : ideal R} (coprime : ∀ P, J ≤ P → K ≤ P → ¬ is_prime P) (hJ : I ≤ J) (hK : I ≤ K) :
I ≤ J * K :=
begin
simp only [← ideal.dvd_iff_le] at coprime hJ hK ⊢,
by_cases hJ0 : J = 0,
{ simpa only [hJ0, zero_mul] using hJ },
obtain ⟨I', rfl⟩ := hK,
rw mul_comm,
exact mul_dvd_mul_left K
(unique_factorization_monoid.dvd_of_dvd_mul_right_of_no_prime_factors hJ0
(λ P hPJ hPK, mt ideal.is_prime_of_prime (coprime P hPJ hPK))
hJ)
end
lemma ideal.le_of_pow_le_prime {I P : ideal R} [hP : P.is_prime] {n : ℕ} (h : I^n ≤ P) : I ≤ P :=
begin
by_cases hP0 : P = ⊥,
{ simp only [hP0, le_bot_iff] at ⊢ h,
exact pow_eq_zero h },
rw ← ideal.dvd_iff_le at ⊢ h,
exact ((ideal.prime_iff_is_prime hP0).mpr hP).dvd_of_dvd_pow h
end
lemma ideal.pow_le_prime_iff {I P : ideal R} [hP : P.is_prime] {n : ℕ} (hn : n ≠ 0) :
I^n ≤ P ↔ I ≤ P :=
⟨ideal.le_of_pow_le_prime, λ h, trans (ideal.pow_le_self hn) h⟩
lemma ideal.prod_le_prime {ι : Type*} {s : finset ι} {f : ι → ideal R} {P : ideal R}
[hP : P.is_prime] :
∏ i in s, f i ≤ P ↔ ∃ i ∈ s, f i ≤ P :=
begin
by_cases hP0 : P = ⊥,
{ simp only [hP0, le_bot_iff],
rw [← ideal.zero_eq_bot, finset.prod_eq_zero_iff] },
simp only [← ideal.dvd_iff_le],
exact ((ideal.prime_iff_is_prime hP0).mpr hP).dvd_finset_prod_iff _
end
/-- The intersection of distinct prime powers in a Dedekind domain is the product of these
prime powers. -/
lemma is_dedekind_domain.inf_prime_pow_eq_prod {ι : Type*}
(s : finset ι) (f : ι → ideal R) (e : ι → ℕ)
(prime : ∀ i ∈ s, prime (f i)) (coprime : ∀ i j ∈ s, i ≠ j → f i ≠ f j) :
s.inf (λ i, f i ^ e i) = ∏ i in s, f i ^ e i :=
begin
letI := classical.dec_eq ι,
revert prime coprime,
refine s.induction _ _,
{ simp },
intros a s ha ih prime coprime,
specialize ih (λ i hi, prime i (finset.mem_insert_of_mem hi))
(λ i hi j hj, coprime i (finset.mem_insert_of_mem hi) j (finset.mem_insert_of_mem hj)),
rw [finset.inf_insert, finset.prod_insert ha, ih],
refine le_antisymm (ideal.le_mul_of_no_prime_factors _ inf_le_left inf_le_right) ideal.mul_le_inf,
intros P hPa hPs hPp,
haveI := hPp,
obtain ⟨b, hb, hPb⟩ := ideal.prod_le_prime.mp hPs,
haveI := ideal.is_prime_of_prime (prime a (finset.mem_insert_self a s)),
haveI := ideal.is_prime_of_prime (prime b (finset.mem_insert_of_mem hb)),
refine coprime a (finset.mem_insert_self a s) b (finset.mem_insert_of_mem hb) _
(((is_dedekind_domain.dimension_le_one.prime_le_prime_iff_eq _).mp
(ideal.le_of_pow_le_prime hPa)).trans
((is_dedekind_domain.dimension_le_one.prime_le_prime_iff_eq _).mp
(ideal.le_of_pow_le_prime hPb)).symm),
{ unfreezingI { rintro rfl }, contradiction },
{ exact (prime a (finset.mem_insert_self a s)).ne_zero },
{ exact (prime b (finset.mem_insert_of_mem hb)).ne_zero },
end
/-- **Chinese remainder theorem** for a Dedekind domain: if the ideal `I` factors as
`∏ i, P i ^ e i`, then `R ⧸ I` factors as `Π i, R ⧸ (P i ^ e i)`. -/
noncomputable def is_dedekind_domain.quotient_equiv_pi_of_prod_eq {ι : Type*} [fintype ι]
(I : ideal R) (P : ι → ideal R) (e : ι → ℕ)
(prime : ∀ i, prime (P i)) (coprime : ∀ i j, i ≠ j → P i ≠ P j) (prod_eq : (∏ i, P i ^ e i) = I) :
R ⧸ I ≃+* Π i, R ⧸ (P i ^ e i) :=
(ideal.quot_equiv_of_eq (by { simp only [← prod_eq, finset.inf_eq_infi, finset.mem_univ, cinfi_pos,
← is_dedekind_domain.inf_prime_pow_eq_prod _ _ _ (λ i _, prime i) (λ i _ j _, coprime i j)] }))
.trans $
ideal.quotient_inf_ring_equiv_pi_quotient _ (λ i j hij, ideal.coprime_of_no_prime_ge (begin
intros P hPi hPj hPp,
haveI := hPp,
haveI := ideal.is_prime_of_prime (prime i), haveI := ideal.is_prime_of_prime (prime j),
exact coprime i j hij
(((is_dedekind_domain.dimension_le_one.prime_le_prime_iff_eq (prime i).ne_zero).mp
(ideal.le_of_pow_le_prime hPi)).trans
((is_dedekind_domain.dimension_le_one.prime_le_prime_iff_eq (prime j).ne_zero).mp
(ideal.le_of_pow_le_prime hPj)).symm)
end))
open_locale classical
/-- **Chinese remainder theorem** for a Dedekind domain: `R ⧸ I` factors as `Π i, R ⧸ (P i ^ e i)`,
where `P i` ranges over the prime factors of `I` and `e i` over the multiplicities. -/
noncomputable def is_dedekind_domain.quotient_equiv_pi_factors {I : ideal R} (hI : I ≠ ⊥) :
R ⧸ I ≃+* Π (P : (factors I).to_finset), R ⧸ ((P : ideal R) ^ (factors I).count P) :=
is_dedekind_domain.quotient_equiv_pi_of_prod_eq _ _ _
(λ (P : (factors I).to_finset), prime_of_factor _ (multiset.mem_to_finset.mp P.prop))
(λ i j hij, subtype.coe_injective.ne hij)
(calc ∏ (P : (factors I).to_finset), (P : ideal R) ^ (factors I).count (P : ideal R)
= ∏ P in (factors I).to_finset, P ^ (factors I).count P
: (factors I).to_finset.prod_coe_sort (λ P, P ^ (factors I).count P)
... = ((factors I).map (λ P, P)).prod : (finset.prod_multiset_map_count (factors I) id).symm
... = (factors I).prod : by rw multiset.map_id'
... = I : (@associated_iff_eq (ideal R) _ ideal.unique_units _ _).mp (factors_prod hI))
@[simp] lemma is_dedekind_domain.quotient_equiv_pi_factors_mk {I : ideal R} (hI : I ≠ ⊥)
(x : R) : is_dedekind_domain.quotient_equiv_pi_factors hI (ideal.quotient.mk I x) =
λ P, ideal.quotient.mk _ x :=
rfl
/-- **Chinese remainder theorem**, specialized to two ideals. -/
noncomputable def ideal.quotient_mul_equiv_quotient_prod (I J : ideal R)
(coprime : I ⊔ J = ⊤) :
(R ⧸ (I * J)) ≃+* (R ⧸ I) × R ⧸ J :=
ring_equiv.trans
(ideal.quot_equiv_of_eq (inf_eq_mul_of_coprime coprime).symm)
(ideal.quotient_inf_equiv_quotient_prod I J coprime)
/-- **Chinese remainder theorem** for a Dedekind domain: if the ideal `I` factors as
`∏ i in s, P i ^ e i`, then `R ⧸ I` factors as `Π (i : s), R ⧸ (P i ^ e i)`.
This is a version of `is_dedekind_domain.quotient_equiv_pi_of_prod_eq` where we restrict
the product to a finite subset `s` of a potentially infinite indexing type `ι`.
-/
noncomputable def is_dedekind_domain.quotient_equiv_pi_of_finset_prod_eq {ι : Type*} {s : finset ι}
(I : ideal R) (P : ι → ideal R) (e : ι → ℕ)
(prime : ∀ i ∈ s, prime (P i)) (coprime : ∀ (i j ∈ s), i ≠ j → P i ≠ P j)
(prod_eq : (∏ i in s, P i ^ e i) = I) :
R ⧸ I ≃+* Π (i : s), R ⧸ (P i ^ e i) :=
is_dedekind_domain.quotient_equiv_pi_of_prod_eq I (λ (i : s), P i) (λ (i : s), e i)
(λ i, prime i i.2)
(λ i j h, coprime i i.2 j j.2 (subtype.coe_injective.ne h))
(trans (finset.prod_coe_sort s (λ i, P i ^ e i)) prod_eq)
/-- Corollary of the Chinese remainder theorem: given elements `x i : R / P i ^ e i`,
we can choose a representative `y : R` such that `y ≡ x i (mod P i ^ e i)`.-/
lemma is_dedekind_domain.exists_representative_mod_finset {ι : Type*} {s : finset ι}
(P : ι → ideal R) (e : ι → ℕ)
(prime : ∀ i ∈ s, prime (P i)) (coprime : ∀ (i j ∈ s), i ≠ j → P i ≠ P j)
(x : Π (i : s), R ⧸ (P i ^ e i)) :
∃ y, ∀ i (hi : i ∈ s), ideal.quotient.mk (P i ^ e i) y = x ⟨i, hi⟩ :=
begin
let f := is_dedekind_domain.quotient_equiv_pi_of_finset_prod_eq _ P e prime coprime rfl,
obtain ⟨y, rfl⟩ := f.surjective x,
obtain ⟨z, rfl⟩ := ideal.quotient.mk_surjective y,
exact ⟨z, λ i hi, rfl⟩
end
/-- Corollary of the Chinese remainder theorem: given elements `x i : R`,
we can choose a representative `y : R` such that `y - x i ∈ P i ^ e i`.-/
lemma is_dedekind_domain.exists_forall_sub_mem_ideal {ι : Type*} {s : finset ι}
(P : ι → ideal R) (e : ι → ℕ)
(prime : ∀ i ∈ s, prime (P i)) (coprime : ∀ (i j ∈ s), i ≠ j → P i ≠ P j)
(x : s → R) :
∃ y, ∀ i (hi : i ∈ s), y - x ⟨i, hi⟩ ∈ P i ^ e i :=
begin
obtain ⟨y, hy⟩ := is_dedekind_domain.exists_representative_mod_finset P e prime coprime
(λ i, ideal.quotient.mk _ (x i)),
exact ⟨y, λ i hi, ideal.quotient.eq.mp (hy i hi)⟩
end
end dedekind_domain
end chinese_remainder
section PID
open multiplicity unique_factorization_monoid ideal
variables {R} [is_domain R] [is_principal_ideal_ring R]
lemma span_singleton_dvd_span_singleton_iff_dvd {a b : R} :
(ideal.span {a}) ∣ (ideal.span ({b} : set R)) ↔ a ∣ b :=
⟨λ h, mem_span_singleton.mp (dvd_iff_le.mp h (mem_span_singleton.mpr (dvd_refl b))),
λ h, dvd_iff_le.mpr (λ d hd, mem_span_singleton.mpr (dvd_trans h (mem_span_singleton.mp hd)))⟩
lemma singleton_span_mem_normalized_factors_of_mem_normalized_factors [normalization_monoid R]
[decidable_eq R] [decidable_eq (ideal R)] {a b : R} (ha : a ∈ normalized_factors b) :
ideal.span ({a} : set R) ∈ normalized_factors (ideal.span ({b} : set R)) :=
begin
by_cases hb : b = 0,
{ rw [ideal.span_singleton_eq_bot.mpr hb, bot_eq_zero, normalized_factors_zero],
rw [hb, normalized_factors_zero] at ha,
simpa only [multiset.not_mem_zero] },
{ suffices : prime (ideal.span ({a} : set R)),
{ obtain ⟨c, hc, hc'⟩ := exists_mem_normalized_factors_of_dvd _ this.irreducible
(dvd_iff_le.mpr (span_singleton_le_span_singleton.mpr (dvd_of_mem_normalized_factors ha))),
rwa associated_iff_eq.mp hc',
{ by_contra,
exact hb (span_singleton_eq_bot.mp h) } },
rw prime_iff_is_prime,
exact (span_singleton_prime (prime_of_normalized_factor a ha).ne_zero).mpr
(prime_of_normalized_factor a ha),
by_contra,
exact (prime_of_normalized_factor a ha).ne_zero (span_singleton_eq_bot.mp h) },
end
lemma multiplicity_eq_multiplicity_span [decidable_rel ((∣) : R → R → Prop)]
[decidable_rel ((∣) : ideal R → ideal R → Prop)] {a b : R} :
multiplicity (ideal.span {a}) (ideal.span ({b} : set R)) = multiplicity a b :=
begin
by_cases h : finite a b,
{ rw ← part_enat.coe_get (finite_iff_dom.mp h),
refine (multiplicity.unique
(show (ideal.span {a})^(((multiplicity a b).get h)) ∣ (ideal.span {b}), from _) _).symm ;
rw [ideal.span_singleton_pow, span_singleton_dvd_span_singleton_iff_dvd],
exact pow_multiplicity_dvd h,
{ exact multiplicity.is_greatest ((part_enat.lt_coe_iff _ _).mpr (exists.intro
(finite_iff_dom.mp h) (nat.lt_succ_self _))) } },
{ suffices : ¬ (finite (ideal.span ({a} : set R)) (ideal.span ({b} : set R))),
{ rw [finite_iff_dom, part_enat.not_dom_iff_eq_top] at h this,
rw [h, this] },
refine not_finite_iff_forall.mpr (λ n, by {rw [ideal.span_singleton_pow,
span_singleton_dvd_span_singleton_iff_dvd], exact not_finite_iff_forall.mp h n }) }
end
variables [decidable_eq R] [decidable_eq (ideal R)] [normalization_monoid R]
/-- The bijection between the (normalized) prime factors of `r` and the (normalized) prime factors
of `span {r}` -/
@[simps]
noncomputable def normalized_factors_equiv_span_normalized_factors {r : R} (hr : r ≠ 0) :
{d : R | d ∈ normalized_factors r} ≃
{I : ideal R | I ∈ normalized_factors (ideal.span ({r} : set R))} :=
equiv.of_bijective
(λ d, ⟨ideal.span {↑d}, singleton_span_mem_normalized_factors_of_mem_normalized_factors d.prop⟩)
begin
split,
{ rintros ⟨a, ha⟩ ⟨b, hb⟩ h,
rw [subtype.mk_eq_mk, ideal.span_singleton_eq_span_singleton, subtype.coe_mk,
subtype.coe_mk] at h,
exact subtype.mk_eq_mk.mpr (mem_normalized_factors_eq_of_associated ha hb h) },
{ rintros ⟨i, hi⟩,
letI : i.is_principal := infer_instance,
letI : i.is_prime := is_prime_of_prime (prime_of_normalized_factor i hi),
obtain ⟨a, ha, ha'⟩ := exists_mem_normalized_factors_of_dvd hr
(submodule.is_principal.prime_generator_of_is_prime i
(prime_of_normalized_factor i hi).ne_zero).irreducible _,
{ use ⟨a, ha⟩,
simp only [subtype.coe_mk, subtype.mk_eq_mk, ← span_singleton_eq_span_singleton.mpr ha',
ideal.span_singleton_generator] },
{ exact (submodule.is_principal.mem_iff_generator_dvd i).mp (((show ideal.span {r} ≤ i, from
dvd_iff_le.mp (dvd_of_mem_normalized_factors hi))) (mem_span_singleton.mpr (dvd_refl r))) } }
end
variables [decidable_rel ((∣) : R → R → Prop)] [decidable_rel ((∣) : ideal R → ideal R → Prop)]
/-- The bijection `normalized_factors_equiv_span_normalized_factors` between the set of prime
factors of `r` and the set of prime factors of the ideal `⟨r⟩` preserves multiplicities. -/
lemma multiplicity_normalized_factors_equiv_span_normalized_factors_eq_multiplicity {r d: R}
(hr : r ≠ 0) (hd : d ∈ normalized_factors r) :
multiplicity d r =
multiplicity (normalized_factors_equiv_span_normalized_factors hr ⟨d, hd⟩ : ideal R)
(ideal.span {r}) :=
by simp only [normalized_factors_equiv_span_normalized_factors, multiplicity_eq_multiplicity_span,
subtype.coe_mk, equiv.of_bijective_apply]
/-- The bijection `normalized_factors_equiv_span_normalized_factors.symm` between the set of prime
factors of the ideal `⟨r⟩` and the set of prime factors of `r` preserves multiplicities. -/
lemma multiplicity_normalized_factors_equiv_span_normalized_factors_symm_eq_multiplicity
{r : R} (hr : r ≠ 0) (I : {I : ideal R | I ∈ normalized_factors (ideal.span ({r} : set R))}) :
multiplicity ((normalized_factors_equiv_span_normalized_factors hr).symm I : R) r =
multiplicity (I : ideal R) (ideal.span {r}) :=
begin
obtain ⟨x, hx⟩ := (normalized_factors_equiv_span_normalized_factors hr).surjective I,
obtain ⟨a, ha⟩ := x,
rw [hx.symm, equiv.symm_apply_apply, subtype.coe_mk,
multiplicity_normalized_factors_equiv_span_normalized_factors_eq_multiplicity hr ha, hx],
end
end PID
|
"""
ShiftableLoad
ShiftableLoad Load
"""
struct ShiftableLoad <: Resource
index::Int # Unique index of device
num_timesteps::Int # Number of time-steps in horizon
cycle_begin_min::Int # Beginning of each cycle
cycle_begin_max::Int # End of each cycle
cycle_length::Int # Length of a cycle
cycle_load::Vector{Float64} # Load level at each time of the cycle
function ShiftableLoad(;
index::Integer=0,
T::Integer=0,
cycle_begin_min::Int=1,
cycle_begin_max::Int=0,
cycle_length::Int=0,
cycle_load::Vector{Float64}=Float64[]
)
# Dimension checks
T >= cycle_length || throw(error(
"T=$T but cycle has length $cycle_length"
))
cycle_begin_min >= 1 || throw(error(
"Earliest start time is too early: $cycle_begin_min"
))
cycle_begin_max <= T - cycle_length + 1 || throw(error(
"Latest start time is too late: $cycle_begin_max"
))
cycle_length == length(cycle_load) || throw(DimensionMismatch(
"L=$cycle_length but load has length $(length(cycle_load))"
))
return new(
index, T,
cycle_begin_min, cycle_begin_max,
cycle_length, cycle_load
)
end
end
function add_resource_to_model!(
m::MOI.ModelLike,
h::House,
l::ShiftableLoad,
var2idx, con2idx
)
T = l.num_timesteps
T0 = l.cycle_begin_min
T1 = l.cycle_begin_max
# ==========================================
# I. Add local variables
# ==========================================
pnet = MOI.add_variables(m, T) # Net load
L = T1 - T0 + 1
ustart = MOI.add_variables(m, L) # Start indicator
# Variable bounds
for k in 1:L
MOI.add_constraint(m, MOI.SingleVariable(ustart[k]), MOI.ZeroOne())
end
# Update variable indices
for t in 1:T
var2idx[(:shift, l.index, :pnet, t)] = pnet[t]
end
for k in 1:L
var2idx[(:shift, l.index, :ustart, k + T0 - 1)] = ustart[k]
end
# ==========================================
# II. Update linking constraints
# ==========================================
for t in 1:T
cidx = con2idx[(:house, h.index, :link, t)]
MOI.modify(m, cidx, MOI.ScalarCoefficientChange(pnet[t], -1.0))
end
# ==========================================
# III. Add local constraints
# ==========================================
# Net load
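# At time t, if the cycle starts at time T0+k-1 (i.e. ustart[k] == 1),
# the device draws cycle_load[t - (T0+k-1) + 1]; the constraint below
# forces pnet[t] to equal the sum of these contributions over all
# admissible starts k.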
for t in 1:T
MOI.add_constraint(m,
MOI.ScalarAffineFunction([
[
MOI.ScalarAffineTerm(1.0, pnet[t])
]; [
MOI.ScalarAffineTerm(-l.cycle_load[t - T0 - k + 2], ustart[k])
for k in 1:L
if (T0 <= t - k + 1 <= T1) && (1 <= t - T0 - k + 2 <= l.cycle_length)
]
], 0.0),
MOI.EqualTo(0.0)
)
end
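# The cycle must be scheduled exactly once over the horizon: sum(ustart) == 1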
MOI.add_constraint(m, MOI.ScalarAffineFunction([
MOI.ScalarAffineTerm(1.0, ustart[t])
for t in 1:L
], 0.0),
MOI.EqualTo(1.0)
)
# ==========================================
# IV. Add sub-resources to current model
# ==========================================
# (None here)
return nothing
end |
function [SD1, SD2, SD1_SD2_ratio] = EvalPoincareOnWindows(rr, t_rr, HRVparams, tWin, sqi)
% OVERVIEW:
% Calculates SD1 SD2 and SD1_SD2_ratio features from Poincare plot
% for each defined windows
% INPUT
% rr - (seconds) rr intervals
% t_rr - (seconds) time stamp of rr intervals
% HRVparams - struct of settings for hrv_toolbox analysis
% tWin - start time (seconds) of each window to analyze
% sqi - Signal Quality Index; requires a matrix with
% at least two columns. Column 1 should be
% timestamps of each SQI measure, and Column 2
% should be the SQI on a scale from 0 to 1.
%
% OUTPUTS:
% SD1 : (ms) standard deviation of projection of the
% PP on the line perpendicular to the line of
% identity (y=-x)
% SD2 : (ms) standard deviation of the projection of the PP
% on the line of identity (y=x)
% SD1_SD2_ratio : (ms) SD1/SD2 ratio
%
%
% Written by: Giulia Da Poian <[email protected]>
% REPO:
% https://github.com/cliffordlab/PhysioNet-Cardiovascular-Signal-Toolbox
% COPYRIGHT (C) 2016
% LICENSE:
% This software is offered freely and without warranty under
% the GNU (v3 or later) public license. See license file for
% more information
%
% Make vector a column
rr = rr(:);
if nargin < 4
error('Not enough input arguments: rr, t_rr, HRVparams and tWin are required')
end
if nargin <5 || isempty(sqi)
sqi(:,1) = t_rr;
sqi(:,2) = ones(length(t_rr),1);
end
windowlength = HRVparams.windowlength;
SQI_th = HRVparams.sqi.LowQualityThreshold; % SQI threshold
WinQuality_th = HRVparams.RejectionThreshold; % Low quality windows threshold
% Preallocation (all NaN)
SD1 = ones(length(tWin),1)*NaN;
SD2 = ones(length(tWin),1)*NaN;
SD1_SD2_ratio = ones(length(tWin),1)*NaN;
% Run PoincareMetrics by Windows
% Loop through each window of RR data
for i_win = 1:length(tWin)
if ~isnan(tWin(i_win))
% Isolate data in this window
sqi_win = sqi( sqi(:,1) >= tWin(i_win) & sqi(:,1) < tWin(i_win) + windowlength,:);
nn_win = rr( t_rr >= tWin(i_win) & t_rr < tWin(i_win) + windowlength );
lowqual_idx = find(sqi_win(:,2) < SQI_th); % Analysis of SQI for the window
% If enough data has an adequate SQI, perform the calculations
if numel(lowqual_idx)/length(sqi_win(:,2)) < WinQuality_th
[SD1(i_win), SD2(i_win), SD1_SD2_ratio(i_win)] = PoincareMetrics(nn_win);
end % end of conditional statements run when SQI is adequate
end % end of check for sufficient data
end % end of loop through windows
end % end of function
|
[Table of Contents](http://nbviewer.ipython.org/github/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/table_of_contents.ipynb)
# Kalman Filter Math
```python
#format the book
%matplotlib inline
%load_ext autoreload
%autoreload 2
from __future__ import division, print_function
import sys
sys.path.insert(0,'./code')
from book_format import load_style
load_style()
```
If you've gotten this far I hope that you are thinking that the Kalman filter's fearsome reputation is somewhat undeserved. Sure, I hand waved some equations away, but I hope implementation has been fairly straightforward for you. The underlying concept is quite straightforward - take two measurements, or a measurement and a prediction, and choose the output to be somewhere between the two. If you believe the measurement more, your guess will be closer to the measurement, and if you believe the prediction is more accurate, your guess will lie closer to it. That's not rocket science (little joke - it is exactly this math that got Apollo to the moon and back!).
To be honest I have been choosing my problems carefully. For any arbitrary problem finding some of the matrices that we need to feed into the Kalman filter equations can be quite difficult. I haven't been *too tricky*, though. Equations like Newton's equations of motion can be trivially computed for Kalman filter applications, and they make up the bulk of the kind of problems that we want to solve.
I have strived to illustrate concepts with code and reasoning, not math. But there are topics that do require more mathematics than I have used so far. In this chapter I will give you the math behind the topics that we have learned so far, and introduce the math that you will need to understand the topics in the rest of the book. Many topics are optional.
## Computing Means and Covariances
You will not need to do this by hand for the rest of the book, but it is important to understand the computations behind these values. It only takes a few minutes to learn.
### Computing the Mean
So let's start with 3 variables: $a, b, c$. Let's say we have two values for each: $a = [1, 3]$, $b = [8, 7]$, and $c = [3, 7]$. Another, equivalent way of writing this that you will sometimes see is
$$\mathbf{x} = \begin{bmatrix}a\\b\\c\end{bmatrix} = \begin{bmatrix}1 & 3\\8&7\\3&7\end{bmatrix}$$
You will not see that often in the Kalman filter literature, so I will adopt the alternative notation of using a subscript to indicate the $i^{th}$ value for $\mathbf{x}$
$$\mathbf{x}_0 = \begin{bmatrix}1 \\8\\3\end{bmatrix},\ \ \mathbf{x}_1 = \begin{bmatrix}3\\7\\7\end{bmatrix}$$
We compute the mean as for the univariate case - sum the values and divide by the number of values. Formally
$$ \mu_x = \frac{1}{n}\sum^n_{i=1} x_i$$
Therefore we can write:
$$\begin{aligned}
\mu_a &= \frac{1+3}{2} = 2 \\
\mu_b &= \frac{8+7}{2} = 7.5 \\
\mu_c &= \frac{3+7}{2} = 5
\end{aligned}$$
Which I can express in our shorthand matrix notation as
$$\mu_x = \begin{bmatrix}2\\7.5\\5\end{bmatrix}$$
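We can check this arithmetic with NumPy. This is only a quick sketch using the sample data above:


```python
import numpy as np

# each row holds the samples of one variable: a, b, c
x = np.array([[1., 3.], [8., 7.], [3., 7.]])
print(np.mean(x, axis=1))   # [ 2.   7.5  5. ]
```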
### Expected Value
I could just give you the formula for the covariance but it will make more sense if you see how it is derived. To do that we first need to talk about **expected value** of a random variable. The expected value is the value we expect, on average, for the variable.
The expected value of a random variable is the average value it would have if we took an infinite number of samples of it and then averaged those samples together. Let's say we have $x=[1,3,5]$ and each value is equally probable. What value would we *expect* $x$ to have, on average?
It would be the average of 1, 3, and 5, of course, which is 3. That should make sense; we would expect equal numbers of 1, 3, and 5 to occur, so $(1+3+5)/3=3$ is clearly the average of that infinite series of samples.
Now suppose that each value has a different probability of happening. Say 1 has an 80% chance of occurring, 3 has a 15% chance, and 5 has only a 5% chance. In this case we compute the expected value by multiplying each value of $x$ by the percent chance of it occurring, and summing the result. So for this case we could compute
$$E[x] = (1)(0.8) + (3)(0.15) + (5)(0.05) = 1.5$$
Here I have introduced the standard notation $E[x]$ for the expected value of $x$.
We can formalize this by letting $x_i$ be the $i^{th}$ value of $x$, and $p_i$ be the probability of its occurrence. This gives us
$$E[X] = \sum_{i=1}^n p_ix_i$$
The value 1.5 for $x$ makes intuitive sense because $x$ is far more likely to be 1 than 3 or 5, and 3 is more likely than 5 as well.
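In code the sum is just a dot product of the probabilities against the values; a minimal sketch:


```python
import numpy as np

x = np.array([1., 3., 5.])
p = np.array([0.8, 0.15, 0.05])
print(np.dot(p, x))   # 1.5
```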
It is probably worth me pointing out now that if $x$ is continuous we substitute the sum for an integral, like so
$$E[X] = \int_{-\infty}^\infty xf(x)\, dx$$
where $f(x)$ is the probability distribution function of $x$. We won't be using this equation yet, but we will be using it in the next chapter.
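If you want to experiment with the continuous form now, here is a sketch that computes the expected value of a uniform distribution on $[0, 1]$ by numerical integration. It assumes SciPy is installed; SciPy is not otherwise required for this chapter.


```python
from scipy.integrate import quad

# uniform distribution on [0, 1]: f(x) = 1 inside the interval
E, _ = quad(lambda x: x * 1.0, 0., 1.)
print(E)   # 0.5
```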
### Computing the Covariance
Now we are prepared to compute the covariance matrix. The **covariance** measures how much two random variables move in the same direction, and is defined as
$$ COV(x,y) = \frac{1}{N}\sum_{i=1}^N (x_i - \mu_x)(y_i - \mu_y)$$
If we compare this to the formula for the variance of a single variable we can see where this definition came from.
$$\sigma^2 = \frac{1}{N}\sum_{i=1}^N(x_i - \mu)^2$$
And indeed, the covariance of a variable with itself is the variance of the variable, which we can trivially prove with
$$\begin{aligned}
COV(x,x) &= \frac{1}{N}\sum_{i=1}^N (x_i - \mu_x)(x_i - \mu_x) \\
&= \frac{1}{N}\sum_{i=1}^N (x_i - \mu_x)^2 \\
&= VAR(x)
\end{aligned}$$
Let's build some familiarity with this by calculating the covariance for the $\mathbf{x}$ from the beginning of this section.
$$\mathbf{x}_0 = \begin{bmatrix}1 \\8\\3\end{bmatrix},\ \ \mathbf{x}_1 = \begin{bmatrix}3\\7\\7\end{bmatrix}$$
We already know the mean is
$$\mu_x = \begin{bmatrix}2\\7.5\\5\end{bmatrix}$$
Recall that we named the three variables as $a, b, c$. Let's start by computing the variance of $a$.
$$
\begin{aligned}
VAR(a) &= \frac{1}{2}[(1-2)^2 + (3-2)^2] \\
&= \frac{1}{2}[1+1] \\
&= 1\end{aligned}$$
Using similar math we can compute that $VAR(b) = 0.25$ and $VAR(c)=4$. This allows us to fill in the covariance matrix with
$$\Sigma = \begin{bmatrix}1 & & \\ & 0.25 & \\ &&4\end{bmatrix}$$
Now we can compute the covariance of a and b.
$$
\begin{aligned}
COV(a,b) &= \frac{1}{2}[(1-2)(8-7.5) + (3-2)(7-7.5)] \\
&= \frac{1}{2}[(-1)(.5)+ (1)(-.5)] \\
&= -0.5\end{aligned}$$
We can fill in the $a^{th}$ row and $b^{th}$ column in the covariance matrix with this value:
$$\Sigma = \begin{bmatrix}1 & & \\ -0.5 & 0.25 & \\ &&4\end{bmatrix}$$
We already talked about the symmetry of the covariance matrix; $\sigma_{ab}=\sigma_{ba}$. We see now why this is true from the equation for the covariance.
$$\begin{aligned} COV(x,y) &= \frac{1}{N}\sum_{i=1}^N (x_i - \mu_x)(y_i - \mu_y) \\
&= \frac{1}{N}\sum_{i=1}^N (y_i - \mu_y)(x_i - \mu_x) \\
&= COV(y,x)\end{aligned}$$
This gives the covariance matrix
$$\Sigma = \begin{bmatrix}1 &-0.5 & \\ -0.5 & 0.25 & \\ &&4\end{bmatrix}$$
The arithmetic is a bit tedious, so let's use NumPy's `cov()` function to compute the entire covariance matrix. To compute the covariance in accordance with the equations above you will need to set the parameter `bias=1`. The meaning of that parameter is not important to this book. If you are interested, wikipedia has a good article on it here http://en.wikipedia.org/wiki/Bias_of_an_estimator.
```python
import numpy as np
x = [[1, 3], [8, 7], [3, 7]]
print(np.cov(x, bias=1))
```
[[ 1. -0.5 2. ]
[-0.5 0.25 -1. ]
[ 2. -1. 4. ]]
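To connect that output back to the definition, here is a minimal hand-rolled version of the population covariance (the `bias=1` form):


```python
import numpy as np

def cov(x, y):
    # mean of the products of the deviations from the means
    return np.mean((x - np.mean(x)) * (y - np.mean(y)))

a, b, c = np.array([1., 3.]), np.array([8., 7.]), np.array([3., 7.])
print(cov(a, a), cov(a, b), cov(b, c))   # 1.0 -0.5 -1.0
```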
We said above that "the *covariance* measures how much two random variables move in the same direction", so let's see what that means in practice. If we start with the value $x_0 = (1, 2)$ we can create subsequent values that vary in the same direction with $x_1 = (2, 4)$, $x_2=(3,6)$, and $x_3=(4,8)$.
```python
from filterpy.stats import plot_covariance_ellipse
x = [[1, 2, 3, 4], [2, 4, 6, 8]]
cov = np.cov(x, bias=1)
plot_covariance_ellipse([0, 0], cov)
print(cov)
```
The values for $x$ are perfectly correlated, and the result is a covariance ellipse with zero width. We can see the effect by making the values slightly uncorrelated.
```python
x = [[1, 2, 3, 4], [2, 4, 6, 8.4]]
plot_covariance_ellipse([0, 0], np.cov(x, bias=1))
```
Now let's make the second value move in the opposite direction from the first.
```python
x = [[1, 2, 3, 4], [-2, -4, -6, -8.4]]
plot_covariance_ellipse([0, 0], np.cov(x, bias=1))
```
As we would hope, the covariance matrix is tilted in the opposite direction, indicating that the variables are inversely correlated. That is, as one gets larger, the other becomes smaller.
## Modeling a Dynamic System that Has Noise
We need to start by understanding the underlying equations and assumptions that the Kalman filter uses. We are trying to model real world phenomena, so what do we have to consider?
First, each physical system has a process. For example, a car traveling at a certain velocity goes so far in a fixed amount of time, and its velocity varies as a function of its acceleration. We describe that behavior with the well known Newtonian equations we learned in high school.
$$
\begin{aligned}
v&=at\\
x &= \frac{1}{2}at^2 + v_0t + d_0
\end{aligned}
$$
And once we learned calculus we saw them in this form:
$$
\begin{aligned}
\mathbf{v} &= \frac{d \mathbf{x}}{d t}\\
\quad \mathbf{a} &= \frac{d \mathbf{v}}{d t} = \frac{d^2 \mathbf{x}}{d t^2} \,\!
\end{aligned}
$$
A typical problem would have you compute the distance traveled given a constant velocity or acceleration. But, of course we know this is not all that is happening. First, we do not have perfect measures of things like the velocity and acceleration - there is always noise in the measurements, and we have to model that. Second, no car travels on a perfect road. There are bumps that cause the car to slow down, there is wind drag, there are hills that raise and lower the speed. If we do not have explicit knowledge of these factors we lump them all together under the term "process noise".
Trying to model all of those factors explicitly and exactly is impossible for anything but the most trivial problem. I could try to include equations for things like bumps in the road, the behavior of the car's suspension system, even the effects of hitting bugs with the windshield, but the job would never be done - there would always be more effects to add and limits to our knowledge (how many bugs do we hit in an hour, for example). What is worse, each of those models would in itself be a simplification - do I assume the wind is constant, that the drag of the car is the same for all angles of the wind, that the suspension acts as a perfect spring, that the suspension for each wheel acts identically, and so on.
So control theory makes a mathematically correct simplification. We acknowledge that there are many factors that influence the system that we either do not know or that we don't want to have to model. At any time $t$ we say that the actual value (say, the position of our car) is the predicted value plus some unknown process noise:
$$
x(t) = x_{pred}(t) + noise(t)
$$
This is not meant to imply that $noise(t)$ is a function that we can derive analytically or that it is well behaved. If there is a bump in the road at $t=10$ then the noise factor will incorporate that effect. Again, this is not implying that we model, compute, or even know the value of *noise(t)*, it is merely a statement of fact - we can *always* describe the actual value as the predicted value from our idealized model plus some other value.
Let's express this with linear algebra. Using the same notation from previous chapters, we can say that our model of the system (without noise) is:
$$ f(\mathbf{x}) = \mathbf{Fx}$$
That is, we have a set of linear equations that describe our system. For our car,
$\mathbf{F}$ will be the coefficients for Newton's equations of motion.
Now we need to model the noise. We will call that *w*, and add it to the equation.
$$ f(\mathbf{x}) = \mathbf{Fx} + \mathbf{w}$$
Finally, we need to consider inputs into the system. We are dealing with linear problems here, so we will assume that there is some input $u$ into the system, and that we have some linear model that defines how that input changes the system. For example, if you press down on the accelerator in your car the car will accelerate. We will need a matrix $\mathbf{B}$ to convert $u$ into the effect on the system. We add that into our equation:
$$ f(\mathbf{x}) = \mathbf{Fx} + \mathbf{Bu} + \mathbf{w}$$
And that's it. That is one of the equations that Kalman set out to solve, and he found a way to compute an optimal solution if we assume certain properties of $w$.
However, we took advantage of something I left mostly unstated in the last chapter. We were able to provide a definition for $\mathbf{F}$ because we were able to take advantage of the exact solution that Newtonian equations provide us. However, if you have an engineering background you will realize what a small class of problems that covers. If you don't, I will explain it next, and provide you with several ways to compute $\mathbf{F}$ for arbitrary systems.
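Before moving on, here is a sketch of what $\mathbf{F}$ and $\mathbf{B}$ look like for the Newtonian model above, with the state $\mathbf{x} = [x, v]^\mathsf{T}$ and a commanded acceleration as the input $u$. The specific time step is only for illustration:


```python
import numpy as np

dt = 1.0   # illustrative time step
# Newton: x' = x + v*dt,  v' = v
F = np.array([[1., dt],
              [0., 1.]])
# a commanded acceleration u adds dt^2/2 to position and dt to velocity
B = np.array([[0.5 * dt**2],
              [dt]])
x = np.array([[0.], [1.]])   # at the origin, moving at 1 m/s
u = np.array([[2.]])         # accelerate at 2 m/s^2
print(np.dot(F, x) + np.dot(B, u))   # [[ 2.]  [ 3.]]
```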
## Converting the Multivariate Equations to the Univariate Case
As it turns out the Kalman filter equations are quite easy to deal with in one dimension, so let's do the mathematical proof.
> **Note:** This section will provide you with a strong intuition into what the Kalman filter equations are actually doing. While this section is not strictly required, I recommend reading this section carefully as it should make the rest of the material easier to understand. It is not merely a proof of correctness that you would normally want to skip past! The equations look complicated, but they are actually doing something quite simple.
Let's start with the predict step, which is slightly easier. Here are the multivariate equations.
$$
\begin{aligned}
\mathbf{\bar{x}} &= \mathbf{F x} + \mathbf{B u} \\
\mathbf{\bar{P}} &= \mathbf{FPF}^\mathsf{T} + \mathbf{Q}
\end{aligned}
$$
The state $\mathbf{x}$ only has one variable, so it is a $1\times 1$ matrix. Our motion $\mathbf{u}$ is also a $1\times 1$ matrix. Therefore, $\mathbf{F}$ and $\mathbf{B}$ must also be $1\times 1$ matrices. That means that they are all scalars, and we can write
$$x = Fx + Bu$$
Here the variables are not bold, denoting that they are not matrices or vectors.
Our state transition is simple - the next state is the same as this state, so $F=1$. The same holds for the motion transition, so $B=1$. Thus we have
$$x = x + u$$
which is equivalent to the Gaussian equation from the last chapter
$$ \mu = \mu_1+\mu_2$$
Hopefully the general process is clear, so now I will go a bit faster on the rest. Our other equation for the predict step is
$$\mathbf{\bar{P}} = \mathbf{FPF}^\mathsf{T} + \mathbf{Q}$$
Again, since our state only has one variable $\mathbf{P}$ and $\mathbf{Q}$ must also be $1\times 1$ matrix, which we can treat as scalars, yielding
$$P = FPF^\mathsf{T} + Q$$
We already know $F=1$. The transpose of a scalar is the scalar, so $F^\mathsf{T} = 1$. This yields
$$P = P + Q$$
which is equivalent to the Gaussian equation of
$$\sigma^2 = \sigma_1^2 + \sigma_2^2$$
This proves that the multivariate equations are performing the same math as the univariate equations for the case of the dimension being 1.
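A quick numerical sanity check of this equivalence, with arbitrary values:


```python
import numpy as np

x, P = 10., 4.    # univariate state and variance
u, Q = 1., 0.5    # motion and process noise
F = np.array([[1.]])
B = np.array([[1.]])
x_bar = np.dot(F, [[x]]) + np.dot(B, [[u]])
P_bar = np.dot(np.dot(F, [[P]]), F.T) + np.array([[Q]])
print(x_bar[0, 0], x + u)   # 11.0 11.0
print(P_bar[0, 0], P + Q)   # 4.5 4.5
```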
Here are the multivariate Kalman filter equations for the update step.
$$
\begin{aligned}
\textbf{y} &= \mathbf{z} - \mathbf{H \bar{x}}\\
\mathbf{K}&= \mathbf{\bar{P}H}^\mathsf{T} (\mathbf{H\bar{P}H}^\mathsf{T} + \mathbf{R})^{-1} \\
\mathbf{x}&=\mathbf{\bar{x}} +\mathbf{K\textbf{y}} \\
\mathbf{P}&= (\mathbf{I}-\mathbf{KH})\mathbf{\bar{P}}
\end{aligned}
$$
As above, all of the matrices become scalars. $H$ defines how we convert from a position to a measurement. Both are positions, so there is no conversion, and thus $H=1$. Let's substitute in our known values and convert to scalar in one step. One final thing you need to know: division is the scalar analog of matrix inversion, so we will convert the matrix inversion to division.
$$
\begin{aligned}
y &= z - x\\
K &=P / (P + R) \\
x &=x +Ky \\
P &= (1-K)P
\end{aligned}
$$
Before we continue with the proof, I want you to look at those equations to recognize what a simple concept these equations implement. The residual $y$ is nothing more than the measurement minus the previous state. The gain $K$ is scaled based on how certain we are about the last prediction vs how certain we are about the measurement. We choose a new state $x$ based on the old value of $x$ plus the scaled value of the residual. Finally, we update the uncertainty based on how certain we are about the measurement. Algorithmically this should sound exactly like what we did in the last chapter.
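Here are those four scalar equations in code, along with a check that the Gaussian form from the last chapter produces the same numbers. The values are arbitrary:


```python
x0, P0 = 11., 4.5   # prior state and variance
z, R = 12., 3.      # measurement and its variance

y = z - x0          # residual
K = P0 / (P0 + R)   # gain
x = x0 + K*y
P = (1 - K)*P0
print(x, P)                        # 11.6 1.8

# the Gaussian equations give the same result
print((P0*z + R*x0) / (P0 + R))    # 11.6
print(1. / (1./P0 + 1./R))         # 1.8
```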
So let's finish off the algebra to prove this. It's straightforward, and not at all necessary for you to learn unless you are interested. Feel free to skim ahead to the last paragraph in this section if you prefer skipping the algebra.
Recall that the univariate equations for the update step are:
$$
\begin{aligned}
\mu &=\frac{\sigma_1^2 \mu_2 + \sigma_2^2 \mu_1} {\sigma_1^2 + \sigma_2^2}, \\
\sigma^2 &= \frac{1}{\frac{1}{\sigma_1^2} + \frac{1}{\sigma_2^2}}
\end{aligned}
$$
Here we will say that $\mu_1$ is the state $x$, and $\mu_2$ is the measurement $z$. That is entirely arbitrary; we could have chosen the opposite assignment. Thus it follows that $\sigma_1^2$ is the state uncertainty $P$, and $\sigma_2^2$ is the measurement noise $R$. Let's substitute those in.
$$ \mu = \frac{Pz + Rx}{P+R} \\
\sigma^2 = \frac{1}{\frac{1}{P} + \frac{1}{R}}
$$
I will handle $\mu$ first. The corresponding equation in the multivariate case is
$$
\begin{aligned}
x &= x + Ky \\
&= x + \frac{P}{P+R}(z-x) \\
&= \frac{P+R}{P+R}x + \frac{Pz - Px}{P+R} \\
&= \frac{Px + Rx + Pz - Px}{P+R} \\
&= \frac{Pz + Rx}{P+R}
\end{aligned}
$$
Now let's look at $\sigma^2$. The corresponding equation in the multivariate case is
$$
\begin{aligned}
P &= (1-K)P \\
&= (1-\frac{P}{P+R})P \\
&= (\frac{P+R}{P+R}-\frac{P}{P+R})P \\
&= (\frac{P+R-P}{P+R})P \\
&= \frac{RP}{P+R}\\
&= \frac{1}{\frac{P+R}{RP}}\\
&= \frac{1}{\frac{R}{RP} + \frac{P}{RP}} \\
&= \frac{1}{\frac{1}{P} + \frac{1}{R}}
\quad\blacksquare
\end{aligned}
$$
So we have proven that the multivariate equations are equivalent to the univariate equations when we only have one state variable. I'll close this section by recognizing one quibble - I hand waved my assertion that $H=1$ and $F=1$. In general we know this is not true. For example, a digital thermometer may provide measurement in volts, and we need to convert that to temperature, and we use $H$ to do that conversion. I left that issue out to keep the explanation as simple and streamlined as possible. It is very straightforward to add that generalization to the equations above, redo the algebra, and still have the same results.
## Converting Kalman Filter to a g-h Filter
I've stated that the Kalman filter is a form of the g-h filter. It just takes some algebra to prove it. It's more straightforward to do with the one dimensional case, so I will do that. Recall
$$
\mu_{x}=\frac{\sigma_1^2 \mu_2 + \sigma_2^2 \mu_1} {\sigma_1^2 + \sigma_2^2}
$$
which I will make more friendly for our eyes as:
$$
\mu_{x}=\frac{ya + xb} {a+b}
$$
We can easily put this into the g-h form with the following algebra
$$
\begin{aligned}
\mu_{x}&=(x-x) + \frac{ya + xb} {a+b} \\
\mu_{x}&=x-\frac{a+b}{a+b}x + \frac{ya + xb} {a+b} \\
\mu_{x}&=x +\frac{-x(a+b) + xb+ya}{a+b} \\
\mu_{x}&=x+ \frac{-xa+ya}{a+b} \\
\mu_{x}&=x+ \frac{a}{a+b}(y-x)\\
\end{aligned}
$$
We are almost done, but recall that the variance of the estimate is given by
$${\sigma_{x}^2} = \frac{1}{ \frac{1}{\sigma_1^2} + \frac{1}{\sigma_2^2}}\\
= \frac{1}{ \frac{1}{a} + \frac{1}{b}}
$$
We can incorporate that term into our equation above by observing that
$$
\begin{aligned}
\frac{a}{a+b} &= \frac{a/a}{(a+b)/a} = \frac{1}{(a+b)/a} \\
&= \frac{1}{1 + \frac{b}{a}} = \frac{1}{\frac{b}{b} + \frac{b}{a}} \\
&= \frac{1}{b}\frac{1}{\frac{1}{b} + \frac{1}{a}} \\
&= \frac{\sigma^2_{x'}}{b}
\end{aligned}
$$
We can tie all of this together with
$$
\begin{aligned}
\mu_{x}&=x+ \frac{a}{a+b}(y-x) \\
&= x + \frac{\sigma^2_{x'}}{b}(y-x) \\
&= x + g_n(y-x)
\end{aligned}
$$
where
$$g_n = \frac{\sigma^2_{x}}{\sigma^2_{y}}$$
The end result is multiplying the residual of the two measurements by a constant and adding to our previous value, which is the *g* equation for the g-h filter. *g* is the variance of the new estimate divided by the variance of the measurement. Of course in this case g is not a constant as it varies with each time step as the variance changes. We can also derive the formula for *h* in the same way but I don't find this a particularly interesting derivation. The end result is
$$h_n = \frac{COV (x,\dot{x})}{\sigma^2_{y}}$$
The takeaway point is that *g* and *h* are specified fully by the variance and covariances of the measurement and predictions at time *n*. In other words, we are picking a point between the measurement and prediction by a scale factor determined by the quality of each of those two inputs. That is all the Kalman filter is.
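In code the blend looks like this; the variances are arbitrary, and note how $g$ falls out of them rather than being chosen by hand:


```python
x, P = 10., 4.   # prediction and its variance
z, R = 12., 2.   # measurement and its variance

P_new = 1. / (1./P + 1./R)   # variance of the new estimate
g = P_new / R                # g = var(estimate) / var(measurement)
x_new = x + g*(z - x)
print(g, x_new)              # 0.666... 11.333...
```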
## Modeling Dynamic Systems
Modeling dynamic systems is properly the topic of at least one undergraduate course in mathematics. To an extent there is no substitute for a few semesters of ordinary and partial differential equations. If you are a hobbyist, or trying to solve one very specific filtering problem at work you probably do not have the time and/or inclination to devote a year or more to that education.
However, I can present enough of the theory to allow us to create the system equations for many different Kalman filters, and give you enough background to at least follow the mathematics in the literature. My goal is to get you to the stage where you can read a Kalman filtering book or paper and understand it well enough to implement the algorithms. The background math is deep, but we end up using a few simple techniques over and over again in practice.
I struggle a bit with the proper way to present this material. If you have not encountered this math before I fear reading this section will not be very profitable for you. In the **Extended Kalman Filter** chapter I take a more ad-hoc way of presenting this information where I expose a problem that the KF needs to solve, then provide the math without a lot of supporting theory. This gives you the motivation behind the mathematics at the cost of not knowing why the math I give you is correct. On the other hand, the following section gives you the math, but somewhat divorced from the specifics of the problem we are trying to solve. Only you know what kind of learner you are. If you like the presentation of the book so far (practical first, then the math) you may want to wait until you have read the **Extended Kalman Filter** chapter before working through this section.
In particular, if your intent is to work with Extended Kalman filters (a very prevalent form of nonlinear Kalman filtering) you will need to understand this math at least at the level I present it. If that is not your intent this section may still prove to be beneficial if you need to simulate a nonlinear system in order to test your filter.
Let's lay out the problem and discuss what the solution will be. We model *dynamic systems* with a set of first order *differential equations*. This should not be a surprise as calculus is the math of things that vary. For example, we say that velocity is the derivative of distance with respect to time
$$\mathbf{v}= \frac{d \mathbf{x}}{d t} = \dot{\mathbf{x}}$$
where $\dot{\mathbf{x}}$ is the notation for the derivative of $\mathbf{x}$ with respect to t.
We need to use these equations for the predict step of the Kalman filter. Given the state of the system at time $t$ we want to predict its state at time $t + \Delta t$. The Kalman filter matrices do not accept differential equations, so we need a mathematical technique that will find the solution to those equations at each time step. In general it is extremely difficult to find analytic solutions to systems of differential equations, so we will normally use *numerical* techniques to find accurate approximations for these equations.
### Why This is Hard
We model dynamic systems with a set of first order differential equations. For example, we already presented the Newtonian equation
$$\mathbf{v}=\dot{\mathbf{x}}$$
where $\dot{\mathbf{x}}$ is the notation for the derivative of $\mathbf{x}$ with respect to t, or $\frac{d \mathbf{x}}{d t}$.
In general terms we can then say that a dynamic system consists of equations of the form
$$ g(t) = \dot{x}$$
if the behavior of the system depends on time. However, if the system is *time invariant* the equations are of the form
$$ f(x) = \dot{x}$$
What does *time invariant* mean? Consider a home stereo. If you input a signal $x$ into it at time $t$, it will output some signal $f(x)$. If you instead make the input at a later time $t + \Delta t$ the output signal will still be exactly the same except shifted in time. This is different from, say, an aircraft. If you make a control input to the aircraft at a later time its behavior will be different because it will have burned additional fuel (and thus lost weight), drag may be different due to being at a different altitude, and so on.
We can solve these equations by integrating each side. The time variant equation is very straightforward. We essentially solved this problem with the Newtonian equations above, but let's be explicit and write it out. Starting with $$\dot{\mathbf{x}}=\mathbf{v}$$ we get the expected
$$ \int \dot{\mathbf{x}}\mathrm{d}t = \int \mathbf{v} \mathrm{d}t\\
x = vt + x_0$$
However, integrating the time invariant equation is not so straightforward.
$$ \dot{x} = f(x) \\
\frac{dx}{dt} = f(x)
$$
Using the *separation of variables* techniques, we divide by $f(x)$ and move the $dx$ term to the right so we can integrate each side:
$$
\int^x_{x_0} \frac{1}{f(x)} dx = \int^t_{t_0} dt\\
$$
If we let the solution to the left hand side be named $F(x)$, we get
$$F(x) - F(x_0) = t-t_0$$
We then solve for x with
$$F(x) = t - t_0 + F(x_0) \\
x = F^{-1}[t-t_0 + F(x_0)]$$
In other words, we need to find the inverse of $F$. This is not at all trivial, and a significant amount of course work in a STEM education is devoted to finding tricky, analytic solutions to this problem, backed by several centuries of research.
In the end, however, they are tricks, and many simple forms of $f(x)$ either have no closed form solution, or pose extreme difficulties. Instead, the practicing engineer turns to numerical methods to find a solution to her problems. I would suggest that students would be better served by learning fewer analytic mathematical tricks and instead focusing on learning numerical methods, but that is the topic for another book.
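As a small illustration of what an analytic solver can do when a closed form exists, here is a sketch using SymPy (the choice of the system $\dot{x} = -kx$ is mine, purely for illustration):

```python
from sympy import Eq, Function, dsolve, symbols

t = symbols('t')
k = symbols('k', positive=True)
x = Function('x')

# time invariant system: dx/dt = -k*x
solution = dsolve(Eq(x(t).diff(t), -k * x(t)), x(t))
print(solution)   # Eq(x(t), C1*exp(-k*t))
```

For most nonlinear $f(x)$ `dsolve` will fail or return an unusable implicit form, which is why we turn to the numerical techniques below.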
### Finding the Fundamental Matrix for Time Invariant Systems
If you already have the mathematical training in solving partial differential equations you may be able to put it to use; I am not assuming that sort of background. So let me skip over quite a bit of mathematics and present the typical numerical techniques used in Kalman filter design.
First, we express the system equations in state-space form (i.e. using linear algebra equations) with
$$ \dot{\mathbf{x}} = \mathbf{Fx}$$
Now we can assert that we want to find the fundamental matrix $\Phi$ that propagates the state with the equation
$$x(t) = \Phi(t-t_0)x(t_0)$$
which we can equivalently write as
$$x(t_k) = \Phi(\Delta t)x(t_{k-1})$$
In other words, we want to compute the value of $x$ at time $t$ by multiplying its previous value by some matrix $\Phi$. This is not trivial to do because the original equations do not include time.
### Taylor Series Expansion
Broadly speaking there are three ways to find $\Phi$ that are used in Kalman filters. The technique most often used with Kalman filters is to use a Taylor-series expansion. The Taylor series represents a function as an infinite sum of terms. The terms are linear, even for a nonlinear function, so we can express any arbitrary nonlinear function using linear algebra. The cost of this choice is that unless we use an infinite number of terms (and who has time for that these days) the value we compute will be approximate rather than exact.
For the Kalman filter we will be using a form of the series that uses a matrix. But real functions are easier to plot and reason about, so after presenting the matrix form we will work through a couple of examples with real functions. The Taylor series for either case is nearly identical, so this is a good first step.
For the Kalman filter we need the series for the matrix exponential, which gives the fundamental matrix as
$$ \Phi(t) = e^{\mathbf{F}t} = \mathbf{I} + \mathbf{F}t + \frac{(\mathbf{F}t)^2}{2!} + \frac{(\mathbf{F}t)^3}{3!} + ... $$
This is easy to compute, and thus is the typical approach used in Kalman filter design when the filter is reasonably small. If you are wondering where $e$ came from, I point you to the Wikipedia article on the matrix exponential [1]. Here the important point is to recognize the very simple and regular form this equation takes.
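The equation translates almost directly into code. Here is a minimal sketch of my own (not a FilterPy routine) that sums the series to a fixed number of terms:

```python
import numpy as np

def expm_taylor(F, t, n_terms=10):
    """Approximate Phi(t) = exp(F*t) with a truncated Taylor series."""
    Ft = F * t
    term = np.eye(F.shape[0])   # (F*t)^0 / 0!
    total = term.copy()
    for k in range(1, n_terms):
        term = term @ Ft / k    # builds (F*t)^k / k! incrementally
        total = total + term
    return total
```

In production code you would use `scipy.linalg.expm` instead, which is more accurate and handles ill-conditioned matrices.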
Before applying it to a Kalman filter, let's do the Taylor expansion of a math function since this is much easier to visualize than a matrix. I choose $\sin(x)$. The Taylor series for a real or complex function $f(x)$ about $x=a$ is
$$f(x) = \sum_{n=0}^\infty\frac{f^{(n)}(a)}{n!}(x-a)^n$$
where $f^{(n)}$ is the nth derivative of $f$. To compute the Taylor series for $f(x)=\sin(x)$ at $x=0$, let's first work out the terms for $f$.
$$\begin{aligned}
f^{0}(x) &= sin(x) ,\ \ &f^{0}(0) &= 0 \\
f^{1}(x) &= cos(x),\ \ &f^{1}(0) &= 1 \\
f^{2}(x) &= -sin(x),\ \ &f^{2}(0) &= 0 \\
f^{3}(x) &= -cos(x),\ \ &f^{3}(0) &= -1 \\
f^{4}(x) &= sin(x),\ \ &f^{4}(0) &= 0 \\
f^{5}(x) &= cos(x),\ \ &f^{5}(0) &= 1
\end{aligned}
$$
Now we can substitute these values into the equation.
$$f(x) = \frac{0}{0!}(x)^0 + \frac{1}{1!}(x)^1 + \frac{0}{2!}(x)^2 + \frac{-1}{3!}(x)^3 + \frac{0}{4!}(x)^4 + \frac{1}{5!}(x)^5 + ... $$
And let's test this with some code:
```python
import numpy as np

x = .3
estimate = x - x**3/6 + x**5/120  # first three nonzero terms of the series
exact = np.sin(.3)
print('estimate of sin(.3) is', estimate)
print('exact value of sin(.3) is', exact)
```
estimate of sin(.3) is 0.29552025
exact value of sin(.3) is 0.295520206661
The estimate is accurate to seven decimal places using only three terms. If you are curious, go ahead and implement this as a Python function to compute the series for an arbitrary number of terms; one possible version is sketched below. Then I will forge ahead to the matrix form of the equation.
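Here is one such sketch (my own code, with names of my choosing):

```python
import math

def sin_taylor(x, n_terms=3):
    """Approximate sin(x) with the first n_terms nonzero Taylor terms."""
    total = 0.
    for k in range(n_terms):
        n = 2*k + 1                                 # only odd powers survive
        total += (-1)**k * x**n / math.factorial(n)
    return total

print(sin_taylor(.3), math.sin(.3))
```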
Let's consider tracking an object moving in a vacuum. In one dimension the differential equation for motion with zero acceleration is
$$ v = \dot{x}\\a=\ddot{x} =0,$$
which we can put in state space matrix form as
$$\begin{bmatrix}\dot{x} \\ \ddot{x}\end{bmatrix} =\begin{bmatrix}0&1\\0&0\end{bmatrix} \begin{bmatrix}x \\ \dot{x}\end{bmatrix}$$
This is a first order differential equation, so we can set $\mathbf{F}=\begin{bmatrix}0&1\\0&0\end{bmatrix}$ and solve the following equation.
$$\Phi(t) = e^{\mathbf{F}t} = \mathbf{I} + \mathbf{F}t + \frac{(\mathbf{F}t)^2}{2!} + \frac{(\mathbf{F}t)^3}{3!} + ... $$
If you perform the multiplication you will find that $\mathbf{F}^2=\begin{bmatrix}0&0\\0&0\end{bmatrix}$, which means that all higher powers of $\mathbf{F}$ are also $\mathbf{0}$. This makes the computation very easy.
$$
\begin{aligned}
\Phi(t) &=\mathbf{I} + \mathbf{F}t + \mathbf{0} \\
&= \begin{bmatrix}1&0\\0&1\end{bmatrix} + \begin{bmatrix}0&1\\0&0\end{bmatrix}t\\
&= \begin{bmatrix}1&t\\0&1\end{bmatrix}
\end{aligned}$$
We plug this into $x(t_k) = \Phi(\Delta t)x(t_{k-1})$ to get
$$
\begin{aligned}
x(t_k) &= \Phi(\Delta t)x(t_{k-1}) \\
x^- &= \Phi(\Delta t)x \\
x^- &=\begin{bmatrix}1&\Delta t\\0&1\end{bmatrix}x \\
\begin{bmatrix}x \\ \dot{x}\end{bmatrix}^- &=\begin{bmatrix}1&\Delta t\\0&1\end{bmatrix}\begin{bmatrix}x \\ \dot{x}\end{bmatrix}
\end{aligned}$$
This should look very familiar to you! This is the equation we used in the **Multivariate Kalman Filter** chapter to track a moving object.
$$
{\begin{bmatrix}x\\\dot{x}\end{bmatrix}}^- =\begin{bmatrix}1&t \\ 0&1\end{bmatrix} \begin{bmatrix}x \\ \dot{x}\end{bmatrix}
$$
We derived this equation in that chapter by using techniques that are much easier to understand. The advantage of the Taylor series expansion is that we can use it for any arbitrary set of differential equations which are time invariant.
Time invariant? This means systems that do not depend on time. Suppose the input is $x(t) = \sin(t)$, and the system is $f(x) = t\times x(t) = t \sin(t)$. This is not time invariant; the value will be different at different times due to the multiplication by t. On the other hand $f(x) = 3 \sin(t)$ is time invariant. The input $x(t)$ is dependent on time, but that is allowed. This is because to be time invariant it must be true that if $f(t) = x(t)$ then $f(t+\Delta t) = x(t+\Delta t)$. In other words, the output is just time shifted, not altered. This will be true for the second equation, but not the first.
However, we often use a Taylor expansion even when the equations are not time invariant. The answer will still be reasonably accurate so long as the time step is short and the system is nearly constant over that time step.
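We can check the closed form we derived against SciPy's matrix exponential (a quick sanity check of my own, not part of the original derivation):

```python
import numpy as np
from scipy.linalg import expm

dt = 0.1
F = np.array([[0., 1.],
              [0., 0.]])
# exp(F*dt) should equal [[1, dt], [0, 1]] since F^2 = 0
print(expm(F * dt))
```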
### Linear Time Invariant Theory
*Linear Time Invariant Theory*, also known as LTI System Theory, gives us a way to find $\Phi$ using the inverse Laplace transform. You are either nodding your head now, or completely lost. Don't worry, I will not be using the Laplace transform in this book except in this paragraph, as the computation is quite difficult to perform in practice. LTI system theory tells us that
$$ \Phi(t) = \mathcal{L}^{-1}[(s\mathbf{I} - \mathbf{F})^{-1}]$$
I have no intention of going into this other than to say that the inverse Laplace transform converts a signal from the frequency domain back into the time domain, but finding a solution to the equation above is non-trivial. If you are interested, the Wikipedia article on LTI system theory provides an introduction [2].
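For the simple constant-velocity system the computation is still tractable. Here is a sketch of my own using SymPy's symbolic inverse Laplace transform (SymPy may attach `Heaviside(t)` factors, which equal 1 for $t > 0$):

```python
from sympy import Matrix, eye, symbols, inverse_laplace_transform

s, t = symbols('s t', positive=True)
F = Matrix([[0, 1],
            [0, 0]])
M = (s * eye(2) - F).inv()   # (sI - F)^-1
Phi = M.applyfunc(lambda e: inverse_laplace_transform(e, s, t))
print(Phi)   # the familiar [[1, t], [0, 1]], possibly with Heaviside(t) factors
```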
### Numerical Solutions
Finally, there are numerical techniques to find $\Phi$. As filters get larger finding analytical solutions becomes very tedious (though packages like SymPy make it easier). C. F. van Loan [3] has developed a technique that finds both $\Phi$ and $Q$ numerically. Given the continuous model
$$ x' = Fx + Gu$$
where $\mathbf{u}$ is unity white noise, van Loan's method computes and returns the $\Phi$ and $\mathbf{Q}_k$ that discretize that equation.
I have implemented van Loan's method in `FilterPy`. You may use it as follows:
```python
import numpy as np
from filterpy.common import van_loan_discretization

F = np.array([[0., 1.], [-1., 0.]])
G = np.array([[0.], [2.]])  # white noise scaling
phi, Q = van_loan_discretization(F, G, dt=0.1)
```
### Forming First Order Equations from Higher Order Equations
In the sections above I spoke of *first order* differential equations; these are equations with only first derivatives. However, physical systems often require second or higher order equations. Any higher order system of equations can be converted to a first order set of equations by defining extra variables for the first order terms and then solving. Let's do an example.
Given the system $\ddot{x} - 6\dot{x} + 9x = t$ find the first order equations.
The first step is to isolate the highest order term onto one side of the equation.
$$\ddot{x} = 6\dot{x} - 9x + t$$
We define two new variables:
$$ x_1(t) = x \\
x_2(t) = \dot{x}
$$
Now we will substitute these into the original equation and solve, giving us a set of first order equations in terms of these new variables.
First, we know that $\dot{x}_1 = x_2$ and that $\dot{x}_2 = \ddot{x}$. Therefore
$$\begin{aligned}
\dot{x}_2 &= \ddot{x} \\
&= 6\dot{x} - 9x + t\\
&= 6x_2-9x_1 + t
\end{aligned}$$
Therefore our first order system of equations is
$$\begin{aligned}\dot{x}_1 &= x_2 \\
\dot{x}_2 &= 6x_2-9x_1 + t\end{aligned}$$
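As a check, we can write this system in the state-space form used throughout the chapter (separating out the forcing term $t$):

$$\begin{bmatrix}\dot{x}_1 \\ \dot{x}_2\end{bmatrix} = \begin{bmatrix}0 & 1 \\ -9 & 6\end{bmatrix}\begin{bmatrix}x_1 \\ x_2\end{bmatrix} + \begin{bmatrix}0 \\ t\end{bmatrix}$$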
If you practice this a bit you will become adept at it. Isolate the highest term, define a new variable and its derivatives, and then substitute.
## Design of the Process Noise Matrix
**Author's note: this section contains some of the more challenging math in this book. Please bear with it, as few books cover this well, and an accurate design is imperative for good filter performance. At the end I present Python functions from FilterPy which will compute the math for you for common scenarios.**
In general the design of the $\mathbf{Q}$ matrix is among the most difficult aspects of Kalman filter design. This is due to several factors. First, the math itself is somewhat difficult and requires a good foundation in signal theory. Second, we are trying to model the noise in something for which we have little information. For example, consider trying to model the process noise for a baseball. We can model it as a sphere moving through the air, but that leaves many unknown factors - the wind, ball rotation and spin decay, the coefficient of friction of a scuffed ball with stitches, air density, and so on. I will develop the equations for an exact mathematical solution for a given process model, but since the process model is incomplete the result for $\mathbf{Q}$ will also be incomplete. This has a lot of ramifications for the behavior of the Kalman filter. If $\mathbf{Q}$ is too small then the filter will be overconfident in its prediction model and will diverge from the actual solution. If $\mathbf{Q}$ is too large then the filter will be unduly influenced by the noise in the measurements and perform sub-optimally. In practice we spend a lot of time running simulations and evaluating collected data to try to select an appropriate value for $\mathbf{Q}$. But let's start by looking at the math.
Let's assume a kinematic system - some system that can be modeled using Newton's equations of motion. We can make a few different assumptions about this process.
We have been using a process model of
$$ f(\mathbf{x}) = \mathbf{Fx} + \mathbf{w}$$
where $\mathbf{w}$ is the process noise. Kinematic systems are *continuous* - their inputs and outputs can vary at any arbitrary point in time. However, our Kalman filters are *discrete*. We sample the system at regular intervals. Therefore we must find the discrete representation for the noise term in the equation above. However, this depends on what assumptions we make about the behavior of the noise. We will consider two different models for the noise.
### Continuous White Noise Model
We model kinematic systems using Newton's equations. So far in this book we have either used position and velocity, or position, velocity, and acceleration as the models for our systems. There is nothing stopping us from going further - we can model jerk, jounce, snap, and so on. We don't do that normally because adding terms beyond the dynamics of the real system actually degrades the solution.
Let's say that we need to model the position, velocity, and acceleration. We can then assume that acceleration is constant. Of course, there is process noise in the system and so the acceleration is not actually constant. In this section we will assume that the acceleration changes by a continuous time zero-mean white noise $w(t)$. In other words, we are assuming that the acceleration changes by small amounts that over time average to 0 (zero-mean).
Since the noise is changing continuously we will need to integrate to get the discrete noise for the discretization interval that we have chosen. We will not prove it here, but the equation for the discretization of the noise is
$$\mathbf{Q} = \int_0^{\Delta t} \Phi(t)\mathbf{Q_c}\Phi^\mathsf{T}(t) dt$$
where $\mathbf{Q_c}$ is the continuous noise. This gives us
$$\Phi = \begin{bmatrix}1 & \Delta t & {\Delta t}^2/2 \\ 0 & 1 & \Delta t\\ 0& 0& 1\end{bmatrix}$$
for the fundamental matrix, and
$$\mathbf{Q_c} = \begin{bmatrix}0&0&0\\0&0&0\\0&0&1\end{bmatrix} \Phi_s$$
for the continuous process noise matrix, where $\Phi_s$ is the spectral density of the white noise.
We could carry out these computations ourselves, but I prefer using SymPy to solve the equation.
```python
import sympy
from sympy import (init_printing, Matrix,MatMul,
integrate, symbols)
init_printing(use_latex='mathjax')
dt, phi = symbols('\Delta{t} \Phi_s')
F_k = Matrix([[1, dt, dt**2/2],
[0, 1, dt],
[0, 0, 1]])
Q_c = Matrix([[0, 0, 0],
[0, 0, 0],
[0, 0, 1]])*phi
Q=sympy.integrate(F_k * Q_c * F_k.T, (dt, 0, dt))
# factor phi out of the matrix to make it more readable
Q = Q / phi
sympy.MatMul(Q, phi)
```
$$\left[\begin{matrix}\frac{\Delta{t}^{5}}{20} & \frac{\Delta{t}^{4}}{8} & \frac{\Delta{t}^{3}}{6}\\\frac{\Delta{t}^{4}}{8} & \frac{\Delta{t}^{3}}{3} & \frac{\Delta{t}^{2}}{2}\\\frac{\Delta{t}^{3}}{6} & \frac{\Delta{t}^{2}}{2} & \Delta{t}\end{matrix}\right] \Phi_{s}$$
For completeness, let us compute the equations for the 0th order and 1st order equations.
```python
F_k = sympy.Matrix([[1]])
Q_c = sympy.Matrix([[phi]])
print('0th order discrete process noise')
sympy.integrate(F_k*Q_c*F_k.T,(dt, 0, dt))
```
0th order discrete process noise
$$\left[\begin{matrix}\Delta{t} \Phi_{s}\end{matrix}\right]$$
```python
F_k = sympy.Matrix([[1, dt],
[0, 1]])
Q_c = sympy.Matrix([[0, 0],
[0, 1]])*phi
Q = sympy.integrate(F_k * Q_c * F_k.T, (dt, 0, dt))
print('1st order discrete process noise')
# factor phi out of the matrix to make it more readable
Q = Q / phi
sympy.MatMul(Q, phi)
```
1st order discrete process noise
$$\left[\begin{matrix}\frac{\Delta{t}^{3}}{3} & \frac{\Delta{t}^{2}}{2}\\\frac{\Delta{t}^{2}}{2} & \Delta{t}\end{matrix}\right] \Phi_{s}$$
### Piecewise White Noise Model
Another model for the noise assumes that the highest order term (say, acceleration) is constant for each time period, but differs for each time period, and each of these is uncorrelated between time periods. This is subtly different from the model above, where we assumed that the last term had a continuously varying noisy signal applied to it.
We will model this as
$$f(x)=Fx+\Gamma w$$
where $\Gamma$ is the *noise gain* of the system, and $w$ is the constant piecewise acceleration (or velocity, or jerk, etc).
Let's start by looking at a first order system. In this case we have the state transition function
$$\mathbf{F} = \begin{bmatrix}1&\Delta t \\ 0& 1\end{bmatrix}$$
In one time period, the change in velocity will be $w(t)\Delta t$, and the change in position will be $w(t)\Delta t^2/2$, giving us
$$\Gamma = \begin{bmatrix}\frac{1}{2}\Delta t^2 \\ \Delta t\end{bmatrix}$$
The covariance of the process noise is then
$$Q = E[\Gamma w(t) w(t) \Gamma^\mathsf{T}] = \Gamma\sigma^2_v\Gamma^\mathsf{T}$$
We can compute that with SymPy as follows
```python
var=symbols('sigma^2_v')
v = Matrix([[dt**2 / 2], [dt]])
Q = v * var * v.T
# factor variance out of the matrix to make it more readable
Q = Q / var
sympy.MatMul(Q, var)
```
$$\left[\begin{matrix}\frac{\Delta{t}^{4}}{4} & \frac{\Delta{t}^{3}}{2}\\\frac{\Delta{t}^{3}}{2} & \Delta{t}^{2}\end{matrix}\right] \sigma^{2}_{v}$$
The second order system proceeds with the same math.
$$\mathbf{F} = \begin{bmatrix}1 & \Delta t & {\Delta t}^2/2 \\ 0 & 1 & \Delta t\\ 0& 0& 1\end{bmatrix}$$
Here we will assume that the white noise is a discrete time Wiener process. This gives us
$$\Gamma = \begin{bmatrix}\frac{1}{2}\Delta t^2 \\ \Delta t\\ 1\end{bmatrix}$$
There is no 'truth' to this model; it is just convenient and provides good results. For example, we could instead assume that the noise is applied to the jerk, at the cost of a more complicated equation.
The covariance of the process noise is then
$$Q = E[\Gamma w(t) w(t) \Gamma^\mathsf{T}] = \Gamma\sigma^2_v\Gamma^\mathsf{T}$$
We can compute that with SymPy as follows
```python
var=symbols('sigma^2_v')
v = Matrix([[dt**2 / 2], [dt], [1]])
Q = v * var * v.T
# factor variance out of the matrix to make it more readable
Q = Q / var
sympy.MatMul(Q, var)
```
$$\left[\begin{matrix}\frac{\Delta{t}^{4}}{4} & \frac{\Delta{t}^{3}}{2} & \frac{\Delta{t}^{2}}{2}\\\frac{\Delta{t}^{3}}{2} & \Delta{t}^{2} & \Delta{t}\\\frac{\Delta{t}^{2}}{2} & \Delta{t} & 1\end{matrix}\right] \sigma^{2}_{v}$$
We cannot say that this model is more or less correct than the continuous model - both are approximations to what is happening to the actual object. Only experience and experiments can guide you to the appropriate model. In practice you will usually find that either model provides reasonable results, but typically one will perform better than the other.
The advantage of the second model is that we can model the noise in terms of $\sigma^2$ which we can describe in terms of the motion and the amount of error we expect. The first model requires us to specify the spectral density, which is not very intuitive, but it handles varying time samples much more easily since the noise is integrated across the time period. However, these are not fixed rules - use whichever model (or a model of your own devising) based on testing how the filter performs and/or your knowledge of the behavior of the physical model.
A good rule of thumb is to set $\sigma$ somewhere from $\frac{1}{2}\Delta a$ to $\Delta a$, where $\Delta a$ is the maximum amount that the acceleration will change between sample periods. In practice we pick a number, run simulations on data, and choose a value that works well.
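For example, here is a sketch with hypothetical numbers of my choosing (`Q_discrete_white_noise` is the FilterPy function introduced in the next section):

```python
from filterpy.common import Q_discrete_white_noise

# hypothetical: acceleration changes by at most 0.2 m/s^2 between samples
delta_a = 0.2
sigma = 0.5 * delta_a   # rule of thumb: between delta_a/2 and delta_a
Q = Q_discrete_white_noise(dim=2, dt=0.1, var=sigma**2)
```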
### Using FilterPy to Compute Q
FilterPy offers several routines to compute the $\mathbf{Q}$ matrix. The function `Q_continuous_white_noise()` computes $\mathbf{Q}$ for a given value for $\Delta t$ and the spectral density.
```python
from filterpy.common import Q_continuous_white_noise
from filterpy.common import Q_discrete_white_noise
Q = Q_continuous_white_noise(dim=2, dt=1, spectral_density=1)
print(Q)
```
[[ 0.33333333 0.5 ]
[ 0.5 1. ]]
```python
Q = Q_continuous_white_noise(dim=3, dt=1, spectral_density=1)
print(Q)
```
[[ 0.05 0.125 0.16666667]
[ 0.125 0.33333333 0.5 ]
[ 0.16666667 0.5 1. ]]
The function `Q_discrete_white_noise()` computes $\mathbf{Q}$ assuming a piecewise model for the noise.
```python
Q = Q_discrete_white_noise(2, var=1.)
print(Q)
```
[[ 0.25 0.5 ]
[ 0.5 1. ]]
```python
Q = Q_discrete_white_noise(3, var=1.)
print(Q)
```
[[ 0.25 0.5 0.5 ]
[ 0.5 1. 1. ]
[ 0.5 1. 1. ]]
### Simplification of Q
Through the early parts of this book I used a much simpler form for $\mathbf{Q}$, often only putting a noise term in the lower rightmost element. Is this justified? Well, consider the value of $\mathbf{Q}$ for a small $\Delta t$
```python
Q = Q_continuous_white_noise(
dim=3, dt=0.05, spectral_density=1)
print(Q)
```
[[ 0.00000002 0.00000078 0.00002083]
[ 0.00000078 0.00004167 0.00125 ]
[ 0.00002083 0.00125 0.05 ]]
We can see that most of the terms are very small. Recall that the only Kalman filter equation using this matrix is
$$ \mathbf{\bar{P}}=\mathbf{FPF}^\mathsf{T} + \mathbf{Q}$$
If the values for $\mathbf{Q}$ are small relative to $\mathbf{P}$
then it will contribute almost nothing to the computation of $\mathbf{\bar{P}}$. Setting $\mathbf{Q}$ to
$$\mathbf{Q}=\begin{bmatrix}0&0&0\\0&0&0\\0&0&\sigma^2\end{bmatrix}$$
while not correct, is often a useful approximation. If you do this you will have to perform quite a few studies to guarantee that your filter works in a variety of situations. Given the availability of functions to compute the correct values of $\mathbf{Q}$ for you I would strongly recommend not using approximations. Perhaps it is justified for quick-and-dirty filters, or on embedded devices where you need to wring out every last bit of performance, and seek to minimize the number of matrix operations required.
## Numeric Integration of Differential Equations
> **Author's note: This topic requires multiple books to fully cover it. If you need to know it in depth, *Computational Physics in Python* by Dr. Eric Ayars is excellent, and available for free at http://phys.csuchico.edu/ayars/312/Handouts/comp-phys-python.pdf**
So far in this book we have been working with systems that can be expressed with simple linear differential equations such as
$$v = \dot{x} = \frac{dx}{dt}$$
which we can integrate into a closed form solution, in this case $x(t) =vt + x_0$. This equation is then put into the system matrix $\mathbf{F}$, which allows the Kalman filter equations to predict the system state in the future. For example, our constant velocity filters use
$$\mathbf{F} = \begin{bmatrix}
1 & \Delta t \\ 0 & 1\end{bmatrix}$$
The Kalman filter predict equation is $\mathbf{\bar{x}} = \mathbf{Fx} + \mathbf{Bu}$. Hence the prediction is
$$\mathbf{\bar{x}} = \begin{bmatrix}
1 & \Delta t \\ 0 & 1\end{bmatrix}\begin{bmatrix}
x\\ \dot{x}\end{bmatrix}
$$
which multiplies out to
$$\begin{aligned}\bar{x} &= x + v\Delta t \\
\bar{\dot{x}} &= \dot{x}\end{aligned}$$
This works for linear ordinary differential equations (ODEs), but does not work well for nonlinear equations. For example, consider trying to predict the position of a rapidly turning car. Cars turn by pivoting the front wheels, which causes the car to pivot around the rear axle. Therefore the path will be continuously varying and a linear prediction will necessarily produce an incorrect value. If the change in the system over $\Delta t$ is small enough this can often produce adequate results, but that will rarely be the case with the nonlinear Kalman filters we will be studying in subsequent chapters. Another problem is that even trivial systems produce differential equations for which finding closed form solutions is difficult or impossible.
For these reasons we need to know how to numerically integrate differential equations. This is a vast topic, and SciPy provides integration routines such as `scipy.integrate.ode`. These routines are robust, but to understand what they are doing we will build up from the simplest technique, Euler's method, and then move on to the Runge Kutta family that does most of the practical work.
### Euler's Method
Let's say we have the initial condition problem of
$$ y' = y, \\ y(0) = 1$$
I happen to know the exact answer is $y=e^t$, but of course in general we will not know the exact solution. In general all we know is the derivative of the equation, which is equal to the slope. We also know the initial value: at $t=0$, $y=1$. If we know these two pieces of information we can estimate the value of $y(1)$ by projecting forward from $y(0)=1$ along the slope of the function at $t=0$. I've plotted this below
```python
import numpy as np
import matplotlib.pyplot as plt
t = np.linspace(-1, 1, 10)
plt.plot(t, np.exp(t))
t = np.linspace(0, 1, 2)
plt.plot(t,t+1);
```
If the curve is relatively straight then the line formed by the slope will not be very far from the curve. Here the step size of 1 is rather large; you can see that the line stays very close to the curve at, say, $t=0.1$. But let's continue with a step size of 1 for a moment. We can see that at $t=1$ the estimated value of $y$ is 2. Now we can compute the value at $t=2$ by taking the slope of the curve at $t=1$ and adding it to our previous estimate. The slope is computed with $y'=y$, so the slope is 2.
```python
import book_plots
t = np.linspace(-1, 2, 20)
plt.plot(t, np.exp(t))
plt.plot([0, 1, 2], [1, 2, 4])  # the two Euler steps
book_plots.set_labels(x='x', y='y');
```
Here we see the next estimate for y is 4. The errors are quickly getting large, and you might be unimpressed. But 1 is a very large step size. Let's put this algorithm in code, and verify that it works by trying to generate the result above.
```python
def euler(t, tmax, y, dx, step=1.):
ys = []
while t < tmax:
y = y + step*dx(t, y)
ys.append(y)
t +=step
return ys
```
```python
def dx(t, y): return y
print(euler(0, 1, 1, dx, step=1.)[-1])
print(euler(0, 2, 1, dx, step=1.)[-1])
```
2.0
4.0
This looks correct. Now let's plot the result of a much smaller step size.
```python
ys = euler(0, 4, 1, dx, step=0.00001)
plt.subplot(1,2,1)
plt.title('Computed')
plt.plot(np.linspace(0, 4, len(ys)),ys)
plt.subplot(1,2,2)
t = np.linspace(0, 4, 20)
plt.title('Exact')
plt.plot(t, np.exp(t));
```
```python
print('exact answer=', np.exp(4))
print('euler answer=', ys[-1])
print('difference =', np.exp(4) - ys[-1])
print('iterations =', len(ys))
```
exact answer= 54.5981500331
euler answer= 54.59705808834125
difference = 0.00109194480299
iterations = 400000
Here we see that the error is reasonably small, but it took a very large number of iterations to get three digits of precision. In practice Euler's method is too slow for most problems, and we use higher order methods.
Before we go on, let's formally derive Euler's method, as it is the basis for the more advanced Runge Kutta methods used in the next section. In fact, Euler's method is the simplest form of Runge Kutta.
Here are the first terms of the Taylor expansion of $y$. An infinite expansion would give an exact answer, so $O(h^4)$ denotes the error due to truncating the series after the $h^3$ term.
$$y(t_0 + h) = y(t_0) + h y'(t_0) + \frac{1}{2!}h^2 y''(t_0) + \frac{1}{3!}h^3 y'''(t_0) + O(h^4).$$
Here we can see that Euler's method uses only the first two terms of the Taylor expansion. For sufficiently small $h$ each subsequent term is smaller than the previous one, so we are assured that the estimate will not be too far off from the correct value.
### Runge Kutta Methods
Runge Kutta integration is the workhorse of numerical integration. As mentioned earlier there are a vast number of methods in the literature. In practice, using the Runge Kutta algorithm that I present here will solve most any problem you will face. It offers a very good balance of speed, precision, and stability, and it is the 'go to' numerical integration method unless you have a very good reason to choose something different. If you have the knowledge to make that decision you have no need to be reading this section!
Let's dive in. We start with some differential equation
$$\ddot{y} = \frac{d}{dt}\dot{y}$$
We can substitute the derivative of y with a function f, like so
$$\ddot{y} = \frac{d}{dt}f(y,t)$$
Deriving these equations is outside the scope of this book, but the Runge Kutta RK4 method is defined with these equations.
$$y(t+\Delta t) = y(t) + \frac{1}{6}(k_1 + 2k_2 + 2k_3 + k_4) + O(\Delta t^4)$$
$$\begin{aligned}
k_1 &= f(y,t)\Delta t \\
k_2 &= f(y+\frac{1}{2}k_1, t+\frac{1}{2}\Delta t)\Delta t \\
k_3 &= f(y+\frac{1}{2}k_2, t+\frac{1}{2}\Delta t)\Delta t \\
k_4 &= f(y+k_3, t+\Delta t)\Delta t
\end{aligned}
$$
```python
def runge_kutta4(y, x, dx, f):
"""computes 4th order Runge-Kutta for dy/dx.
y is the initial value for y
x is the initial value for x
dx is the difference in x (e.g. the time step)
f is a callable function (y, x) that you supply
to compute dy/dx for the specified values.
"""
k1 = dx * f(y, x)
k2 = dx * f(y + 0.5*k1, x + 0.5*dx)
k3 = dx * f(y + 0.5*k2, x + 0.5*dx)
k4 = dx * f(y + k3, x + dx)
return y + (k1 + 2*k2 + 2*k3 + k4) / 6.
```
Let's use this for a simple example. Let
$$\dot{y} = t\sqrt{y(t)}$$
with the initial values
$$\begin{aligned}t_0 &= 0\\y_0 &= y(t_0) = 1\end{aligned}$$
```python
import math
import numpy as np
t = 0.
y = 1.
dt = .1
ys, ts = [], []
def func(y,t):
return t*math.sqrt(y)
while t <= 10:
y = runge_kutta4(y, t, dt, func)
t += dt
ys.append(y)
ts.append(t)
exact = [(t**2 + 4)**2 / 16. for t in ts]
plt.plot(ts, ys)
plt.plot(ts, exact)
error = np.array(exact) - np.array(ys)
print("max error {}".format(max(error)))
```
## Iterative Least Squares for Sensor Fusion (Optional)
A broad category of use for the Kalman filter is *sensor fusion*. For example, we might have a position sensor and a velocity sensor, and we want to combine the data from both to find an optimal estimate of state. In this section we will discuss a different case, where we have multiple sensors providing the same type of measurement.
The Global Positioning System (GPS) is designed so that at least 6 satellites are in view at any time at any point on the globe. The GPS receiver knows the location of the satellites in the sky relative to the Earth. At each epoch (instant in time) the receiver gets a signal from each satellite from which it can derive the *pseudorange* to the satellite. In more detail, the GPS receiver gets a signal identifying the satellite along with the time stamp of when the signal was transmitted. The GPS satellite has an atomic clock on board so this time stamp is extremely accurate. The signal travels at the speed of light, which is constant in a vacuum, so in theory the GPS should be able to produce an extremely accurate distance measurement to the satellite by measuring how long the signal took to reach the receiver. There are several problems with that. First, the signal is not traveling through a vacuum, but through the atmosphere. The atmosphere causes the signal to bend, so it is not traveling in a straight line. This causes the signal to take longer to reach the receiver than theory suggests. Second, the on board clock on the GPS *receiver* is not very accurate, so deriving an exact time duration is nontrivial. Third, in many environments the signal can bounce off of buildings, trees, and other objects, causing either a longer path or *multipath*, in which case the receiver receives both the original signal from space and the reflected signals.
Let's look at this graphically. I will do this in 2D to make it easier to graph and see, but of course this will generalize to three dimensions. We know the position of each satellite and the range to each (the range is called the *pseudorange*; we will discuss why later). We cannot measure the range exactly, so there is noise associated with the measurement, which I have depicted with the thickness of the lines. Here is an example of four pseudorange readings from four satellites. I positioned them in a configuration which is unlikely for the actual GPS constellation merely to make the intersections easy to visualize. Also, the amount of error shown is not to scale with the distances, again to make it easier to see.
```python
from book_format import set_figsize, figsize
import ukf_internal
with figsize(10, 6):
ukf_internal.show_four_gps()
```
In 2D two measurements are typically enough to determine a unique solution. There are two intersections of the range circles, but usually the second intersection is not physically realizable (it is in space, or under ground). However, with GPS we also need to solve for time, so we would need a third measurement to get a 2D position.
However, since GPS is a 3D system we need to solve for the 3 dimensions of space, and 1 dimension of time. That is 4 unknowns, so in theory with 4 satellites we have all the information we need. However, we normally have at least 6 satellites in view, and often more than 6. This means the system is *overdetermined*. Finally, because of the noise in the measurements none of the pseudoranges intersect exactly.
If you are well versed in linear algebra you know that this is an extremely common problem in scientific computing, and that there are various techniques for solving overdetermined systems. Probably the most common approach used by GPS receivers to find the position is the *iterative least squares* algorithm, commonly abbreviated ILS. As you know, if the errors are Gaussian then the least squares algorithm finds the optimal solution. In other words, we want to minimize the square of the residuals for an overdetermined system.
Let's start with some definitions which should be familiar to you. First, we define the innovation as
$$\delta \mathbf{z}^-= \mathbf{z} - h(\mathbf{\bar{x}})$$
where $\mathbf{z}$ is the measurement, $h(\bullet)$ is the measurement function, and $\delta \mathbf{z}^-$ is the innovation, which we abbreviate as $\mathbf{y}$ in FilterPy. In other words, this is the equation $\mathbf{y} = \mathbf{z} - \mathbf{H\bar{x}}$ in the linear Kalman filter's update step.
Next, the *measurement residual* is
$$\delta \mathbf{z}^+ = \mathbf{z} - h(\mathbf{x}^+)$$
I don't use the plus superscript much because I find it quickly makes the equations unreadable, but $\mathbf{x}^+$ is the *a posteriori* state estimate - the estimate after the measurement has been incorporated, which is what the update step of the linear Kalman filter computes. Here it stands for the value of x which the ILS algorithm will compute on each iteration.
These equations give us the following linear algebra equation:
$$\delta \mathbf{z}^- = \mathbf{H}\delta \mathbf{x} + \delta \mathbf{z}^+$$
$\mathbf{H}$ is our measurement function, defined as
$$\mathbf{H} = \frac{d\mathbf{h}}{d\mathbf{x}} = \frac{d\mathbf{z}}{d\mathbf{x}}$$
We find the minimum of an equation by taking the derivative and setting it to zero. In this case we want to minimize the square of the residuals, so our equation is
$$ \frac{\partial}{\partial \mathbf{x}}({\delta \mathbf{z}^+}^\mathsf{T}\delta \mathbf{z}^+) = 0,$$
where
$$\delta \mathbf{z}^+=\delta \mathbf{z}^- - \mathbf{H}\delta \mathbf{x}.$$
Here I have switched to using the matrix $\mathbf{H}$ as the measurement function. We want to use linear algebra to perform the ILS, so for each step we will have to compute the matrix $\mathbf{H}$ which corresponds to $h(\mathbf{x^-})$ during each iteration. $h(\bullet)$ is usually nonlinear for these types of problems so you will have to linearize it at each step (more about this soon).
For various reasons you may want to weigh some measurement more than others. For example, the geometry of the problem might favor orthogonal measurements, or some measurements may be more noisy than others. We can do that with the equation
$$ \frac{\partial}{\partial \mathbf{x}}({\delta \mathbf{z}^+}^\mathsf{T}\mathbf{W}\delta \mathbf{z}^+) = 0$$
If we solve the first equation for ${\delta \mathbf{x}}$ (the derivation is shown in the next section) we get
$${\delta \mathbf{x}} = {{(\mathbf{H}^\mathsf{T}\mathbf{H})^{-1}}\mathbf{H}^\mathsf{T} \delta \mathbf{z}^-}
$$
And the second equation yields
$${\delta \mathbf{x}} = {{(\mathbf{H}^\mathsf{T}\mathbf{WH})^{-1}}\mathbf{H}^\mathsf{T}\mathbf{W} \delta \mathbf{z}^-}
$$
Since the equations are overdetermined we cannot solve these equations exactly, so we use an iterative approach. An initial guess for the position is made, and this guess is used to compute $\delta \mathbf{x}$ via the equation above. $\delta \mathbf{x}$ is added to the initial guess, and this new state is fed back into the equation to produce another $\delta \mathbf{x}$. We iterate in this manner until the difference in the measurement residuals is suitably small.
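Each iteration is just a linear least squares solve. Here is a one-step sketch with placeholder values of my own; in practice you would never form the inverse explicitly, so this uses `np.linalg.solve` on the normal equations:

```python
import numpy as np

H = np.array([[1., 0.],
              [0., 1.],
              [1., 1.]])          # hypothetical linearized measurement matrix
dz = np.array([0.5, -0.2, 0.4])   # hypothetical measurement residuals
W = np.diag([1., 1., 4.])         # weigh the third measurement more heavily

# delta_x = (H^T W H)^-1 H^T W dz
dx = np.linalg.solve(H.T @ W @ H, H.T @ W @ dz)
print(dx)
```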
### Derivation of ILS Equations (Optional)
I will implement the ILS in code, but first let's derive the equation for $\delta \mathbf{x}$. You can skip the derivation if you want, but it is somewhat instructive and not too hard if you know basic linear algebra and partial differential equations.
Substituting $\delta \mathbf{z}^+=\delta \mathbf{z}^- - \mathbf{H}\delta \mathbf{x}$ into the partial differential equation we get
$$ \frac{\partial}{\partial \mathbf{x}}(\delta \mathbf{z}^- -\mathbf{H} \delta \mathbf{x})^\mathsf{T}(\delta \mathbf{z}^- - \mathbf{H} \delta \mathbf{x})=0$$
which expands to
$$ \frac{\partial}{\partial \mathbf{x}}({\delta \mathbf{x}}^\mathsf{T}\mathbf{H}^\mathsf{T}\mathbf{H}\delta \mathbf{x} -
{\delta \mathbf{x}}^\mathsf{T}\mathbf{H}^\mathsf{T}\delta \mathbf{z}^- -
{\delta \mathbf{z}^-}^\mathsf{T}\mathbf{H}\delta \mathbf{x} +
{\delta \mathbf{z}^-}^\mathsf{T}\delta \mathbf{z}^-)=0$$
We know that
$$\frac{\partial \mathbf{A}^\mathsf{T}\mathbf{B}}{\partial \mathbf{B}} = \frac{\partial \mathbf{B}^\mathsf{T}\mathbf{A}}{\partial \mathbf{B}} = \mathbf{A}^\mathsf{T}$$
Therefore the third term can be computed as
$$\frac{\partial}{\partial \mathbf{x}}{\delta \mathbf{z}^-}^\mathsf{T}\mathbf{H}\delta \mathbf{x} = {\delta \mathbf{z}^-}^\mathsf{T}\mathbf{H}$$
and the second term as
$$\frac{\partial}{\partial \mathbf{x}}{\delta \mathbf{x}}^\mathsf{T}\mathbf{H}^\mathsf{T}\delta \mathbf{z}^-={\delta \mathbf{z}^-}^\mathsf{T}\mathbf{H}$$
We also know that
$$\frac{\partial \mathbf{B}^\mathsf{T}\mathbf{AB}}{\partial \mathbf{B}} = \mathbf{B}^\mathsf{T}(\mathbf{A} + \mathbf{A}^\mathsf{T})$$
Therefore the first term becomes
$$
\begin{aligned}
\frac{\partial}{\partial \mathbf{x}}{\delta \mathbf{x}}^\mathsf{T}\mathbf{H}^\mathsf{T}\mathbf{H}\delta \mathbf{x} &= {\delta \mathbf{x}}^\mathsf{T}(\mathbf{H}^\mathsf{T}\mathbf{H} + {\mathbf{H}^\mathsf{T}\mathbf{H}}^\mathsf{T})\\
&= {\delta \mathbf{x}}^\mathsf{T}(\mathbf{H}^\mathsf{T}\mathbf{H} + \mathbf{H}^\mathsf{T}\mathbf{H}) \\
&= 2{\delta \mathbf{x}}^\mathsf{T}\mathbf{H}^\mathsf{T}\mathbf{H}
\end{aligned}$$
Finally, the fourth term is
$$ \frac{\partial}{\partial \mathbf{x}}
{\delta \mathbf{z}^-}^\mathsf{T}\delta \mathbf{z}^-=0$$
Replacing the terms in the expanded partial differential equation gives us
$$
2{\delta \mathbf{x}}^\mathsf{T}\mathbf{H}^\mathsf{T}\mathbf{H} -
{\delta \mathbf{z}^-}^\mathsf{T}\mathbf{H} - {\delta \mathbf{z}^-}^\mathsf{T}\mathbf{H}
=0
$$
$${\delta \mathbf{x}}^\mathsf{T}\mathbf{H}^\mathsf{T}\mathbf{H} -
{\delta \mathbf{z}^-}^\mathsf{T}\mathbf{H} = 0$$
$${\delta \mathbf{x}}^\mathsf{T}\mathbf{H}^\mathsf{T}\mathbf{H} =
{\delta \mathbf{z}^-}^\mathsf{T}\mathbf{H}$$
Multiplying each side by $(\mathbf{H}^\mathsf{T}\mathbf{H})^{-1}$ yields
$${\delta \mathbf{x}}^\mathsf{T} =
{\delta \mathbf{z}^-}^\mathsf{T}\mathbf{H}(\mathbf{H}^\mathsf{T}\mathbf{H})^{-1}$$
Taking the transpose of each side gives
$${\delta \mathbf{x}} = ({{\delta \mathbf{z}^-}^\mathsf{T}\mathbf{H}(\mathbf{H}^\mathsf{T}\mathbf{H})^{-1}})^\mathsf{T} \\
={{(\mathbf{H}^\mathsf{T}\mathbf{H})^{-1}}^T\mathbf{H}^\mathsf{T} \delta \mathbf{z}^-} \\
={{(\mathbf{H}^\mathsf{T}\mathbf{H})^{-1}}\mathbf{H}^\mathsf{T} \delta \mathbf{z}^-}
$$
For various reasons you may want to weigh some measurement more than others. We can do that with the equation
$$ \frac{\partial}{\partial \mathbf{x}}({\delta \mathbf{z}}^\mathsf{T}\mathbf{W}\delta \mathbf{z}) = 0$$
Replicating the math above with the added $\mathbf{W}$ term results in
$${\delta \mathbf{x}} = {{(\mathbf{H}^\mathsf{T}\mathbf{WH})^{-1}}\mathbf{H}^\mathsf{T}\mathbf{W} \delta \mathbf{z}^-}
$$
### Implementing Iterative Least Squares
Our goal is to implement an iterative solution to
$${\delta \mathbf{x}} = {{(\mathbf{H}^\mathsf{T}\mathbf{H})^{-1}}\mathbf{H}^\mathsf{T} \delta \mathbf{z}^-}
$$
First, we have to compute $\mathbf{H}$, where $\mathbf{H} = d\mathbf{z}/d\mathbf{x}$. To keep the example small so the results are easier to interpret we will do this in 2D. Therefore for $n$ satellites $\mathbf{H}$ expands to
$$\mathbf{H} = \begin{bmatrix}
\frac{\partial p_1}{\partial x} & \frac{\partial p_1}{\partial y} \\
\frac{\partial p_2}{\partial x} & \frac{\partial p_2}{\partial y} \\
\vdots & \vdots \\
\frac{\partial p_n}{\partial x} & \frac{\partial p_n}{\partial y}
\end{bmatrix}$$
We will linearize $\mathbf{H}$ by computing the partial for $x$ as
$$ \frac{\text{estimated x position} - \text{satellite x position}}{\text{estimated range to satellite}}$$
The equation for $y$ just substitutes $y$ for $x$.
Then the algorithm is as follows.
```
def ILS:
    guess position
    while not converged:
        compute range to satellites for current estimated position
        compute H linearized at estimated position
        compute new estimate delta from (H^T H)^-1 H^T dz
        new estimate = current estimate + estimate delta
        check for convergence
```
```python
import numpy as np
from numpy.linalg import norm, inv
from numpy.random import randn
from numpy import dot
import book_format
np.random.seed(1234)
user_pos = np.array([800, 200])
sat_pos = np.asarray(
[[0, 1000],
[0, -1000],
[500, 500]], dtype=float)
def satellite_range(pos, sat_pos):
    """ Compute distance between position 'pos' and
    the list of positions in sat_pos"""
    diff = np.asarray(pos) - sat_pos
    return norm(diff, axis=1)
def hx_ils(pos, sat_pos, range_est):
""" compute measurement function where
pos : array_like
2D current estimated position. e.g. (23, 45)
sat_pos : array_like of 2D positions
position of each satellite e.g. [(0,100), (100,0)]
range_est : array_like of floats
range to each satellite
"""
N = len(sat_pos)
H = np.zeros((N, 2))
for j in range(N):
H[j, 0] = (pos[0] - sat_pos[j, 0]) / range_est[j]
H[j, 1] = (pos[1] - sat_pos[j, 1]) / range_est[j]
return H
def lop_ils(zs, sat_pos, pos_est, hx, eps=1.e-6):
""" iteratively estimates the solution to a set of
measurement, given known transmitter locations"""
pos = np.array(pos_est)
with book_format.numpy_precision(precision=4):
converged = False
for i in range(20):
r_est = satellite_range(pos, sat_pos)
print('iteration:', i)
H = hx(pos, sat_pos, r_est)
Hinv = inv(dot(H.T, H)).dot(H.T)
# update position estimate
y = zs - r_est
print('innovation', y)
Hy = np.dot(Hinv, y)
pos = pos + Hy
print('pos {}\n\n'.format(pos))
if max(abs(Hy)) < eps:
converged = True
break
return pos, converged
# compute range to each sensor
rz = satellite_range(user_pos, sat_pos)
pos, converged = lop_ils(rz, sat_pos, (900, 90), hx=hx_ils)
print('Iterated solution: ', pos)
```
iteration: 0
innovation [-148.512 28.6789 -148.5361]
pos [ 805.4175 205.2868]
iteration: 1
innovation [-0.1177 -7.4049 -0.1599]
pos [ 800.04 199.9746]
iteration: 2
innovation [-0.0463 -0.001 -0.0463]
pos [ 800. 200.]
iteration: 3
innovation [-0. -0. -0.]
pos [ 800. 200.]
Iterated solution: [ 800. 200.]
So let's think about this. The first iteration is essentially performing the computation that the linear Kalman filter computes during the update step:
$$\mathbf{y} = \mathbf{z} - \mathbf{Hx}\\
\mathbf{x} = \mathbf{x} + \mathbf{Ky}$$
where the Kalman gain equals one. You can see that despite the very inaccurate initial guess (900, 90) the computed value for $\mathbf{x}$, (805.4, 205.3), was very close to the actual value of (800, 200). However, it was not perfect. But after three iterations the ILS algorithm was able to find the exact answer. So hopefully it is clear why we use ILS instead of doing the sensor fusion with the Kalman filter - it gives a better result. Of course, we started with a very inaccurate guess; what if the guess was better?
```python
pos, converged = lop_ils(rz, sat_pos, (801, 201), hx=hx_ils)
print('Iterated solution: ', pos)
```
iteration: 0
innovation [-0.0009 -1.3868 -0.0024]
pos [ 800.0014 199.9991]
iteration: 1
innovation [-0.0016 -0. -0.0016]
pos [ 800. 200.]
iteration: 2
innovation [-0. -0. -0.]
pos [ 800. 200.]
Iterated solution: [ 800. 200.]
The first iteration produced a better estimate, but it still could be improved upon by iterating.
I injected no noise in the measurement to test and display the theoretical performance of the filter. Now let's see how it performs when we inject noise.
```python
# add some noise
nrz = []
for z in rz:
nrz.append(z + randn())
pos, converged = lop_ils(nrz, sat_pos, (601, 198.3), hx=hx_ils)
print('Iterated solution: ', pos)
```
iteration: 0
innovation [ 129.8823 100.461 107.5398]
pos [ 831.4474 186.1222]
iteration: 1
innovation [-31.6446 -7.4837 -30.7861]
pos [ 800.3284 198.8076]
iteration: 2
innovation [-0.6041 -0.3813 0.3569]
pos [ 799.948 198.6026]
iteration: 3
innovation [-0.4803 0.0004 0.4802]
pos [ 799.9476 198.6025]
iteration: 4
innovation [-0.4802 0.0007 0.4803]
pos [ 799.9476 198.6025]
Iterated solution: [ 799.9475854 198.60245871]
Here we can see that the noise means that we no longer find the exact solution but we are still able to quickly converge onto a more accurate solution than the first iteration provides.
This is far from a complete coverage of the iterated least squares algorithm, let alone methods used in GNSS to compute positions from GPS pseudoranges. You will find a number of approaches in the literature, including QR decomposition, SVD, and other techniques to solve the overdetermined system. For a nontrivial task you will have to survey the literature and perhaps design your algorithm depending on your specific sensor configuration, the amounts of noise, your accuracy requirements, and the amount of computation you can afford to do.
## References
* [1] *Matrix Exponential* http://en.wikipedia.org/wiki/Matrix_exponential
* [2] *LTI System Theory* http://en.wikipedia.org/wiki/LTI_system_theory
* [3] C.F. van Loan, "Computing Integrals Involving the Matrix Exponential," IEEE Transactions Automatic Control, June 1978.
**ORPHAN TEXT**
I admit this may be a 'magical' equation to you. If you have some experience with linear algebra and statistics, this may help. The covariance due to the prediction can be modeled as the expected value of the error in the prediction step, given by this equation.
$$\begin{aligned}
\mathbf{P}^- &= E[(\mathbf{Fx})(\mathbf{Fx})^\mathsf{T}]\\
&= E[\mathbf{Fxx}^\mathsf{T}\mathbf{F}^\mathsf{T}] \\
&= \mathbf{F}\, E[\mathbf{xx}^\mathsf{T}]\, \mathbf{F}^\mathsf{T}
\end{aligned}$$
Of course, $E[\mathbf{xx}^\mathsf{T}]$ is just $\mathbf{P}$, giving us
$$\mathbf{P}^- = \mathbf{FPF}^\mathsf{T}$$
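A quick numerical sanity check of this result (my own, with arbitrary values for $\mathbf{F}$ and $\mathbf{P}$): draw samples with covariance $\mathbf{P}$, propagate them through $\mathbf{F}$, and compare the sample covariance against $\mathbf{FPF}^\mathsf{T}$.

```python
import numpy as np

rng = np.random.default_rng(0)
F = np.array([[1., 0.1],
              [0., 1. ]])
P = np.array([[1., 0.5],
              [0.5, 2. ]])

xs = rng.multivariate_normal(mean=[0., 0.], cov=P, size=200_000)
propagated = xs @ F.T          # apply x' = Fx to every sample
print(np.cov(propagated.T))    # approaches F P F^T as the sample count grows
print(F @ P @ F.T)
```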
[GOAL]
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
⊢ IsometryEquiv (weightedSumSquares ℂ w') (weightedSumSquares ℂ fun i => if w' i = 0 then 0 else 1)
[PROOFSTEP]
let w i := if h : w' i = 0 then (1 : Units ℂ) else Units.mk0 (w' i) h
[GOAL]
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
⊢ IsometryEquiv (weightedSumSquares ℂ w') (weightedSumSquares ℂ fun i => if w' i = 0 then 0 else 1)
[PROOFSTEP]
have hw' : ∀ i : ι, (w i : ℂ) ^ (-(1 / 2 : ℂ)) ≠ 0 := by
intro i hi
exact (w i).ne_zero ((Complex.cpow_eq_zero_iff _ _).1 hi).1
[GOAL]
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
⊢ ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
[PROOFSTEP]
intro i hi
[GOAL]
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
i : ι
hi : ↑(w i) ^ (-(1 / 2)) = 0
⊢ False
[PROOFSTEP]
exact (w i).ne_zero ((Complex.cpow_eq_zero_iff _ _).1 hi).1
[GOAL]
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
⊢ IsometryEquiv (weightedSumSquares ℂ w') (weightedSumSquares ℂ fun i => if w' i = 0 then 0 else 1)
[PROOFSTEP]
convert
(weightedSumSquares ℂ w').isometryEquivBasisRepr
((Pi.basisFun ℂ ι).unitsSMul fun i => (isUnit_iff_ne_zero.2 <| hw' i).unit)
[GOAL]
case h.e'_10
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
⊢ (weightedSumSquares ℂ fun i => if w' i = 0 then 0 else 1) =
basisRepr (weightedSumSquares ℂ w')
(Basis.unitsSMul (Pi.basisFun ℂ ι) fun i => IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2)))))
[PROOFSTEP]
ext1 v
[GOAL]
case h.e'_10.H
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
⊢ ↑(weightedSumSquares ℂ fun i => if w' i = 0 then 0 else 1) v =
↑(basisRepr (weightedSumSquares ℂ w')
(Basis.unitsSMul (Pi.basisFun ℂ ι) fun i => IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))))
v
[PROOFSTEP]
erw [basisRepr_apply, weightedSumSquares_apply, weightedSumSquares_apply]
[GOAL]
case h.e'_10.H
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
⊢ ∑ i : ι, (if w' i = 0 then 0 else 1) • (v i * v i) =
∑ i : ι,
w' i •
(Finset.sum univ
(fun i =>
v i • ↑(Basis.unitsSMul (Pi.basisFun ℂ ι) fun i => IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) i)
i *
Finset.sum univ
(fun i =>
v i • ↑(Basis.unitsSMul (Pi.basisFun ℂ ι) fun i => IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) i)
i)
[PROOFSTEP]
refine' sum_congr rfl fun j hj => _
[GOAL]
case h.e'_10.H
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
⊢ (if w' j = 0 then 0 else 1) • (v j * v j) =
w' j •
(Finset.sum univ
(fun i =>
v i • ↑(Basis.unitsSMul (Pi.basisFun ℂ ι) fun i => IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) i)
j *
Finset.sum univ
(fun i =>
v i • ↑(Basis.unitsSMul (Pi.basisFun ℂ ι) fun i => IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) i)
j)
[PROOFSTEP]
have hsum :
(∑ i : ι, v i • ((isUnit_iff_ne_zero.2 <| hw' i).unit : ℂ) • (Pi.basisFun ℂ ι) i) j = v j • w j ^ (-(1 / 2 : ℂ)) :=
by
rw [Finset.sum_apply, sum_eq_single j, Pi.basisFun_apply, IsUnit.unit_spec, LinearMap.stdBasis_apply, Pi.smul_apply,
Pi.smul_apply, Function.update_same, smul_eq_mul, smul_eq_mul, smul_eq_mul, mul_one]
intro i _ hij
rw [Pi.basisFun_apply, LinearMap.stdBasis_apply, Pi.smul_apply, Pi.smul_apply, Function.update_noteq hij.symm,
Pi.zero_apply, smul_eq_mul, smul_eq_mul, mul_zero, mul_zero]
intro hj'; exact False.elim (hj' hj)
[GOAL]
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
⊢ Finset.sum univ (fun i => v i • ↑(IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) i) j =
v j • ↑(w j) ^ (-(1 / 2))
[PROOFSTEP]
rw [Finset.sum_apply, sum_eq_single j, Pi.basisFun_apply, IsUnit.unit_spec, LinearMap.stdBasis_apply, Pi.smul_apply,
Pi.smul_apply, Function.update_same, smul_eq_mul, smul_eq_mul, smul_eq_mul, mul_one]
[GOAL]
case h₀
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
⊢ ∀ (b : ι), b ∈ univ → b ≠ j → (v b • ↑(IsUnit.unit (_ : IsUnit (↑(w b) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) b) j = 0
case h₁
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
⊢ ¬j ∈ univ → (v j • ↑(IsUnit.unit (_ : IsUnit (↑(w j) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) j) j = 0
[PROOFSTEP]
intro i _ hij
[GOAL]
case h₀
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
i : ι
a✝ : i ∈ univ
hij : i ≠ j
⊢ (v i • ↑(IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) i) j = 0
case h₁
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
⊢ ¬j ∈ univ → (v j • ↑(IsUnit.unit (_ : IsUnit (↑(w j) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) j) j = 0
[PROOFSTEP]
rw [Pi.basisFun_apply, LinearMap.stdBasis_apply, Pi.smul_apply, Pi.smul_apply, Function.update_noteq hij.symm,
Pi.zero_apply, smul_eq_mul, smul_eq_mul, mul_zero, mul_zero]
[GOAL]
case h₁
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
⊢ ¬j ∈ univ → (v j • ↑(IsUnit.unit (_ : IsUnit (↑(w j) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) j) j = 0
[PROOFSTEP]
intro hj'
[GOAL]
case h₁
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
hj' : ¬j ∈ univ
⊢ (v j • ↑(IsUnit.unit (_ : IsUnit (↑(w j) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) j) j = 0
[PROOFSTEP]
exact False.elim (hj' hj)
[GOAL]
case h.e'_10.H
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
hsum :
Finset.sum univ (fun i => v i • ↑(IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) i) j =
v j • ↑(w j) ^ (-(1 / 2))
⊢ (if w' j = 0 then 0 else 1) • (v j * v j) =
w' j •
(Finset.sum univ
(fun i =>
v i • ↑(Basis.unitsSMul (Pi.basisFun ℂ ι) fun i => IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) i)
j *
Finset.sum univ
(fun i =>
v i • ↑(Basis.unitsSMul (Pi.basisFun ℂ ι) fun i => IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) i)
j)
[PROOFSTEP]
simp_rw [Basis.unitsSMul_apply]
[GOAL]
case h.e'_10.H
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
hsum :
Finset.sum univ (fun i => v i • ↑(IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) i) j =
v j • ↑(w j) ^ (-(1 / 2))
⊢ (if w' j = 0 then 0 else 1) • (v j * v j) =
w' j •
(Finset.sum univ (fun x => v x • IsUnit.unit (_ : IsUnit (↑(w x) ^ (-(1 / 2)))) • ↑(Pi.basisFun ℂ ι) x) j *
Finset.sum univ (fun x => v x • IsUnit.unit (_ : IsUnit (↑(w x) ^ (-(1 / 2)))) • ↑(Pi.basisFun ℂ ι) x) j)
[PROOFSTEP]
erw [hsum, smul_eq_mul]
[GOAL]
case h.e'_10.H
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
hsum :
Finset.sum univ (fun i => v i • ↑(IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) i) j =
v j • ↑(w j) ^ (-(1 / 2))
⊢ (if w' j = 0 then 0 else 1) * (v j * v j) = w' j • (v j • ↑(w j) ^ (-(1 / 2)) * v j • ↑(w j) ^ (-(1 / 2)))
[PROOFSTEP]
split_ifs with h
[GOAL]
case pos
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
hsum :
Finset.sum univ (fun i => v i • ↑(IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) i) j =
v j • ↑(w j) ^ (-(1 / 2))
h : w' j = 0
⊢ 0 * (v j * v j) = w' j • (v j • ↑1 ^ (-(1 / 2)) * v j • ↑1 ^ (-(1 / 2)))
[PROOFSTEP]
simp only [h, zero_smul, zero_mul]
[GOAL]
case neg
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
hsum :
Finset.sum univ (fun i => v i • ↑(IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) i) j =
v j • ↑(w j) ^ (-(1 / 2))
h : ¬w' j = 0
⊢ 1 * (v j * v j) = w' j • (v j • ↑(Units.mk0 (w' j) h) ^ (-(1 / 2)) * v j • ↑(Units.mk0 (w' j) h) ^ (-(1 / 2)))
[PROOFSTEP]
have hww' : w' j = w j := by simp only [dif_neg h, Units.val_mk0]
[GOAL]
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
hsum :
Finset.sum univ (fun i => v i • ↑(IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) i) j =
v j • ↑(w j) ^ (-(1 / 2))
h : ¬w' j = 0
⊢ w' j = ↑(w j)
[PROOFSTEP]
simp only [dif_neg h, Units.val_mk0]
[GOAL]
case neg
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
hsum :
Finset.sum univ (fun i => v i • ↑(IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) i) j =
v j • ↑(w j) ^ (-(1 / 2))
h : ¬w' j = 0
hww' : w' j = ↑(w j)
⊢ 1 * (v j * v j) = w' j • (v j • ↑(Units.mk0 (w' j) h) ^ (-(1 / 2)) * v j • ↑(Units.mk0 (w' j) h) ^ (-(1 / 2)))
[PROOFSTEP]
simp only [one_mul, Units.val_mk0, smul_eq_mul]
[GOAL]
case neg
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
hsum :
Finset.sum univ (fun i => v i • ↑(IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) i) j =
v j • ↑(w j) ^ (-(1 / 2))
h : ¬w' j = 0
hww' : w' j = ↑(w j)
⊢ v j * v j = w' j * (v j * w' j ^ (-(1 / 2)) * (v j * w' j ^ (-(1 / 2))))
[PROOFSTEP]
rw [hww']
[GOAL]
case neg
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
hsum :
Finset.sum univ (fun i => v i • ↑(IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) i) j =
v j • ↑(w j) ^ (-(1 / 2))
h : ¬w' j = 0
hww' : w' j = ↑(w j)
⊢ v j * v j = ↑(w j) * (v j * ↑(w j) ^ (-(1 / 2)) * (v j * ↑(w j) ^ (-(1 / 2))))
[PROOFSTEP]
suffices v j * v j = w j ^ (-(1 / 2 : ℂ)) * w j ^ (-(1 / 2 : ℂ)) * w j * v j * v j by rw [this]; ring
[GOAL]
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
hsum :
Finset.sum univ (fun i => v i • ↑(IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) i) j =
v j • ↑(w j) ^ (-(1 / 2))
h : ¬w' j = 0
hww' : w' j = ↑(w j)
this : v j * v j = ↑(w j) ^ (-(1 / 2)) * ↑(w j) ^ (-(1 / 2)) * ↑(w j) * v j * v j
⊢ v j * v j = ↑(w j) * (v j * ↑(w j) ^ (-(1 / 2)) * (v j * ↑(w j) ^ (-(1 / 2))))
[PROOFSTEP]
rw [this]
[GOAL]
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
hsum :
Finset.sum univ (fun i => v i • ↑(IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) i) j =
v j • ↑(w j) ^ (-(1 / 2))
h : ¬w' j = 0
hww' : w' j = ↑(w j)
this : v j * v j = ↑(w j) ^ (-(1 / 2)) * ↑(w j) ^ (-(1 / 2)) * ↑(w j) * v j * v j
⊢ ↑(w j) ^ (-(1 / 2)) * ↑(w j) ^ (-(1 / 2)) * ↑(w j) * v j * v j =
↑(w j) * (v j * ↑(w j) ^ (-(1 / 2)) * (v j * ↑(w j) ^ (-(1 / 2))))
[PROOFSTEP]
ring
[GOAL]
case neg
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
hsum :
Finset.sum univ (fun i => v i • ↑(IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) i) j =
v j • ↑(w j) ^ (-(1 / 2))
h : ¬w' j = 0
hww' : w' j = ↑(w j)
⊢ v j * v j = ↑(w j) ^ (-(1 / 2)) * ↑(w j) ^ (-(1 / 2)) * ↑(w j) * v j * v j
[PROOFSTEP]
rw [← Complex.cpow_add _ _ (w j).ne_zero, show -(1 / 2 : ℂ) + -(1 / 2) = -1 by simp [← two_mul], Complex.cpow_neg_one,
inv_mul_cancel (w j).ne_zero, one_mul]
[GOAL]
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w' : ι → ℂ
w : ι → ℂˣ := fun i => if h : w' i = 0 then 1 else Units.mk0 (w' i) h
hw' : ∀ (i : ι), ↑(w i) ^ (-(1 / 2)) ≠ 0
v : ι → ℂ
j : ι
hj : j ∈ univ
hsum :
Finset.sum univ (fun i => v i • ↑(IsUnit.unit (_ : IsUnit (↑(w i) ^ (-(1 / 2))))) • ↑(Pi.basisFun ℂ ι) i) j =
v j • ↑(w j) ^ (-(1 / 2))
h : ¬w' j = 0
hww' : w' j = ↑(w j)
⊢ -(1 / 2) + -(1 / 2) = -1
[PROOFSTEP]
simp [← two_mul]
[GOAL]
ι : Type u_1
inst✝¹ : Fintype ι
inst✝ : DecidableEq ι
w : ι → ℂˣ
⊢ IsometryEquiv (weightedSumSquares ℂ w) (weightedSumSquares ℂ 1)
[PROOFSTEP]
simpa using isometryEquivSumSquares ((↑) ∘ w)
|
> module SimpleProb.Measures
> import SimpleProb.SimpleProb
> import SimpleProb.BasicOperations
> import NonNegRational.NonNegRational
> import NonNegRational.BasicOperations
> import NonNegRational.BasicProperties
> %default total
> %access public export
> %auto_implicits off
* Measures
> ||| Expected value
> expectedValue : SimpleProb NonNegRational -> NonNegRational
> expectedValue = Prelude.Foldable.sum . (map (uncurry (*))) . toList
|
#============= SHE related functions ============#
# Right-hand side and Jacobian for the SHE model; the RHS evaluates
#   ϵ u - u³ - ν ∇²u - ∇⁴u
export RHS_SHE, Jacobian_SHE, RHS_SHE1D, Jacobian_SHE1D

# RHS evaluated on a State object (parameters unpacked from S.gp.p)
function RHS_SHE(S::State)
    @unpack ϵ, ν = S.gp.p
    u = S.u
    ϵ * u .- u .^ 3 .- (ν * ∇²(S)) .- ∇⁴(S)
end

# Vectorized (flattened) version of the RHS
function RHS_SHE(V::Array, p::Dict, N::Int, Δx::Real; dim::Int=2)
    @unpack ϵ, ν = p
    ϵ * V .- V .^ 3 .- (ν * ∇²(V, N, Δx; dim=dim)) .- ∇⁴(V, N, Δx; dim=dim)
end

# Jacobian of the flattened RHS: a diagonal term from ϵ - 3u² plus the
# sparse Laplacian and biharmonic stencils
function Jacobian_SHE(V::Array, p::Dict, N::Int, Δx::Real; dim::Int=2)
    @unpack ϵ, ν = p
    spdiagm(0 => ϵ .- 3 .* V .^ 2) .- ν * ∇²(N, Δx; dim=dim) .- ∇⁴(N, Δx; dim=dim)
end

## One dimensional case ##
RHS_SHE1D(V::Array, p::Dict, N::Int, Δx::Real) = RHS_SHE(V, p, N, Δx; dim=1)
Jacobian_SHE1D(V::Array, p::Dict, N::Int, Δx::Real) = Jacobian_SHE(V, p, N, Δx; dim=1)
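
# Usage sketch (illustrative; the parameter values are arbitrary, and the
# ∇²/∇⁴ finite-difference operators are assumed to be defined elsewhere in
# this package):
# N, Δx = 64, 0.5
# p = Dict(:ϵ => 0.1, :ν => 0.0)
# V = 0.01 .* randn(N)              # flattened 1D field
# dV = RHS_SHE1D(V, p, N, Δx)       # time derivative of V
# J  = Jacobian_SHE1D(V, p, N, Δx)  # sparse Jacobian, e.g. for Newton steps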
|
lemma convex_halfspace_Im_le: "convex {x. Im x \<le> b}" |
-- Andreas, 2016-09-08, issue #2167 reported by effectfully
loop : Set₁
loop = .Set
-- WAS: looping of type checker
-- NOW: proper error about invalid dotted expression
|
function y = abs( x )
%Disciplined convex/geometric programming information for ABS:
% ABS(X) is convex and nonmonotonic in X. Therefore, according to
% the strict rules of DCP, X must be affine. However, because of
% its special structure, CVX considers the sign of X as well. So,
% for instance, if X is known to be nonnegative, then ABS(X)=X.
persistent P
if isempty( P ),
P.map = cvx_remap( { 'constant' }, { 'p_nonconst' }, { 'n_nonconst' }, ...
{ 'r_affine' }, { 'c_affine' } );
P.funcs = { @abs_cnst, @abs_posn, @abs_negn, @abs_affn, @abs_affn };
end
y = cvx_unary_op( P, x );
function y = abs_cnst( x )
% Constant
y = builtin( 'abs', x );
function y = abs_posn( x )
% Positive any
y = x;
function y = abs_negn( x )
% Negative any
y = -x;
function y = abs_affn( x ) %#ok
% Affine
cvx_begin
epigraph variable y( size(x) ) nonnegative_
{ x, y } == lorentz( size(x), 0, ~isreal(x) ); %#ok
cvx_end
% Copyright 2005-2014 CVX Research, Inc.
% See the file LICENSE.txt for full copyright information.
% The command 'cvx_where' will show where this file is located.
|
[STATEMENT]
lemma i_mult_i_eq [simp]:
\<open>\<i> * \<i> = - 1\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<i> * \<i> = - 1
[PROOF STEP]
by (simp add: gauss_eq_iff) |
[STATEMENT]
lemma mergeable_envs_Ex: "mergeable_envs n S \<Longrightarrow> MFOTL.nfv \<alpha> \<le> n \<Longrightarrow> MFOTL.nfv \<beta> \<le> n \<Longrightarrow>
(\<exists>v'\<in>S. \<forall>x\<in>fv \<alpha>. v' ! x = v ! x) \<Longrightarrow> (\<exists>v'\<in>S. \<forall>x\<in>fv \<beta>. v' ! x = v ! x) \<Longrightarrow>
(\<exists>v'\<in>S. \<forall>x\<in>fv \<alpha> \<union> fv \<beta>. v' ! x = v ! x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>mergeable_envs n S; MFOTL.nfv \<alpha> \<le> n; MFOTL.nfv \<beta> \<le> n; \<exists>v'\<in>S. \<forall>x\<in>fv \<alpha>. v' ! x = v ! x; \<exists>v'\<in>S. \<forall>x\<in>fv \<beta>. v' ! x = v ! x\<rbrakk> \<Longrightarrow> \<exists>v'\<in>S. \<forall>x\<in>fv \<alpha> \<union> fv \<beta>. v' ! x = v ! x
[PROOF STEP]
proof (clarify, goal_cases mergeable)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>v' v'a. \<lbrakk>mergeable_envs n S; MFOTL.nfv \<alpha> \<le> n; MFOTL.nfv \<beta> \<le> n; v' \<in> S; \<forall>x\<in>fv \<alpha>. v' ! x = v ! x; v'a \<in> S; \<forall>x\<in>fv \<beta>. v'a ! x = v ! x\<rbrakk> \<Longrightarrow> \<exists>v'\<in>S. \<forall>x\<in>fv \<alpha> \<union> fv \<beta>. v' ! x = v ! x
[PROOF STEP]
case (mergeable v1 v2)
[PROOF STATE]
proof (state)
this:
mergeable_envs n S
MFOTL.nfv \<alpha> \<le> n
MFOTL.nfv \<beta> \<le> n
v1 \<in> S
\<forall>x\<in>fv \<alpha>. v1 ! x = v ! x
v2 \<in> S
\<forall>x\<in>fv \<beta>. v2 ! x = v ! x
goal (1 subgoal):
1. \<And>v' v'a. \<lbrakk>mergeable_envs n S; MFOTL.nfv \<alpha> \<le> n; MFOTL.nfv \<beta> \<le> n; v' \<in> S; \<forall>x\<in>fv \<alpha>. v' ! x = v ! x; v'a \<in> S; \<forall>x\<in>fv \<beta>. v'a ! x = v ! x\<rbrakk> \<Longrightarrow> \<exists>v'\<in>S. \<forall>x\<in>fv \<alpha> \<union> fv \<beta>. v' ! x = v ! x
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
mergeable_envs n S
MFOTL.nfv \<alpha> \<le> n
MFOTL.nfv \<beta> \<le> n
v1 \<in> S
\<forall>x\<in>fv \<alpha>. v1 ! x = v ! x
v2 \<in> S
\<forall>x\<in>fv \<beta>. v2 ! x = v ! x
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
mergeable_envs n S
MFOTL.nfv \<alpha> \<le> n
MFOTL.nfv \<beta> \<le> n
v1 \<in> S
\<forall>x\<in>fv \<alpha>. v1 ! x = v ! x
v2 \<in> S
\<forall>x\<in>fv \<beta>. v2 ! x = v ! x
goal (1 subgoal):
1. \<exists>v'\<in>S. \<forall>x\<in>fv \<alpha> \<union> fv \<beta>. v' ! x = v ! x
[PROOF STEP]
by (auto intro: order.strict_trans2[OF fvi_less_nfv[rule_format]]
elim!: mergeable_envs_def[THEN iffD1, rule_format, of _ _ v1 v2])
[PROOF STATE]
proof (state)
this:
\<exists>v'\<in>S. \<forall>x\<in>fv \<alpha> \<union> fv \<beta>. v' ! x = v ! x
goal:
No subgoals!
[PROOF STEP]
qed |
State Before: θ : ℂ
⊢ sin θ = 0 ↔ ∃ k, θ = ↑k * ↑π State After: θ : ℂ
⊢ (∃ k, θ - ↑π / 2 = (2 * ↑k + 1) * ↑π / 2) ↔ ∃ k, θ = ↑k * ↑π Tactic: rw [← Complex.cos_sub_pi_div_two, cos_eq_zero_iff] State Before: θ : ℂ
⊢ (∃ k, θ - ↑π / 2 = (2 * ↑k + 1) * ↑π / 2) ↔ ∃ k, θ = ↑k * ↑π State After: case mp
θ : ℂ
⊢ (∃ k, θ - ↑π / 2 = (2 * ↑k + 1) * ↑π / 2) → ∃ k, θ = ↑k * ↑π
case mpr
θ : ℂ
⊢ (∃ k, θ = ↑k * ↑π) → ∃ k, θ - ↑π / 2 = (2 * ↑k + 1) * ↑π / 2 Tactic: constructor State Before: case mp
θ : ℂ
⊢ (∃ k, θ - ↑π / 2 = (2 * ↑k + 1) * ↑π / 2) → ∃ k, θ = ↑k * ↑π State After: case mp.intro
θ : ℂ
k : ℤ
hk : θ - ↑π / 2 = (2 * ↑k + 1) * ↑π / 2
⊢ ∃ k, θ = ↑k * ↑π Tactic: rintro ⟨k, hk⟩ State Before: case mp.intro
θ : ℂ
k : ℤ
hk : θ - ↑π / 2 = (2 * ↑k + 1) * ↑π / 2
⊢ ∃ k, θ = ↑k * ↑π State After: case mp.intro
θ : ℂ
k : ℤ
hk : θ - ↑π / 2 = (2 * ↑k + 1) * ↑π / 2
⊢ θ = ↑(k + 1) * ↑π Tactic: use k + 1 State Before: case mp.intro
θ : ℂ
k : ℤ
hk : θ - ↑π / 2 = (2 * ↑k + 1) * ↑π / 2
⊢ θ = ↑(k + 1) * ↑π State After: case mp.intro
θ : ℂ
k : ℤ
hk : θ - ↑π / 2 = (2 * ↑k + 1) * ↑π / 2
⊢ (2 * ↑k + 1) * ↑π + ↑π = (↑k + 1) * ↑π * 2 Tactic: field_simp [eq_add_of_sub_eq hk] State Before: case mp.intro
θ : ℂ
k : ℤ
hk : θ - ↑π / 2 = (2 * ↑k + 1) * ↑π / 2
⊢ (2 * ↑k + 1) * ↑π + ↑π = (↑k + 1) * ↑π * 2 State After: no goals Tactic: ring State Before: case mpr
θ : ℂ
⊢ (∃ k, θ = ↑k * ↑π) → ∃ k, θ - ↑π / 2 = (2 * ↑k + 1) * ↑π / 2 State After: case mpr.intro
k : ℤ
⊢ ∃ k_1, ↑k * ↑π - ↑π / 2 = (2 * ↑k_1 + 1) * ↑π / 2 Tactic: rintro ⟨k, rfl⟩ State Before: case mpr.intro
k : ℤ
⊢ ∃ k_1, ↑k * ↑π - ↑π / 2 = (2 * ↑k_1 + 1) * ↑π / 2 State After: case mpr.intro
k : ℤ
⊢ ↑k * ↑π - ↑π / 2 = (2 * ↑(k - 1) + 1) * ↑π / 2 Tactic: use k - 1 State Before: case mpr.intro
k : ℤ
⊢ ↑k * ↑π - ↑π / 2 = (2 * ↑(k - 1) + 1) * ↑π / 2 State After: case mpr.intro
k : ℤ
⊢ ↑k * ↑π * 2 - ↑π = (2 * (↑k - 1) + 1) * ↑π Tactic: field_simp State Before: case mpr.intro
k : ℤ
⊢ ↑k * ↑π * 2 - ↑π = (2 * (↑k - 1) + 1) * ↑π State After: no goals Tactic: ring |
Formal statement is: lemma Poly_append_replicate_0: "Poly (xs @ replicate n 0) = Poly xs" Informal statement is: If you append $n$ zeros to the end of a polynomial, the result is the same as the original polynomial. |
## Julia program for Metropolization of the Gibbs Sampler
## author: weiya <[email protected]>
## date: 2018-08-25
## example 7.3.8
## data
a = [0.06, 0.14, 0.11, 0.09];
b = [0.17, 0.24, 0.19, 0.20];
x = [9, 15, 12, 7, 8];
function ex738(T; mu = 0.5)
    # NOTE: the original sketch was incomplete; `mu` was never defined or
    # sampled (see the author's own "? sample mu first" remark). Here we take
    # it as a fixed placeholder argument so the function at least runs; a full
    # Gibbs step would draw mu from its conditional distribution first.
    z = ones(T+1, 4)
    for t = 1:T
        for i = 1:2
            atrial = 0                  # number of successful trials for cell i
            for k = 1:x[i]
                u = rand()
                if u <= a[i]*mu         # success with probability a[i]*mu
                    atrial += 1
                end
            end
            z[t+1, i] = atrial          # record the completed count
        end
    end
    return z
end
|
## This is a work in progress
plotres <- function(fit, df, x) {
## function to plot residuals
fitsum <- summary(fit)
residuals <- fitsum$residuals
    dffit <- cbind(df, fitted = fitted(fit), residuals = residuals)  # was cbind(df, dffit, residuals), which referenced dffit before it existed
#plotfit(dffit, logy, fit)
#abline(a=0,b=1,col="red") # y = a + b x
#
# plotfit(dffit, fit, residuals)
#
# plot(dffit$fit, dffit$residuals)
# x <- dffit$fit
# y <- dffit$residuals
# smooth <- predict( lm(y ~ x + I(x^2) + I(x^3)) )
# smooth <- data.frame(x,y,smooth)
# smooth <- smooth[order(smooth$x),]
# lines(smooth$x,smooth$smooth)
}
|
(* Title: HOL/Library/AList_Mapping.thy
Author: Florian Haftmann, TU Muenchen
*)
section \<open>Implementation of mappings with Association Lists\<close>
theory AList_Mapping
imports AList Mapping
begin
lift_definition Mapping :: "('a \<times> 'b) list \<Rightarrow> ('a, 'b) mapping" is map_of .
code_datatype Mapping
lemma lookup_Mapping [simp, code]: "Mapping.lookup (Mapping xs) = map_of xs"
by transfer rule
lemma keys_Mapping [simp, code]: "Mapping.keys (Mapping xs) = set (map fst xs)"
by transfer (simp add: dom_map_of_conv_image_fst)
lemma empty_Mapping [code]: "Mapping.empty = Mapping []"
by transfer simp
lemma is_empty_Mapping [code]: "Mapping.is_empty (Mapping xs) \<longleftrightarrow> List.null xs"
by (cases xs) (simp_all add: is_empty_def null_def)
lemma update_Mapping [code]: "Mapping.update k v (Mapping xs) = Mapping (AList.update k v xs)"
by transfer (simp add: update_conv')
lemma delete_Mapping [code]: "Mapping.delete k (Mapping xs) = Mapping (AList.delete k xs)"
by transfer (simp add: delete_conv')
lemma ordered_keys_Mapping [code]:
"Mapping.ordered_keys (Mapping xs) = sort (remdups (map fst xs))"
by (simp only: ordered_keys_def keys_Mapping sorted_list_of_set_sort_remdups) simp
lemma entries_Mapping [code]:
"Mapping.entries (Mapping xs) = set (AList.clearjunk xs)"
by transfer (fact graph_map_of)
lemma ordered_entries_Mapping [code]:
"Mapping.ordered_entries (Mapping xs) = sort_key fst (AList.clearjunk xs)"
proof -
have distinct: "distinct (sort_key fst (AList.clearjunk xs))"
using distinct_clearjunk distinct_map distinct_sort by blast
note folding_Map_graph.idem_if_sorted_distinct[where ?m="map_of xs", OF _ sorted_sort_key distinct]
then show ?thesis
unfolding ordered_entries_def
by (transfer fixing: xs) (auto simp: graph_map_of)
qed
lemma fold_Mapping [code]:
"Mapping.fold f (Mapping xs) a = List.fold (case_prod f) (sort_key fst (AList.clearjunk xs)) a"
by (simp add: Mapping.fold_def ordered_entries_Mapping)
lemma size_Mapping [code]: "Mapping.size (Mapping xs) = length (remdups (map fst xs))"
by (simp add: size_def length_remdups_card_conv dom_map_of_conv_image_fst)
lemma tabulate_Mapping [code]: "Mapping.tabulate ks f = Mapping (map (\<lambda>k. (k, f k)) ks)"
by transfer (simp add: map_of_map_restrict)
lemma bulkload_Mapping [code]:
"Mapping.bulkload vs = Mapping (map (\<lambda>n. (n, vs ! n)) [0..<length vs])"
by transfer (simp add: map_of_map_restrict fun_eq_iff)
lemma equal_Mapping [code]:
"HOL.equal (Mapping xs) (Mapping ys) \<longleftrightarrow>
(let ks = map fst xs; ls = map fst ys
in (\<forall>l\<in>set ls. l \<in> set ks) \<and> (\<forall>k\<in>set ks. k \<in> set ls \<and> map_of xs k = map_of ys k))"
proof -
have *: "(a, b) \<in> set xs \<Longrightarrow> a \<in> fst ` set xs" for a b xs
by (auto simp add: image_def intro!: bexI)
show ?thesis
apply transfer
apply (auto intro!: map_of_eqI)
apply (auto dest!: map_of_eq_dom intro: *)
done
qed
lemma map_values_Mapping [code]:
"Mapping.map_values f (Mapping xs) = Mapping (map (\<lambda>(x,y). (x, f x y)) xs)"
for f :: "'c \<Rightarrow> 'a \<Rightarrow> 'b" and xs :: "('c \<times> 'a) list"
apply transfer
apply (rule ext)
subgoal for f xs x by (induct xs) auto
done
lemma combine_with_key_code [code]:
"Mapping.combine_with_key f (Mapping xs) (Mapping ys) =
Mapping.tabulate (remdups (map fst xs @ map fst ys))
(\<lambda>x. the (combine_options (f x) (map_of xs x) (map_of ys x)))"
apply transfer
apply (rule ext)
apply (rule sym)
subgoal for f xs ys x
apply (cases "map_of xs x"; cases "map_of ys x"; simp)
apply (force simp: map_of_eq_None_iff combine_options_def option.the_def o_def image_iff
dest: map_of_SomeD split: option.splits)+
done
done
lemma combine_code [code]:
"Mapping.combine f (Mapping xs) (Mapping ys) =
Mapping.tabulate (remdups (map fst xs @ map fst ys))
(\<lambda>x. the (combine_options f (map_of xs x) (map_of ys x)))"
apply transfer
apply (rule ext)
apply (rule sym)
subgoal for f xs ys x
apply (cases "map_of xs x"; cases "map_of ys x"; simp)
apply (force simp: map_of_eq_None_iff combine_options_def option.the_def o_def image_iff
dest: map_of_SomeD split: option.splits)+
done
done
lemma map_of_filter_distinct: (* TODO: move? *)
assumes "distinct (map fst xs)"
shows "map_of (filter P xs) x =
(case map_of xs x of
None \<Rightarrow> None
| Some y \<Rightarrow> if P (x,y) then Some y else None)"
using assms
by (auto simp: map_of_eq_None_iff filter_map distinct_map_filter dest: map_of_SomeD
simp del: map_of_eq_Some_iff intro!: map_of_is_SomeI split: option.splits)
lemma filter_Mapping [code]:
"Mapping.filter P (Mapping xs) = Mapping (filter (\<lambda>(k,v). P k v) (AList.clearjunk xs))"
apply transfer
apply (rule ext)
apply (subst map_of_filter_distinct)
apply (simp_all add: map_of_clearjunk split: option.split)
done
lemma [code nbe]: "HOL.equal (x :: ('a, 'b) mapping) x \<longleftrightarrow> True"
by (fact equal_refl)
end
|
# Supervised Learning
Import the scientific computing and plotting packages.
```python
import logging
from collections import namedtuple
import numpy as np
from pandas import DataFrame
from IPython.display import HTML
from numpy.linalg import inv
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import cross_val_score
# Import plotting packages
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import animation
from matplotlib import cm
from matplotlib.colors import LogNorm
import seaborn as sns
sns.set_style('whitegrid')
logging.basicConfig(level=logging.INFO)
%matplotlib inline
```
Load the Boston house prices dataset.
```python
from sklearn.datasets import load_boston
boston = load_boston()
print(boston.DESCR)
```
Boston House Prices dataset
===========================
Notes
------
Data Set Characteristics:
:Number of Instances: 506
:Number of Attributes: 13 numeric/categorical predictive
:Median Value (attribute 14) is usually the target
:Attribute Information (in order):
- CRIM per capita crime rate by town
- ZN proportion of residential land zoned for lots over 25,000 sq.ft.
- INDUS proportion of non-retail business acres per town
- CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
- NOX nitric oxides concentration (parts per 10 million)
- RM average number of rooms per dwelling
- AGE proportion of owner-occupied units built prior to 1940
- DIS weighted distances to five Boston employment centres
- RAD index of accessibility to radial highways
- TAX full-value property-tax rate per $10,000
- PTRATIO pupil-teacher ratio by town
- B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
- LSTAT % lower status of the population
- MEDV Median value of owner-occupied homes in $1000's
:Missing Attribute Values: None
:Creator: Harrison, D. and Rubinfeld, D.L.
This is a copy of UCI ML housing dataset.
http://archive.ics.uci.edu/ml/datasets/Housing
This dataset was taken from the StatLib library which is maintained at Carnegie Mellon University.
The Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic
prices and the demand for clean air', J. Environ. Economics & Management,
vol.5, 81-102, 1978. Used in Belsley, Kuh & Welsch, 'Regression diagnostics
...', Wiley, 1980. N.B. Various transformations are used in the table on
pages 244-261 of the latter.
The Boston house-price data has been used in many machine learning papers that address regression
problems.
**References**
- Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261.
- Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann.
- many more! (see http://archive.ics.uci.edu/ml/datasets/Housing)
We will use only two variables: RM (average number of rooms) and Price (median home value in $1000s).
Plot a scatter of the average number of rooms against the median house price.
```python
df = DataFrame(data=boston.data, columns=boston.feature_names)[["RM"]].join(DataFrame(data=boston.target, columns=["Price"]))
plt.scatter(df["RM"], df["Price"])
plt.xlabel("Number of rooms")
plt.ylabel("Price in $1000")
plt.show()
```
To make the discussion easier, let us introduce some notation. We use $x^{(i)}$ to denote the input variables, such as the average number of rooms here; these are also called **features**. We use $y^{(i)}$ to denote the output variable, the **target** we want to predict. A pair $(x^{(i)}, y^{(i)})$ is called a **training example**, and the set of $m$ training examples $\{(x^{(i)}, y^{(i)});i=1,...,m\}$ that we will use is called the **training set**. Note that the superscript $(i)$ is simply an index into the training set, not exponentiation. We write $\chi$ for the input space and $\gamma$ for the output space; in this example $\chi=\gamma=\mathbb{R}$.
Defining supervised learning formally: given a training set, we want to learn a function $h: \chi \rightarrow \gamma$ such that $h(x)$ is a good predictor of $y$. For historical reasons, the function $h$ is called a **hypothesis**.
When the target variable we want to predict is continuous, as with the house prices here, we call the problem a **regression** problem. When $y$ can take only a small number of discrete values, we call it a **classification** problem.
# Linear Regression
This section covers:
1. LMS algorithm
2. The normal equations
3. Probabilistic interpretation
4. Locally weighted linear regression
```python
# Preview the data
print(df.head())
```
RM Price
0 6.575 24.0
1 6.421 21.6
2 7.185 34.7
3 6.998 33.4
4 7.147 36.2
For this dataset, $x$ is a one-dimensional vector over $\mathbb{R}$, and $x_1^{(i)}$ is the average number of rooms in district $i$.
To perform supervised learning, we must first decide how to represent the hypothesis $h$. As a simple starting point, we choose a linear function of $x$ to predict $y$:
$$ h_\theta(x) = \theta_0 + \theta_1x_1 $$
Here the $\theta_i$ are called **parameters** (sometimes **weights**); through them we map $\chi$ linearly to $\gamma$. When no confusion can arise, we abbreviate $h_\theta(x)$ as $h(x)$. By convention we set $x_0=1$ (called the **intercept term**), so that
$$ h(x) = \sum_{i=0}^n \theta_ix_i = \theta^Tx $$
On the right-hand side we view $\theta$ and $x$ as vectors, and $n$ is the number of features (not counting $x_0$).
Now, given a training set, we need to pick, or rather learn, values for the parameters $\theta$. One intuitive approach is to make $h(x)$ as close to $y$ as possible, at least for the training set. To define "close" more formally, we introduce the **cost function**:
$$ J(\theta) = \frac{1}{2}\sum_{i=1}^m (h_{\theta}(x^{(i)}) - y^{(i)})^2$$
## 1. The LMS algorithm
We want to choose the $\theta$ that minimizes $J(\theta)$. One idea is a search algorithm: start from a random guess for $\theta$, then repeatedly change $\theta$ by some rule that keeps decreasing $J(\theta)$, until $\theta$ converges to a value at which $J(\theta)$ attains its minimum.
In particular, we will consider the **gradient descent** algorithm. As shown in the figure below, $J(\theta)$ is a function of $\theta_0$ and $\theta_1$. We first pick an initial guess, say $\theta = [-10, 17.5]$; imagining walking downhill on this surface, we repeatedly step in the direction of steepest descent until we arrive at some **local minimum**. In our plot there is only one **global minimum** (marked with a red star), but a property of gradient descent is that, when local minima exist, different random initializations of $\theta$ may converge to different local minima. We will encounter the local-minimum problem again in later models.
```python
# Load the training features and target variable
X, y = df[["RM"]].values, df["Price"].values
m, n = X.shape
# Add the intercept column
X_intercept = np.column_stack((np.ones((m, 1)), X))
@np.vectorize
def cost_function(*args):
"""成本函数"""
theta = np.array(args)
return 0.5 * ((X_intercept @ theta - y) ** 2).sum()
def gradient_descent_optimize(cost_func, init_theta=None):
"""使用梯度下降寻找最优的theta"""
alpha = 0.00003
def good_enough(old_cost, new_cost):
return np.abs(new_cost - old_cost) / old_cost < 0.00000001
def improve_theta(old_theta):
return old_theta - alpha * ((X_intercept @ old_theta - y) @ X_intercept)
if init_theta is None:
init_theta = np.array([0, 0])
path_ = [init_theta]
o_theta, o_cost = init_theta, cost_function(*init_theta)
while True:
n_theta = improve_theta(o_theta)
n_cost = cost_func(*n_theta)
path_.append(n_theta)
logging.debug((o_theta, n_theta, o_cost, n_cost))
if good_enough(o_cost, n_cost):
break
o_theta, o_cost = n_theta, n_cost
Result = namedtuple('Result', ['theta', 'path'])
return Result(theta=n_theta, path=np.array(path_))
# Run gradient descent to get the optimization result
init_guess = np.array([-10, 17.5])
gradient_descent_result = gradient_descent_optimize(cost_function, init_theta=init_guess)
path = gradient_descent_result.path
sampled_path = np.concatenate((np.array([path[0, ]]),
path[(2 ** np.array(range(int(np.ceil(np.log2(path.shape[0])))))).astype(np.int32), ]))
sampled_path_mat = sampled_path.T
# Use sklearn's LinearRegression to obtain the final theta directly
regr = LinearRegression()
regr.fit(X, y)
theta0_optimize, theta1_optimize = regr.intercept_, regr.coef_[0]
minima = np.array([theta0_optimize, theta1_optimize])
minima_ = minima.reshape(-1, 1)
# Build a grid of theta values centered at the optimum
step = 0.2
theta0_min, theta0_max = theta0_optimize - np.abs(theta0_optimize), theta0_optimize + np.abs(theta0_optimize)
theta1_min, theta1_max = theta1_optimize - np.abs(theta1_optimize), theta1_optimize + np.abs(theta1_optimize)
theta0_range, theta1_range = np.arange(theta0_min, theta0_max + step, step), np.arange(
theta1_min, theta1_max + step, step)
theta0_mat, theta1_mat = np.meshgrid(theta0_range, theta1_range)
z = cost_function(theta0_mat.reshape(-1), theta1_mat.reshape(-1)).reshape(theta0_mat.shape)
fig = plt.figure(figsize=(12, 6))
# Plot the surface map
ax1 = fig.add_subplot(1, 2, 1, projection='3d', elev=50, azim=-50)
ax1.plot_surface(theta0_mat, theta1_mat, z, norm=LogNorm(), cmap=cm.jet, rstride=1, cstride=1, edgecolor='none',
alpha=.8)
ax1.scatter(sampled_path_mat[0, :-1], sampled_path_mat[1, :-1], cost_function(*sampled_path_mat[::, :-1]))
ax1.plot(*minima_, cost_function(*minima_), 'r*', markersize=10)
ax1.set_xlabel('$theta_0$')
ax1.set_ylabel('$theta_1$')
ax1.set_zlabel('$J(theta)$')
ax1.set_xlim((theta0_min, theta0_max))
ax1.set_ylim((theta1_min, theta1_max))
ax1.set_title("Surface Map of J(theta)")
# Plot the contour map
ax2 = fig.add_subplot(1, 2, 2)
ax2.contour(theta0_mat, theta1_mat, z, levels=np.logspace(0, 5, 35), norm=LogNorm(), cmap=cm.jet)
ax2.plot(*minima_, 'r*', markersize=18)
ax2.quiver(sampled_path_mat[0, :-1], sampled_path_mat[1, :-1], sampled_path_mat[0, 1:] - sampled_path_mat[0, :-1],
sampled_path_mat[1, 1:] - sampled_path_mat[1, :-1],
scale_units='xy', angles='xy', scale=1, color='k')
ax2.set_xlabel('$theta0$')
ax2.set_ylabel('$theta1$')
ax2.set_xlim((theta0_min, theta0_max))
ax2.set_ylim((theta1_min, theta1_max))
ax2.set_title("Contour Map of J(theta)")
plt.tight_layout()
plt.show()
```
Formally, gradient descent starts from some randomly chosen initial $\theta$ and repeatedly performs the update:
$$ \theta_j = \theta_j - \alpha\frac{\partial}{\partial\theta_j}J(\theta) $$
(updating all components simultaneously, for $j = 0,...,n$)
Here $\alpha$ is called the **learning rate**. The algorithm very naturally takes, at each step, a small step in the direction in which $J$ decreases most steeply. If $\alpha$ is too small, convergence is slow; if $\alpha$ is too large, the algorithm may fail to converge. Note also that the gradient $\frac{\partial}{\partial\theta_j}J(\theta)$ itself shrinks during descent, so the update magnitude keeps getting smaller:
$$\begin{equation}
\begin{split}
\frac{\partial}{\partial\theta_j}J(\theta) & = \sum_{i=1}^m\frac{\partial}{\partial\theta_j}\frac{1}{2}(h_\theta(x^{(i)}) - y^{(i)})^2 \\
& = \sum_{i=1}^m2 \cdot \frac{1}{2}(h_\theta(x^{(i)}) - y^{(i)}) \cdot \frac{\partial}{\partial\theta_j}(h_\theta(x^{(i)}) - y^{(i)}) \\
& = \sum_{i=1}^m(h_\theta(x^{(i)}) - y^{(i)}) \cdot \frac{\partial}{\partial\theta_j}(\sum_{k=0}^{n}\theta_kx_k^{(i)}-y^{(i)}) \\
& = \sum_{i=1}^m(h_\theta(x^{(i)}) - y^{(i)})x_j^{(i)}
\end{split}
\end{equation}$$
We therefore update according to the rule:
$$ \theta_j = \theta_j - \alpha\sum_{i=1}^m(h_\theta(x^{(i)}) - y^{(i)})x_j^{(i)} $$
repeating until $\theta$ converges.
This rule is called **LMS** (least mean squares), also known as the **Widrow-Hoff learning rule**.
Because every update examines the entire training set, this algorithm is called **batch gradient descent**. For linear regression the cost function has exactly one global minimum and no other local minima; that is, $J$ is a convex quadratic function, so gradient descent on linear regression always converges (the only proviso being that the learning rate $\alpha$ is not chosen too large).
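Convexity here can be sanity-checked numerically: the Hessian of $J$ is $X^TX$, which is always positive semidefinite (an illustrative check, not part of the original notes):

```python
# All eigenvalues of the Hessian X^T X are non-negative, so J is convex
eigvals = np.linalg.eigvalsh(X_intercept.T @ X_intercept)
print((eigvals >= -1e-8).all())
```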
The animation below shows the gradient descent training process:
```python
fig, ax = plt.subplots()
ax.scatter(X.reshape(-1), y, alpha=0.5)
plt.xlabel("Number of rooms")
plt.ylabel("Price in $1000")
x = np.arange(3, 10, 1)
line, = ax.plot([], [], lw=2)
def animate(i):
line.set_data(x, np.column_stack((np.ones((x.size, 1)), x)) @ sampled_path[i])
return line,
def init():
line.set_data([], [])
return line,
anim = animation.FuncAnimation(fig, animate, frames=sampled_path.shape[0], init_func=init, interval=800,
repeat_delay=3000, blit=True)
HTML(anim.to_html5_video())
```
```python
# theta learned by batch gradient descent
print(gradient_descent_result.theta)
```
[-34.35932859 9.05317227]
```python
# theta from sklearn's LinearRegression model
print(minima)
```
[-34.67062078 9.10210898]
As we can see, the $\theta$ learned by gradient descent is very close to the optimal $\theta$ computed by the higher-level optimizer; raising the precision in the `good_enough` function would let gradient descent produce an even more accurate $\theta$ (at the cost of more iterations, i.e. longer training time).
Batch gradient descent is inefficient because every single update has to compute the gradient over the full dataset, which is very costly when $m$ is large. Consider an alternative that works comparably well but is far cheaper:
$$ \theta_j = \theta_j - \alpha(h_\theta(x^{(i)}) - y^{(i)})x_j^{(i)} $$
executed repeatedly for $i = 1, 2, ..., m$, each pass updating all components simultaneously, $j = 0,...,n$.
In this algorithm we sweep through the training set and, for each training example, update the parameters using the gradient of that single example alone. This is called **stochastic gradient descent**, also known as **incremental gradient descent**.
Stochastic gradient descent usually approaches the optimal $\theta$ much faster than batch gradient descent, though note that it may never "converge" to the minimum: the learned $\theta$ oscillates around the minimizer of $J(\theta)$. In practice, values near the minimum are already good enough. For these reasons, when the training set is very large we generally prefer stochastic gradient descent.
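As a concrete illustration, here is a minimal stochastic gradient descent sketch (not part of the original notes; the function name, `alpha`, `epochs`, and the reuse of `X_intercept` and `y` from above are our own illustrative choices):

```python
def stochastic_gradient_descent(X, y, alpha=0.0001, epochs=50, seed=0):
    """Minimal SGD sketch: one parameter update per training example."""
    rng = np.random.RandomState(seed)
    theta = np.zeros(X.shape[1])
    for _ in range(epochs):
        for i in rng.permutation(X.shape[0]):
            # Update using the gradient of the squared error of example i only
            theta -= alpha * (X[i] @ theta - y[i]) * X[i]
    return theta

print(stochastic_gradient_descent(X_intercept, y))
```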
## 2. The normal equations
Gradient descent is an iterative algorithm; its very description reflects a computer-science way of thinking. The normal equations instead minimize directly: we set the derivative of $J(\theta)$ to zero and derive the corresponding $\theta$ in closed form, which is clearly a mathematical way of thinking.
### 2.1 Matrix derivatives
For a function $f: \mathbb{R}^{m \times n} \rightarrow \mathbb{R}$ that maps $m \times n$ matrices to real numbers, we define the derivative of $f$ with respect to $A$ as:
$$
\nabla_Af(A) =
\begin{bmatrix}
\frac{\partial f}{\partial A_{11}} & \dots & \frac{\partial f}{\partial A_{1n}} \\
\vdots & \ddots & \vdots \\
\frac{\partial f}{\partial A_{m1}} & \dots & \frac{\partial f}{\partial A_{mn}}
\end{bmatrix}
$$
Thus the gradient $\nabla_Af(A)$ is itself an $m \times n$ matrix whose $(i, j)$ entry is $\frac{\partial f}{\partial A_{ij}}$.
As an example with a $2 \times 2$ matrix, suppose $f(A) = \frac{3}{2}A_{11} + 5A_{12}^2 + A_{21}A_{22}$. Then:
$$
\nabla_Af(A) =
\begin{bmatrix}
\frac{\partial f}{\partial A_{11}} & \frac{\partial f}{\partial A_{12}} \\
\frac{\partial f}{\partial A_{21}} & \frac{\partial f}{\partial A_{22}}
\end{bmatrix} =
\begin{bmatrix}
\frac{3}{2} & 10A_{12} \\
A_{22} & A_{21}
\end{bmatrix}
$$
For an $n \times n$ (square) matrix $A$, the **trace** of the matrix, written $trA$, is defined as the sum of the diagonal entries:
$$ trA = \sum_{i=1}^n A_{ii} $$
If $a$ is a real number (a $1 \times 1$ matrix), then $tr\,a = a$.
For matrices $A$ and $B$ such that $AB$ is square, we have $trAB = trBA$. Proof:
Let $A$ be an $m \times n$ matrix; for $AB$ to be square, $B$ must be an $n \times m$ matrix. By the definitions of matrix multiplication and trace,
$$ trAB = \sum_{i=1}^m(\sum_{j=1}^n A_{ij}B_{ji}) = \sum_{j=1}^n(\sum_{i=1}^m B_{ji}A_{ij}) = trBA$$
This result immediately yields the corollaries:
$$ trABC = trCAB = trBCA $$
$$ trABCD = trDABC = tr CDAB = tr BCDA$$
For square matrices $A$ and $B$ and a real number $a$:
$$ trA = trA^T $$
$$ tr(A + B) = trA + trB $$
$$ traA = atrA $$
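These identities are easy to verify numerically (an illustrative check, not part of the original notes):

```python
A = np.random.rand(3, 4)
B = np.random.rand(4, 3)
# tr(AB) == tr(BA), even though AB is 3x3 while BA is 4x4
assert np.isclose(np.trace(A @ B), np.trace(B @ A))
assert np.isclose(np.trace(A @ B), np.trace((A @ B).T))
```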
Matrix derivatives satisfy the following properties:
$$
\begin{align*}
\nabla_AtrAB &= B^T &(1) \\
\nabla_{A^T}f(A) &= (\nabla_Af(A))^T &(2) \\
\nabla_A trABA^TC &= CAB + C^TAB^T &(3) \\
\nabla_A |A| &= |A|(A^{-1})^T &(4)
\end{align*}
$$
Still assuming that $A$ is an $m \times n$ matrix and $B$ is an $n \times m$ matrix, the proofs are as follows:
1) Let $f(A) = trAB$; clearly $f$ is a map satisfying $\mathbb{R}^{m \times n} \rightarrow \mathbb{R}$, and
$$ \nabla_AtrAB = \nabla_Af(A) = \nabla_A \sum_{i=1}^m\sum_{j=1}^nA_{ij}B_{ji} =
\begin{bmatrix}
B_{11} & \dots & B_{n1} \\
\vdots & \ddots & \vdots \\
B_{1m} & \dots & B_{nm}
\end{bmatrix}
= B^T
$$
2) Let $X = A^T$, i.e. $X_{ji} = A_{ij}$:
$$
\begin{equation}
\begin{split}
\nabla_{A^T}f(A) & = \nabla_{A^T}f(A_{11}, ..., A_{1n}, A_{21}, ..., A_{2n},..., A_{m1}, ..., A_{mn}) \\
& = \nabla_{X}f(X_{11}, ..., X_{n1}, X_{12}, ..., X_{n2},..., X_{1m}, ..., X_{nm}) \\
& = \nabla_{X}f(X_{11}, ..., X_{1m}, X_{21}, ..., X_{2n},..., X_{1n}, ..., X_{nm}) \\
& = \begin{bmatrix}
\frac{\partial f}{\partial X_{11}} & \dots & \frac{\partial f}{\partial X_{1m}} \\
\vdots & \ddots & \vdots \\
\frac{\partial f}{\partial X_{n1}} & \dots & \frac{\partial f}{\partial X_{nm}}
\end{bmatrix} \\
& = \begin{bmatrix}
\frac{\partial f}{\partial X_{11}} & \dots & \frac{\partial f}{\partial X_{n1}} \\
\vdots & \ddots & \vdots \\
\frac{\partial f}{\partial X_{1m}} & \dots & \frac{\partial f}{\partial X_{nm}}
\end{bmatrix}^T \\
& = \begin{bmatrix}
\frac{\partial f}{\partial A_{11}} & \dots & \frac{\partial f}{\partial A_{1n}} \\
\vdots & \ddots & \vdots \\
\frac{\partial f}{\partial A_{m1}} & \dots & \frac{\partial f}{\partial A_{mn}}
\end{bmatrix}^T \\
& = (\nabla_{A}f(A))^T
\end{split}
\end{equation}
$$
3) Omitted.
4) Omitted.
### 2.2 Least squares revisited
View the whole training set as an $m \times n$ matrix $X$ (actually $m \times (n+1)$ once the intercept term is added) and $y$ as an $m$-dimensional vector, so that $h_\theta(x) = X \theta$. The cost function can then be written:
$$
J(\theta) = \frac{1}{2}\sum_{i=1}^m (h_\theta(x^{(i)}) - y^{(i)})^2 = \frac{1}{2}(X\theta-y)^T(X\theta-y)
$$
Its derivative is:
$$
\begin{split}
\nabla_{\theta}J(\theta) & = \nabla_{\theta}\frac{1}{2}(X\theta-y)^T(X\theta-y) \\
& = \frac{1}{2}\nabla_{\theta}(\theta^TX^TX\theta - \theta^TX^Ty - y^TX\theta + y^Ty) \\
& = \frac{1}{2}\nabla_{\theta}tr(\theta^TX^TX\theta - \theta^TX^Ty - y^TX\theta + y^Ty) \\
& = \frac{1}{2}\nabla_{\theta}(tr\theta^TX^TX\theta - 2try^TX\theta) \\
& = \frac{1}{2}(X^TX\theta + X^TX\theta - 2X^Ty) \\
& = X^TX\theta - X^Ty
\end{split}
$$
In this derivation, the second step expands the matrix product; the third uses the fact that the trace of a real number equals the number itself; the fourth uses $trA = trA^T$, noting that $y^Ty$ does not involve $\theta$; the fifth first applies the formula $\nabla_AtrA^TCAB = \nabla_A trABA^TC = CAB + C^TAB^T$ with $A=\theta, C=X^TX, B=I$, and then applies $\nabla_AtrBA = \nabla_AtrAB = B^T$.
To minimize $J(\theta)$, we set its derivative to zero, obtaining the normal equations:
$$ X^TX\theta = X^Ty $$
Hence the $\theta$ that minimizes $J(\theta)$ is:
$$ \theta = (X^TX)^{-1}X^Ty $$
```python
# theta computed via the normal equations
print(inv(X_intercept.T @ X_intercept) @ X_intercept.T @ y)
```
[-34.67062078 9.10210898]
As we can see, the optimal $\theta$ computed via the normal equations is identical to the one produced by sklearn's linear model.
## 3. Probabilistic interpretation
When facing a regression problem, one may wonder: why did we choose least squares as the cost function? This section introduces some probabilistic assumptions, on top of which least-squares linear regression turns out to be a very natural choice.
Assume the target variable and the input variables are related as follows:
$$ y^{(i)} = \theta^Tx^{(i)} + \epsilon^{(i)} $$
Here $\epsilon$ is an error term capturing effects the model does not include (for example, features that matter for $y$ but were left out) or random noise. We further assume the $\epsilon^{(i)}$ are **IID (independently and identically distributed) random variables** following a Gaussian distribution with mean $0$ and variance $\sigma^2$, written $\epsilon^{(i)} \sim N(0, \sigma^2)$; that is, the density of $\epsilon^{(i)}$ is:
$$ f(\epsilon^{(i)}) = \frac{1}{\sqrt{2\pi}\sigma}exp(-\frac{(\epsilon^{(i)})^2}{2\sigma^2}) $$
That is:
$$ f(y^{(i)}|x^{(i)};\theta) = \frac{1}{\sqrt{2\pi}\sigma}exp(-\frac{(y^{(i)} - \theta^Tx^{(i)})^2}{2\sigma^2}) $$
Here $f(y^{(i)}|x^{(i)};\theta)$ denotes the probability density of $y^{(i)}$ given $x^{(i)}$, parameterized by $\theta$. Note that $\theta$ is not a random variable but a parameter describing the distribution that $y^{(i)}$ follows, so it cannot be written on the conditioning side.
Described over the whole training set, this probability becomes $f(y|X; \theta)$. If we view it as a function of $\theta$, it is called the **likelihood function**:
$$ L(\theta) = L(\theta; X, y) = f(y|X; \theta) $$
By the assumption that the $\epsilon^{(i)}$ are independent, the likelihood can be written as:
$$ L(\theta) = \prod_{i=1}^{m} f(y^{(i)}|x^{(i)};\theta) = \prod_{i=1}^{m} \frac{1}{\sqrt{2\pi}\sigma}exp(-\frac{(y^{(i)} - \theta^Tx^{(i)})^2}{2\sigma^2})$$
**Maximum likelihood estimation** tells us to choose the $\theta$ that maximizes $L(\theta)$, making the observed data as probable as possible.
Any increasing function of $L(\theta)$ attains its maximum at the same point as $L(\theta)$ itself. Hence we often maximize the **log likelihood** $\ell(\theta)$ instead:
$$
\begin{split}
\ell(\theta) & = logL(\theta) \\
& = log\prod_{i=1}^{m} \frac{1}{\sqrt{2\pi}\sigma}exp(-\frac{(y^{(i)} - \theta^Tx^{(i)})^2}{2\sigma^2}) \\
& = \sum_{i=1}^{m} log\frac{1}{\sqrt{2\pi}\sigma}exp(-\frac{(y^{(i)} - \theta^Tx^{(i)})^2}{2\sigma^2}) \\
& = mlog\frac{1}{\sqrt{2\pi}\sigma} - \frac{1}{2\sigma^2}\sum_{i=1}^{m} (y^{(i)} - \theta^Tx^{(i)})^2
\end{split}
$$
Thus maximizing $\ell(\theta)$ is equivalent to minimizing $\frac{1}{2}\sum_{i=1}^{m} (y^{(i)} - \theta^Tx^{(i)})^2$, which is exactly the cost function $J(\theta)$.
To summarize: given our probabilistic assumptions about the data, least-squares regression corresponds to finding the maximum likelihood estimate of $\theta$.
## 4. Locally weighted linear regression
```python
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="neg_mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
```
Consider the problem of predicting $y$ from $x \in \mathbb{R}$; the figure above shows that $y$ is not a linear function of $x$. In the left panel we try to fit a straight line, and the fit is poor. If we add features up to $x^4$, i.e. $y=\theta_0 + \theta_1x + \theta_2x^2 + \theta_3x^3 + \theta_4x^4$, we get the middle panel, which is very close to the true function. However, piling on ever higher-order features is also dangerous: the right panel adds features up to degree 15, i.e. $y = \sum_{j=0}^{15} \theta_jx^j$, and although the curve fits the data even more closely, it is clearly not a good model.
The model on the left is **underfitting**: it fails to capture the structure of the data. The model on the right is **overfitting**: it has optimized so aggressively for the training data that it has captured a great deal of noise and lost the ability to generalize. (Later, in learning theory, we will define these two notions more formally.)
Another conclusion we can draw is that the choice of features has a decisive influence on model performance.
In this section we briefly introduce **Locally Weighted Linear Regression (LWR)**; given enough data, this model is relatively insensitive to the choice of features.
In the linear regression model, to predict $y$ at a point $x$ we:
1. Find the $\theta$ that minimizes $\sum_i (y^{(i)}-\theta^Tx^{(i)})^2$
2. Output $\theta^Tx$
In the locally weighted linear model, we instead:
1. Find the $\theta$ that minimizes $\sum_i w^{(i)}(y^{(i)}-\theta^Tx^{(i)})^2$
2. Output $\theta^Tx$
Here the $w^{(i)}$ are non-negative **weights**. For a given example $i$, if $w^{(i)}$ is large, that example's squared error clearly carries more weight in the final cost function.
A common choice of weights is:
$$ w^{(i)} = exp(-\frac{(x^{(i)}-x)^2}{2\tau^2}) $$
The weights depend on the particular point $x$ we want to predict at: the closer a training point is to $x$, the closer its weight is to 1, and the farther away, the closer its weight is to 0. The parameter $\tau$ controls how quickly the weight falls off as $x^{(i)}$ moves away from the query point $x$, and is therefore called the **bandwidth** parameter.
Letting $W$ be the diagonal matrix with $W_{ii} = exp(-\frac{(x^{(i)}-x)^2}{2\tau^2})$, the normal equations for the locally weighted linear model become:
$$ \theta = (X^TWX)^{-1}X^TWy $$
Locally weighted linear regression is a **non-parametric** algorithm, whereas linear regression is a **parametric learning algorithm**, because the latter has a fixed, finite set of parameters $\theta$. Once we have computed the parameters $\theta$ of a linear regression and stored them, we no longer need the training set to make predictions. For locally weighted linear regression, by contrast, we must always keep the entire training set around. "Non-parametric" roughly corresponds to the fact that the amount of data we need to keep in order to represent the hypothesis $h(x)$ typically grows linearly with the size of the training set. A minimal implementation sketch follows below.
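Here is a minimal LWR sketch (illustrative, not part of the original notes); it reuses the synthetic `X` and `y` defined above, implements the weighted normal equation directly, and picks `tau` arbitrarily:

```python
def lwr_predict(x_query, X_train, y_train, tau=0.1):
    """Fit a locally weighted linear regression at a single query point."""
    m = X_train.shape[0]
    X_b = np.column_stack((np.ones(m), X_train))         # add intercept column
    W = np.diag(np.exp(-(X_train - x_query) ** 2 / (2 * tau ** 2)))
    theta = inv(X_b.T @ W @ X_b) @ X_b.T @ W @ y_train   # weighted normal equation
    return np.array([1.0, x_query]) @ theta

# A separate local fit is performed at every query point
xs = np.linspace(0, 1, 100)
preds = [lwr_predict(x0, X, y) for x0 in xs]
```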
## Appendix: References
1. [cs229-notes1](https://see.stanford.edu/materials/aimlcs229/cs229-notes1.pdf)
2. [Scikit Learn Document](http://scikit-learn.org/stable/documentation.html)
3. [机器学习实战](https://book.douban.com/subject/24703171/)
|
From sflib Require Import sflib.
From Paco Require Import paco.
From PromisingLib Require Import Axioms.
From PromisingLib Require Import Basic.
From PromisingLib Require Import Loc.
From PromisingLib Require Import Language.
From PromisingLib Require Import Event.
Require Import Time.
Require Import View.
Require Import Cell.
Require Import Memory.
Require Import TView.
Require Import Local.
Require Import Thread.
Require Import Configuration.
Require Import Progress.
Require Import LowerPromises.
Require Import SimMemory.
Require Import SimPromises.
Require Import SimLocal.
Require Import SimThread.
Set Implicit Arguments.
Lemma read_step_cur_future
lc1 mem1 loc val released ord lc2
(WF1: Local.wf lc1 mem1)
(ORD: Ordering.le ord Ordering.relaxed)
(READ: Local.read_step lc1 mem1 loc ((TView.cur (Local.tview lc1)).(View.rlx) loc) val released ord lc2):
<<PROMISES: (Local.promises lc1) = (Local.promises lc2)>> /\
<<TVIEW_RLX: (TView.cur (Local.tview lc1)).(View.rlx) = (TView.cur (Local.tview lc2)).(View.rlx)>> /\
<<TVIEW_PLN: forall l (LOC: l <> loc),
(TView.cur (Local.tview lc1)).(View.pln) l = (TView.cur (Local.tview lc2)).(View.pln) l>>.
Proof.
destruct lc1 as [tview1 promises1]. inv READ. ss.
esplits; eauto.
- condtac; ss; try by destruct ord.
apply TimeMap.antisym.
+ etrans; [|apply TimeMap.join_l]. apply TimeMap.join_l.
+ apply TimeMap.join_spec; auto using TimeMap.bot_spec.
apply TimeMap.join_spec; try refl.
unfold View.singleton_ur_if. condtac; ss.
* ii. unfold TimeMap.singleton, LocFun.add, LocFun.init, LocFun.find.
condtac; try apply Time.bot_spec.
subst. refl.
* ii. unfold TimeMap.singleton, LocFun.add, LocFun.init, LocFun.find.
condtac; try apply Time.bot_spec.
subst. refl.
- i. condtac; ss; try by destruct ord.
unfold TimeMap.join, TimeMap.bot.
rewrite TimeFacts.le_join_l; try apply Time.bot_spec.
rewrite TimeFacts.le_join_l; ss.
etrans; [|apply Time.bot_spec].
unfold View.singleton_ur_if. condtac; ss; try refl.
unfold TimeMap.singleton, LocFun.add, LocFun.init. condtac; ss. refl.
Qed.
Lemma fence_step_future
lc1 sc1 ordr ordw lc2 sc2
(ORDR: Ordering.le ordr Ordering.relaxed)
(ORDW: Ordering.le ordw Ordering.acqrel)
(FENCE: Local.fence_step lc1 sc1 ordr ordw lc2 sc2):
<<PROMISES: (Local.promises lc1) = (Local.promises lc2)>> /\
<<TVIEW: (TView.cur (Local.tview lc1)) = (TView.cur (Local.tview lc2))>>.
Proof.
destruct lc1 as [tview1 promises1]. inv FENCE. split; ss.
condtac; try by destruct ordw.
condtac; try by destruct ordr.
Qed.
Lemma write_step_consistent
lc1 sc1 mem1
loc val ord
(WF1: Local.wf lc1 mem1)
(SC1: Memory.closed_timemap sc1 mem1)
(MEM1: Memory.closed mem1)
(PROMISES1: Ordering.le Ordering.strong_relaxed ord -> Memory.nonsynch_loc loc (Local.promises lc1))
(CONS1: Local.promise_consistent lc1):
exists from to released lc2 sc2 mem2 kind,
<<STEP: Local.write_step lc1 sc1 mem1 loc from to val None released ord lc2 sc2 mem2 kind>> /\
<<CONS2: Local.promise_consistent lc2>>.
Proof.
destruct (classic (exists f t m, Memory.get loc t (Local.promises lc1) = Some (f, m) /\
m <> Message.reserve)).
{ des.
exploit Memory.min_concrete_ts_exists; eauto. i. des.
exploit Memory.min_concrete_ts_spec; eauto. i. des.
exploit Memory.get_ts; try exact GET. i. des.
{ subst. inv WF1. rewrite BOT in *. ss. }
clear f t m H H0 MIN.
exploit progress_write_step_split; try exact GET; eauto.
{ ss. unfold TimeMap.bot. apply Time.bot_spec. }
i. des.
esplits; eauto. ii.
assert (TS: loc0 = loc -> Time.le ts ts0).
{ i. subst. inv x2. inv WRITE. inv PROMISE0. ss.
revert PROMISE.
erewrite Memory.remove_o; eauto. condtac; ss. des; ss.
erewrite Memory.split_o; eauto. repeat condtac; ss; i.
- des; ss. subst. refl.
- des; ss.
exploit Memory.min_concrete_ts_spec; try exact PROMISE; eauto. i. des. ss. }
inv x2. inv WRITE. inv PROMISE0. ss.
unfold TimeMap.join, TimeMap.singleton.
unfold LocFun.add, LocFun.init, LocFun.find.
condtac; ss.
- subst. apply TimeFacts.join_spec_lt.
+ eapply TimeFacts.lt_le_lt; try eapply TS; eauto.
+ eapply TimeFacts.lt_le_lt; try eapply TS; eauto.
apply Time.middle_spec. ss.
- revert PROMISE.
erewrite Memory.remove_o; eauto. condtac; ss.
erewrite Memory.split_o; eauto. repeat condtac; ss; try by des; ss.
guardH o. guardH o0. guardH o1. i.
apply TimeFacts.join_spec_lt; eauto.
destruct (TimeFacts.le_lt_dec ts0 Time.bot); ss.
inv l; inv H.
inv WF1. rewrite BOT in *. ss.
}
{ exploit progress_write_step; eauto.
{ apply Time.incr_spec. }
i. des.
esplits; eauto. ii.
inv x0. inv WRITE. inv PROMISE0. ss.
revert PROMISE.
erewrite Memory.remove_o; eauto. condtac; ss.
erewrite Memory.add_o; eauto. condtac; ss. i.
destruct (Loc.eq_dec loc0 loc).
- subst. exfalso. apply H; eauto.
- unfold TimeMap.join, TimeMap.singleton.
unfold LocFun.add, LocFun.init, LocFun.find.
condtac; ss.
apply TimeFacts.join_spec_lt; eauto.
destruct (TimeFacts.le_lt_dec ts Time.bot); ss.
inv l; inv H0.
inv WF1. rewrite BOT in *. ss.
}
Qed.
|
module Instruction where
import Protolude
import Data.Complex (Complex (..))
import qualified Data.Matrix as M
-- NOTE: could be newtyped for more safety
type Qubit = Int
type Qubits = [Qubit]
type Operation = M.Matrix (Complex Double)
data Instruction
= Gate Operation Qubits
| Measure
| Halt
deriving (Show)
|
Require Export D.
(********** Discussion and Variations **********)
Inductive ev_list {X:Type} : list X -> Prop :=
| el_nil: ev_list []
| el_cc : forall x y l, ev_list l -> ev_list (x::y::l)
.
Lemma ev_list__ev_length: forall X (l:list X),
ev_list l -> ev (length l).
Proof. intros. induction H.
Case "el_nil". simpl. apply ev_0.
Case "el_cc". simpl. apply ev_SS. apply IHev_list.
Qed.
Lemma ev_length__ev_list: forall X n,
ev n -> forall (l:list X), n = length l -> ev_list l.
Proof. intros X n H. induction H.
Case "ev_0". destruct l.
SCase "[]". intros. apply el_nil.
SCase "x::l". intros. inversion H.
Case "ev_SS". intros. destruct l.
SCase "[]". apply el_nil. destruct l.
SCase "[x]". inversion H0.
SCase "x::x0::l". inversion H0. apply IHev in H2. apply el_cc. apply H2.
Qed.
Inductive pal {X:Type} : list X -> Prop :=
| pal_nil: pal []
| pal_sgl: forall (x:X), pal [x]
| pal_rcs: forall (x:X) (l:list X), pal l -> pal (x::(snoc l x)).
Theorem pal_app_rev : forall (X:Type) (l:list X), pal (l ++ (rev l)).
Proof.
intros. induction l.
Case "[]". simpl. apply pal_nil.
Case "x::l". simpl. replace (x :: l ++ snoc (rev l) x) with (x :: snoc (l ++ rev l) x). apply pal_rcs. apply IHl.
Lemma snoc_app_lem : forall (X:Type) (x:X) l1 l2, snoc (l1 ++ l2) x = l1 ++ snoc l2 x.
Proof. intros. induction l1. reflexivity.
simpl. rewrite IHl1. reflexivity. Qed.
rewrite snoc_app_lem with (l2:=(rev l)). reflexivity.
Qed.
Theorem pal_rev : forall (X:Type) (l:list X), pal l -> l = rev l.
Proof. intros. induction H.
Case "[]". reflexivity.
Case "[x]". reflexivity.
Case "x::l". simpl.
Lemma x_snoc_lem : forall (X:Type) (x y:X) l,
x :: snoc l y = snoc (x::l) y.
Proof. intros. simpl. reflexivity. Qed.
rewrite x_snoc_lem.
Lemma snoc_rev_lem : forall (X:Type) (x:X) l,
x :: (rev l) = rev (snoc l x).
Proof. intros. induction l.
Case "[]". simpl. reflexivity.
Case "x0::l". simpl. rewrite<-IHl. simpl. reflexivity. Qed.
assert (Hrev : x::l = x::(rev l)).
Proof. rewrite<-IHpal. reflexivity.
rewrite -> Hrev. rewrite->snoc_rev_lem. reflexivity.
Qed.
Print le.
Definition lt (n m:nat) := le (S n) m.
Inductive square_of : nat -> nat -> Prop :=
sq : forall n:nat, square_of n (n*n).
Theorem sq_3_9 : square_of 3 9.
Proof.
apply sq. Qed.
Inductive next_nat : nat -> nat -> Prop :=
nxt : forall n, next_nat n (S n).
Theorem next_2_3 : next_nat 2 3.
Proof. apply nxt. Qed.
Print ev.
Inductive next_even : nat -> nat -> Prop :=
| ne_1 : forall n, ev (S n) -> next_even n (S n)
| ne_2 : forall n, ev (S (S n)) -> next_even n (S (S n)).
Lemma le_trans : forall m n o,
m <= n -> n <= o -> m <= o.
Proof. intros m n o Hmn Hno. induction Hno as [|n o].
Case "le_n". apply Hmn.
Case "le_S". apply le_S. apply IHHno. assumption. Qed.
Theorem test_le3: 2<=1 -> 2+2=5.
Proof. intro H. inversion H. inversion H2. Qed.
Theorem O_le_n : forall n, 0<=n.
Proof. intros. induction n as [|n']. apply le_n. apply le_S. assumption. Qed.
(*********************************************************)
Print le.
Theorem n_le_m__Sn_le_Sm: forall n m, S n <= S m -> n <= m.
Proof. intros. inversion H. apply le_n. apply le_trans with (m:=n) (n:=S n) (o:=m). apply le_S. apply le_n. apply H2.
Qed.
Theorem Sn_le_Sm__n_le_m: forall n m, n <= m -> S n <= S m.
Proof. intros. induction H. apply le_n. apply le_S. apply IHle. Qed.
(*********************************************************)
Lemma n_plus_O__n: forall n, n+0=n.
Proof. intros. induction n. reflexivity. simpl. rewrite->IHn. reflexivity. Qed.
Lemma a_Sb__S_a_b : forall a b, a + S b= S (a+b).
Proof. intros. induction a. reflexivity. simpl. rewrite IHa. reflexivity. Qed.
Lemma plus_comm: forall a b, a + b = b + a.
Proof. intros. induction a.
Case "a". simpl. symmetry. apply n_plus_O__n.
Case "S a". simpl. rewrite a_Sb__S_a_b. rewrite IHa. reflexivity. Qed.
Theorem le_plus_l: forall a b, a <= a + b.
Proof. intro a. induction a.
Case "O". simpl. apply O_le_n.
Case "S a". simpl. induction b.
SCase "O". rewrite n_plus_O__n. apply le_n.
SCase "S b". replace (a + S b) with (S (a + b)). apply le_S. apply IHb. symmetry. apply a_Sb__S_a_b. Qed.
Theorem plus_lt: forall n1 n2 m,
n1 + n2 < m -> n1 < m /\ n2 < m.
Proof. intros n1 n2 m. unfold "<". split.
Case "n1". apply le_trans with (m:=S n1) (n:= S(n1+n2)) (o:=m).
replace (S (n1 + n2)) with (S n1 + n2). apply le_plus_l.
simpl. reflexivity. apply H.
Case "n2". apply le_trans with (m:=S n2) (n:=S (n1 + n2)) (o:= m). replace (S (n1 + n2)) with (S n2 + n1). apply le_plus_l. simpl. rewrite plus_comm. reflexivity. apply H. Qed.
Theorem lt_S: forall n m, n < m -> n < S m.
Proof. intros n m. unfold "<". intro H. apply le_S. apply H. Qed.
Theorem ble_nat_true: forall n m,
ble_nat n m = true -> n <= m.
Proof. intros. generalize dependent n. induction m.
Case "O". induction n.
SCase "O". intros. apply le_n.
SCase "S n". intros. inversion H.
Case "S m". intros. induction n.
SCase "O". apply O_le_n.
SCase "S n". inversion H. apply IHm in H1. apply Sn_le_Sm__n_le_m. assumption. Qed.
Theorem le_ble_nat: forall n m,
n <= m -> ble_nat n m = true.
Proof. intro n. induction n.
Case "O". intros. induction m.
SCase "O". reflexivity.
SCase "S m". reflexivity.
Case "S n". intros. induction m.
SCase "O". inversion H.
SCase "S m". simpl. apply IHn. apply n_le_m__Sn_le_Sm.
assumption. Qed.
Theorem ble_nat_true_trans: forall n m o,
ble_nat n m = true -> ble_nat m o = true -> ble_nat n o = true.
Proof. intros. apply ble_nat_true in H. apply ble_nat_true in H0.
apply le_ble_nat. apply le_trans with (m:=n) (n:=m). assumption. assumption. Qed.
Theorem ble_nat_false: forall n m,
ble_nat n m = false -> ~(n<=m).
Proof. intros. unfold not. intro Hnm. apply le_ble_nat in Hnm. rewrite H in Hnm. inversion Hnm. Qed.
Inductive R : nat -> nat -> nat -> Prop :=
| c1 : R 0 0 0
| c2 : forall m n o, R m n o -> R (S m) n (S o)
| c3 : forall m n o, R m n o -> R m (S n) (S o)
| c4 : forall m n o, R (S m) (S n) (S (S o)) -> R m n o
| c5 : forall m n o, R m n o -> R n m o.
Theorem R112 : R 1 1 2.
Proof. apply c2. apply c3. apply c1. Qed.
(***************************************************)
Theorem R_mno :forall m n o, R m n o -> m + n = o.
Proof. intros. generalize dependent n. generalize dependent m. induction o.
Case "O". intros. inversion H. reflexivity. inversion H0. inversion H7. subst. Abort.
Inductive RR: nat -> list nat -> Prop :=
| Rc1: RR 0 []
| Rc2: forall n l, RR n l -> RR (S n) (n::l)
| Rc3: forall n l, RR (S n) l -> RR n l.
Theorem RR1210: RR 2 [1;2;1;0].
Proof. apply Rc2. apply Rc3. apply Rc3. apply Rc2. apply Rc2. apply Rc2. apply Rc1. Qed.
(********** Programming with Propositions **********)
Definition plus_fact : Prop := 2 + 2 = 4.
Check plus_fact.
Theorem plus_fact_is_true : plus_fact.
Proof. reflexivity. Qed.
Definition true_for_zero (P:nat->Prop) : Prop :=
P 0.
Definition true_for_all_numbers (P:nat -> Prop) : Prop :=
forall n, P n.
Definition preserved_by_S (P:nat -> Prop): Prop :=
forall (n:nat), P n -> P (S n).
Definition natural_number_induction: Prop :=
forall (P:nat->Prop),
true_for_zero P ->
preserved_by_S P ->
true_for_all_numbers P.
Definition combine_odd_even (Podd Peven : nat -> Prop) :
nat -> Prop :=
fun n => if oddb n then Podd n else Peven n.
Theorem combine_odd_even_intro :
forall (Podd Peven : nat -> Prop) (n:nat),
(oddb n = true -> Podd n) ->
(oddb n = false -> Peven n) ->
combine_odd_even Podd Peven n.
Proof. intros. unfold combine_odd_even. destruct (oddb n) eqn:Hodd. apply H. reflexivity. apply H0. reflexivity. Qed.
Theorem combine_odd_even_elim_odd :
forall (Podd Peven : nat -> Prop) (n : nat),
combine_odd_even Podd Peven n ->
oddb n = true ->
Podd n.
Proof. intros. unfold combine_odd_even in H. rewrite H0 in H. assumption. Qed.
|