.PH 1 1 \*(XL/paris_hours/phf.bnd
|
From Coq Require Import Eqdep_dec. (* Needed to prove decidable equality on RegName *)
From stdpp Require Import gmap fin_maps list finite.
(* We assume a fixed set of registers, and a finite set of memory addresses.
The exact size of the address space does not matter, it could be made a
parameter of the machine.
*)
Definition RegNum: nat := 31.
Definition MemNum: Z := 2000000.
(* ---------------------------------- Registers ----------------------------------------*)
Inductive RegName: Type :=
| PC
| R (n: nat) (fin: n <=? RegNum = true).
Global Instance reg_eq_dec : EqDecision RegName.
Proof. intros r1 r2. destruct r1,r2; [by left | by right | by right |].
destruct (nat_eq_dec n n0).
+ subst n0. left.
assert (forall (b: bool) (n m: nat) (P1 P2: n <=? m = b), P1 = P2).
{ intros. apply eq_proofs_unicity.
intros; destruct x; destruct y; auto. }
rewrite (H _ _ _ fin fin0). reflexivity.
+ right. congruence.
Defined.
Lemma reg_eq_sym (r1 r2 : RegName) : r1 ≠ r2 → r2 ≠ r1. Proof. auto. Qed.
Program Definition n_to_regname (n : nat) : option RegName :=
if (nat_le_dec n RegNum) then Some (R n _) else None.
Next Obligation.
intros. eapply Nat.leb_le; eauto.
Defined.
Definition all_registers: list RegName :=
[R 0 eq_refl; R 1 eq_refl; R 2 eq_refl; R 3 eq_refl; R 4 eq_refl; R 5 eq_refl;
R 6 eq_refl; R 7 eq_refl; R 8 eq_refl; R 9 eq_refl; R 10 eq_refl; R 11 eq_refl;
R 12 eq_refl; R 13 eq_refl; R 14 eq_refl; R 15 eq_refl; R 16 eq_refl; R 17 eq_refl;
R 18 eq_refl; R 19 eq_refl; R 20 eq_refl; R 21 eq_refl; R 22 eq_refl; R 23 eq_refl;
R 24 eq_refl; R 25 eq_refl; R 26 eq_refl; R 27 eq_refl; R 28 eq_refl; R 29 eq_refl;
R 30 eq_refl; R 31 eq_refl; PC].
Global Instance RegName_finite: Finite RegName.
Proof.
refine {| enum := all_registers;
NoDup_enum := _;
elem_of_enum := _ |}.
{ repeat (econstructor; [set_solver|]).
econstructor. }
{ destruct x; [set_solver|].
do 32 (destruct n as [|n]; [rewrite (@Eqdep_dec.eq_proofs_unicity bool ltac:(decide equality) _ _ fin (eq_refl _)); set_solver|]).
simpl in fin. discriminate. }
Qed.
Global Instance reg_countable : Countable RegName.
Proof.
refine {| encode r := encode match r with
| PC => inl ()
| R n fin => inr n
end ;
decode n := match (decode n) with
| Some (inl ()) => Some PC
| Some (inr n) => n_to_regname n
| None => None
end ;
decode_encode := _ |}.
intro r. destruct r; auto.
rewrite decode_encode.
unfold n_to_regname.
destruct (nat_le_dec n RegNum).
- do 2 f_equal. apply eq_proofs_unicity. decide equality.
- exfalso. by apply (Nat.leb_le n RegNum) in fin.
Defined.
(* -------------------------------- Memory addresses -----------------------------------*)
Inductive Addr: Type :=
| A (z : Z) (fin: Z.leb z MemNum = true) (pos: Z.leb 0 z = true).
Definition z_of (a: Addr): Z :=
match a with
| A z _ _ => z
end.
Coercion z_of: Addr >-> Z.
Lemma z_of_eq a1 a2 :
z_of a1 = z_of a2 ->
a1 = a2.
Proof.
destruct a1, a2; cbn. intros ->.
repeat f_equal; apply eq_proofs_unicity; decide equality.
Qed.
Lemma eq_z_of a1 a2 :
a1 = a2 ->
z_of a1 = z_of a2.
Proof. destruct a1; destruct a2. congruence. Qed.
Lemma z_of_neq a1 a2 :
z_of a1 <> z_of a2 ->
a1 <> a2.
Proof. red; intros. apply H. rewrite H0; reflexivity. Qed.
Lemma neq_z_of a1 a2 :
a1 ≠ a2 → (z_of a1) ≠ (z_of a2).
Proof. intros. intros Heq%z_of_eq. congruence. Qed.
Global Instance addr_eq_dec: EqDecision Addr.
intros x y. destruct x,y. destruct (Z_eq_dec z z0).
- left. eapply z_of_eq; eauto.
- right. inversion 1. simplify_eq.
Defined.
Definition z_to_addr (z : Z) : option Addr.
Proof.
destruct (Z_le_dec z MemNum),(Z_le_dec 0 z).
- apply (Z.leb_le z MemNum) in l.
apply (Z.leb_le 0 z) in l0.
exact (Some (A z l l0)).
- exact None.
- exact None.
- exact None.
Defined.
Lemma addr_spec (a: Addr) : (a <= MemNum)%Z ∧ (0 <= a)%Z.
Proof. destruct a. cbn. rewrite Z.leb_le in fin. rewrite Z.leb_le in pos. lia. Qed.
Lemma z_to_addr_z_of (a:Addr) :
z_to_addr a = Some a.
Proof.
generalize (addr_spec a); intros [? ?].
set (z := (z_of a)) in *.
unfold z_to_addr.
destruct (Z_le_dec z MemNum) eqn:?;
destruct (Z_le_dec 0 z) eqn:?.
{ f_equal. apply z_of_eq. cbn. lia. }
all: lia.
Qed.
Lemma z_to_addr_eq_inv (a b:Addr) :
z_to_addr a = Some b → a = b.
Proof. rewrite z_to_addr_z_of. naive_solver. Qed.
Global Instance addr_countable : Countable Addr.
Proof.
refine {| encode r := encode (z_of r) ;
decode n := match (decode n) with
| Some z => z_to_addr z
| None => None
end ;
decode_encode := _ |}.
intro r. destruct r; auto.
rewrite decode_encode.
unfold z_to_addr. simpl.
destruct (Z_le_dec z MemNum),(Z_le_dec 0 z).
- repeat f_equal; apply eq_proofs_unicity; decide equality.
- exfalso. by apply (Z.leb_le 0 z) in pos.
- exfalso. by apply (Z.leb_le z MemNum) in fin.
- exfalso. by apply (Z.leb_le z MemNum) in fin.
Defined.
Definition le_lt_addr : Addr → Addr → Addr → Prop :=
λ a1 a2 a3, (a1 <= a2 < a3)%Z.
Definition le_addr : Addr → Addr → Prop :=
λ a1 a2, (a1 <= a2)%Z.
Definition lt_addr : Addr → Addr → Prop :=
λ a1 a2, (a1 < a2)%Z.
Definition leb_addr : Addr → Addr → bool :=
λ a1 a2, Z.leb a1 a2.
Definition ltb_addr : Addr → Addr → bool :=
λ a1 a2, Z.ltb a1 a2.
Definition eqb_addr : Addr → Addr → bool :=
λ a1 a2, Z.eqb a1 a2.
Definition za : Addr := A 0%Z eq_refl eq_refl.
Definition top : Addr := A MemNum eq_refl eq_refl.
Delimit Scope Addr_scope with a.
Notation "a1 <= a2 < a3" := (le_lt_addr a1 a2 a3): Addr_scope.
Notation "a1 <= a2" := (le_addr a1 a2): Addr_scope.
Notation "a1 <=? a2" := (leb_addr a1 a2): Addr_scope.
Notation "a1 < a2" := (lt_addr a1 a2): Addr_scope.
Notation "a1 <? a2" := (ltb_addr a1 a2): Addr_scope.
Notation "a1 =? a2" := (eqb_addr a1 a2): Addr_scope.
Notation "0" := (za) : Addr_scope.
Global Instance Addr_le_dec : RelDecision le_addr.
Proof. intros x y. destruct x,y. destruct (Z_le_dec z z0); [by left|by right]. Defined.
Global Instance Addr_lt_dec : RelDecision lt_addr.
Proof. intros x y. destruct x,y. destruct (Z_lt_dec z z0); [by left|by right]. Defined.
Program Definition incr_addr (a: Addr) (z: Z): option Addr :=
if (Z_le_dec (a + z)%Z MemNum) then
if (Z_le_dec 0 (a + z)%Z) then Some (A (a + z)%Z _ _) else None else None.
Next Obligation.
intros. apply Z.leb_le; auto.
Defined.
Next Obligation.
intros. apply Z.leb_le; auto.
Defined.
Notation "a1 + z" := (incr_addr a1 z): Addr_scope.
Definition max (a1 a2: Addr): Addr :=
if Addr_le_dec a1 a2 then a2 else a1.
Definition min (a1 a2: Addr): Addr :=
if Addr_le_dec a1 a2 then a1 else a2.
Lemma min_addr_spec (a1 a2: Addr):
exists a, min a1 a2 = a /\ (a: Z) = Z.min (a1: Z) (a2: Z).
Proof.
exists (min a1 a2); split; auto.
unfold min. destruct (Addr_le_dec a1 a2); unfold le_addr in *; lia.
Qed.
Ltac min_addr_as_spec a1 a2 :=
generalize (min_addr_spec a1 a2); intros [? [? ?]];
let ax := fresh "ax" in
set (ax := (min a1 a2)) in *;
clearbody ax; subst ax.
Lemma max_addr_spec (a1 a2: Addr):
exists a, max a1 a2 = a /\ (a: Z) = Z.max (a1: Z) (a2: Z).
Proof.
exists (max a1 a2); split; auto.
unfold max. destruct (Addr_le_dec a1 a2); unfold le_addr in *; lia.
Qed.
Ltac max_addr_as_spec a1 a2 :=
generalize (max_addr_spec a1 a2); intros [? [? ?]];
let ax := fresh "ax" in
set (ax := (max a1 a2)) in *;
clearbody ax; subst ax.
Definition get_addr_from_option_addr : option Addr → Addr :=
λ e_opt, match e_opt with
| Some e => e
| None => top%a
end.
Notation "^ a" := (get_addr_from_option_addr a) (format "^ a", at level 1) : Addr_scope.
(** Automation *)
(*** A zify-like tactic to send arithmetic on addresses into Z ******)
Lemma incr_addr_spec (a: Addr) (z: Z) :
(exists (a': Addr),
(a + z)%a = Some a' /\ a + z <= MemNum /\ 0 ≤ a + z ∧ (a':Z) = a + z)%Z
\/
((a + z)%a = None /\ (a + z > MemNum ∨ a + z < 0))%Z.
Proof.
unfold incr_addr.
destruct (Z_le_dec (a + z)%Z MemNum),(Z_le_dec 0 (a + z)%Z); [ left | right; split; auto; try lia..].
eexists. repeat split; lia.
Qed.
Ltac incr_addr_as_spec a x :=
generalize (incr_addr_spec a x); intros [(?&?&?&?&?)|(?&[?|?])];
let ax := fresh "ax" in
set (ax := (incr_addr a x)) in *;
clearbody ax; subst ax.
Lemma Some_eq_inj A (x y: A) :
Some x = Some y ->
x = y.
Proof. congruence. Qed.
Ltac zify_addr_op_nonbranching_step :=
lazymatch goal with
| |- @eq Addr ?a ?a' =>
apply z_of_eq
| H : @eq Addr ?a ?a' |- _ =>
apply eq_z_of in H
| |- not (@eq Addr ?a ?a') =>
apply z_of_neq
| H : not (@eq Addr ?a ?a') |- _ =>
apply neq_z_of in H
| |- @eq (option Addr) (Some _) (Some _) =>
f_equal
| H : @eq (option Addr) (Some _) (Some _) |- _ =>
apply Some_eq_inj in H
| |- @eq (option Addr) (Some _) None =>
exfalso
| |- @eq (option Addr) None (Some _) =>
exfalso
(* wrapper definitions to unfold (<=, <, etc) *)
| |- context [ le_lt_addr _ _ _ ] =>
unfold le_lt_addr
| H : context [ le_lt_addr _ _ _ ] |- _ =>
unfold le_lt_addr in H
| |- context [ le_addr _ _ ] =>
unfold le_addr
| H : context [ le_addr _ _ ] |- _ =>
unfold le_addr in H
| |- context [ leb_addr _ _ ] =>
unfold leb_addr
| H : context [ leb_addr _ _ ] |- _ =>
unfold leb_addr in H
| |- context [ lt_addr _ _ ] =>
unfold lt_addr
| H : context [ lt_addr _ _ ] |- _ =>
unfold lt_addr in H
| |- context [ ltb_addr _ _ ] =>
unfold ltb_addr
| H : context [ ltb_addr _ _ ] |- _ =>
unfold ltb_addr in H
| |- context [ eqb_addr _ _ ] =>
unfold eqb_addr
| H : context [ eqb_addr _ _ ] |- _ =>
unfold eqb_addr in H
| H : context [ min ?a1 ?a2 ] |- _ =>
min_addr_as_spec a1 a2
| |- context [ min ?a1 ?a2 ] =>
min_addr_as_spec a1 a2
| H : context [ max ?a1 ?a2 ] |- _ =>
max_addr_as_spec a1 a2
| |- context [ max ?a1 ?a2 ] =>
max_addr_as_spec a1 a2
end.
Ltac zify_addr_nonbranching_step :=
first [ progress (cbn in *)
| zify_addr_op_nonbranching_step ].
Ltac zify_addr_op_branching_goal_step :=
lazymatch goal with
| |- context [ incr_addr ?a ?x ] =>
incr_addr_as_spec a x
end.
Ltac zify_addr_op_branching_hyps_step :=
lazymatch goal with
| _ : context [ incr_addr ?a ?x ] |- _ =>
incr_addr_as_spec a x
end.
Ltac zify_addr_ty_step :=
lazymatch goal with
| a : Addr |- _ =>
generalize (addr_spec a); intros [? ?];
let z := fresh "z" in
set (z := (z_of a)) in *;
clearbody z;
first [ clear a | revert dependent a ]
end.
(** zify_addr **)
(* This greedily translates all the address-related terms in the goal and in the
context. Because each (_ + _) introduces a disjunction, the number of goals
quickly explodes if there are many (_ + _) in the context.
The solve_addr tactic below is more clever and tries to limit the
combinatorial explosion, but zify_addr does not. *)
Ltac zify_addr :=
repeat (first [ zify_addr_nonbranching_step
| zify_addr_op_branching_goal_step
| zify_addr_op_branching_hyps_step ]);
repeat zify_addr_ty_step; intros.
(** solve_addr *)
(* From a high-level perspective, [solve_addr] is equivalent to [zify_addr]
followed by [lia].
However, this gets very slow when there are many (_ + _) in the context (and
some of those may not be relevant to prove the goal at hand), so the
implementation is a bit more clever. Instead, we try to call [lia] as soon as
possible to quickly terminate sub-goals that can be proved before the whole
context gets translated. *)
Ltac zify_addr_op_goal_step :=
first [ zify_addr_nonbranching_step
| zify_addr_op_branching_goal_step ].
Ltac zify_addr_op_deepen :=
zify_addr_op_branching_hyps_step;
repeat zify_addr_nonbranching_step;
try (
zify_addr_op_branching_hyps_step;
repeat zify_addr_nonbranching_step
).
Ltac solve_addr_close_proof :=
repeat zify_addr_ty_step; intros;
solve [ auto | lia | congruence ].
Ltac solve_addr :=
intros;
repeat zify_addr_op_goal_step;
try solve_addr_close_proof;
repeat (
zify_addr_op_deepen;
try solve_addr_close_proof
);
solve_addr_close_proof.
Goal forall a : Addr,
(a + -(a + 3))%a = None.
Proof.
intros. solve_addr.
Qed.
Goal forall (a a' b b' : Addr),
(a + 1)%a = Some a' ->
(b + 1)%a = Some b' ->
(a + 0)%a = Some a.
Proof.
intros.
repeat zify_addr_op_goal_step.
(* Check that we can actually terminate early before translating the whole
context. *)
solve_addr_close_proof.
solve_addr_close_proof.
solve_addr_close_proof.
Qed.
(* ------------------ *)
(* Hack: modify [zify] to make it support [Z.to_nat] (used in the definition of
[region_size]). *)
(* TODO: remove the code below whenever we upgrade to Coq 8.11, as the issue has
been fixed upstream starting from Coq 8.11.
*)
(* Lemma Z_of_nat_zify : forall x, Z.of_nat (Z.to_nat x) = Z.max 0 x. *)
(* Proof. *)
(* intros x. destruct x. *)
(* - rewrite Z2Nat.id; reflexivity. *)
(* - rewrite Z2Nat.inj_pos. lia. *)
(* - rewrite Z2Nat.inj_neg. lia. *)
(* Qed. *)
(* Ltac zify_nat_op_extended := *)
(* match goal with *)
(* | H : context [ Z.of_nat (Z.to_nat ?a) ] |- _ => rewrite (Z_of_nat_zify a) in H *)
(* | |- context [ Z.of_nat (Z.to_nat ?a) ] => rewrite (Z_of_nat_zify a) *)
(* | _ => zify_nat_op *)
(* end. *)
(* Global Ltac zify_nat ::= *)
(* repeat zify_nat_rel; repeat zify_nat_op_extended; unfold Z_of_nat' in *. *)
(* --------------------------- BASIC LEMMAS --------------------------------- *)
(** Address arithmetic *)
Lemma addr_add_0 a: (a + 0)%a = Some a.
Proof. solve_addr. Qed.
Lemma incr_addr_one_none a :
(a + 1)%a = None ->
a = top.
Proof. solve_addr. Qed.
Lemma incr_addr_opt_add_twice (a: Addr) (n m: Z) :
(0 <= n)%Z ->
(0 <= m)%Z ->
^(^(a + n) + m)%a = ^(a + (n + m)%Z)%a.
Proof. solve_addr. Qed.
Lemma top_le_eq a : (top <= a)%a → a = top.
Proof. solve_addr. Qed.
Lemma top_not_le_eq a : ¬ (a < top)%a → a = top.
Proof. solve_addr. Qed.
Lemma next_lt (a a' : Addr) :
(a + 1)%a = Some a' → (a < a')%Z.
Proof. solve_addr. Qed.
Lemma next_lt_i (a a' : Addr) (i : Z) :
(i > 0)%Z →
(a + i)%a = Some a' → (a < a')%Z.
Proof. solve_addr. Qed.
Lemma next_le_i (a a' : Addr) (i : Z) :
(i >= 0)%Z →
(a + i)%a = Some a' → (a <= a')%Z.
Proof. solve_addr. Qed.
Lemma next_lt_top (a : Addr) i :
(i > 0)%Z →
is_Some (a + i)%a → a ≠ top.
Proof. intros ? [? ?] ?. solve_addr. Qed.
Lemma addr_next_le (a e : Addr) :
(a < e)%Z → ∃ a', (a + 1)%a = Some a'.
Proof. intros. zify_addr; eauto. exfalso. lia. lia. Qed.
Lemma addr_next_lt (a e : Addr) :
(a < e)%Z -> ∃ a', (a + 1)%a = Some a'.
Proof. intros. zify_addr; eauto. exfalso. lia. lia. Qed.
Lemma addr_next_lt_gt_contr (a e a' : Addr) :
(a < e)%Z → (a + 1)%a = Some a' → (e < a')%Z → False.
Proof. solve_addr. Qed.
Lemma addr_next_lt_le (a e a' : Addr) :
(a < e)%Z → (a + 1)%a = Some a' → (a' ≤ e)%Z.
Proof. solve_addr. Qed.
Lemma addr_abs_next (a e a' : Addr) :
(a + 1)%a = Some a' → (a < e)%Z → (Z.abs_nat (e - a) - 1) = (Z.abs_nat (e - a')).
Proof. solve_addr. Qed.
Lemma addr_unique a a' fin fin' pos pos' :
a = a' → A a fin pos = A a' fin' pos'.
Proof.
intros ->. repeat f_equal; apply eq_proofs_unicity; decide equality.
Qed.
Lemma incr_addr_trans (a1 a2 a3 : Addr) (z1 z2 : Z) :
(a1 + z1)%a = Some a2 → (a2 + z2)%a = Some a3 →
(a1 + (z1 + z2))%a = Some a3.
Proof. solve_addr. Qed.
Lemma addr_add_assoc (a a' : Addr) (z1 z2 : Z) :
(a + z1)%a = Some a' →
(a + (z1 + z2))%a = (a' + z2)%a.
Proof. solve_addr. Qed.
Lemma incr_addr_le (a1 a2 a3 : Addr) (z1 z2 : Z) :
(a1 + z1)%a = Some a2 -> (a1 + z2)%a = Some a3 -> (z1 <= z2)%Z ->
(a2 <= a3)%Z.
Proof. solve_addr. Qed.
Lemma incr_addr_ne a i :
i ≠ 0%Z → a ≠ top →
^ (a + i)%a ≠ a.
Proof. intros H1 H2. intro. apply H2. solve_addr. Qed.
Lemma incr_addr_ne_top a z a' :
(z > 0)%Z → (a + z)%a = Some a' →
a ≠ top.
Proof. intros. intro. solve_addr. Qed.
Lemma get_addrs_from_option_addr_comm a i k :
(k >= 0)%Z -> (i >= 0)%Z ->
(^(^(a + i) + k)%a) =
(^(a + (i + k)%Z)%a).
Proof. solve_addr. Qed.
Lemma incr_addr_of_z (a a' : Addr) :
(a + 1)%a = Some a' →
(a + 1)%Z = a'.
Proof. solve_addr. Qed.
Lemma incr_addr_of_z_i (a a' : Addr) i :
(a + i)%a = Some a' →
(a + i)%Z = a'.
Proof. solve_addr. Qed.
Lemma invert_incr_addr (a1 a2: Addr) (z:Z):
(a1 + z)%a = Some a2 → (a2 + (- z))%a = Some a1.
Proof. solve_addr. Qed.
|
module FuncImpl
public export
interface Monad m => FooBar m where
Foo : {A : Type} -> A -> m A -> Type
bar : {A : Type} -> (ma : m A) -> m (DPair A (\ a => Foo a ma))
foobar : {A : Type} -> (ma : m A) -> map DPair.fst (bar ma) = ma
|
theory ExF017
imports Main
begin
lemma "P a (Q (Q a)) \<longrightarrow> (\<forall>x. \<forall>y. P x (Q y) \<longrightarrow> (\<exists>z. P (Q z) y)) \<longrightarrow> (\<exists>z. P z a)"
proof -
{
assume a:"P a (Q (Q a))"
hence b:"\<exists>z. P z (Q (Q a))" by (rule exI)
{
assume c:"(\<forall>x. \<forall>y. P x (Q y) \<longrightarrow> (\<exists>z. P (Q z) y))"
hence "(\<forall>y. P a (Q y) \<longrightarrow> (\<exists>z. P (Q z) y))" ..
hence "(P a (Q (Q a)) \<longrightarrow> (\<exists>z. P (Q z) (Q a)))" ..
from this a have d:"(\<exists>z. P (Q z) (Q a))" by (rule mp)
{
assume e:"(\<forall>z . \<not>P z a)"
{
fix b
assume f:"P (Q b) (Q a)"
from c have "(\<forall>y. P (Q b) (Q y) \<longrightarrow> (\<exists>z. P (Q z) y))" by (rule allE)
hence "P (Q b) (Q a) \<longrightarrow> (\<exists>z. P (Q z) a)" by (rule allE)
from this and f have g:"\<exists>z. P (Q z) a" by (rule mp)
{
fix c
assume h:"P (Q c) a"
from e have "\<not>P (Q c) a" by (rule allE)
with h have False by contradiction
}
with g have False by (rule exE)
}
with d have False by (rule exE)
}
hence e:"\<not>(\<forall>z . \<not>P z a)" by (rule notI)
have f:"(\<exists>z. P z a)"
proof -
{
assume g:"\<not>(\<exists>z. P z a)"
have h:"\<forall>z. \<not>P z a"
proof -
{
assume i:"\<not>(\<forall>z . \<not>P z a)"
{
fix b
{
assume "P b a"
hence "\<exists>z. P z a" by (rule exI)
with g have False by contradiction
}
hence "\<not>P b a" by (rule notI)
}
hence "\<forall>z. \<not>P z a" by (rule allI)
with i have False by contradiction
}
hence "\<not>\<not>(\<forall>z . \<not>P z a)" by (rule notI)
thus "\<forall>z . \<not>P z a" by (rule notnotD)
qed
from h and e have False by contradiction
}
hence "\<not>\<not>(\<exists>z. P z a)" by (rule notI)
thus "\<exists>z. P z a" by (rule notnotD)
qed
}
hence "((\<forall>x. \<forall>y. P x (Q y) \<longrightarrow> (\<exists>z. P (Q z) y))) \<longrightarrow> (\<exists>z. P z a)" by (rule impI)
}
thus ?thesis by (rule impI)
qed
|
// Copyright (c) 2014 The Bitcoin Core developers
// Copyright (c) 2014-2015 The Dash developers
// Copyright (c) 2015-2017 The PIVX developers
// Copyright (c) 2018 The Fargocash developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "primitives/transaction.h"
#include "main.h"
#include <boost/test/unit_test.hpp>
BOOST_AUTO_TEST_SUITE(main_tests)
BOOST_AUTO_TEST_CASE(subsidy_limit_test)
{
//Basic rewards
BOOST_CHECK(GetBlockValue(500) == 999 * COIN);
BOOST_CHECK(GetBlockValue(21904) == 5 * COIN);
//Halvings
int nHeight = 720 /*1d*/ * 365 * 2; //2y
BOOST_CHECK(GetBlockValue(nHeight) == 5/2 * COIN);
//Halvings
nHeight = 720 /*1d*/ * 365 * 4; //4y
BOOST_CHECK(GetBlockValue(nHeight) == 5/4 * COIN);
}
BOOST_AUTO_TEST_SUITE_END()
|
lemma borel_measurable_vimage_halfspace_component_lt: "f \<in> borel_measurable (lebesgue_on S) \<longleftrightarrow> (\<forall>a i. i \<in> Basis \<longrightarrow> {x \<in> S. f x \<bullet> i < a} \<in> sets (lebesgue_on S))" |
open import Everything
module Test.Test5
{𝔵} {𝔛 : Ø 𝔵}
{𝔞} {𝔒₁ : 𝔛 → Ø 𝔞}
{𝔟} {𝔒₂ : 𝔛 → Ø 𝔟}
{ℓ}
{ℓ̇} (_↦_ : ∀ {x} → 𝔒₂ x → 𝔒₂ x → Ø ℓ̇)
⦃ _ : [ExtensibleType] _↦_ ⦄
⦃ _ : Smap!.class (Arrow 𝔒₁ 𝔒₂) (Extension 𝔒₂) ⦄
⦃ _ : Surjextensionality!.class (Arrow 𝔒₁ 𝔒₂) (Pointwise _↦_) (Extension 𝔒₂) (Pointwise _↦_) ⦄
-- ⦃ _ : [𝓢urjectivity] (Arrow 𝔒₁ 𝔒₂) (Extension $ ArrowExtensionṖroperty ℓ 𝔒₁ 𝔒₂ _↦_) ⦄
where
test[∙] : ∀ {x y} → ArrowExtensionṖroperty ℓ 𝔒₁ 𝔒₂ _↦_ x → Arrow 𝔒₁ 𝔒₂ x y → ArrowExtensionṖroperty ℓ 𝔒₁ 𝔒₂ _↦_ y
test[∙] P f = f ◃ P
|
PROGRAM p1
USE mod12
USE mod13
INTEGER :: x = 1
INTEGER :: y = 2
INTEGER :: z
z = ( x + y )
END PROGRAM p1
|
theorem T2 (a b c d : ℕ) (h1 : a = b) (h2 : b ≤ c) (h3 : c + 1 < d) : a < d :=
calc
a = b : h1
... < b + 1 : nat.lt_succ_self b
... ≤ c + 1 : nat.succ_le_succ h2
... < d : h3
|
import topology.basic
import topology.compact_open
import data.nat.prime
import data.real.basic
import data.real.irrational
import data.complex.basic
import data.fin.basic
import geometry.euclidean.basic
import analysis.inner_product_space.pi_L2
import algebra.group.defs
import algebra.field.basic
import combinatorics.configuration
import ring_theory.polynomial.basic
import group_theory.free_group
import combinatorics.simple_graph.basic
import ring_theory.integral_closure
import data.fintype.card
import category_theory.category.basic
import ring_theory.discrete_valuation_ring
import group_theory.torsion
import linear_algebra.matrix.charpoly.basic
import algebra.order.absolute_value
import analysis.convex.basic
import topology.uniform_space.uniform_convergence_topology
import topology.sequences
import analysis.normed.group.infinite_sum
import data.nat.choose.sum
import group_theory.specific_groups.cyclic
import group_theory.order_of_element
import analysis.mean_inequalities
import analysis.normed_space.banach
import topology.algebra.continuous_monoid_hom
import linear_algebra.matrix.symmetric
import analysis.inner_product_space.spectrum
import ring_theory.class_group
import ring_theory.dedekind_domain.basic
import ring_theory.principal_ideal_domain
import model_theory.satisfiability
import probability.integration
import ring_theory.simple_module
import category_theory.preadditive.schur
import representation_theory.maschke
import topology.paracompact
import combinatorics.simple_graph.coloring
/- FEW SHOT PROMPTS TO CODEX(START)
/--`theorem`
Power Set is Closed under Intersection
Let $S$ be a set.
Let $\powerset S$ be the power set of $S$.
Then:
:$\forall A, B \in \powerset S: A \cap B \in \powerset S$
`proof`
Let $A, B \in \powerset S$.
Then by the definition of power set, $A \subseteq S$ and $B \subseteq S$.
From Intersection is Subset we have that $A \cap B \subseteq A$.
It follows from Subset Relation is Transitive that $A \cap B \subseteq S$.
Thus $A \cap B \in \powerset S$ and closure is proved.
{{qed}}
-/
theorem power_set_intersection_closed {α : Type*} (S : set α) : ∀ A B ∈ 𝒫 S, (A ∩ B) ∈ 𝒫 S :=
begin
-- $A$ and $B$ are sets. $A$ and $B$ belong to power set of $S$
assume (A : set α) (hA : A ∈ 𝒫 S) (B : set α) (hB : B ∈ 𝒫 S),
-- Then $A ⊆ S$ and $B ⊆ S$, by power set definition
have h1 : (A ⊆ S) ∧ (B ⊆ S), from by auto [set.subset_of_mem_powerset, set.subset_of_mem_powerset],
-- Then $(A ∩ B) ⊆ A$, by intersection of set is a subset
have h2 : (A ∩ B) ⊆ A, from by auto [set.inter_subset_left],
-- Then $(A ∩ B) ⊆ S$, by subset relation is transitive
have h3 : (A ∩ B) ⊆ S, from by auto [set.subset.trans],
-- Hence $(A ∩ B) ∈ 𝒫 S$, by power set definition
show (A ∩ B) ∈ 𝒫 S, from by auto [set.mem_powerset],
end
/--`theorem`
Square of Sum
:$\forall x, y \in \R: \paren {x + y}^2 = x^2 + 2 x y + y^2$
`proof`
Follows from the distribution of multiplication over addition:
{{begin-eqn}}
{{eqn | l = \left({x + y}\right)^2
| r = \left({x + y}\right) \cdot \left({x + y}\right)
}}
{{eqn | r = x \cdot \left({x + y}\right) + y \cdot \left({x + y}\right)
| c = Real Multiplication Distributes over Addition
}}
{{eqn | r = x \cdot x + x \cdot y + y \cdot x + y \cdot y
| c = Real Multiplication Distributes over Addition
}}
{{eqn | r = x^2 + 2xy + y^2
| c =
}}
{{end-eqn}}
{{qed}}
-/
theorem square_of_sum (x y : ℝ) : (x + y)^2 = (x^2 + 2*x*y + y^2) :=
begin
-- expand the power
calc (x + y)^2 = (x+y)*(x+y) : by auto [sq]
-- distributive property of multiplication over addition gives:
... = x*(x+y) + y*(x+y) : by auto [add_mul]
-- applying the above property further gives:
... = x*x + x*y + y*x + y*y : by auto [mul_comm, add_mul] using [ring]
-- rearranging the terms using commutativity and adding gives:
... = x^2 + 2*x*y + y^2 : by auto [sq, mul_comm] using [ring]
end
/--`theorem`
Identity of Group is Unique
Let $\struct {G, \circ}$ be a group. Then there is a unique identity element $e \in G$.
`proof`
From Group has Latin Square Property, there exists a unique $x \in G$ such that:
:$a x = b$
and there exists a unique $y \in G$ such that:
:$y a = b$
Setting $b = a$, this becomes:
There exists a unique $x \in G$ such that:
:$a x = a$
and there exists a unique $y \in G$ such that:
:$y a = a$
These $x$ and $y$ are both $e$, by definition of identity element.
{{qed}}
-/
theorem group_identity_unique {G : Type*} [group G] : ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a :=
begin
-- Group has Latin Square Property
have h1 : ∀ a b : G, ∃! x : G, a * x = b, from by auto using [use (a⁻¹ * b)],
have h2 : ∀ a b : G, ∃! y : G, y * a = b, from by auto using [use b * a⁻¹],
-- Setting $b = a$, this becomes:
have h3 : ∀ a : G, ∃! x : G, a * x = a, from by auto [h1],
have h4 : ∀ a : G, ∃! y : G, y * a = a, from by auto [h2],
-- These $x$ and $y$ are both $(1 : G)$, by definition of identity element
have h5 : ∀ a : G, classical.some (h3 a).exists = (1 : G), from by auto [exists_unique.unique, h3, classical.some_spec, exists_unique.exists, mul_one],
have h6 : ∀ a : G, classical.some (h4 a).exists = (1 : G), from by auto [exists_unique.unique, h4, classical.some_spec, exists_unique.exists, one_mul],
show ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a, from by auto [h3, h4, exists_unique.unique, classical.some_spec, exists_unique.exists] using [use (1 : G)],
end
/--`theorem`
Schur's Inequality Short Simpler
Let $x, y, z \in \R_{\ge 0}$ be positive real numbers such that $x \ge y \ge z \ge 0$.
Let $t \in \R, t > 0$ be a (strictly) positive real number.
Then:
:$x^t \paren {x - y} \paren {x - z} + y^t \paren {y - z} \paren {y - x} + z^t \paren {z - x} \paren {z - y} \ge 0$
`proof`
Consider the expression:
:$\paren {x - y} \paren {x^t \paren {x - z} - y^t \paren {y - z}} + z^t \paren {x - z} \paren {y - z}$
We see that every term in the above is non-negative. So, directly:
:$(1): \quad \paren {x - y} \paren {x^t \paren {x - z} - y^t \paren {y - z}} + z^t \paren {x - z} \paren {y - z} \ge 0$
$(1)$ can be rearranged to Schur's inequality.
{{qed}}
-/
theorem
FEW SHOT PROMPTS TO CODEX(END)-/
--OUTPUT 1
theorem schur_inequality_short_simpler (x y z : ℝ) (h1 : x ≥ y) (h2 : y ≥ z) (h3 : z ≥ 0) (t : ℝ) (h4 : t > 0) : x^t * (x - y) * (x - z) + y^t * (y - z) * (y - x) + z^t * (z - x) * (z - y) ≥ 0 :=
begin
-- Consider the expression:
have h5 : (x - y) * (x^t * (x - z) - y^t * (y - z)) + z^t * (x - z) * (y - z) ≥ 0,
{
-- We see that every term in the above is non-negative. So, directly:
show (x - y) * (x^t * (x - z) - y^t * (y - z)) + z^t * (x - z) * (y - z) ≥ 0, from by auto [h1, h2, h3, h4, pow_pos, mul_nonneg, sub_nonneg, sub_nonneg_of_le, mul_nonneg, mul_nonneg, add_nonneg],
},
-- $(1) can be rearranged to Schur's inequality.
show x^t * (x - y) * (x - z) + y^t * (y - z) * (y - x) + z^t * (z - x) * (z - y) ≥ 0, from by auto [h1, h2, h3, h4, pow_pos, mul_nonneg, sub_nonneg, sub_nonneg_of_le, mul_nonneg, mul_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, 
add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg, add_nonneg,
end --Needs more than 2000 tokens!
|
module RunElab0
import Language.Reflection
%language ElabReflection
0 elabScript : Elab Unit
elabScript = pure ()
x : Unit
x = %runElab elabScript
%runElab elabScript
main : IO Unit
main = putStrLn $ show x
|
import main
import split_cycle
import algebra.linear_ordered_comm_group_with_zero
open_locale classical
variables {V X : Type}
def simple_lift : Prof V X → Prof V X → X → Prop :=
λ P' P x, (∀ (a ≠ x) (b ≠ x) i, P i a b ↔ P' i a b)
∧ ∀ a i, ((P i x a → P' i x a) ∧ (P' i a x → P i a x))
def monotonicity (F : VSCC) (P P' : Prof V X) : Prop :=
∀ (x ∈ F V X P), simple_lift P' P x → x ∈ F V X P'
lemma cardinality_lemma [fintype V] (p q : V → Prop) : (∀ v, (p v → q v)) → ((finset.filter p finset.univ).card ≤ (finset.filter q finset.univ).card) :=
begin
intro pq,
have subset : (finset.filter p finset.univ) ⊆ (finset.filter q finset.univ),
refine finset.subset_iff.mpr _,
simp,
exact pq,
exact (finset.card_le_of_subset subset),
end
lemma cardinality_lemma2 [fintype V] (p q : V → Prop) : (∀ v, (p v ↔ q v)) → ((finset.filter p finset.univ).card = (finset.filter q finset.univ).card) :=
begin
intro pq,
congr,
ext1, simp at *, fsplit, work_on_goal 0 { intros ᾰ }, work_on_goal 1 { intros ᾰ },
specialize pq x,
exact pq.mp ᾰ,
specialize pq x,
exact pq.mpr ᾰ,
end
lemma margin_lemma (P P' : Prof V X) [fintype V] (a b : X) : (a ≠ b) → (∀ (v : V), (P v a b → P' v a b) ∧ (P' v b a → P v b a)) → margin P a b ≤ margin P' a b :=
begin
intro h,
intro lift,
unfold margin,
have first : ((finset.filter (λ (x_1 : V), P x_1 a b) finset.univ).card) ≤ ((finset.filter (λ (x_1 : V), P' x_1 a b) finset.univ).card),
have first_pq : ∀ v, (λ (x_1 : V), P x_1 a b) v → (λ (x_1 : V), P' x_1 a b) v,
simp,
intro v, specialize lift v,
cases lift with lift1 lift2,
exact lift1,
exact cardinality_lemma (λ (x_1 : V), P x_1 a b) (λ (x_1 : V), P' x_1 a b) first_pq,
have second : ((finset.filter (λ (x_1 : V), P' x_1 b a) finset.univ).card) ≤ ((finset.filter (λ (x_1 : V), P x_1 b a) finset.univ).card),
have second_pq : ∀ v, (λ (x_1 : V), P' x_1 b a) v → (λ (x_1 : V), P x_1 b a) v,
simp,
intro v,
contrapose,
intro npyx,
specialize lift v,
cases lift with lift1 lift2,
contrapose npyx,
push_neg, push_neg at npyx,
exact lift2 npyx,
exact cardinality_lemma (λ (x_1 : V), P' x_1 b a) (λ (x_1 : V), P x_1 b a) second_pq,
mono,
simp,
exact first,
simp,
exact second,
end
-- if the elements are the same between the two Profs, the a = b case is still true.
lemma margin_lemma' (P P' : Prof V X) [fintype V] [profile_asymmetric P'] (a b : X) : (∀ (v : V), (P v a b → P' v a b) ∧ (P' v b a → P v b a)) → margin P a b ≤ margin P' a b :=
begin
intro pq,
by_cases a = b,
rw h,
rw self_margin_zero, rw self_margin_zero,
exact margin_lemma P P' a b h pq,
end
lemma margin_lt_margin_of_lift (P P' : Prof V X) [fintype V] (y x : X) : simple_lift P' P x → margin P' y x ≤ margin P y x :=
begin
intro lift,
unfold simple_lift at lift,
cases lift with lift1 lift2,
specialize lift2 y,
by_cases x = y,
rw h, rw self_margin_zero, rw self_margin_zero,
have test := margin_lemma P P' x y h lift2,
rw margin_antisymmetric,
rw margin_antisymmetric P,
simp,
exact test,
end
theorem split_cycle_monotonicity [fintype V] (P P' : Prof V X) [profile_asymmetric P'] : monotonicity split_cycle P P' :=
begin
unfold monotonicity, intros x x_won lift,
unfold split_cycle, unfold max_el_VSCC,
rw split_cycle_definitions,
simp,
unfold split_cycle_VCCR',
intro y,
push_neg,
use _inst_1,
intro m, /- "so suppose margin P'(y,x) > 0." -/
unfold split_cycle at x_won,
unfold max_el_VSCC at x_won,
rw split_cycle_definitions at x_won,
simp at x_won,
unfold split_cycle_VCCR' at x_won,
simp at x_won, specialize x_won y, -- now we must show that there is a chain from x to y of margin greater than margin y x
unfold margin_pos at m,
have m' := margin_lt_margin_of_lift P P' y x lift,
cases x_won with _ x_won,
unfold margin_pos at x_won,
rw margin_eq_margin x_won_w _inst_1 at x_won,
specialize x_won (lt_of_lt_of_le m m'),
cases x_won with l x_won,
use l,
cases x_won with nodup x_won,
cases x_won with nonempty x_won,
use nonempty,
use nodup,
cases x_won with x_nth x_won,
use x_nth,
cases x_won with y_nth x_won,
use y_nth, -- reduced to just the chain. All the properties of the chain have been proven.
rw list.chain'_iff_nth_le at x_won,
rw list.chain'_iff_nth_le,
intro i, intro i_bound, -- for any index i
specialize x_won i,
specialize x_won i_bound,
have test := le_trans m' x_won,
apply le_trans test, -- by the transitive property it is sufficient to show that the margins exclusively increased due to the lift.
-- problem: if the cycle is not simple and contains x, it may not be a cycle in P'.
by_cases i = 0,
have eq : (l.nth_le i (nat.lt_of_lt_pred i_bound)) = (l.nth_le 0 (list.length_pos_of_ne_nil nonempty)),
congr, exact h,
rw eq, rw x_nth,
cases lift with lift1 lift2,
rw margin_eq_margin x_won_w _inst_1,
exact margin_lemma' P P' x (l.nth_le (i + 1) (nat.lt_pred_iff.mp i_bound)) (lift2 (l.nth_le (i + 1) (nat.lt_pred_iff.mp i_bound))),
-- we use the second property of lifts to show this since the first element is x.
have neq : (l.nth_le i (nat.lt_of_lt_pred i_bound)) ≠ x,
change l.pairwise ne at nodup,
rw ←x_nth,
exact ne.symm (list.pairwise_iff_nth_le.mp nodup 0 i (nat.lt_of_lt_pred i_bound) (zero_lt_iff.mpr h)),
have neq2 : (l.nth_le (i + 1) (nat.lt_pred_iff.mp i_bound)) ≠ x,
change l.pairwise ne at nodup,
rw ←x_nth,
exact ne.symm (list.pairwise_iff_nth_le.mp nodup 0 (i + 1) (nat.lt_pred_iff.mp i_bound) (nat.zero_lt_succ i)),
have pq : ∀ (v : V),
(P v (l.nth_le i (nat.lt_of_lt_pred i_bound)) (l.nth_le (i + 1) (nat.lt_pred_iff.mp i_bound)) → P' v (l.nth_le i (nat.lt_of_lt_pred i_bound)) (l.nth_le (i + 1) (nat.lt_pred_iff.mp i_bound))) ∧
(P' v (l.nth_le (i + 1) (nat.lt_pred_iff.mp i_bound)) (l.nth_le i (nat.lt_of_lt_pred i_bound)) → P v (l.nth_le (i + 1) (nat.lt_pred_iff.mp i_bound)) (l.nth_le i (nat.lt_of_lt_pred i_bound))),
intro v,
cases lift with lift1 lift2,
split,
exact (lift1 (l.nth_le i (nat.lt_of_lt_pred i_bound)) neq (l.nth_le (i + 1) (nat.lt_pred_iff.mp i_bound)) neq2 v).mp,
exact (lift1 (l.nth_le (i + 1) (nat.lt_pred_iff.mp i_bound)) neq2 (l.nth_le i (nat.lt_of_lt_pred i_bound)) neq v).mpr,
rw margin_eq_margin x_won_w _inst_1,
exact margin_lemma' P P' (l.nth_le i (nat.lt_of_lt_pred i_bound)) (l.nth_le (i + 1) (nat.lt_pred_iff.mp i_bound)) pq,
end |
The University Symphony Orchestra, or the UCDSO, is conducted by Christian Baldini (it was formerly conducted for many years by Dr. D. Kern Holoman). It consists of undergraduate students, graduate students, and members of the community. They rehearse and perform concerts in the Mondavi Center on campus.
|
module Data.Iso
public export
record Iso (a, b : Type) where
constructor MkIso
to : a -> b
from : b -> a
toFrom : (x : b) -> to (from x) === x
fromTo : (x : a) -> from (to x) === x
-- TODO: transitive and reflexive for ISO
|
-- Tests to check that casting between integer types works as expected
--
-- The tests in `idris2/basic043`, `chez/chez028` and `node/node022` are the
-- same and should all have the same output.
--
-- Widening should not have any effect
--
bits8WideningNoEffect : List String
bits8WideningNoEffect = [
show $ cast {from = Bits8} {to = Bits16} 123,
show $ cast {from = Bits8} {to = Bits32} 123,
show $ cast {from = Bits8} {to = Bits64} 123,
show $ cast {from = Bits8} {to = Int} 123,
show $ cast {from = Bits8} {to = Integer} 123
]
bits16WideningNoEffect : List String
bits16WideningNoEffect = [
show $ cast {from = Bits16} {to = Bits32} 1234,
show $ cast {from = Bits16} {to = Bits64} 1234,
show $ cast {from = Bits16} {to = Int} 1234,
show $ cast {from = Bits16} {to = Integer} 1234
]
bits32WideningNoEffect : List String
bits32WideningNoEffect = [
show $ cast {from = Bits32} {to = Bits64} 1234567,
show $ cast {from = Bits32} {to = Int} 1234567,
show $ cast {from = Bits32} {to = Integer} 1234567
]
--
-- Narrowing
--
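-- Narrowing casts are expected to wrap around modulo the width of the target
-- type: e.g. `cast {from = Integer} {to = Bits8} (b8max + 134)` should print
-- 134, since b8max + 134 = 390 and 390 mod 256 = 134.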
b8max : Integer
b8max = 0x100
b16max : Integer
b16max = 0x10000
b32max : Integer
b32max = 0x100000000
b64max : Integer
b64max = 18446744073709551616 -- 0x10000000000000000
narrowFromInteger : List String
narrowFromInteger = [
show $ cast {from = Integer} {to = Bits8} (b8max + 134),
show $ cast {from = Integer} {to = Bits16} (b16max + 134),
show $ cast {from = Integer} {to = Bits32} (b32max + 134),
show $ cast {from = Integer} {to = Bits64} (b64max + 134)
]
narrowFromInt : List String
narrowFromInt = [
show $ cast {from = Int} {to = Bits8} (cast (b8max + 134)),
show $ cast {from = Int} {to = Bits16} (cast (b16max + 134)),
show $ cast {from = Int} {to = Bits32} (cast (b32max + 134)),
show $ cast {from = Int} {to = Bits64} (cast (b64max + 134))
]
narrowFromBits64 : List String
narrowFromBits64 = [
show $ cast {from = Bits64} {to = Bits8} (cast (b8max + 134)),
show $ cast {from = Bits64} {to = Bits16} (cast (b16max + 134)),
show $ cast {from = Bits64} {to = Bits32} (cast (b32max + 134))
]
narrowFromBits32 : List String
narrowFromBits32 = [
show $ cast {from = Bits32} {to = Bits8} (cast (b8max + 134)),
show $ cast {from = Bits32} {to = Bits16} (cast (b16max + 134))
]
narrowFromBits16 : List String
narrowFromBits16 = [
show $ cast {from = Bits16} {to = Bits8} (cast (b8max + 134))
]
--
-- Negative numbers
--
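-- Negative values are likewise expected to wrap: e.g. casting -19 to Bits8
-- should print 237 (i.e. 256 - 19), and to Bits16 should print 65517.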
negativeNumberCast : List String
negativeNumberCast = [
show $ cast {to = Bits8} (-19),
show $ cast {to = Bits16} (-19),
show $ cast {to = Bits32} (-19),
show $ cast {to = Bits64} (-19)
]
|
Formal statement is: lemma measure_of_of_measure: "measure_of (space M) (sets M) (emeasure M) = M" Informal statement is: Reconstructing a measure space from its underlying space, its measurable sets, and its measure gives back the original measure space. |
# Binomial logistic regression with mediation simulation code
# NOTE: The assumption is that the mediator is a continuous variable
# Raymond Viviano
# [email protected]
# March 25, 2020
# TODO: Implement checks to make sure that all models converged
#' Inverse Logit Function
inv.logit <- function(p){
return(exp(p)/(1+exp(p)))
}
#' Sobel indirect effect standard error approximation
sobel.std.err <- function(a, b, se.a, se.b){
# Sobel first-order approximation: sqrt(b^2*Var(a) + a^2*Var(b))
return(sqrt((b^2 * se.a^2) + (a^2 * se.b^2)))
}
#' Coverage Probability, i.e., the probability that the confidence interval (ci)
#' for a parameter will contain the true value
#' Inputs:
#' b - Estimate
#' se - Standard Error of the Estimate
#' true - True value of parameter for data-generating process
#' cl - Confidence level
#' dof - Degrees of Freedom
#' Output:
#' List containing the coverage probability and the Monte Carlo error interval
coverage.prob <- function(b, se, true, cl=.95, dof=Inf){
# Compute quantile based on confidence level
coverage.quantile <- cl + (1-cl)/2
# Compute confidence interval upper and lower bounds
ci.lower <- b - qt(coverage.quantile, df=dof)*se
ci.upper <- b + qt(coverage.quantile, df=dof)*se
# For each ci generated for each b/se pair, eval if true param is in ci
ci.contain.true <- ifelse(true>=ci.lower & true<=ci.upper, 1, 0)
# Calculate coverage probability
cp <- mean(ci.contain.true)
# Calculate Monte Carlo Error
mc.err.lower <- cp - 1.96*sqrt((cp*(1-cp))/length(b))
mc.err.upper <- cp + 1.96*sqrt((cp*(1-cp))/length(b))
# Return coverage probability and error
return(list(cp=cp, ci=cbind(mc.err.lower, mc.err.upper)))
}
#' Power to detect any effect at various alpha levels
#' Takes a vector of p-values and calculates the proportion of models where the
#' p-val for the effect of interest was < .05, .01, and .001.
power.detect.effect <- function(p.val.vec){
less.than.05 <- ifelse(p.val.vec < .05, 1, 0)
prop.05 <- sum(less.than.05)/length(less.than.05)
less.than.01 <- ifelse(p.val.vec < .01, 1, 0)
prop.01 <- sum(less.than.01)/length(less.than.01)
less.than.001 <- ifelse(p.val.vec < .001, 1, 0)
prop.001 <- sum(less.than.001)/length(less.than.001)
cat(paste0('Power to detect effect at .05: ', prop.05, '\n'))
cat(paste0('Power to detect effect at .01: ', prop.01, '\n'))
cat(paste0('Power to detect effect at .001: ', prop.001, '\n'))
}
# Some equations to keep in mind for logistic regression with mediation:
#
# EQ1. Y = B0_1 + Tau*X + e1
# EQ2. Y = B0_2 + Tau_Prime*X + Beta*M + e2
# EQ3. M = B0_3 + alpha*X + e3
# EQ4. Tau = alpha*beta + Tau_Prime || (c = ab + c') (approx for logistic)
#
# Tau = total effect, tau_prime = direct effect of X on Y accounting for
# mediating relationship, alpha = effect of X on M, beta = effect of M on Y
# B0_1,2,&3 are intercept terms for their respective equations.
#
# Assuming zero error, the mean, or expected value, for the intercept term for
# EQ1 can be defined by the other variables.
#
# EQ5. E[B0_1] = E[B0_2] + Tau_Prime*E[X] + Beta*(E[B0_3] + Alpha*E[X]) - Tau*E[X]
#
# If we force the mean of X to 1, then we get equation 6:
#
# EQ6. E[B0_1] = E[B0_2] + Tau_Prime + Beta*E[B0_3] + Alpha*Beta - Tau
#
# As Tau_Prime - Tau = -1 * Alpha*Beta, we get equation 7:
#
# EQ7. E[B0_1] = E[B0_2] + Beta*E[B0_3]
#
# We also get equation 7 if we force the mean of x to 0...and the derivation is easier...
#
# Define B0_1, B0_2, B0_3, alpha, beta, tau, and tau_prime for the data
# generation process.
#
# Use Eq3 to generate M, then use Eq2 to generate Y
# Test that you recover the expected values for alpha, beta, tau, tau_prime,
# B0_1, B02, and B03
# Specify indirect and direct effect terms
alpha = .2
beta = .2
tau_prime = .1
# Specify total effect (note that for logit this is actually approximate)
tau = alpha*beta + tau_prime
# Intercept terms for EQ2 and EQ3 (Means or expected values)
B0_2 <- .2
B0_3 <- .4
# Calculate intercept term for EQ1 based on the other specified values
B0_1 <- B0_2 + beta*B0_3
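# With the values above this gives B0_1 = .2 + .2*.4 = 0.28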
# Ensure reproducible results
set.seed(10000)
# Define sample size
n <- 16000
# Define number of simulations
nsims <- 50
# Define matrix to hold simulation data (estimates, std.errors, and pvals)
sim.prms <- matrix(NA, nrow=nsims, ncol=20)
# Loop through simulations
for(i in 1:nsims){
# Generate independent variable data, uniform random between -1 and 1
# The expected value, or expected mean of this vector should be 0
x <- runif(n, -1, 1)
# Generate data for the mediating variable, the mean or expected value
# should equal B0_3. But, we'll add some noise
m <- alpha*x + B0_3
# Add noise
m <- m + rnorm(length(m), 0, .2)
# True data-generation-process for Bernoulli trials
y <- rbinom(n, 1, inv.logit(B0_2 + tau_prime*x + beta*m))
# Estimate logit model
model1 <- glm(y ~ x, family=binomial(link=logit))
model2 <- glm(y ~ x + m, family=binomial(link=logit))
model3 <- lm(m ~ x)
# TODO: Implement checks to make sure that all models converged
# Put model paramters in simulation matrix
sim.prms[i, 1] <- model1$coef[1] # Estimate for B0_1
sim.prms[i, 2] <- model2$coef[1] # Estimate for B0_2
sim.prms[i, 3] <- model3$coef[1] # Estimate for B0_3
sim.prms[i, 4] <- model3$coef[2] # Estimate for Alpha
sim.prms[i, 5] <- model2$coef[3] # Estimate for Beta
sim.prms[i, 6] <- sim.prms[i, 4] * sim.prms[i, 5] # Estimate for Indirect
sim.prms[i, 7] <- model1$coef[2] # Estimate for Tau
sim.prms[i, 8] <- model2$coef[2] # Estimate for Tau_Prime
sim.prms[i, 9] <- summary(model3)$coefficients[,2][2] # Alpha Std.Err
sim.prms[i,10] <- summary(model2)$coefficients[,2][3] # Beta Std.Err
sim.prms[i,11] <- sobel.std.err(sim.prms[i,4], sim.prms[i,5],
sim.prms[i,9], sim.prms[i,10]) # Indirect Std.Err
sim.prms[i,12] <- summary(model1)$coefficients[,2][2] # Tau Std.Err
sim.prms[i,13] <- summary(model2)$coefficients[,2][2] # Tau_Prime Std.Err
sim.prms[i,14] <- summary(model1)$coefficients[,4][1] # P-Value for B0_1
sim.prms[i,15] <- summary(model2)$coefficients[,4][1] # P-Value for B0_2
sim.prms[i,16] <- summary(model3)$coefficients[,4][1] # P-Value for B0_3
sim.prms[i,17] <- summary(model3)$coefficients[,4][2] # P-Value for Alpha
sim.prms[i,18] <- summary(model2)$coefficients[,4][3] # P-Value for Beta
sim.prms[i,19] <- summary(model1)$coefficients[,4][2] # P-Value for Tau
sim.prms[i,20] <- summary(model2)$coefficients[,4][2] # P-Value for Tau'
}
# Note: The assumption for this simulation is that the mediator is continuous
cat("Note: This simulation assumes that the mediator is continous...\n\n")
# Get Covarage probabilities for the parameters
# B0_1.coverage <- coverage.prob(sim.prms[,1], sim.prms[,], B0_1, .95, n-model1$rank) TODO: Add these std.errs to sim.prms
# B0_2.coverage <- coverage.prob(sim.prms[,2], sim.prms[,], B0_2, .95, n-model2$rank) TODO: Add these std.errs to sim.prms
# B0_3.coverage <- coverage.prob(sim.prms[,3], sim.prms[,], B0_3, .95, n-model3$rank) TODO: Add these std.errs to sim.prms
alpha.coverage <- coverage.prob(sim.prms[,4], sim.prms[, 9], alpha, .95, n-model3$rank)
beta.coverage <- coverage.prob(sim.prms[,5], sim.prms[,10], beta, .95, n-model2$rank)
indirect.coverage <- coverage.prob(sim.prms[,6], sim.prms[,11], alpha*beta, .95, n-model2$rank) # TODO: Figure out confidence interval for sobel std.err
tau.coverage <- coverage.prob(sim.prms[,7], sim.prms[,12], tau, .95, n-model3$rank)
tau.prime.coverage <- coverage.prob(sim.prms[,8], sim.prms[,13], tau_prime, .95, n-model2$rank)
# # Intercept coverage probabilities -- TODO
# print("Coverage Probability for B0_1")
# print(B0_1.coverage)
# print("Coverage Probability for B0_2")
# print(B0_2.coverage)
# print("Coverage Probability for B0_3")
# print(B0_3.coverage)
print("alpha.coverage")
print(alpha.coverage)
print("beta.coverage")
print(beta.coverage)
print("indirect.coverage")
print(indirect.coverage)
print("tau.coverage")
print(tau.coverage)
print("tau.prime.coverage")
print(tau.prime.coverage)
# TODO: Update/Fix Proportion Code
# power.detect.effect(sim.prms[,5])
# power.detect.effect(sim.prms[,6]) |
using Test
using StatsFuns
using Base.Iterators: take
using Random
using LinearAlgebra
using DynamicIterators: trace, TimeLift
using TransformVariables: transform, as𝕀, inverse
using MeasureTheory
using MeasureTheory: Const
using Aqua
Aqua.test_all(MeasureTheory; ambiguities=false, unbound_args=false)
function draw2(μ)
x = rand(μ)
y = rand(μ)
while x == y
y = rand(μ)
end
return (x,y)
end
@testset "Parameterized Measures" begin
@testset "Binomial" begin
D = Binomial{(:n, :p)}
par = merge((n=20,),transform(asparams(D, (n=20,)), randn(1)))
d = D(par)
(n,p) = (par.n, par.p)
logitp = logit(p)
probitp = norminvcdf(p)
y = rand(d)
ℓ = logdensity(Binomial(;n, p), y)
@test ℓ ≈ logdensity(Binomial(;n, logitp), y)
@test ℓ ≈ logdensity(Binomial(;n, probitp), y)
@test_broken logdensity(Binomial(n,p), CountingMeasure(ℤ[0:n]), x) ≈ binomlogpdf(n,p,x)
end
@testset "NegativeBinomial" begin
D = NegativeBinomial{(:r, :p)}
par = transform(asparams(D), randn(2))
d = D(par)
(r,p) = (par.r, par.p)
logitp = logit(p)
λ = p * r / (1 - p)
logλ = log(λ)
y = rand(d)
ℓ = logdensity(NegativeBinomial(;r, p), y)
@test ℓ ≈ logdensity(NegativeBinomial(;r, logitp), y)
@test ℓ ≈ logdensity(NegativeBinomial(;r, λ), y)
@test ℓ ≈ logdensity(NegativeBinomial(;r, logλ), y)
sample1 = rand(MersenneTwister(123), NegativeBinomial(;r, λ))
sample2 = rand(MersenneTwister(123), NegativeBinomial(;r, logλ))
@test sample1 == sample2
@test_broken logdensity(Binomial(n,p), CountingMeasure(ℤ[0:n]), x) ≈ binomlogpdf(n,p,x)
end
@testset "Poisson" begin
sample1 = rand(MersenneTwister(123), Poisson(;logλ = log(100)))
sample2 = rand(MersenneTwister(123), Poisson(;λ = 100))
@test sample1 == sample2
end
@testset "Normal" begin
D = Normal{(:μ,:σ)}
par = transform(asparams(D), randn(2))
d = D(par)
@test params(d) == par
μ = par.μ
σ = par.σ
σ² = σ^2
τ = 1/σ²
logσ = log(σ)
y = rand(d)
ℓ = logdensity(Normal(;μ,σ), y)
@test ℓ ≈ logdensity(Normal(;μ,σ²), y)
@test ℓ ≈ logdensity(Normal(;μ,τ), y)
@test ℓ ≈ logdensity(Normal(;μ,logσ), y)
end
@testset "LKJCholesky" begin
D = LKJCholesky{(:k,:η)}
par = transform(asparams(D, (k=4,)), randn(1))
d = D(merge((k=4,),par))
# @test params(d) == par
η = par.η
logη = log(η)
y = rand(d)
η = par.η
ℓ = logdensity(LKJCholesky(4,η), y)
@test ℓ ≈ logdensity(LKJCholesky(k=4,logη=logη), y)
end
end
@testset "Kernel" begin
κ = MeasureTheory.kernel(MeasureTheory.Dirac, identity)
@test rand(κ(1.1)) == 1.1
end
@testset "SpikeMixture" begin
@test rand(SpikeMixture(Dirac(0), 0.5)) == 0
@test rand(SpikeMixture(Dirac(1), 1.0)) == 1
w = 1/3
m = SpikeMixture(Normal(), w)
bm = basemeasure(m)
@test (bm.s*bm.w)*bm.m == 1.0*basemeasure(Normal())
@test density(m, 1.0)*(bm.s*bm.w) == w*density(Normal(),1.0)
@test density(m, 0)*(bm.s*(1-bm.w)) ≈ (1-w)
end
@testset "Dirac" begin
@test rand(Dirac(0.2)) == 0.2
@test logdensity(Dirac(0.3), 0.3) == 0.0
@test logdensity(Dirac(0.3), 0.4) == -Inf
end
@testset "For" begin
FORDISTS = [
For(1:10) do j Normal(μ=j) end
For(4,3) do μ,σ Normal(μ,σ) end
For(1:4, 1:4) do μ,σ Normal(μ,σ) end
For(eachrow(rand(4,2))) do x Normal(x[1], x[2]) end
For(rand(4), rand(4)) do μ,σ Normal(μ,σ) end
]
for d in FORDISTS
@test logdensity(d, rand(d)) isa Float64
end
end
import MeasureTheory.:⋅
function ⋅(μ::Normal, kernel)
m = kernel(μ)
Normal(μ = m.μ.μ, σ = sqrt(m.μ.σ^2 + m.σ^2))
end
struct AffineMap{S,T}
B::S
β::T
end
(a::AffineMap)(x) = a.B*x + a.β
(a::AffineMap)(p::Normal) = Normal(μ = a.B*mean(p) + a.β, σ = sqrt(a.B*p.σ^2*a.B'))
@testset "DynamicFor" begin
mc = Chain(Normal(μ=0.0)) do x Normal(μ=x) end
r = rand(mc)
# Check that `r` is now deterministic
@test logdensity(mc, take(r, 100)) == logdensity(mc, take(r, 100))
d2 = For(r) do x Normal(μ=x) end
@test_broken let r2 = rand(d2)
logdensity(d2, take(r2, 100)) == logdensity(d2, take(r2, 100))
end
end
@testset "Univariate chain" begin
ξ0 = 1.
x = 1.2
P0 = 1.0
Φ = 0.8
β = 0.1
Q = 0.2
μ = Normal(μ=ξ0, σ=sqrt(P0))
kernel = MeasureTheory.kernel(Normal; μ=AffineMap(Φ, β), σ=Const(Q))
@test (μ ⋅ kernel).μ == Normal(μ = 0.9, σ = 0.824621).μ
chain = Chain(kernel, μ)
dyniterate(iter::TimeLift, ::Nothing) = dyniterate(iter, 0=>nothing)
tr1 = trace(TimeLift(chain), nothing, u -> u[1] > 15)
tr2 = trace(TimeLift(rand(Random.GLOBAL_RNG, chain)), nothing, u -> u[1] > 15)
collect(Iterators.take(chain, 10))
collect(Iterators.take(rand(Random.GLOBAL_RNG, chain), 10))
end
@testset "Transforms" begin
t = as𝕀
@testset "Pushforward" begin
μ = Normal()
ν = Pushforward(t, μ)
x = rand(μ)
@test logdensity(μ, x) ≈ logdensity(Pushforward(inverse(t), ν), x)
end
@testset "Pullback" begin
ν = Uniform()
μ = Pullback(t,ν)
y = rand(ν)
@test logdensity(ν, y) ≈ logdensity(Pullback(inverse(t), μ), y)
end
end
using TransformVariables
@testset "Likelihood" begin
dps = [
(Normal() , 2.0 )
# (Pushforward(as((μ=asℝ,)), Normal()^1), (μ=2.0,))
]
ℓs = [
Likelihood(Normal{(:μ,)}, 3.0)
Likelihood(kernel(Normal, x -> (μ=x, σ=2.0)), 3.0)
]
for (d,p) in dps
for ℓ in ℓs
@test logdensity(d ⊙ ℓ, p) == logdensity(d, p) + logdensity(ℓ, p)
end
end
end
@testset "Reproducibility" begin
function repro(D, args, nt=NamedTuple())
t = asparams(D{args}, nt)
d = D(transform(t, randn(t.dimension)))
r(d) = rand(Random.MersenneTwister(1), d)
logdensity(d, r(d)) == logdensity(d, r(d))
end
@testset "Bernoulli" begin
@test repro(Bernoulli, (:p,))
end
@testset "Binomial" begin
@test repro(Binomial, (:n,:p), (n=10,))
end
@testset "Beta" begin
@test repro(Beta, (:α,:β))
end
@testset "Cauchy" begin
@test repro(Cauchy, (:μ,:σ))
end
@testset "Dirichlet" begin
@test_broken repro(Dirichlet, (:p,))
end
@testset "Exponential" begin
@test repro(Exponential, (:λ,))
end
@testset "Gumbel" begin
@test repro(Gumbel, (:μ,:σ))
end
@testset "InverseGamma" begin
@test_broken repro(InverseGamma, (:p,))
end
@testset "Laplace" begin
@test repro(Laplace, (:μ,:σ))
end
@testset "LKJCholesky" begin
@test repro(LKJCholesky, (:k,:η,), (k=3,))
end
@testset "Multinomial" begin
@test_broken repro(Multinomial, (:n,:p,))
end
@testset "MvNormal" begin
@test_broken repro(MvNormal, (:μ,))
end
@testset "NegativeBinomial" begin
@test repro(NegativeBinomial, (:r, :p))
end
@testset "Normal" begin
@test repro(Normal, (:μ,:σ))
end
@testset "Poisson" begin
@test repro(Poisson, (:λ,))
end
@testset "StudentT" begin
@test repro(StudentT, (:ν, :μ))
end
@testset "Uniform" begin
@test repro(Uniform, ())
end
end
|
$\newcommand{\mb}[1]{\mathbf{ #1 }}$
$\newcommand{\bs}[1]{\boldsymbol{ #1 }}$
$\newcommand{\bb}[1]{\mathbb{ #1 }}$
$\newcommand{\R}{\bb{R}}$
$\newcommand{\ip}[2]{\left\langle #1, #2 \right\rangle}$
$\newcommand{\norm}[1]{\left\Vert #1 \right\Vert}$
$\newcommand{\der}[2]{\frac{\mathrm{d} #1 }{\mathrm{d} #2 }}$
$\newcommand{\derp}[2]{\frac{\partial #1 }{\partial #2 }}$
# Cart Pole
Consider a cart on a frictionless track. Suppose a pendulum is attached to the cart by a frictionless joint. The cart is modeled as a point mass $m_c$ and the pendulum is modeled as a massless rigid link with point mass $m_p$ a distance $l$ away from the cart.
Let $\mathcal{I} = (\mb{i}^1, \mb{i}^2, \mb{i}^3)$ denote an inertial frame. Suppose the position of the cart is resolved in the inertial frame as $\mb{r}_{co}^{\mathcal{I}} = (x, 0, 0)$. Additionally, suppose the gravitational force acting on the pendulum is resolved in the inertial frame as $\mb{f}_g^{\mathcal{I}} = (0, 0, -m_p g)$.
Let $\mathcal{B} = (\mb{b}^1, \mb{b}^2, \mb{b}^3)$ denote a body reference frame, with $\mb{b}^2 = \mb{i}^2$. The position of the pendulum mass relative to the cart is resolved in the body frame as $\mb{r}_{pc}^\mathcal{B} = (0, 0, l)$.
The kinetic energy of the system is:
\begin{equation}
\frac{1}{2} m_c \norm{\dot{\mb{r}}_{co}^\mathcal{I}}_2^2 + \frac{1}{2} m_p \norm{\dot{\mb{r}}_{po}^\mathcal{I}}_2^2
\end{equation}
First, note that $\dot{\mb{r}}_{co}^{\mathcal{I}} = (\dot{x}, 0, 0)$.
Next, note that $\mb{r}_{po}^\mathcal{I} = \mb{r}_{pc}^\mathcal{I} + \mb{r}_{co}^\mathcal{I} = \mb{C}_{\mathcal{I}\mathcal{B}}\mb{r}_{pc}^\mathcal{B} + \mb{r}_{co}^\mathcal{I}$, where $\mb{C}_{\mathcal{I}\mathcal{B}}$ is the direction cosine matrix (DCM) satisfying:
\begin{equation}
\mb{C}_{\mathcal{I}\mathcal{B}} = \begin{bmatrix} \ip{\mb{i}_1}{\mb{b}_1} & \ip{\mb{i}_1}{\mb{b}_2} & \ip{\mb{i}_1}{\mb{b}_3} \\ \ip{\mb{i}_2}{\mb{b}_1} & \ip{\mb{i}_2}{\mb{b}_2} & \ip{\mb{i}_2}{\mb{b}_3} \\ \ip{\mb{i}_3}{\mb{b}_1} & \ip{\mb{i}_3}{\mb{b}_2} & \ip{\mb{i}_3}{\mb{b}_3} \end{bmatrix}.
\end{equation}
We parameterize the DCM using $\theta$, measuring the clockwise angle of the pendulum from upright in radians. In this case, the DCM is:
\begin{equation}
\mb{C}_{\mathcal{I}\mathcal{B}} = \begin{bmatrix} \cos{\theta} & 0 & \sin{\theta} \\ 0 & 1 & 0 \\ -\sin{\theta} & 0 & \cos{\theta} \end{bmatrix},
\end{equation}
following from $\cos{\left( \frac{\pi}{2} - \theta \right)} = \sin{\theta}$. Therefore:
\begin{equation}
\mb{r}_{po}^\mathcal{I} = \begin{bmatrix} x + l\sin{\theta} \\ 0 \\ l\cos{\theta} \end{bmatrix}
\end{equation}
We have $\dot{\mb{r}}_{po}^\mathcal{I} = \dot{\mb{C}}_{\mathcal{I}\mathcal{B}} \mb{r}_{pc}^\mathcal{B} + \dot{\mb{r}}_{co}^\mathcal{I}$, following from $\dot{\mb{r}}_{pc}^\mathcal{B} = \mb{0}_3$ since the pendulum is rigid. The derivative of the DCM is:
\begin{equation}
\der{{\mb{C}}_{\mathcal{I}\mathcal{B}}}{\theta} = \begin{bmatrix} -\sin{\theta} & 0 & \cos{\theta} \\ 0 & 0 & 0 \\ -\cos{\theta} & 0 & -\sin{\theta} \end{bmatrix},
\end{equation}
finally yielding:
\begin{equation}
\dot{\mb{r}}_{po}^\mathcal{I} = \dot{\theta} \der{\mb{C}_{\mathcal{I}\mathcal{B}}}{\theta} \mb{r}^{\mathcal{B}}_{pc} + \dot{\mb{r}}_{co}^\mathcal{I} = \begin{bmatrix} l\dot{\theta}\cos{\theta} + \dot{x} \\ 0 \\ -l\dot{\theta}\sin{\theta} \end{bmatrix}
\end{equation}
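We can check the kinematics above symbolically. Here is a minimal sketch using SymPy (optional; SymPy is not used anywhere else in this notebook):
```python
from sympy import Function, Matrix, cos, sin, symbols

t, l = symbols('t l', positive=True)
x = Function('x')(t)
theta = Function('theta')(t)

# DCM from the body frame to the inertial frame, parameterized by theta
C_IB = Matrix([[cos(theta), 0, sin(theta)], [0, 1, 0], [-sin(theta), 0, cos(theta)]])
r_pc_B = Matrix([0, 0, l])       # pendulum mass relative to the cart, body frame
r_co_I = Matrix([x, 0, 0])       # cart position, inertial frame

r_po_I = C_IB * r_pc_B + r_co_I  # pendulum position, inertial frame
print(r_po_I.diff(t))            # expect (l*cos(theta)*theta' + x', 0, -l*sin(theta)*theta')
```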
Define generalized coordinates $\mb{q} = (x, \theta)$ with configuration space $\mathcal{Q} = \R \times \bb{S}^1$, where $\bb{S}^1$ denotes the $1$-sphere. The kinetic energy can then be expressed as:
\begin{align}
T(\mb{q}, \dot{\mb{q}}) &= \frac{1}{2} m_c \begin{bmatrix} \dot{x} \\ \dot{\theta} \end{bmatrix}^\top \begin{bmatrix} 1 & 0 \\ 0 & 0 \end{bmatrix} \begin{bmatrix} \dot{x} \\ \dot{\theta} \end{bmatrix} + \frac{1}{2} m_p \begin{bmatrix} \dot{x} \\ \dot{\theta} \end{bmatrix}^\top \begin{bmatrix} 1 & l \cos{\theta} \\ l \cos{\theta} & l^2 \end{bmatrix} \begin{bmatrix} \dot{x} \\ \dot{\theta} \end{bmatrix}\\
&= \frac{1}{2} \dot{\mb{q}}^\top\mb{D}(\mb{q})\dot{\mb{q}},
\end{align}
where inertia matrix function $\mb{D}: \mathcal{Q} \to \bb{S}^2_{++}$ is defined as:
\begin{equation}
\mb{D}(\mb{q}) = \begin{bmatrix} m_c + m_p & m_p l \cos{\theta} \\ m_p l \cos{\theta} & m_pl^2 \end{bmatrix}.
\end{equation}
Note that:
\begin{equation}
\derp{\mb{D}}{x} = \mb{0}_{2 \times 2},
\end{equation}
and:
\begin{equation}
\derp{\mb{D}}{\theta} = \begin{bmatrix} 0 & -m_p l \sin{\theta} \\ -m_p l \sin{\theta} & 0 \end{bmatrix},
\end{equation}
so we can express:
\begin{equation}
\derp{\mb{D}}{\mb{q}} = -m_p l \sin{\theta} (\mb{e}_1 \otimes \mb{e}_2 \otimes \mb{e}_2 + \mb{e}_2 \otimes \mb{e}_1 \otimes \mb{e}_2).
\end{equation}
The potential energy of the system is $U: \mathcal{Q} \to \R$ defined as:
\begin{equation}
U(\mb{q}) = -\ip{\mb{f}_g^\mathcal{I}}{\mb{r}^{\mathcal{I}}_{po}} = m_p g l \cos{\theta}.
\end{equation}
Define $\mb{G}: \mathcal{Q} \to \R^2$ as:
\begin{equation}
\mb{G}(\mb{q}) = \left(\derp{U}{\mb{q}}\right)^\top = \begin{bmatrix} 0 \\ -m_p g l \sin{\theta} \end{bmatrix}.
\end{equation}
Assume a force $(F, 0, 0)$ (resolved in the inertial frame) can be applied to the cart. The Euler-Lagrange equation yields:
\begin{align}
\der{}{t} \left( \derp{T}{\dot{\mb{q}}} \right)^\top - \left( \derp{T}{\mb{q}} - \derp{U}{\mb{q}} \right)^\top &= \der{}{t} \left( \mb{D}(\mb{q})\dot{\mb{q}} \right) - \frac{1}{2}\derp{\mb{D}}{\mb{q}}(\dot{\mb{q}}, \dot{\mb{q}}, \cdot) + \mb{G}(\mb{q})\\
&= \mb{D}(\mb{q})\ddot{\mb{q}} + \derp{\mb{D}}{\mb{q}}(\cdot, \dot{\mb{q}}, \dot{\mb{q}}) - \frac{1}{2}\derp{\mb{D}}{\mb{q}}(\dot{\mb{q}}, \dot{\mb{q}}, \cdot) + \mb{G}(\mb{q})\\
&= \mb{B} F,
\end{align}
with static actuation matrix:
\begin{equation}
\mb{B} = \begin{bmatrix} 1 \\ 0 \end{bmatrix}.
\end{equation}
Note that:
\begin{align}
\derp{\mb{D}}{\mb{q}}(\cdot, \dot{\mb{q}}, \dot{\mb{q}}) - \frac{1}{2}\derp{\mb{D}}{\mb{q}}(\dot{\mb{q}}, \dot{\mb{q}}, \cdot) &= -m_p l \sin{\theta} (\mb{e}_1 \dot{\theta}\dot{\theta} + \mb{e}_2\dot{x}\dot{\theta}) + \frac{1}{2} m_p l \sin{\theta} (\dot{x}\dot{\theta} \mb{e}_2 + \dot{\theta}\dot{x} \mb{e}_2)\\
&= \begin{bmatrix} -m_p l \dot{\theta}^2 \sin{\theta} \\ 0 \end{bmatrix}\\
&= \mb{C}(\mb{q}, \dot{\mb{q}})\dot{\mb{q}},
\end{align}
with Coriolis terms defined as:
\begin{equation}
\mb{C}(\mb{q}, \dot{\mb{q}}) = \begin{bmatrix} 0 & -m_p l \dot{\theta} \sin{\theta} \\ 0 & 0 \end{bmatrix}.
\end{equation}
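As a quick numerical sanity check that this $\mb{C}$ reproduces the bracketed term above, here is a minimal sketch (the numerical values are arbitrary):
```python
from numpy import array, sin

m_p, l = 0.25, 0.5
theta, x_dot, theta_dot = 0.7, 0.3, -1.2

q_dot = array([x_dot, theta_dot])
C = array([[0.0, -m_p * l * theta_dot * sin(theta)], [0.0, 0.0]])
expected = array([-m_p * l * theta_dot ** 2 * sin(theta), 0.0])

# C(q, q_dot) q_dot should match the Coriolis force vector derived above
assert abs(C @ q_dot - expected).max() < 1e-12
```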
Finally, we have:
\begin{equation}
\mb{D}(\mb{q})\ddot{\mb{q}} + \mb{C}(\mb{q}, \dot{\mb{q}})\dot{\mb{q}} + \mb{G}(\mb{q}) = \mb{B}F
\end{equation}
```python
from numpy import array, concatenate, cos, dot, reshape, sin, zeros
from core.dynamics import RoboticDynamics
class CartPole(RoboticDynamics):
def __init__(self, m_c, m_p, l, g=9.81):
RoboticDynamics.__init__(self, 2, 1)
self.params = m_c, m_p, l, g
def D(self, q):
m_c, m_p, l, _ = self.params
_, theta = q
return array([[m_c + m_p, m_p * l * cos(theta)], [m_p * l * cos(theta), m_p * (l ** 2)]])
def C(self, q, q_dot):
_, m_p, l, _ = self.params
_, theta = q
_, theta_dot = q_dot
return array([[0, -m_p * l * theta_dot * sin(theta)], [0, 0]])
def U(self, q):
_, m_p, l, g = self.params
_, theta = q
return m_p * g * l * cos(theta)
def G(self, q):
_, m_p, l, g = self.params
_, theta = q
return array([0, -m_p * g * l * sin(theta)])
def B(self, q):
return array([[1], [0]])
m_c = 0.5
m_p = 0.25
l = 0.5
cart_pole = CartPole(m_c, m_p, l)
```
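Given the model above, the forward dynamics $\ddot{\mb{q}} = \mb{D}(\mb{q})^{-1}\left(\mb{B}F - \mb{C}(\mb{q},\dot{\mb{q}})\dot{\mb{q}} - \mb{G}(\mb{q})\right)$ can be recovered directly from these methods. A minimal sketch follows; the helper `cart_pole_accel` is ours, not part of `core.dynamics`, and it only calls the `D`, `C`, `G`, and `B` methods defined above:
```python
from numpy.linalg import solve

def cart_pole_accel(q, q_dot, F):
    # Solve D(q) q_ddot = B(q) F - C(q, q_dot) q_dot - G(q) for q_ddot.
    rhs = cart_pole.B(q).flatten() * F - cart_pole.C(q, q_dot) @ q_dot - cart_pole.G(q)
    return solve(cart_pole.D(q), rhs)

# Example: pendulum at rest, tilted from upright, no applied force.
print(cart_pole_accel(array([0.0, 0.5]), array([0.0, 0.0]), 0.0))
```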
We attempt to stabilize the pendulum upright, that is, drive $\theta$ to $0$. We'll use the normal form transformation:
\begin{equation}
\bs{\Phi}(\mb{q}, \dot{\mb{q}}) = \begin{bmatrix} \bs{\eta}(\mb{q}, \dot{\mb{q}}) \\ \mb{z}(\mb{q}, \dot{\mb{q}}) \end{bmatrix} = \begin{bmatrix} \theta \\ \dot{\theta} \\ x \\ m_p l \dot{x} \cos{\theta} + m_p l^2 \dot{\theta} \end{bmatrix}.
\end{equation}
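For reference, here is a minimal sketch of $\bs{\Phi}$ as a plain function. The helper `normal_form` is ours, and the state ordering $(x, \theta, \dot{x}, \dot{\theta})$ is an assumption on our part, chosen to match the initial condition used in the simulation below:
```python
def normal_form(state, m_p=m_p, l=l):
    # Assumed state ordering: (x, theta, x_dot, theta_dot).
    x, theta, x_dot, theta_dot = state
    eta = array([theta, theta_dot])  # output coordinates
    z = array([x, m_p * l * x_dot * cos(theta) + m_p * (l ** 2) * theta_dot])  # remaining coordinates
    return concatenate([eta, z])
```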
```python
from core.dynamics import ConfigurationDynamics
class CartPoleOutput(ConfigurationDynamics):
def __init__(self, cart_pole):
ConfigurationDynamics.__init__(self, cart_pole, 1)
self.cart_pole = cart_pole
def y(self, q):
return q[1:]
def dydq(self, q):
return array([[0, 1]])
def d2ydq2(self, q):
return zeros((1, 2, 2))
output = CartPoleOutput(cart_pole)
```
```python
from numpy import identity
from core.controllers import FBLinController, LQRController
Q = 10 * identity(2)
R = identity(1)
lqr = LQRController.build(output, Q, R)
fb_lin = FBLinController(output, lqr)
```
```python
from numpy import linspace, pi
x_0 = array([0, pi / 4, 0, 0])
ts = linspace(0, 10, 1000 + 1)
xs, us = cart_pole.simulate(x_0, fb_lin, ts)
```
```python
from matplotlib.pyplot import subplots, show, tight_layout
```
```python
_, axs = subplots(2, 2, figsize=(8, 8))
ylabels = ['$x$ (m)', '$\\theta$ (rad)', '$\\dot{x}$ (m / sec)', '$\\dot{\\theta}$ (rad / sec)']
for ax, data, ylabel in zip(axs.flatten(), xs.T, ylabels):
ax.plot(ts, data, linewidth=3)
ax.set_ylabel(ylabel, fontsize=16)
ax.grid()
for ax in axs[-1]:
ax.set_xlabel('$t$ (sec)', fontsize=16)
tight_layout()
show()
```
```python
_, ax = subplots(figsize=(4, 4))
ax.plot(ts[:-1], us, linewidth=3)
ax.grid()
ax.set_xlabel('$t$ (sec)', fontsize=16)
ax.set_ylabel('$F$ (N)', fontsize=16)
show()
```
|
MODULE module_netcdf2kma_interface
use module_wave2grid_kma
! implicit none
CONTAINS
SUBROUTINE netcdf2kma_interface ( grid, config_flags )
USE module_domain
USE module_timing
USE module_driver_constants
USE module_configure
! IMPLICIT NONE
real,allocatable :: DPSE(:,:),DUE(:,:,:),DVE(:,:,:),DTE(:,:,:),DQE(:,:,:)
real,allocatable :: PSB (:,:), UB(:,:,:), VB(:,:,:), TB(:,:,:), QB(:,:,:)
real,allocatable :: PSG (:,:), UG(:,:,:), VG(:,:,:), TG(:,:,:), QG(:,:,:)
integer :: i,j,k !shcimsi
real,allocatable :: dum(:,:,:) !shcimsi
!--Input data.
TYPE(domain) , INTENT(INOUT) :: grid
TYPE (grid_config_rec_type) :: config_flags
integer :: USE_INCREMENT !shc
integer :: incre,back,ID(5),KT,IM,JM,KM !shc
integer :: IMAXE,JMAXE,IMAX,JMAX,KMAX,IDIM,JDIM,MEND1,ISST,JSST,ISNW,JSNW,MAXJZ,IVAR
integer :: JMAXHF, MNWAV, IMX
! we have to convert in equal lat/lon data
! to Gaussian latitude
!
! First the Equal lat/lon data
! set Field as per KMA order (North top South and 0 to 360 east)
NAMELIST /netcdf2kma_parm/ IMAXE,JMAXE,IMAX,JMAX,KMAX,IDIM,JDIM,MEND1,ISST,JSST,ISNW,JSNW,MAXJZ,IVAR
!
READ (111, NML = netcdf2kma_parm, ERR = 8000)
close (111)
print*,' netcdf2kma_parm namelist data read are as follows:'
print*,' IMAXE= ',IMAXE
print*,' JMAXE= ',JMAXE
print*,' MEND1= ',MEND1
print*,' ISST = ',ISST
print*,' JSST = ',JSST
print*,' MAXJZ= ',MAXJZ
print*,' IVAR = ',IVAR
JMAXHF=JMAX/2
MNWAV=MEND1*(MEND1+1)/2
IMX=IMAX+2
allocate(DPSE(imaxe,jmaxe))
allocate(DUE(imaxe,jmaxe,kmax),DVE(imaxe,jmaxe,kmax))
allocate(DTE(imaxe,jmaxe,kmax),DQE(imaxe,jmaxe,kmax))
allocate(PSB(imax,jmax))
allocate(UB(imax,jmax,kmax),VB(imax,jmax,kmax))
allocate(TB(imax,jmax,kmax),QB(imax,jmax,kmax))
allocate(PSG(imax,jmax))
allocate(UG(imax,jmax,kmax),VG(imax,jmax,kmax))
allocate(TG(imax,jmax,kmax),QG(imax,jmax,kmax))
allocate(dum(imax,jmax,kmax)) !shcimsi
!shc-wei start
! back = 102 !shc start
back = 48 !shc start
!shc-wei end
read(back) ID,KT,IM,JM,KM
read(back) !topo
read(back) PSB
read(back) !psea
read(back) TB
read(back) UB
read(back) VB
read(back) QB
read(back) !rh
read(back) !z !shc end
USE_INCREMENT=1 !shc start
if (USE_INCREMENT.eq.1) then
!shc-wei start
! incre = 101
incre = 47
!shc-wei end
read(incre) DPSE
read(incre) DUE
read(incre) DVE
read(incre) DTE
read(incre) DQE !shc end
! DPSE=20.0; DUE=3.0; DVE=3.0; DTE=5.0; DQE=0.001 !shcimsi
! imaxe=grid%ed31-grid%sd31 !shc start
! jmaxe=grid%ed32-grid%sd32
! kmaxe=grid%ed33-grid%sd33
! imaxg=imaxe; jmaxg=jmaxe-1; kmaxg=kmaxe
call reorder_for_kma(DPSE,imaxe,jmaxe,1)
call reorder_for_kma(DUE,imaxe,jmaxe,kmax)
call reorder_for_kma(DVE,imaxe,jmaxe,kmax)
call reorder_for_kma(DTE,imaxe,jmaxe,kmax)
call reorder_for_kma(DQE,imaxe,jmaxe,kmax) !shc end
DPSE=DPSE*0.01 !shchPa
call Einc_to_Ganl(DPSE,DUE,DVE,DTE,DQE,& !shc start
PSB, UB, VB, TB, QB,&
PSG, UG, VG, TG, QG,&
IMAX,JMAX,IMAXE,JMAXE,KMAX,MAXJZ)
9001 format(10e15.7) !shcimsi start
!modified by shc nk start
!modified by shc nk end
call PREGSM1(PSG,TG,UG,VG,QG,PSB,TB,UB,VB,QB,IMAXE,JMAXE,ISST,JSST,MAXJZ,IVAR, &
IMAX,JMAX,KMAX,IDIM,JDIM,MEND1,MEND1,MEND1,ISNW,JSNW,JMAXHF,MNWAV,IMX ) !shc end
else !shc
call reorder_for_kma(grid%ht(grid%sd31:grid%ed31-1,grid%sd32:grid%ed32-1),&
grid%ed31-grid%sd31 ,grid%ed32-grid%sd32,1)
call reorder_for_kma(grid%psfc(grid%sd31:grid%ed31-1,grid%sd32:grid%ed32-1),&
grid%ed31-grid%sd31 ,grid%ed32-grid%sd32,1)
call reorder_for_kma(grid%u_2(grid%sd31:grid%ed31-1,grid%sd32:grid%ed32-1,&
grid%sd33:grid%ed33-1),&
grid%ed31-grid%sd31 ,grid%ed32-grid%sd32 ,&
grid%ed33-grid%sd33)
call reorder_for_kma(grid%v_2(grid%sd31:grid%ed31-1,grid%sd32:grid%ed32-1,&
grid%sd33:grid%ed33-1),&
grid%ed31-grid%sd31 ,grid%ed32-grid%sd32 ,&
grid%ed33-grid%sd33)
call reorder_for_kma(grid%t_2(grid%sd31:grid%ed31-1,grid%sd32:grid%ed32-1,&
grid%sd33:grid%ed33-1),&
grid%ed31-grid%sd31 ,grid%ed32-grid%sd32 ,&
grid%ed33-grid%sd33)
call reorder_for_kma(grid%moist(grid%sd31:grid%ed31-1,grid%sd32:grid%ed32-1,&
grid%sd33:grid%ed33-1,P_qv:P_qv),&
grid%ed31-grid%sd31 ,grid%ed32-grid%sd32 ,&
grid%ed33-grid%sd33)
!
! convert xb-psfc pressure in hPa
grid%psfc(grid%sd31:grid%ed31-1,grid%sd32:grid%ed32-1) = 0.01 * &
grid%psfc(grid%sd31:grid%ed31-1,grid%sd32:grid%ed32-1)
write(*,*) 'shcimsi num of gird',grid%ed31,grid%ed32,grid%ed33
write(*,*) 'shcimsi grid',grid%ed31-grid%sd31,grid%ed32-grid%sd32,&
grid%ed33-grid%sd33
CALL PREGSM(grid%psfc(grid%sd31:grid%ed31-1,grid%sd32:grid%ed32-1),&
grid%t_2(grid%sd31:grid%ed31-1,grid%sd32:grid%ed32-1,grid%sd33:grid%ed33-1),&
grid%u_2(grid%sd31:grid%ed31-1,grid%sd32:grid%ed32-1,grid%sd33:grid%ed33-1),&
grid%v_2(grid%sd31:grid%ed31-1,grid%sd32:grid%ed32-1,grid%sd33:grid%ed33-1),&
grid%moist(grid%sd31:grid%ed31-1,grid%sd32:grid%ed32-1,grid%sd33:grid%ed33-1,P_qv),& !shc
PSB,TB,UB,VB,QB,IMAXE,JMAXE,ISST,JSST,MAXJZ,IVAR, &
IMAX,JMAX,KMAX,IDIM,JDIM,MEND1,MEND1,MEND1,ISNW,JSNW,JMAXHF,MNWAV,IMX) !shc
endif !shc
deallocate(DPSE,DUE,DVE,DTE,DQE)
deallocate(PSB , UB, VB, TB, QB)
deallocate(PSG , UG, VG, TG, QG, dum)
return
8000 print*,' read error on namelist unit 111'
stop
END SUBROUTINE netcdf2kma_interface
SUBROUTINE reorder_for_kma(wrf,n1,n2,n3)
!IMPLICIT none
integer, intent(in) :: n1,n2,n3
real, intent(inout) :: wrf(n1,n2,n3)
real, dimension(n1,n2,n3) :: kma
integer :: i,j,k, n1half
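! Reorder a field from WRF ordering into KMA ordering: swap the two
! halves of the first (longitude) index and reverse the second
! (latitude) index, matching the convention noted in the caller
! (north to south, 0 to 360 east).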
!
n1half = n1/2 + 0.5
do k=1,n3
do j= 1,n2
do i=1,n1
if( i <= n1half)then
kma(n1half+i,n2-j+1,k) = wrf(i,j,k)
else
kma(i-n1half,n2-j+1,k) = wrf(i,j,k)
end if
end do
end do
end do
wrf = kma
END SUBROUTINE reorder_for_kma
END MODULE module_netcdf2kma_interface
|
/-
Copyright (c) 2022 Oliver Nash. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Oliver Nash
-/
import algebra.lie.nilpotent
import algebra.lie.centralizer
/-!
# Engel's theorem
This file contains a proof of Engel's theorem providing necessary and sufficient conditions for Lie
algebras and Lie modules to be nilpotent.
The key result `lie_module.is_nilpotent_iff_forall` says that if `M` is a Lie module of a
Noetherian Lie algebra `L`, then `M` is nilpotent iff the image of `L → End(M)` consists of
nilpotent elements. In the special case that we have the adjoint representation `M = L`, this says
that a Lie algebra is nilpotent iff `ad x : End(L)` is nilpotent for all `x : L`.
Engel's theorem is true for any coefficients (i.e., it is really a theorem about Lie rings) and so
we work with coefficients in any commutative ring `R` throughout.
On the other hand, Engel's theorem is not true for infinite-dimensional Lie algebras and so a
finite-dimensionality assumption is required. We prove the theorem subject to the assumption
that the Lie algebra is Noetherian as an `R`-module, though actually we only need the slightly
weaker property that the relation `>` is well-founded on the complete lattice of Lie subalgebras.
## Remarks about the proof
Engel's theorem is usually proved in the special case that the coefficients are a field, and uses
an inductive argument on the dimension of the Lie algebra. One begins by choosing either a maximal
proper Lie subalgebra (in some proofs) or a maximal nilpotent Lie subalgebra (in other proofs, at
the cost of obtaining a weaker end result).
Since we work with general coefficients, we cannot induct on dimension and an alternate approach
must be taken. The key ingredient is the concept of nilpotency, not just for Lie algebras, but for
Lie modules. Using this concept, we define an _Engelian Lie algebra_ `lie_algebra.is_engelian` to
be one for which a Lie module is nilpotent whenever the action consists of nilpotent endomorphisms.
The argument then proceeds by selecting a maximal Engelian Lie subalgebra and showing that it cannot
be proper.
The first part of the traditional statement of Engel's theorem consists of the statement that if `M`
is a non-trivial `R`-module and `L ⊆ End(M)` is a finite-dimensional Lie subalgebra of nilpotent
elements, then there exists a non-zero element `m : M` that is annihilated by every element of `L`.
This follows trivially from the result established here `lie_module.is_nilpotent_iff_forall`, that
`M` is a nilpotent Lie module over `L`, since the last non-zero term in the lower central series
will consist of such elements `m` (see: `lie_module.nontrivial_max_triv_of_is_nilpotent`). It seems
that this result has not previously been established at this level of generality.
The second part of the traditional statement of Engel's theorem concerns nilpotency of the Lie
algebra and a proof of this for general coefficients appeared in the literature as long ago
[as 1937](zorn1937). This also follows trivially from `lie_module.is_nilpotent_iff_forall` simply by
taking `M = L`.
It is pleasing that the two parts of the traditional statements of Engel's theorem are thus unified
into a single statement about nilpotency of Lie modules. This is not usually emphasised.
## Main definitions
* `lie_algebra.is_engelian`
* `lie_algebra.is_engelian_of_is_noetherian`
* `lie_module.is_nilpotent_iff_forall`
* `lie_algebra.is_nilpotent_iff_forall`
-/
universes u₁ u₂ u₃ u₄
variables {R : Type u₁} {L : Type u₂} {L₂ : Type u₃} {M : Type u₄}
variables [comm_ring R] [lie_ring L] [lie_algebra R L] [lie_ring L₂] [lie_algebra R L₂]
variables [add_comm_group M] [module R M] [lie_ring_module L M] [lie_module R L M]
include R L
namespace lie_submodule
open lie_module
variables {I : lie_ideal R L} {x : L} (hxI : (R ∙ x) ⊔ I = ⊤)
include hxI
lemma exists_smul_add_of_span_sup_eq_top (y : L) : ∃ (t : R) (z ∈ I), y = t • x + z :=
begin
have hy : y ∈ (⊤ : submodule R L) := submodule.mem_top,
simp only [← hxI, submodule.mem_sup, submodule.mem_span_singleton] at hy,
obtain ⟨-, ⟨t, rfl⟩, z, hz, rfl⟩ := hy,
exact ⟨t, z, hz, rfl⟩,
end
lemma lie_top_eq_of_span_sup_eq_top (N : lie_submodule R L M) :
(↑⁅(⊤ : lie_ideal R L), N⁆ : submodule R M) =
(N : submodule R M).map (to_endomorphism R L M x) ⊔ (↑⁅I, N⁆ : submodule R M) :=
begin
simp only [lie_ideal_oper_eq_linear_span', submodule.sup_span, mem_top, exists_prop,
exists_true_left, submodule.map_coe, to_endomorphism_apply_apply],
refine le_antisymm (submodule.span_le.mpr _) (submodule.span_mono (λ z hz, _)),
{ rintros z ⟨y, n, hn : n ∈ N, rfl⟩,
obtain ⟨t, z, hz, rfl⟩ := exists_smul_add_of_span_sup_eq_top hxI y,
simp only [set_like.mem_coe, submodule.span_union, submodule.mem_sup],
exact ⟨t • ⁅x, n⁆, submodule.subset_span ⟨t • n, N.smul_mem' t hn, lie_smul t x n⟩,
⁅z, n⁆, submodule.subset_span ⟨z, hz, n, hn, rfl⟩, by simp⟩, },
{ rcases hz with ⟨m, hm, rfl⟩ | ⟨y, hy, m, hm, rfl⟩,
exacts [⟨x, m, hm, rfl⟩, ⟨y, m, hm, rfl⟩], },
end
lemma lcs_le_lcs_of_is_nilpotent_span_sup_eq_top {n i j : ℕ} (hxn : (to_endomorphism R L M x)^n = 0)
(hIM : lower_central_series R L M i ≤ I.lcs M j) :
lower_central_series R L M (i + n) ≤ I.lcs M (j + 1) :=
begin
suffices : ∀ l, ((⊤ : lie_ideal R L).lcs M (i + l) : submodule R M) ≤
(I.lcs M j : submodule R M).map
((to_endomorphism R L M x)^l) ⊔ (I.lcs M (j + 1) : submodule R M),
{ simpa only [bot_sup_eq, lie_ideal.incl_coe, submodule.map_zero, hxn] using this n, },
intros l,
induction l with l ih,
{ simp only [add_zero, lie_ideal.lcs_succ, pow_zero, linear_map.one_eq_id, submodule.map_id],
exact le_sup_of_le_left hIM, },
{ simp only [lie_ideal.lcs_succ, i.add_succ l, lie_top_eq_of_span_sup_eq_top hxI, sup_le_iff],
refine ⟨(submodule.map_mono ih).trans _, le_sup_of_le_right _⟩,
{ rw [submodule.map_sup, ← submodule.map_comp, ← linear_map.mul_eq_comp, ← pow_succ,
← I.lcs_succ],
exact sup_le_sup_left coe_map_to_endomorphism_le _, },
{ refine le_trans (mono_lie_right _ _ I _) (mono_lie_right _ _ I hIM),
exact antitone_lower_central_series R L M le_self_add, }, },
end
lemma is_nilpotent_of_is_nilpotent_span_sup_eq_top
(hnp : is_nilpotent $ to_endomorphism R L M x) (hIM : is_nilpotent R I M) :
is_nilpotent R L M :=
begin
obtain ⟨n, hn⟩ := hnp,
unfreezingI { obtain ⟨k, hk⟩ := hIM, },
have hk' : I.lcs M k = ⊥,
{ simp only [← coe_to_submodule_eq_iff, I.coe_lcs_eq, hk, bot_coe_submodule], },
suffices : ∀ l, lower_central_series R L M (l * n) ≤ I.lcs M l,
{ use k * n,
simpa [hk'] using this k, },
intros l,
induction l with l ih,
{ simp, },
{ exact (l.succ_mul n).symm ▸ lcs_le_lcs_of_is_nilpotent_span_sup_eq_top hxI hn ih, },
end
end lie_submodule
section lie_algebra
open lie_module (hiding is_nilpotent)
variables (R L)
/-- A Lie algebra `L` is said to be Engelian if a sufficient condition for any `L`-Lie module `M` to
be nilpotent is that the image of the map `L → End(M)` consists of nilpotent elements.
Engel's theorem `lie_algebra.is_engelian_of_is_noetherian` states that any Noetherian Lie algebra is
Engelian. -/
def lie_algebra.is_engelian : Prop :=
∀ (M : Type u₄) [add_comm_group M], by exactI ∀ [module R M] [lie_ring_module L M], by exactI ∀
[lie_module R L M], by exactI ∀ (h : ∀ (x : L), is_nilpotent (to_endomorphism R L M x)),
lie_module.is_nilpotent R L M
variables {R L}
lemma lie_algebra.is_engelian_of_subsingleton [subsingleton L] : lie_algebra.is_engelian R L :=
begin
intros M _i1 _i2 _i3 _i4 h,
use 1,
suffices : (⊤ : lie_ideal R L) = ⊥, { simp [this], },
haveI := (lie_submodule.subsingleton_iff R L L).mpr infer_instance,
apply subsingleton.elim,
end
lemma function.surjective.is_engelian
{f : L →ₗ⁅R⁆ L₂} (hf : function.surjective f) (h : lie_algebra.is_engelian.{u₁ u₂ u₄} R L) :
lie_algebra.is_engelian.{u₁ u₃ u₄} R L₂ :=
begin
introsI M _i1 _i2 _i3 _i4 h',
letI : lie_ring_module L M := lie_ring_module.comp_lie_hom M f,
letI : lie_module R L M := comp_lie_hom M f,
have hnp : ∀ x, is_nilpotent (to_endomorphism R L M x) := λ x, h' (f x),
have surj_id : function.surjective (linear_map.id : M →ₗ[R] M) := function.surjective_id,
haveI : lie_module.is_nilpotent R L M := h M hnp,
apply hf.lie_module_is_nilpotent surj_id,
simp,
end
lemma lie_equiv.is_engelian_iff (e : L ≃ₗ⁅R⁆ L₂) :
lie_algebra.is_engelian.{u₁ u₂ u₄} R L ↔ lie_algebra.is_engelian.{u₁ u₃ u₄} R L₂ :=
⟨e.surjective.is_engelian, e.symm.surjective.is_engelian⟩
lemma lie_algebra.exists_engelian_lie_subalgebra_of_lt_normalizer
{K : lie_subalgebra R L} (hK₁ : lie_algebra.is_engelian.{u₁ u₂ u₄} R K) (hK₂ : K < K.normalizer) :
∃ (K' : lie_subalgebra R L) (hK' : lie_algebra.is_engelian.{u₁ u₂ u₄} R K'), K < K' :=
begin
obtain ⟨x, hx₁, hx₂⟩ := set_like.exists_of_lt hK₂,
let K' : lie_subalgebra R L :=
{ lie_mem' := λ y z, lie_subalgebra.lie_mem_sup_of_mem_normalizer hx₁,
.. (R ∙ x) ⊔ (K : submodule R L) },
have hxK' : x ∈ K' := submodule.mem_sup_left (submodule.subset_span (set.mem_singleton _)),
have hKK' : K ≤ K' := (lie_subalgebra.coe_submodule_le_coe_submodule K K').mp le_sup_right,
have hK' : K' ≤ K.normalizer,
{ rw ← lie_subalgebra.coe_submodule_le_coe_submodule,
exact sup_le ((submodule.span_singleton_le_iff_mem _ _).mpr hx₁) hK₂.le, },
refine ⟨K', _, lt_iff_le_and_ne.mpr ⟨hKK', λ contra, hx₂ (contra.symm ▸ hxK')⟩⟩,
introsI M _i1 _i2 _i3 _i4 h,
obtain ⟨I, hI₁ : (I : lie_subalgebra R K') = lie_subalgebra.of_le hKK'⟩ :=
lie_subalgebra.exists_nested_lie_ideal_of_le_normalizer hKK' hK',
have hI₂ : (R ∙ (⟨x, hxK'⟩ : K')) ⊔ I = ⊤,
{ rw [← lie_ideal.coe_to_lie_subalgebra_to_submodule R K' I, hI₁],
apply submodule.map_injective_of_injective (K' : submodule R L).injective_subtype,
simpa, },
have e : K ≃ₗ⁅R⁆ I := (lie_subalgebra.equiv_of_le hKK').trans
(lie_equiv.of_eq _ _ ((lie_subalgebra.coe_set_eq _ _).mpr hI₁.symm)),
have hI₃ : lie_algebra.is_engelian R I := e.is_engelian_iff.mp hK₁,
exact lie_submodule.is_nilpotent_of_is_nilpotent_span_sup_eq_top hI₂ (h _) (hI₃ _ (λ x, h x)),
end
local attribute [instance] lie_subalgebra.subsingleton_bot
variables [is_noetherian R L]
/-- *Engel's theorem*.
Note that this implies all traditional forms of Engel's theorem via
`lie_module.nontrivial_max_triv_of_is_nilpotent`, `lie_module.is_nilpotent_iff_forall`,
`lie_algebra.is_nilpotent_iff_forall`. -/
lemma lie_algebra.is_engelian_of_is_noetherian : lie_algebra.is_engelian R L :=
begin
introsI M _i1 _i2 _i3 _i4 h,
rw ← is_nilpotent_range_to_endomorphism_iff,
let L' := (to_endomorphism R L M).range,
replace h : ∀ (y : L'), is_nilpotent (y : module.End R M),
{ rintros ⟨-, ⟨y, rfl⟩⟩,
simp [h], },
change lie_module.is_nilpotent R L' M,
let s := { K : lie_subalgebra R L' | lie_algebra.is_engelian R K },
have hs : s.nonempty := ⟨⊥, lie_algebra.is_engelian_of_subsingleton⟩,
suffices : ⊤ ∈ s,
{ rw ← is_nilpotent_of_top_iff,
apply this M,
simp [lie_subalgebra.to_endomorphism_eq, h], },
have : ∀ (K ∈ s), K ≠ ⊤ → ∃ (K' ∈ s), K < K',
{ rintros K (hK₁ : lie_algebra.is_engelian R K) hK₂,
apply lie_algebra.exists_engelian_lie_subalgebra_of_lt_normalizer hK₁,
apply lt_of_le_of_ne K.le_normalizer,
rw [ne.def, eq_comm, K.normalizer_eq_self_iff, ← ne.def,
← lie_submodule.nontrivial_iff_ne_bot R K],
haveI : nontrivial (L' ⧸ K.to_lie_submodule),
{ replace hK₂ : K.to_lie_submodule ≠ ⊤ :=
by rwa [ne.def, ← lie_submodule.coe_to_submodule_eq_iff, K.coe_to_lie_submodule,
lie_submodule.top_coe_submodule, ← lie_subalgebra.top_coe_submodule,
K.coe_to_submodule_eq_iff],
exact submodule.quotient.nontrivial_of_lt_top _ hK₂.lt_top, },
haveI : lie_module.is_nilpotent R K (L' ⧸ K.to_lie_submodule),
{ refine hK₁ _ (λ x, _),
have hx := lie_algebra.is_nilpotent_ad_of_is_nilpotent (h x),
exact module.End.is_nilpotent.mapq _ hx, },
exact nontrivial_max_triv_of_is_nilpotent R K (L' ⧸ K.to_lie_submodule), },
haveI _i5 : is_noetherian R L' :=
is_noetherian_of_surjective L _ (linear_map.range_range_restrict (to_endomorphism R L M)),
obtain ⟨K, hK₁, hK₂⟩ :=
well_founded.well_founded_iff_has_max'.mp (lie_subalgebra.well_founded_of_noetherian R L') s hs,
have hK₃ : K = ⊤,
{ by_contra contra,
obtain ⟨K', hK'₁, hK'₂⟩ := this K hK₁ contra,
specialize hK₂ K' hK'₁ (le_of_lt hK'₂),
replace hK'₂ := (ne_of_lt hK'₂).symm,
contradiction, },
exact hK₃ ▸ hK₁,
end
/-- Engel's theorem. -/
lemma lie_module.is_nilpotent_iff_forall :
lie_module.is_nilpotent R L M ↔ ∀ x, is_nilpotent $ to_endomorphism R L M x :=
⟨begin
introsI h,
obtain ⟨k, hk⟩ := nilpotent_endo_of_nilpotent_module R L M,
exact λ x, ⟨k, hk x⟩,
end,
λ h, lie_algebra.is_engelian_of_is_noetherian M h⟩
/-- Engel's theorem. -/
lemma lie_algebra.is_nilpotent_iff_forall :
lie_algebra.is_nilpotent R L ↔ ∀ x, is_nilpotent $ lie_algebra.ad R L x :=
lie_module.is_nilpotent_iff_forall
end lie_algebra
|
• Semikhah could only be conveyed by a quorum of three judges, one of whom must himself have Semikhah.5 Semikhah could be conferred verbally or in writing. The “laying on of hands” was only practiced in the earlier generations. It was not practiced beyond the generation of Moses and Joshua.
• Both the grantor and recipient must be in Israel at the time Semikhah is given.
• In order to receive Semikhah, one must be an expert in all areas of Torah law. He must also be of proper character and zealously observant of the mitzvos and words of the sages.
• Yoreh Yoreh (He shall instruct, he shall instruct) – This ordination was for matters of religious and ritual law.
• Yadin Yadin (He shall judge, he shall judge) – This ordination qualified the scholar to rule on matters of civil, criminal, and monetary law.
• Yatir Yatir or Yatir Bechoros Yatir (He shall permit, he shall permit) – This ordination qualified its holder to rule on matters of animal sacrifices and ritual purity.
By the fourth and fifth centuries the Romans had driven most of the rabbinic community across the border into what is now Iraq. With few sages remaining in Israel, the chain of Semikhah eventually broke.8 For the next several centuries, the title “rabbi” would not be used.9 Instead, a scholar would either be referred to as “khokham” (wise one) or, if he held a position of authority, as a Gaon (eminence).
4 Most of this material is taken from Maimonides, Hil. Sanhedrin 4.
5 Sanhedrin 13b-14a. Hilchos Sanhedrin 4:5.
8 There are some Gaonic traditions indicating that ordination may have continued beyond the fourth century. See the Kovetz Shaarei Tzedek, p. 29-30 and Sefer HaShtarot, p. 132. However, even these concur that there is no modern semikhah.
9 The term “Rabbi” is not all that common in the Talmud either. There are many honorifics used in the Talmud for Torah scholars. However, most of them are referred to simply by their names or sobriquets.
In modern times, Semikhah refers to a degree or diploma certifying one as having completed a course of study in halakhah, Jewish law. The impetus for this new Semikhah was the rise of the medieval university, which began to issue diplomas and degrees. Jewish communities, in constant flux, saw the value of credentialing their religious scholars. They called this academic degree Semikhah in commemoration of the classical Semikhah. While this Semikhah caught on in the European Jewish world, Sephardic communities did not adopt it until very late.
• Rav U-Manhig – The equivalent of a Bachelor’s degree, this Semikhah originated in the 20th century at Ner Israel Rabbinical College in Baltimore, MD. It certifies the holder as a teacher and as knowing the basic laws of the synagogue ritual service and observance of the holidays. Not all yeshivas issue this Semikhah or accept it as valid. Where accepted, the holder may use the title Rabbi.
• Yoreh Yoreh – Equivalent of a Master’s degree. Based on the classical Yoreh Yoreh, this is usually awarded following a course of study in kashrus (dietary laws), Shabbat, Niddah (laws pertaining to married women), and Aveilus (mourning). Traditionally, the final exam is given in Issur ve-heter (a very detailed sub-section of the dietary laws). This is the most common Semikhah today. A Rabbi with this Semikhah, who holds a position of communal authority, may be called Rav.
• Yadin Yadin – Also based on the classical Semikhah, this ordination is the equivalent of a Ph.D. It requires extensive study of the laws of monetary and civil damages, as well as the laws of marriage and divorce. One who holds this ordination may be called Dayan. In the US, however, they are usually called Rabbi or Rav.
There is a fourth level that is very uncommon in our times called heter horaah (although this term is confusingly applied to other ordinations as well) or Semikhahs Moreh Horaah. This is an all-encompassing Semikhah awarded to rare scholars who have mastered the entire body of Torah literature. Very few people receive this today.
• Semikhah is first and foremost a certification in Torah Law. Biblical interpretation, philosophy, and theology are rarely, if ever, part of the curriculum. Semikhah is only relevant to the study of Torah law – it is not awarded for knowledge of other areas.
• Semikhah is an academic degree attained after a course of study and examination. It is not awarded based on righteousness or character. There are people with semikhah who are not particularly pleasant.
• One who has semikhah at one level may not teach or answer questions about law from a higher level. Someone with Yoreh Yoreh should not answer questions about Yadin Yadin material.
• Because it is possible to get semikhah in only one narrow area, one no longer has to be a Torah scholar to be a Rabbi. Likewise, one doesn’t need to be a rabbi to be a Torah scholar.
• One does not have to study at a yeshiva to attain semikhah. A person can either study at a yeshiva and receive semikhah from the yeshiva, or study privately and be examined by a renowned Torah scholar.
Ultimately, the world of Torah scholarship is a meritocracy – the greater scholars receive the greatest recognition and are accorded authority on the merits of their achievements. For this reason, many of the greatest Torah scholars and authorities of the past 150 years never bothered with semikhah. |
\documentclass{article}
\usepackage{Sweave}
\begin{document}
\input{04_analysis-concordance}
\section{Analysis}
So now that we've gone over each variable selection method, how do we know which one to use? We could see how each model (Lasso, P Value-based forward selection, BIC-based forward selection) performs from a predictive standpoint and use the model that has the most predictive power. However, the model that has the most predictive power is not necessarily the model that is the best at variable selection. The optimal model for variable selection is the model that is able to most accurately estimate beta coefficients for our explanatory variables. Unfortunately, given that we don't know each explanatory variable's true effect on graduation rate, there is no way for us to check how accurate our beta coefficient estimates are. As a result, there's no simple way for us to pick the model that is the best at variable selection.
All is not lost, however. While we may not be able to pick a single model to use for variable selection, we can simply use all 3 of our models. Each model will identify a set of relevant explanatory variables, and if a subset of these variables appears in all 3 models, we can flag those variables as extremely relevant to graduation rates. In the next section, we're going to go over how we used our Shiny R app to help us find the relevant variables for graduation rates.
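Concretely, this consensus step just intersects the three selected-variable sets. Here is a minimal Python-style sketch with hypothetical variable names (the real selections come from the three models discussed above; the names here are made up purely for illustration):
\begin{verbatim}
# Hypothetical selected-variable sets; the real ones come from the three fits.
lasso_vars    = {"spending_per_student", "pct_pell"}
pval_fwd_vars = {"spending_per_student", "student_faculty_ratio"}
bic_fwd_vars  = {"spending_per_student", "pct_pell"}

# Variables chosen by all three methods are flagged as highly relevant.
consensus_vars = lasso_vars & pval_fwd_vars & bic_fwd_vars
\end{verbatim}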
\end{document}
|
Formal statement is: lemma is_first_power [simp]: "is_nth_power 1 x" Informal statement is: Every number is a first power (of itself).
open import Common.Prelude
_test_ : Nat → Nat → Nat
m test_ = m +_
|
(*
* Copyright 2014, NICTA
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
* See "LICENSE_BSD2.txt" for details.
*
* @TAG(NICTA_BSD)
*)
theory locvarfncall
imports "../CTranslation"
begin
install_C_file "locvarfncall.c"
context "locvarfncall"
begin
thm something_body_def
thm something_else_body_def
thm another_body_def
lemma "\<Gamma> \<turnstile> \<lbrace> True \<rbrace> \<acute>ret__int :== CALL something() \<lbrace> \<acute>ret__int = 112 \<rbrace>"
apply vcg
apply simp
done
lemma "\<Gamma> \<turnstile> \<lbrace> True \<rbrace> \<acute>ret__int :== CALL something_else(4)
\<lbrace> \<acute>ret__int = 50 \<rbrace>"
apply vcg
apply simp
done
lemma "\<Gamma> \<turnstile> \<lbrace> True \<rbrace> \<acute>ret__int :== CALL another(4)
\<lbrace> \<acute>ret__int = 51 \<rbrace>"
apply vcg
apply simp
done
end
end
|
[GOAL]
α : Type u_1
β : Type u_2
inst✝² : TopologicalSpace α
inst✝¹ : PartialOrder α
inst✝ : TopologicalSpace β
a : α
f : α → β
⊢ ContinuousWithinAt f (Ioi a) a ↔ ContinuousWithinAt f (Ici a) a
[PROOFSTEP]
simp only [← Ici_diff_left, continuousWithinAt_diff_self]
[GOAL]
α : Type u_1
β : Type u_2
inst✝² : TopologicalSpace α
inst✝¹ : LinearOrder α
inst✝ : TopologicalSpace β
a : α
⊢ 𝓝[Iic a] a ⊔ 𝓝[Ici a] a = 𝓝 a
[PROOFSTEP]
rw [← nhdsWithin_union, Iic_union_Ici, nhdsWithin_univ]
[GOAL]
α : Type u_1
β : Type u_2
inst✝² : TopologicalSpace α
inst✝¹ : LinearOrder α
inst✝ : TopologicalSpace β
a : α
⊢ 𝓝[Iio a] a ⊔ 𝓝[Ici a] a = 𝓝 a
[PROOFSTEP]
rw [← nhdsWithin_union, Iio_union_Ici, nhdsWithin_univ]
[GOAL]
α : Type u_1
β : Type u_2
inst✝² : TopologicalSpace α
inst✝¹ : LinearOrder α
inst✝ : TopologicalSpace β
a : α
⊢ 𝓝[Iic a] a ⊔ 𝓝[Ioi a] a = 𝓝 a
[PROOFSTEP]
rw [← nhdsWithin_union, Iic_union_Ioi, nhdsWithin_univ]
[GOAL]
α : Type u_1
β : Type u_2
inst✝² : TopologicalSpace α
inst✝¹ : LinearOrder α
inst✝ : TopologicalSpace β
a : α
⊢ 𝓝[Iio a] a ⊔ 𝓝[Ioi a] a = 𝓝[{a}ᶜ] a
[PROOFSTEP]
rw [← nhdsWithin_union, Iio_union_Ioi]
[GOAL]
α : Type u_1
β : Type u_2
inst✝² : TopologicalSpace α
inst✝¹ : LinearOrder α
inst✝ : TopologicalSpace β
a : α
f : α → β
⊢ ContinuousAt f a ↔ ContinuousWithinAt f (Iic a) a ∧ ContinuousWithinAt f (Ici a) a
[PROOFSTEP]
simp only [ContinuousWithinAt, ContinuousAt, ← tendsto_sup, nhds_left_sup_nhds_right]
[GOAL]
α : Type u_1
β : Type u_2
inst✝² : TopologicalSpace α
inst✝¹ : LinearOrder α
inst✝ : TopologicalSpace β
a : α
f : α → β
⊢ ContinuousAt f a ↔ ContinuousWithinAt f (Iio a) a ∧ ContinuousWithinAt f (Ioi a) a
[PROOFSTEP]
rw [continuousWithinAt_Ioi_iff_Ici, continuousWithinAt_Iio_iff_Iic, continuousAt_iff_continuous_left_right]
|
Formal statement is: lemma (in ring_of_sets) continuous_from_above_iff_empty_continuous: fixes f :: "'a set \<Rightarrow> ennreal" assumes f: "positive M f" "additive M f" shows "(\<forall>A. range A \<subseteq> M \<longrightarrow> decseq A \<longrightarrow> (\<Inter>i. A i) \<in> M \<longrightarrow> (\<forall>i. f (A i) \<noteq> \<infinity>) \<longrightarrow> (\<lambda>i. f (A i)) \<longlonglongrightarrow> f (\<Inter>i. A i)) \<longleftrightarrow> (\<forall>A. range A \<subseteq> M \<longrightarrow> decseq A \<longrightarrow> (\<Inter>i. A i) = {} \<longrightarrow> (\<forall>i. f (A i) \<noteq> \<infinity>) \<longrightarrow> (\<lambda>i. f (A i)) \<longlonglongrightarrow> 0)" Informal statement is: Let $f$ be a positive additive set function on a ring of sets $M$. Then $f$ is continuous from above if and only if $f$ is continuous from above on the empty set. |
(** * PostScript *)
(* ################################################################# *)
(** * Future Directions *)
(** We have lots of plans for future directions:
- Mutation testing tool
- Automatic derivation of generators and shrinkers for data
satisfying Inductive relations
- Vellum2 testing
- DeepSpec Web Server
- Testing-only variant of _Software Foundations_?
*)
(* ################################################################# *)
(** * Recommended Reading *)
(** The material presented in this short course serves as an
introduction to property based random testing using
QuickChick. For the interested reader, we provide a few more
references for additional reading:
- The original QuickCheck paper by Koen Claessen and John Hughes
from ICFP 2000.
http://www.cs.tufts.edu/~nr/cs257/archive/john-hughes/quick.pdf
- The original QuickChick paper that focuses on a framework for
proving the correctness of QuickChick generators.
http://www.cis.upenn.edu/~llamp/pdf/Foundational.pdf
- A case study that uses QuickCheck to test non-interference for
information-flow-control abstract machines.
http://www.cis.upenn.edu/~llamp/pdf/TestingNonInterferenceQuickly.pdf
- Code for that case study exists under the QuickChick
organization of github (https://github.com/QuickChick) for
both Haskell ("Testing Noninterference") and Coq ("IFC").
- A paper on deriving QuickChick generators for a large class of
inductive relations.
http://www.cis.upenn.edu/~llamp/pdf/GeneratingGoodGenerators.pdf
*) |
C @(#)proctx.f 20.4 2/13/96
subroutine proctx
include 'ipfinc/parametr.inc'
include 'ipfinc/alpha.inc'
include 'ipfinc/blank.inc'
include 'ipfinc/bus.inc'
include 'ipfinc/ikk.inc'
include 'ipfinc/merge.inc'
include 'ipfinc/prt.inc'
include 'ipfinc/qksrt.inc'
include 'ipfinc/red2.inc'
common /is_batch / is_batch
external kpface,spface
dimension mtrx(MAXBUS)
C INITIALIZE "IKK"
do 100 kt=1,ntot
ikk(4,kt)=0
ikk(5,kt)=0
100 continue
C BEGIN PROCEDURE: IDENTIFY INTERIOR NODES OF "INTERFACE BRANCHES
if (itface.eq.0) go to 200
nx=0
nl=0
level=1
do i=1,itface
kt=face(1,i)
ikk(5,kt)=level
nx=nx+1
mtrx(nx)=kt
enddo
C PROPAGATE OUTWARDS FROM KERNEL SUBSYSTEM
120 nf=nl+1
nl=nx
if (nf.gt.nl) go to 180
level=level+1
do 170 l=nf,nl
kt=mtrx(l)
if(ikk(1,kt) .le. 0) call erexit
i5=ikkind(1,kt)
i6=ikkind(2,kt)
do 160 i=1,i6
mt=kolum(i+i5-1)
if (ikk(5,mt) .gt. 0) then
else if (ikk(1,mt) .eq. 1) then
nx=nx+1
mtrx(nx)=mt
ikk(1,mt)=1
ikk(2,mt)=1
ikk(5,mt)=level
C Note: The following test determines if the adjacent node
C MT is unprocessed. If true, it is changed into a
C retained (interior) node. If false, the node is
C tested to determine whether it is specifically a
C border node. If this subtest is false, an incomplete
C enclosure is detected.
c
else if (ikk(1,mt) .eq. 0 .and. ikk(2,mt) .eq. 0) then
nx=nx+1
mtrx(nx)=mt
ikk(1,mt)=1
ikk(2,mt)=1
ikk(5,mt)=level
else if (ikk(1,mt) .le. 0) then
if (level .gt. 2) then
jlevel=level-1
write (errbuf(1),150) bus(kt),base(kt),jlevel,bus(mt),
1 base(mt)
150 format ('INCOMPLETE INTERFACE ENCLOSURE. INTERIOR '
1 ,'BUS (',a8,f6.1,') IS ',i2,' NODES ADJACENT ',
2 'TO EXTERIOR BUS (',a8,f6.1,')')
if (is_batch .eq. 0) then
call prterx ('E',1)
else
call prterx ('F',1)
endif
kerrsw=kerrsw+1
endif
endif
160 continue
170 continue
go to 120
180 do 190 i=1,ntot
ikk(5,i)=0
190 continue
C END PROCEDURE
C BEGIN PROCEDURE: IDENTIFY ALL INTERFACE BRANCHES
200 nsyst=0
nsave=0
do 210 i=1,ntot
ikk(2,i)=0
ikk(4,i)=0
ikk(5,i)=0
if (ikk(1,i) .gt. 0) nsave=nsave+1
210 continue
if (nsave.gt.0) go to 230
write (errbuf(1),220)
220 format ('NO BUSES IN RETAINED SUBNETWORK.')
if (is_batch .eq. 0) then
call prterx ('E',1)
else
call prterx ('F',1)
endif
go to 410
230 continue
na=0
nl=0
do 310 jt=1,ntot
kt=jt
i5=ikkind(1,kt)
i6=ikkind(2,kt)
if (ikk(1,kt).le.0) go to 310
if (ikk(4,kt).ne.0) go to 310
na=na+1
nl=na
mtrx(na)=kt
nsyst=nsyst+1
ikk(4,kt)=nsyst
240 do 300 l=1,i6
mt=kolum(l+i5-1)
if (ikk(1,mt).le.0) go to 250
if (ikk(4,mt).ne.0) go to 300
na=na+1
mtrx(na)=mt
ikk(4,mt)=nsyst
go to 300
250 ikk(2,kt)=1
ikk(2,mt)=1
do 260 i=1,itface
if (face(1,i).ne.kt) go to 260
if (face(2,i).ne.mt) go to 260
face(4,i) = nsyst
go to 300
260 continue
itface=itface+1
if (itface .le. 200) then
face(1,itface)=kt
face(2,itface)=mt
ifcsw = ikk(3,kt)
face(3,itface)=ifcsw
face(4,itface)=nsyst
else
write (errbuf(1),280) bus(kt),base(kt),bus(mt),base(mt)
280 format ('EXCESS "INTERFACE BRANCHES" IGNORED (',a8,f6.1,2x,a8
1 ,f6.1,')')
call prterx ('W',1)
itface=200
endif
300 continue
nl=nl+1
if (nl.gt.nsave) go to 320
if (nl.gt.na) go to 310
kt=mtrx(nl)
i5=ikkind(1,kt)
i6=ikkind(2,kt)
go to 240
310 continue
call erexit
C
C END PROCEDURE
C
320 continue
kdupsw=2
C
C SUMMARIZE "FACE" ARRAY BY SUBSYSTEMS
C
if (itface-1) 410,340,330
330 key = 1
call qiksrt (1,itface,kpface,spface)
340 continue
nf = 1
do 400 is = 1,nsyst
write (outbuf,350) is
350 format ('0 SUMMARY OF INTERFACE BRANCHES FOR SUBSYSTEM ',i2)
call prtout(1)
write (outbuf,360)
360 format ('0 RETAINED BUSES ELIMINATED BUSES INTERFACE LEVEL ')
call prtout(1)
outbuf = ' '
call prtout(1)
if (nf.gt.itface) then
write (errbuf(1),362) is
362 format('0 MERGE SUBSYSTEM ',i2,' CONSISTING OF THE FOLLOWING',
1 ' BUSES HAS NO INTERFACE BRANCHES.')
errbuf(2)=' '
call prterx ('W',2)
knt=1
outbuf=' '
do 366 kt=1,ntot
if(ikk(4,kt).ne.is) go to 366
write (outbuf(knt:knt+19),364) bus(kt),base(kt)
364 format(5x,a8,f7.1)
knt=knt+20
if (knt.lt.101) go to 366
call prtout (1)
knt=1
outbuf=' '
366 continue
if(knt.gt.1) call prtout (1)
else
do 380 i=nf,itface
if (face(4,i).ne.is) go to 390
kt = face(1,i)
mt = face(2,i)
write (outbuf,370) bus(kt),base(kt),bus(mt),base(mt)
370 format (2x,a8,f6.1,2x,a8,f6.1,10x,i2)
if (ikk(1,mt) .lt. 0) then
outbuf(50:) = '(INTERFACE IS IGNORED)'
endif
call prtout (1)
380 continue
i=itface + 1
390 nf=i
endif
400 continue
410 continue
return
end
|
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <gsl/gsl_math.h>
#include "allvars.h"
#include "proto.h"
#ifdef RADTRANSFER
#ifdef EDDINGTON_TENSOR_STARS
static struct stardata_in
{
MyDouble Pos[3], Density, Mass;
MyFloat Hsml;
int NodeList[NODELISTLENGTH];
}
*StarDataIn, *StarDataGet;
#endif
void star_density(void)
{
int j;
#ifdef EDDINGTON_TENSOR_STARS
int i, dummy;
int ngrp, sendTask, recvTask, place, nexport, nimport, ndone, ndone_flag;
#endif
/* clear Je in all gas particles */
for(j = 0; j < N_gas; j++)
{
if(P[j].Type == 0)
SphP[j].Je = 0;
#ifdef SFR
if(P[j].Type == 0)
{
SphP[j].Je += SphP[j].Sfr * All.IonizingLumPerSFR *
(PROTONMASS / (P[j].Mass * All.UnitMass_in_g / All.HubbleParam)) * All.UnitTime_in_s /
All.HubbleParam;
printf("ET get_JE ID: %d Proc %d", P[j].ID, ThisTask);
}
#endif
}
#ifdef EDDINGTON_TENSOR_STARS
/* allocate buffers to arrange communication */
Ngblist = (int *) mymalloc(NumPart * sizeof(int));
All.BunchSize =
(int) ((All.BufferSize * 1024 * 1024) / (sizeof(struct data_index) + sizeof(struct data_nodelist) +
2 * sizeof(struct stardata_in)));
DataIndexTable = (struct data_index *) mymalloc(All.BunchSize * sizeof(struct data_index));
DataNodeList = (struct data_nodelist *) mymalloc(All.BunchSize * sizeof(struct data_nodelist));
i = FirstActiveParticle; /* beginn with this index */
do
{
for(j = 0; j < NTask; j++)
{
Send_count[j] = 0;
Exportflag[j] = -1;
}
/* do local particles and prepare export list */
for(nexport = 0; i >= 0; i = NextActiveParticle[i])
{
if(P[i].Type == 4)
{
if(star_density_evaluate(i, 0, &nexport, Send_count) < 0)
break;
}
}
#ifdef MYSORT
mysort_dataindex(DataIndexTable, nexport, sizeof(struct data_index), data_index_compare);
#else
qsort(DataIndexTable, nexport, sizeof(struct data_index), data_index_compare);
#endif
MPI_Allgather(Send_count, NTask, MPI_INT, Sendcount_matrix, NTask, MPI_INT, MPI_COMM_WORLD);
for(j = 0, nimport = 0, Recv_offset[0] = 0, Send_offset[0] = 0; j < NTask; j++)
{
Recv_count[j] = Sendcount_matrix[j * NTask + ThisTask];
nimport += Recv_count[j];
if(j > 0)
{
Send_offset[j] = Send_offset[j - 1] + Send_count[j - 1];
Recv_offset[j] = Recv_offset[j - 1] + Recv_count[j - 1];
}
}
StarDataGet = (struct stardata_in *) mymalloc(nimport * sizeof(struct stardata_in));
StarDataIn = (struct stardata_in *) mymalloc(nexport * sizeof(struct stardata_in));
/* prepare particle data for export */
for(j = 0; j < nexport; j++)
{
place = DataIndexTable[j].Index;
StarDataIn[j].Pos[0] = P[place].Pos[0];
StarDataIn[j].Pos[1] = P[place].Pos[1];
StarDataIn[j].Pos[2] = P[place].Pos[2];
StarDataIn[j].Hsml = PPP[place].Hsml;
StarDataIn[j].Density = P[place].DensAroundStar;
StarDataIn[j].Mass = P[place].Mass;
memcpy(StarDataIn[j].NodeList,
DataNodeList[DataIndexTable[j].IndexGet].NodeList, NODELISTLENGTH * sizeof(int));
}
/* exchange particle data */
for(ngrp = 1; ngrp < (1 << PTask); ngrp++)
{
sendTask = ThisTask;
recvTask = ThisTask ^ ngrp;
if(recvTask < NTask)
{
if(Send_count[recvTask] > 0 || Recv_count[recvTask] > 0)
{
/* get the particles */
MPI_Sendrecv(&StarDataIn[Send_offset[recvTask]],
Send_count[recvTask] * sizeof(struct stardata_in), MPI_BYTE,
recvTask, TAG_DENS_A,
&StarDataGet[Recv_offset[recvTask]],
Recv_count[recvTask] * sizeof(struct stardata_in), MPI_BYTE,
recvTask, TAG_DENS_A, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
}
}
myfree(StarDataIn);
/* now do the particles that were sent to us */
for(j = 0; j < nimport; j++)
star_density_evaluate(j, 1, &dummy, &dummy);
/* check whether this is the last iteration */
if(i < 0)
ndone_flag = 1;
else
ndone_flag = 0;
MPI_Allreduce(&ndone_flag, &ndone, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
myfree(StarDataGet);
}
while(ndone < NTask);
myfree(DataNodeList);
myfree(DataIndexTable);
myfree(Ngblist);
#endif //for EDDINGTON_TENSOR_STARS
}
#ifdef EDDINGTON_TENSOR_STARS
/*! This function represents the core of the SPH density computation. The
* target particle may either be local, or reside in the communication
* buffer.
*/
int star_density_evaluate(int target, int mode, int *nexport, int *nsend_local)
{
int j, n, numngb;
int startnode, listindex = 0;
double h, hinv, h2, mass_j, weight, hinv3;
double wk, mass, density, lum;
double dx, dy, dz, r, r2, u, a3inv;
MyDouble *pos;
#ifdef PERIODIC
double boxsize, boxhalf;
boxsize = All.BoxSize;
boxhalf = 0.5 * All.BoxSize;
#endif
if(All.ComovingIntegrationOn)
a3inv = 1.0 / (All.Time * All.Time * All.Time);
else
a3inv = 1.0;
if(mode == 0)
{
pos = P[target].Pos;
h = PPP[target].Hsml;
density = P[target].DensAroundStar;
mass = P[target].Mass;
}
else
{
pos = StarDataGet[target].Pos;
h = StarDataGet[target].Hsml;
density = StarDataGet[target].Density;
mass = StarDataGet[target].Mass;
}
h2 = h * h;
hinv = 1.0 / h;
hinv3 = hinv * hinv * hinv;
lum = mass * All.IonizingLumPerSolarMass * (PROTONMASS / SOLAR_MASS) * All.UnitTime_in_s / All.HubbleParam;
if(mode == 0)
{
startnode = All.MaxPart; /* root node */
}
else
{
startnode = StarDataGet[target].NodeList[0];
startnode = Nodes[startnode].u.d.nextnode; /* open it */
}
while(startnode >= 0)
{
while(startnode >= 0)
{
numngb = ngb_treefind_variable(pos, h, target, &startnode, mode, nexport, nsend_local);
if(numngb < 0)
return -1;
for(n = 0; n < numngb; n++)
{
j = Ngblist[n];
dx = pos[0] - P[j].Pos[0];
dy = pos[1] - P[j].Pos[1];
dz = pos[2] - P[j].Pos[2];
#ifdef PERIODIC
if(dx > boxHalf_X)
dx -= boxSize_X;
if(dx < -boxHalf_X)
dx += boxSize_X;
if(dy > boxHalf_Y)
dy -= boxSize_Y;
if(dy < -boxHalf_Y)
dy += boxSize_Y;
if(dz > boxHalf_Z)
dz -= boxSize_Z;
if(dz < -boxHalf_Z)
dz += boxSize_Z;
#endif
r2 = dx * dx + dy * dy + dz * dz;
r = sqrt(r2);
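/* SPH kernel weight: cubic-spline form used throughout this code, split at u = 0.5 */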
if(r2 < h2)
{
u = r * hinv;
if(u < 0.5)
wk = hinv3 * (KERNEL_COEFF_1 + KERNEL_COEFF_2 * (u - 1) * u * u);
else
wk = hinv3 * KERNEL_COEFF_5 * (1.0 - u) * (1.0 - u) * (1.0 - u);
}
else
wk = 0;
mass_j = P[j].Mass;
weight = mass_j * wk / density;
SphP[j].Je += lum * weight / mass_j;
}
}
if(mode == 1)
{
listindex++;
if(listindex < NODELISTLENGTH)
{
startnode = StarDataGet[target].NodeList[listindex];
if(startnode >= 0)
startnode = Nodes[startnode].u.d.nextnode; /* open it */
}
}
}
return 0;
}
#endif
#endif
|
From mathcomp Require Import ssreflect.
Require Import img_example.
Require Import Coq.Sets.Relations_3_facts.
Section RelationExample.
Definition ID (U:Type): Relation U := fun x:U => fun y:U => x = y.
Definition ID' (U:Type): U -> U -> Prop := fun x:U => fun y:U => x = y.
Check (forall (U:Type) (x:U), (ID U x x)).
Check Equivalence.
Check ID.
Check ID'.
Check (forall (U:Type) (x:U), ID U x x).
Goal forall (U:Type), Equivalence U (ID U).
Proof.
move => U.
split.
unfold Reflexive.
move => x.
unfold ID.
reflexivity.
unfold Transitive.
move => x y z.
unfold ID.
move => H0 H1.
rewrite H0.
apply H1.
unfold Symmetric.
move => x y.
unfold ID.
move => H.
rewrite H.
reflexivity.
Qed.
Goal forall (U:Type), Equivalence U (ID' U).
Proof.
move => U.
split.
unfold Reflexive.
move => x.
unfold ID'.
reflexivity.
unfold Transitive.
move => x y z.
unfold ID'.
move => H0 H1.
rewrite H0.
apply H1.
unfold Symmetric.
move => x y.
unfold ID'.
move => H.
rewrite H.
reflexivity.
Qed.
End RelationExample.
|
How do you define crazy? Crazy means deranged. Deranged means insane. Insane means crazy. It’s an ouroboros. But deep down, we all know crazy when we see it or hear it because there’s something universal about expectations of thinking and behavior that transcends description.
In my effort to use this blog as a means of teaching how to develop skills of interpreting literature, today we have an amazing opportunity to learn about characterization at the hands of a master storyteller, Christopher Nolan.
Information/description given by the narrator without any judgment; e.g., the narrator of a story tells you, “John was a tall man.” It’s descriptive, but there’s no implication or judgment involved.
In literature, all of these are easily accessible. In film, however, Numbers 1 & 2 only apply when there’s a Voice-Over Narration. So I’ll be focusing this analysis of The Dark Knight on Numbers 3, 4, and 5. Number 6 is accessible in the film only through voice over.
Here we go. It starts with a claim. After you make a claim, the next thing you have to do is look for examples in the text (in this case, Nolan’s film) to support your claim. Those examples are called Textual Evidence. That’s right: evidence, information gathered to support a conclusion.
CLAIM: Actor Heath Ledger said of the character The Joker that he played in the film The Dark Knight, that The Joker was “a psychotic, mass murdering, schizophrenic clown with zero empathy” who merely wants to “upset the social order through crime.” However, if one looks at the definition of these clinical terms, one sees that throughout the film The Joker displays clear attributes of the opposite. It’s the Hamlet question – Is he insane or merely pretending to be?
Ledger’s performance is a tour de force, a fine line that balances completely on characterization. Ledger claims the Joker is a psychotic. A clinical definition deems psychosis to be characterized by changes in personality, impaired functioning, or a nonexistent sense of objective reality. Let’s take these one by one.
From our first encounter with The Joker, we see that he is consistent. He is revealed as the mastermind behind a bank robbery in progress and, through his manipulation of his henchmen and by playing them for their greediness, he manages to be the only one leaving with the money. He is pretending in this scene to be merely another lackey on the heist, which establishes pretense as a tool in his behavioral arsenal. While his actions seem erratic or capricious, we see evidence of advanced planning every step of the way. In the scene where he introduces his plan to get rid of Batman to a gathering of Gotham City’s mob bosses, he is threatened. When this happens, he opens his coat revealing live grenades one can only assume he is more than willing to use. His use of video messages to convey his intentions is designed to instill fear and panic as well as to manipulate certain responses that lead to his arrest. The way he escapes through the clever use of a cell phone also shows the advanced mind of someone well versed in premeditation. Joker’s consistency in all areas shows how little his character’s personality changes.
Does he exhibit impaired functioning? I would argue the opposite. Harvey Dent accuses Joker of having ‘planned’ all of this, to which Joker replies, “Do I really look like a guy with a plan?” Of course, he is wearing a nurse’s uniform when he says this, yet another example of how much ‘planning’ he has actually done to manage to get near Dent, who is under police protection. Joker says, “I just do things. I try to show the schemers how pathetic their attempts to control things really are. I am an agent of chaos.” Joker says these things, but his actions show advanced strategic planning on multiple levels constantly throughout the film. It takes intellect to ‘scheme’ as well as he does. The only other character that comes close to understanding the level of the Joker’s threats is Alfred. He forces others to make impossible ethical choices. He uses obfuscation to make people see Batman as the cause of Gotham’s problems, not him. He turns the simple act of Batman refusing to reveal his identity into Batman making a choice to allow people to be killed. But Joker’s anonymity protects him as much as Batman’s protects Bruce Wayne. The Joker’s ability to function is not only unimpaired; I’d say it is heightened.
As far as a disturbed or nonexistent sense of objective reality goes, Joker embodies the objective realities that people wish to dismiss, ignore, or completely deny. Dent says, “The only morality in a cruel world is chance.” That may be true if your morality is only based on equality, but not fairness. Batman says to Alfred, “Criminals aren’t complicated, Alfred. We just need to figure out what he’s after.” Objective reality is something being true outside a person’s own biases or feelings. I challenge anyone to find a statement the Joker makes about society that’s untrue. He tells Batman, “You either die a hero or you live long enough to become a villain.” This becomes a kind of prophecy by the end of the film. He says, “When the chips are down, these civilized people will eat each other.” He sets up his ‘social experiment’ on the boats to prove this. And while ultimately the two boats don’t blow each other up, they vote on one boat to do it. Joker is correct…they will eat each other, just not while others are looking. The civilized people show a lack of empathy for the criminals on the other boat. It’s understandable to the viewing audience because we see it’s rooted in self-preservation. Just because no one ultimately has the conviction to do it or no one takes the ethical stance of greater good for the greatest number of people does not prove the Joker’s assessment of society to be wrong. He tells Batman, “You have all these rules and you think they’ll save you. The only way to live in this world is without rules. Killing is making a choice.” Joker understands truths about our social contracts that we dare not acknowledge. And those that do, we label as crazy. Joker has a keen sense of objective reality. He simply chooses to try and alter that reality.
He does not hallucinate, nor is he delusional. He knows there are consequences for his actions and at times seems to welcome them, like when Batman is racing towards him in a head-on collision. He stands in the street and says, “Come on. Come on. Hit me!” He’s not schizophrenic. At no time does he lose contact with reality, though at times he feigns insanity for effect. Viewers may judge his actions criminal; however, he is aware that his actions are illegal and he disregards that concept. Not because of a lack of understanding, but by choice. “Killing is making a choice,” remember? He exhibits none of the five subcategories of schizophrenia. He is not paranoid. He is not disorganized. He definitely is not catatonic or undifferentiated. Nor is he residual. So how then can we say he is clinically insane?
The only argument I can see supporting this is Ledger’s claim that Joker has zero empathy. He takes life without remorse. He causes damage at every turn. He mercilessly manipulates others to his advantage. But for me, the scene where he goes to see Dent in the hospital is one of the most complex revelations of Joker’s character in the film. He knows what damage he has done to Dent. Yet he goes there trying to give him the words he needs to hear. That it’s okay to be angry. It’s okay to cause chaos and to break the rules. It’s a bizarre tender moment, one in which he even takes Dent by the hand. So there is empathy, though many may not see it that way. It’s not empathy on a grand scale. It’s much more personal. He’s welcoming Dent into the Hall of Freaks that up until now held only himself and the Batman.
Is the Joker legally insane or just evil? A look at Characterization. |
[GOAL]
z : ℂ
hz : 0 < z.im
n : ℤ
⊢ ‖cexp (↑π * I * ↑n ^ 2 * z)‖ ≤ rexp (-π * z.im) ^ Int.natAbs n
[PROOFSTEP]
let y := rexp (-π * z.im)
[GOAL]
z : ℂ
hz : 0 < z.im
n : ℤ
y : ℝ := rexp (-π * z.im)
⊢ ‖cexp (↑π * I * ↑n ^ 2 * z)‖ ≤ rexp (-π * z.im) ^ Int.natAbs n
[PROOFSTEP]
have h : y < 1 := exp_lt_one_iff.mpr (mul_neg_of_neg_of_pos (neg_lt_zero.mpr pi_pos) hz)
[GOAL]
z : ℂ
hz : 0 < z.im
n : ℤ
y : ℝ := rexp (-π * z.im)
h : y < 1
⊢ ‖cexp (↑π * I * ↑n ^ 2 * z)‖ ≤ rexp (-π * z.im) ^ Int.natAbs n
[PROOFSTEP]
refine' (le_of_eq _).trans (_ : y ^ n ^ 2 ≤ _)
[GOAL]
case refine'_1
z : ℂ
hz : 0 < z.im
n : ℤ
y : ℝ := rexp (-π * z.im)
h : y < 1
⊢ ‖cexp (↑π * I * ↑n ^ 2 * z)‖ = y ^ n ^ 2
[PROOFSTEP]
rw [Complex.norm_eq_abs, Complex.abs_exp]
[GOAL]
case refine'_1
z : ℂ
hz : 0 < z.im
n : ℤ
y : ℝ := rexp (-π * z.im)
h : y < 1
⊢ rexp (↑π * I * ↑n ^ 2 * z).re = y ^ n ^ 2
[PROOFSTEP]
have : (↑π * I * (n : ℂ) ^ 2 * z).re = -π * z.im * (n : ℝ) ^ 2 :=
by
rw [(by push_cast ; ring : ↑π * I * (n : ℂ) ^ 2 * z = ↑(π * (n : ℝ) ^ 2) * (z * I)), ofReal_mul_re, mul_I_re]
ring
[GOAL]
z : ℂ
hz : 0 < z.im
n : ℤ
y : ℝ := rexp (-π * z.im)
h : y < 1
⊢ (↑π * I * ↑n ^ 2 * z).re = -π * z.im * ↑n ^ 2
[PROOFSTEP]
rw [(by push_cast ; ring : ↑π * I * (n : ℂ) ^ 2 * z = ↑(π * (n : ℝ) ^ 2) * (z * I)), ofReal_mul_re, mul_I_re]
[GOAL]
z : ℂ
hz : 0 < z.im
n : ℤ
y : ℝ := rexp (-π * z.im)
h : y < 1
⊢ ↑π * I * ↑n ^ 2 * z = ↑(π * ↑n ^ 2) * (z * I)
[PROOFSTEP]
push_cast
[GOAL]
z : ℂ
hz : 0 < z.im
n : ℤ
y : ℝ := rexp (-π * z.im)
h : y < 1
⊢ ↑π * I * ↑n ^ 2 * z = ↑π * ↑n ^ 2 * (z * I)
[PROOFSTEP]
ring
[GOAL]
z : ℂ
hz : 0 < z.im
n : ℤ
y : ℝ := rexp (-π * z.im)
h : y < 1
⊢ π * ↑n ^ 2 * -z.im = -π * z.im * ↑n ^ 2
[PROOFSTEP]
ring
[GOAL]
case refine'_1
z : ℂ
hz : 0 < z.im
n : ℤ
y : ℝ := rexp (-π * z.im)
h : y < 1
this : (↑π * I * ↑n ^ 2 * z).re = -π * z.im * ↑n ^ 2
⊢ rexp (↑π * I * ↑n ^ 2 * z).re = y ^ n ^ 2
[PROOFSTEP]
obtain ⟨m, hm⟩ := Int.eq_ofNat_of_zero_le (sq_nonneg n)
[GOAL]
case refine'_1.intro
z : ℂ
hz : 0 < z.im
n : ℤ
y : ℝ := rexp (-π * z.im)
h : y < 1
this : (↑π * I * ↑n ^ 2 * z).re = -π * z.im * ↑n ^ 2
m : ℕ
hm : n ^ 2 = ↑m
⊢ rexp (↑π * I * ↑n ^ 2 * z).re = y ^ n ^ 2
[PROOFSTEP]
rw [this, exp_mul, ← Int.cast_pow, rpow_int_cast, hm, zpow_ofNat]
[GOAL]
case refine'_2
z : ℂ
hz : 0 < z.im
n : ℤ
y : ℝ := rexp (-π * z.im)
h : y < 1
⊢ y ^ n ^ 2 ≤ rexp (-π * z.im) ^ Int.natAbs n
[PROOFSTEP]
have : n ^ 2 = ↑(n.natAbs ^ 2) := by rw [Nat.cast_pow, Int.natAbs_sq]
[GOAL]
z : ℂ
hz : 0 < z.im
n : ℤ
y : ℝ := rexp (-π * z.im)
h : y < 1
⊢ n ^ 2 = ↑(Int.natAbs n ^ 2)
[PROOFSTEP]
rw [Nat.cast_pow, Int.natAbs_sq]
[GOAL]
case refine'_2
z : ℂ
hz : 0 < z.im
n : ℤ
y : ℝ := rexp (-π * z.im)
h : y < 1
this : n ^ 2 = ↑(Int.natAbs n ^ 2)
⊢ y ^ n ^ 2 ≤ rexp (-π * z.im) ^ Int.natAbs n
[PROOFSTEP]
rw [this, zpow_ofNat]
[GOAL]
case refine'_2
z : ℂ
hz : 0 < z.im
n : ℤ
y : ℝ := rexp (-π * z.im)
h : y < 1
this : n ^ 2 = ↑(Int.natAbs n ^ 2)
⊢ y ^ Int.natAbs n ^ 2 ≤ rexp (-π * z.im) ^ Int.natAbs n
[PROOFSTEP]
exact pow_le_pow_of_le_one (exp_pos _).le h.le ((sq n.natAbs).symm ▸ n.natAbs.le_mul_self)
[GOAL]
R : ℝ
hR : 0 < R
⊢ ∃ bd, Summable bd ∧ ∀ {τ : ℂ}, R ≤ τ.im → ∀ (n : ℤ), ‖cexp (↑π * I * ↑n ^ 2 * τ)‖ ≤ bd n
[PROOFSTEP]
let y := rexp (-π * R)
[GOAL]
R : ℝ
hR : 0 < R
y : ℝ := rexp (-π * R)
⊢ ∃ bd, Summable bd ∧ ∀ {τ : ℂ}, R ≤ τ.im → ∀ (n : ℤ), ‖cexp (↑π * I * ↑n ^ 2 * τ)‖ ≤ bd n
[PROOFSTEP]
have h : y < 1 := exp_lt_one_iff.mpr (mul_neg_of_neg_of_pos (neg_lt_zero.mpr pi_pos) hR)
[GOAL]
R : ℝ
hR : 0 < R
y : ℝ := rexp (-π * R)
h : y < 1
⊢ ∃ bd, Summable bd ∧ ∀ {τ : ℂ}, R ≤ τ.im → ∀ (n : ℤ), ‖cexp (↑π * I * ↑n ^ 2 * τ)‖ ≤ bd n
[PROOFSTEP]
refine' ⟨fun n => y ^ n.natAbs, summable_int_of_summable_nat _ _, fun hτ n => _⟩
[GOAL]
case refine'_1
R : ℝ
hR : 0 < R
y : ℝ := rexp (-π * R)
h : y < 1
⊢ Summable fun n => y ^ Int.natAbs ↑n
case refine'_2
R : ℝ
hR : 0 < R
y : ℝ := rexp (-π * R)
h : y < 1
⊢ Summable fun n => y ^ Int.natAbs (-↑n)
case refine'_3
R : ℝ
hR : 0 < R
y : ℝ := rexp (-π * R)
h : y < 1
τ✝ : ℂ
hτ : R ≤ τ✝.im
n : ℤ
⊢ ‖cexp (↑π * I * ↑n ^ 2 * τ✝)‖ ≤ (fun n => y ^ Int.natAbs n) n
[PROOFSTEP]
pick_goal 3
[GOAL]
case refine'_3
R : ℝ
hR : 0 < R
y : ℝ := rexp (-π * R)
h : y < 1
τ✝ : ℂ
hτ : R ≤ τ✝.im
n : ℤ
⊢ ‖cexp (↑π * I * ↑n ^ 2 * τ✝)‖ ≤ (fun n => y ^ Int.natAbs n) n
[PROOFSTEP]
refine' (norm_exp_mul_sq_le (hR.trans_le hτ) n).trans _
[GOAL]
case refine'_3
R : ℝ
hR : 0 < R
y : ℝ := rexp (-π * R)
h : y < 1
τ✝ : ℂ
hτ : R ≤ τ✝.im
n : ℤ
⊢ rexp (-π * τ✝.im) ^ Int.natAbs n ≤ (fun n => y ^ Int.natAbs n) n
[PROOFSTEP]
refine' pow_le_pow_of_le_left (exp_pos _).le (Real.exp_le_exp.mpr _) _
[GOAL]
case refine'_3
R : ℝ
hR : 0 < R
y : ℝ := rexp (-π * R)
h : y < 1
τ✝ : ℂ
hτ : R ≤ τ✝.im
n : ℤ
⊢ -π * τ✝.im ≤ -π * R
[PROOFSTEP]
rwa [mul_le_mul_left_of_neg (neg_lt_zero.mpr pi_pos)]
[GOAL]
case refine'_1
R : ℝ
hR : 0 < R
y : ℝ := rexp (-π * R)
h : y < 1
⊢ Summable fun n => y ^ Int.natAbs ↑n
case refine'_2
R : ℝ
hR : 0 < R
y : ℝ := rexp (-π * R)
h : y < 1
⊢ Summable fun n => y ^ Int.natAbs (-↑n)
[PROOFSTEP]
all_goals simpa only [Int.natAbs_neg, Int.natAbs_ofNat] using summable_geometric_of_lt_1 (Real.exp_pos _).le h
[GOAL]
case refine'_1
R : ℝ
hR : 0 < R
y : ℝ := rexp (-π * R)
h : y < 1
⊢ Summable fun n => y ^ Int.natAbs ↑n
[PROOFSTEP]
simpa only [Int.natAbs_neg, Int.natAbs_ofNat] using summable_geometric_of_lt_1 (Real.exp_pos _).le h
[GOAL]
case refine'_2
R : ℝ
hR : 0 < R
y : ℝ := rexp (-π * R)
h : y < 1
⊢ Summable fun n => y ^ Int.natAbs (-↑n)
[PROOFSTEP]
simpa only [Int.natAbs_neg, Int.natAbs_ofNat] using summable_geometric_of_lt_1 (Real.exp_pos _).le h
[GOAL]
z : ℂ
⊢ jacobiTheta (2 + z) = jacobiTheta z
[PROOFSTEP]
refine' tsum_congr fun n => _
[GOAL]
z : ℂ
n : ℤ
⊢ cexp (↑π * I * ↑n ^ 2 * (2 + z)) = cexp (↑π * I * ↑n ^ 2 * z)
[PROOFSTEP]
suffices cexp (↑π * I * (n : ℂ) ^ 2 * 2) = 1 by rw [mul_add, Complex.exp_add, this, one_mul]
[GOAL]
z : ℂ
n : ℤ
this : cexp (↑π * I * ↑n ^ 2 * 2) = 1
⊢ cexp (↑π * I * ↑n ^ 2 * (2 + z)) = cexp (↑π * I * ↑n ^ 2 * z)
[PROOFSTEP]
rw [mul_add, Complex.exp_add, this, one_mul]
[GOAL]
z : ℂ
n : ℤ
⊢ cexp (↑π * I * ↑n ^ 2 * 2) = 1
[PROOFSTEP]
rw [(by push_cast ; ring : ↑π * I * ↑n ^ 2 * 2 = ↑(n ^ 2) * (2 * π * I)), Complex.exp_int_mul, Complex.exp_two_pi_mul_I,
one_zpow]
[GOAL]
z : ℂ
n : ℤ
⊢ ↑π * I * ↑n ^ 2 * 2 = ↑(n ^ 2) * (2 * ↑π * I)
[PROOFSTEP]
push_cast
[GOAL]
z : ℂ
n : ℤ
⊢ ↑π * I * ↑n ^ 2 * 2 = ↑n ^ 2 * (2 * ↑π * I)
[PROOFSTEP]
ring
[GOAL]
τ : ℍ
⊢ jacobiTheta ↑(ModularGroup.T ^ 2 • τ) = jacobiTheta ↑τ
[PROOFSTEP]
suffices ↑(ModularGroup.T ^ 2 • τ) = (2 : ℂ) + ↑τ by simp_rw [this, jacobiTheta_two_add]
[GOAL]
τ : ℍ
this : ↑(ModularGroup.T ^ 2 • τ) = 2 + ↑τ
⊢ jacobiTheta ↑(ModularGroup.T ^ 2 • τ) = jacobiTheta ↑τ
[PROOFSTEP]
simp_rw [this, jacobiTheta_two_add]
[GOAL]
τ : ℍ
⊢ ↑(ModularGroup.T ^ 2 • τ) = 2 + ↑τ
[PROOFSTEP]
have : ModularGroup.T ^ (2 : ℕ) = ModularGroup.T ^ (2 : ℤ) := by rfl
[GOAL]
τ : ℍ
⊢ ModularGroup.T ^ 2 = ModularGroup.T ^ 2
[PROOFSTEP]
rfl
[GOAL]
τ : ℍ
this : ModularGroup.T ^ 2 = ModularGroup.T ^ 2
⊢ ↑(ModularGroup.T ^ 2 • τ) = 2 + ↑τ
[PROOFSTEP]
simp_rw [this, UpperHalfPlane.modular_T_zpow_smul, UpperHalfPlane.coe_vadd]
[GOAL]
τ : ℍ
this : ModularGroup.T ^ 2 = ModularGroup.T ^ 2
⊢ ↑↑2 + ↑τ = 2 + ↑τ
[PROOFSTEP]
norm_cast
[GOAL]
τ : ℍ
⊢ jacobiTheta ↑(ModularGroup.S • τ) = (-I * ↑τ) ^ (1 / 2) * jacobiTheta ↑τ
[PROOFSTEP]
unfold jacobiTheta
[GOAL]
τ : ℍ
⊢ ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * ↑(ModularGroup.S • τ)) =
(-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * ↑τ)
[PROOFSTEP]
rw [UpperHalfPlane.modular_S_smul, UpperHalfPlane.coe_mk]
[GOAL]
τ : ℍ
⊢ ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * (-↑τ)⁻¹) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * ↑τ)
[PROOFSTEP]
have ha : 0 < (-I * τ).re :=
by
rw [neg_mul, neg_re, mul_re, I_re, I_im, zero_mul, one_mul, zero_sub, neg_neg]
exact τ.im_pos
[GOAL]
τ : ℍ
⊢ 0 < (-I * ↑τ).re
[PROOFSTEP]
rw [neg_mul, neg_re, mul_re, I_re, I_im, zero_mul, one_mul, zero_sub, neg_neg]
[GOAL]
τ : ℍ
⊢ 0 < (↑τ).im
[PROOFSTEP]
exact τ.im_pos
[GOAL]
τ : ℍ
ha : 0 < (-I * ↑τ).re
⊢ ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * (-↑τ)⁻¹) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * ↑τ)
[PROOFSTEP]
have ha' : (-I * τ) ^ (1 / 2 : ℂ) ≠ 0 := by
rw [Ne.def, cpow_eq_zero_iff]
contrapose! ha
rw [ha.1, zero_re]
[GOAL]
τ : ℍ
ha : 0 < (-I * ↑τ).re
⊢ (-I * ↑τ) ^ (1 / 2) ≠ 0
[PROOFSTEP]
rw [Ne.def, cpow_eq_zero_iff]
[GOAL]
τ : ℍ
ha : 0 < (-I * ↑τ).re
⊢ ¬(-I * ↑τ = 0 ∧ 1 / 2 ≠ 0)
[PROOFSTEP]
contrapose! ha
[GOAL]
τ : ℍ
ha : -I * ↑τ = 0 ∧ 1 / 2 ≠ 0
⊢ (-I * ↑τ).re ≤ 0
[PROOFSTEP]
rw [ha.1, zero_re]
[GOAL]
τ : ℍ
ha : 0 < (-I * ↑τ).re
ha' : (-I * ↑τ) ^ (1 / 2) ≠ 0
⊢ ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * (-↑τ)⁻¹) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * ↑τ)
[PROOFSTEP]
have hτ : (τ : ℂ) ≠ 0 := τ.ne_zero
[GOAL]
τ : ℍ
ha : 0 < (-I * ↑τ).re
ha' : (-I * ↑τ) ^ (1 / 2) ≠ 0
hτ : ↑τ ≠ 0
⊢ ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * (-↑τ)⁻¹) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * ↑τ)
[PROOFSTEP]
have := Complex.tsum_exp_neg_mul_int_sq ha
[GOAL]
τ : ℍ
ha : 0 < (-I * ↑τ).re
ha' : (-I * ↑τ) ^ (1 / 2) ≠ 0
hτ : ↑τ ≠ 0
this :
∑' (n : ℤ), cexp (-↑π * (-I * ↑τ) * ↑n ^ 2) = 1 / (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (-↑π / (-I * ↑τ) * ↑n ^ 2)
⊢ ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * (-↑τ)⁻¹) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * ↑τ)
[PROOFSTEP]
rw [mul_comm ((1 : ℂ) / _) _, mul_one_div, eq_div_iff ha', mul_comm _ (_ ^ _), eq_comm] at this
[GOAL]
τ : ℍ
ha : 0 < (-I * ↑τ).re
ha' : (-I * ↑τ) ^ (1 / 2) ≠ 0
hτ : ↑τ ≠ 0
this : ∑' (n : ℤ), cexp (-↑π / (-I * ↑τ) * ↑n ^ 2) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (-↑π * (-I * ↑τ) * ↑n ^ 2)
⊢ ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * (-↑τ)⁻¹) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * ↑τ)
[PROOFSTEP]
have expo1 : ∀ n : ℤ, -↑π / (-I * ↑τ) * (n : ℂ) ^ 2 = ↑π * I * (n : ℂ) ^ 2 * (-↑τ)⁻¹ :=
by
intro n
field_simp [hτ, I_ne_zero]
ring_nf
rw [I_sq, mul_neg, mul_one, neg_neg]
[GOAL]
τ : ℍ
ha : 0 < (-I * ↑τ).re
ha' : (-I * ↑τ) ^ (1 / 2) ≠ 0
hτ : ↑τ ≠ 0
this : ∑' (n : ℤ), cexp (-↑π / (-I * ↑τ) * ↑n ^ 2) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (-↑π * (-I * ↑τ) * ↑n ^ 2)
⊢ ∀ (n : ℤ), -↑π / (-I * ↑τ) * ↑n ^ 2 = ↑π * I * ↑n ^ 2 * (-↑τ)⁻¹
[PROOFSTEP]
intro n
[GOAL]
τ : ℍ
ha : 0 < (-I * ↑τ).re
ha' : (-I * ↑τ) ^ (1 / 2) ≠ 0
hτ : ↑τ ≠ 0
this : ∑' (n : ℤ), cexp (-↑π / (-I * ↑τ) * ↑n ^ 2) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (-↑π * (-I * ↑τ) * ↑n ^ 2)
n : ℤ
⊢ -↑π / (-I * ↑τ) * ↑n ^ 2 = ↑π * I * ↑n ^ 2 * (-↑τ)⁻¹
[PROOFSTEP]
field_simp [hτ, I_ne_zero]
[GOAL]
τ : ℍ
ha : 0 < (-I * ↑τ).re
ha' : (-I * ↑τ) ^ (1 / 2) ≠ 0
hτ : ↑τ ≠ 0
this : ∑' (n : ℤ), cexp (-↑π / (-I * ↑τ) * ↑n ^ 2) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (-↑π * (-I * ↑τ) * ↑n ^ 2)
n : ℤ
⊢ ↑π * ↑n ^ 2 * ↑τ = -(↑π * I * ↑n ^ 2 * (I * ↑τ))
[PROOFSTEP]
ring_nf
[GOAL]
τ : ℍ
ha : 0 < (-I * ↑τ).re
ha' : (-I * ↑τ) ^ (1 / 2) ≠ 0
hτ : ↑τ ≠ 0
this : ∑' (n : ℤ), cexp (-↑π / (-I * ↑τ) * ↑n ^ 2) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (-↑π * (-I * ↑τ) * ↑n ^ 2)
n : ℤ
⊢ ↑π * ↑n ^ 2 * ↑τ = -(↑π * ↑n ^ 2 * ↑τ * I ^ 2)
[PROOFSTEP]
rw [I_sq, mul_neg, mul_one, neg_neg]
[GOAL]
τ : ℍ
ha : 0 < (-I * ↑τ).re
ha' : (-I * ↑τ) ^ (1 / 2) ≠ 0
hτ : ↑τ ≠ 0
this : ∑' (n : ℤ), cexp (-↑π / (-I * ↑τ) * ↑n ^ 2) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (-↑π * (-I * ↑τ) * ↑n ^ 2)
expo1 : ∀ (n : ℤ), -↑π / (-I * ↑τ) * ↑n ^ 2 = ↑π * I * ↑n ^ 2 * (-↑τ)⁻¹
⊢ ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * (-↑τ)⁻¹) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * ↑τ)
[PROOFSTEP]
simp_rw [expo1] at this
[GOAL]
τ : ℍ
ha : 0 < (-I * ↑τ).re
ha' : (-I * ↑τ) ^ (1 / 2) ≠ 0
hτ : ↑τ ≠ 0
expo1 : ∀ (n : ℤ), -↑π / (-I * ↑τ) * ↑n ^ 2 = ↑π * I * ↑n ^ 2 * (-↑τ)⁻¹
this : ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * (-↑τ)⁻¹) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (-↑π * (-I * ↑τ) * ↑n ^ 2)
⊢ ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * (-↑τ)⁻¹) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * ↑τ)
[PROOFSTEP]
have expo2 : ∀ n : ℤ, -↑π * (-I * ↑τ) * (n : ℂ) ^ 2 = ↑π * I * (n : ℂ) ^ 2 * ↑τ :=
by
intro n
ring_nf
[GOAL]
τ : ℍ
ha : 0 < (-I * ↑τ).re
ha' : (-I * ↑τ) ^ (1 / 2) ≠ 0
hτ : ↑τ ≠ 0
expo1 : ∀ (n : ℤ), -↑π / (-I * ↑τ) * ↑n ^ 2 = ↑π * I * ↑n ^ 2 * (-↑τ)⁻¹
this : ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * (-↑τ)⁻¹) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (-↑π * (-I * ↑τ) * ↑n ^ 2)
⊢ ∀ (n : ℤ), -↑π * (-I * ↑τ) * ↑n ^ 2 = ↑π * I * ↑n ^ 2 * ↑τ
[PROOFSTEP]
intro n
[GOAL]
τ : ℍ
ha : 0 < (-I * ↑τ).re
ha' : (-I * ↑τ) ^ (1 / 2) ≠ 0
hτ : ↑τ ≠ 0
expo1 : ∀ (n : ℤ), -↑π / (-I * ↑τ) * ↑n ^ 2 = ↑π * I * ↑n ^ 2 * (-↑τ)⁻¹
this : ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * (-↑τ)⁻¹) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (-↑π * (-I * ↑τ) * ↑n ^ 2)
n : ℤ
⊢ -↑π * (-I * ↑τ) * ↑n ^ 2 = ↑π * I * ↑n ^ 2 * ↑τ
[PROOFSTEP]
ring_nf
[GOAL]
τ : ℍ
ha : 0 < (-I * ↑τ).re
ha' : (-I * ↑τ) ^ (1 / 2) ≠ 0
hτ : ↑τ ≠ 0
expo1 : ∀ (n : ℤ), -↑π / (-I * ↑τ) * ↑n ^ 2 = ↑π * I * ↑n ^ 2 * (-↑τ)⁻¹
this : ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * (-↑τ)⁻¹) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (-↑π * (-I * ↑τ) * ↑n ^ 2)
expo2 : ∀ (n : ℤ), -↑π * (-I * ↑τ) * ↑n ^ 2 = ↑π * I * ↑n ^ 2 * ↑τ
⊢ ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * (-↑τ)⁻¹) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * ↑τ)
[PROOFSTEP]
simp_rw [expo2] at this
[GOAL]
τ : ℍ
ha : 0 < (-I * ↑τ).re
ha' : (-I * ↑τ) ^ (1 / 2) ≠ 0
hτ : ↑τ ≠ 0
expo1 : ∀ (n : ℤ), -↑π / (-I * ↑τ) * ↑n ^ 2 = ↑π * I * ↑n ^ 2 * (-↑τ)⁻¹
expo2 : ∀ (n : ℤ), -↑π * (-I * ↑τ) * ↑n ^ 2 = ↑π * I * ↑n ^ 2 * ↑τ
this : ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * (-↑τ)⁻¹) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * ↑τ)
⊢ ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * (-↑τ)⁻¹) = (-I * ↑τ) ^ (1 / 2) * ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * ↑τ)
[PROOFSTEP]
exact this
[GOAL]
z : ℂ
hz : 0 < z.im
⊢ HasSum (fun n => cexp (↑π * I * (↑n + 1) ^ 2 * z)) ((jacobiTheta z - 1) / 2)
[PROOFSTEP]
have := (summable_exp_mul_sq hz).hasSum.sum_nat_of_sum_int
[GOAL]
z : ℂ
hz : 0 < z.im
this :
HasSum (fun n => cexp (↑π * I * ↑↑n ^ 2 * z) + cexp (↑π * I * ↑(-↑n) ^ 2 * z))
(∑' (b : ℤ), cexp (↑π * I * ↑b ^ 2 * z) + cexp (↑π * I * ↑0 ^ 2 * z))
⊢ HasSum (fun n => cexp (↑π * I * (↑n + 1) ^ 2 * z)) ((jacobiTheta z - 1) / 2)
[PROOFSTEP]
rw [← @hasSum_nat_add_iff' ℂ _ _ _ _ 1] at this
[GOAL]
z : ℂ
hz : 0 < z.im
this :
HasSum (fun n => cexp (↑π * I * ↑↑(n + 1) ^ 2 * z) + cexp (↑π * I * ↑(-↑(n + 1)) ^ 2 * z))
(∑' (b : ℤ), cexp (↑π * I * ↑b ^ 2 * z) + cexp (↑π * I * ↑0 ^ 2 * z) -
∑ i in Finset.range 1, (cexp (↑π * I * ↑↑i ^ 2 * z) + cexp (↑π * I * ↑(-↑i) ^ 2 * z)))
⊢ HasSum (fun n => cexp (↑π * I * (↑n + 1) ^ 2 * z)) ((jacobiTheta z - 1) / 2)
[PROOFSTEP]
simp_rw [Finset.sum_range_one, Int.cast_neg, Int.cast_ofNat, Nat.cast_zero, neg_zero, Int.cast_zero, sq (0 : ℂ),
mul_zero, zero_mul, neg_sq, ← mul_two, Complex.exp_zero, add_sub_assoc, (by norm_num : (1 : ℂ) - 1 * 2 = -1), ←
sub_eq_add_neg, Nat.cast_add, Nat.cast_one] at this
[GOAL]
z : ℂ
hz : 0 < z.im
this : HasSum (fun n => cexp (↑π * I * ↑(n + 1) ^ 2 * z) * 2) (∑' (b : ℤ), cexp (↑π * I * ↑b ^ 2 * z) + (1 - 1 * 2))
⊢ 1 - 1 * 2 = -1
[PROOFSTEP]
norm_num
[GOAL]
z : ℂ
hz : 0 < z.im
this : HasSum (fun n => cexp (↑π * I * (↑n + 1) ^ 2 * z) * 2) (∑' (b : ℤ), cexp (↑π * I * ↑b ^ 2 * z) - 1)
⊢ HasSum (fun n => cexp (↑π * I * (↑n + 1) ^ 2 * z)) ((jacobiTheta z - 1) / 2)
[PROOFSTEP]
convert this.div_const 2 using 1
[GOAL]
case h.e'_5
z : ℂ
hz : 0 < z.im
this : HasSum (fun n => cexp (↑π * I * (↑n + 1) ^ 2 * z) * 2) (∑' (b : ℤ), cexp (↑π * I * ↑b ^ 2 * z) - 1)
⊢ (fun n => cexp (↑π * I * (↑n + 1) ^ 2 * z)) = fun i => cexp (↑π * I * (↑i + 1) ^ 2 * z) * 2 / 2
[PROOFSTEP]
simp_rw [mul_div_cancel (G₀ := ℂ) _ two_ne_zero]
[GOAL]
z : ℂ
hz : 0 < z.im
⊢ jacobiTheta z = 1 + 2 * ∑' (n : ℕ), cexp (↑π * I * (↑n + 1) ^ 2 * z)
[PROOFSTEP]
rw [(hasSum_nat_jacobiTheta hz).tsum_eq, mul_div_cancel' _ (two_ne_zero' ℂ), ← add_sub_assoc, add_sub_cancel']
[GOAL]
z : ℂ
hz : 0 < z.im
⊢ ‖jacobiTheta z - 1‖ ≤ 2 / (1 - rexp (-π * z.im)) * rexp (-π * z.im)
[PROOFSTEP]
suffices ‖∑' n : ℕ, cexp (π * I * ((n : ℂ) + 1) ^ 2 * z)‖ ≤ rexp (-π * z.im) / (1 - rexp (-π * z.im)) by
calc
‖jacobiTheta z - 1‖ = ↑2 * ‖∑' n : ℕ, cexp (π * I * ((n : ℂ) + 1) ^ 2 * z)‖ := by
rw [sub_eq_iff_eq_add'.mpr (jacobiTheta_eq_tsum_nat hz), norm_mul, Complex.norm_eq_abs, Complex.abs_two]
_ ≤ 2 * (rexp (-π * z.im) / (1 - rexp (-π * z.im))) := by rwa [mul_le_mul_left (zero_lt_two' ℝ)]
_ = 2 / (1 - rexp (-π * z.im)) * rexp (-π * z.im) := by rw [div_mul_comm, mul_comm]
[GOAL]
z : ℂ
hz : 0 < z.im
this : ‖∑' (n : ℕ), cexp (↑π * I * (↑n + 1) ^ 2 * z)‖ ≤ rexp (-π * z.im) / (1 - rexp (-π * z.im))
⊢ ‖jacobiTheta z - 1‖ ≤ 2 / (1 - rexp (-π * z.im)) * rexp (-π * z.im)
[PROOFSTEP]
calc
‖jacobiTheta z - 1‖ = ↑2 * ‖∑' n : ℕ, cexp (π * I * ((n : ℂ) + 1) ^ 2 * z)‖ := by
rw [sub_eq_iff_eq_add'.mpr (jacobiTheta_eq_tsum_nat hz), norm_mul, Complex.norm_eq_abs, Complex.abs_two]
_ ≤ 2 * (rexp (-π * z.im) / (1 - rexp (-π * z.im))) := by rwa [mul_le_mul_left (zero_lt_two' ℝ)]
_ = 2 / (1 - rexp (-π * z.im)) * rexp (-π * z.im) := by rw [div_mul_comm, mul_comm]
[GOAL]
z : ℂ
hz : 0 < z.im
this : ‖∑' (n : ℕ), cexp (↑π * I * (↑n + 1) ^ 2 * z)‖ ≤ rexp (-π * z.im) / (1 - rexp (-π * z.im))
⊢ ‖jacobiTheta z - 1‖ = 2 * ‖∑' (n : ℕ), cexp (↑π * I * (↑n + 1) ^ 2 * z)‖
[PROOFSTEP]
rw [sub_eq_iff_eq_add'.mpr (jacobiTheta_eq_tsum_nat hz), norm_mul, Complex.norm_eq_abs, Complex.abs_two]
[GOAL]
z : ℂ
hz : 0 < z.im
this : ‖∑' (n : ℕ), cexp (↑π * I * (↑n + 1) ^ 2 * z)‖ ≤ rexp (-π * z.im) / (1 - rexp (-π * z.im))
⊢ 2 * ‖∑' (n : ℕ), cexp (↑π * I * (↑n + 1) ^ 2 * z)‖ ≤ 2 * (rexp (-π * z.im) / (1 - rexp (-π * z.im)))
[PROOFSTEP]
rwa [mul_le_mul_left (zero_lt_two' ℝ)]
[GOAL]
z : ℂ
hz : 0 < z.im
this : ‖∑' (n : ℕ), cexp (↑π * I * (↑n + 1) ^ 2 * z)‖ ≤ rexp (-π * z.im) / (1 - rexp (-π * z.im))
⊢ 2 * (rexp (-π * z.im) / (1 - rexp (-π * z.im))) = 2 / (1 - rexp (-π * z.im)) * rexp (-π * z.im)
[PROOFSTEP]
rw [div_mul_comm, mul_comm]
[GOAL]
z : ℂ
hz : 0 < z.im
⊢ ‖∑' (n : ℕ), cexp (↑π * I * (↑n + 1) ^ 2 * z)‖ ≤ rexp (-π * z.im) / (1 - rexp (-π * z.im))
[PROOFSTEP]
have : ∀ n : ℕ, ‖cexp (π * I * ((n : ℂ) + 1) ^ 2 * z)‖ ≤ rexp (-π * z.im) ^ (n + 1) :=
by
intro n
simpa only [Int.cast_add, Int.cast_one] using norm_exp_mul_sq_le hz (n + 1)
[GOAL]
z : ℂ
hz : 0 < z.im
⊢ ∀ (n : ℕ), ‖cexp (↑π * I * (↑n + 1) ^ 2 * z)‖ ≤ rexp (-π * z.im) ^ (n + 1)
[PROOFSTEP]
intro n
[GOAL]
z : ℂ
hz : 0 < z.im
n : ℕ
⊢ ‖cexp (↑π * I * (↑n + 1) ^ 2 * z)‖ ≤ rexp (-π * z.im) ^ (n + 1)
[PROOFSTEP]
simpa only [Int.cast_add, Int.cast_one] using norm_exp_mul_sq_le hz (n + 1)
[GOAL]
z : ℂ
hz : 0 < z.im
this : ∀ (n : ℕ), ‖cexp (↑π * I * (↑n + 1) ^ 2 * z)‖ ≤ rexp (-π * z.im) ^ (n + 1)
⊢ ‖∑' (n : ℕ), cexp (↑π * I * (↑n + 1) ^ 2 * z)‖ ≤ rexp (-π * z.im) / (1 - rexp (-π * z.im))
[PROOFSTEP]
have s : HasSum (fun n : ℕ => rexp (-π * z.im) ^ (n + 1)) (rexp (-π * z.im) / (1 - rexp (-π * z.im))) :=
by
simp_rw [pow_succ, div_eq_mul_inv, hasSum_mul_left_iff (Real.exp_ne_zero _)]
exact
hasSum_geometric_of_lt_1 (exp_pos (-π * z.im)).le
(exp_lt_one_iff.mpr <| mul_neg_of_neg_of_pos (neg_lt_zero.mpr pi_pos) hz)
[GOAL]
z : ℂ
hz : 0 < z.im
this : ∀ (n : ℕ), ‖cexp (↑π * I * (↑n + 1) ^ 2 * z)‖ ≤ rexp (-π * z.im) ^ (n + 1)
⊢ HasSum (fun n => rexp (-π * z.im) ^ (n + 1)) (rexp (-π * z.im) / (1 - rexp (-π * z.im)))
[PROOFSTEP]
simp_rw [pow_succ, div_eq_mul_inv, hasSum_mul_left_iff (Real.exp_ne_zero _)]
[GOAL]
z : ℂ
hz : 0 < z.im
this : ∀ (n : ℕ), ‖cexp (↑π * I * (↑n + 1) ^ 2 * z)‖ ≤ rexp (-π * z.im) ^ (n + 1)
⊢ HasSum (fun i => rexp (-π * z.im) ^ i) (1 - rexp (-π * z.im))⁻¹
[PROOFSTEP]
exact
hasSum_geometric_of_lt_1 (exp_pos (-π * z.im)).le
(exp_lt_one_iff.mpr <| mul_neg_of_neg_of_pos (neg_lt_zero.mpr pi_pos) hz)
[GOAL]
z : ℂ
hz : 0 < z.im
this : ∀ (n : ℕ), ‖cexp (↑π * I * (↑n + 1) ^ 2 * z)‖ ≤ rexp (-π * z.im) ^ (n + 1)
s : HasSum (fun n => rexp (-π * z.im) ^ (n + 1)) (rexp (-π * z.im) / (1 - rexp (-π * z.im)))
⊢ ‖∑' (n : ℕ), cexp (↑π * I * (↑n + 1) ^ 2 * z)‖ ≤ rexp (-π * z.im) / (1 - rexp (-π * z.im))
[PROOFSTEP]
have aux : Summable fun n : ℕ => ‖cexp (π * I * ((n : ℂ) + 1) ^ 2 * z)‖ :=
summable_of_nonneg_of_le (fun n => norm_nonneg _) this s.summable
[GOAL]
z : ℂ
hz : 0 < z.im
this : ∀ (n : ℕ), ‖cexp (↑π * I * (↑n + 1) ^ 2 * z)‖ ≤ rexp (-π * z.im) ^ (n + 1)
s : HasSum (fun n => rexp (-π * z.im) ^ (n + 1)) (rexp (-π * z.im) / (1 - rexp (-π * z.im)))
aux : Summable fun n => ‖cexp (↑π * I * (↑n + 1) ^ 2 * z)‖
⊢ ‖∑' (n : ℕ), cexp (↑π * I * (↑n + 1) ^ 2 * z)‖ ≤ rexp (-π * z.im) / (1 - rexp (-π * z.im))
[PROOFSTEP]
exact (norm_tsum_le_tsum_norm aux).trans ((tsum_mono aux s.summable this).trans (le_of_eq s.tsum_eq))
[GOAL]
⊢ (fun τ => jacobiTheta τ - 1) =O[comap im atTop] fun τ => rexp (-π * τ.im)
[PROOFSTEP]
simp_rw [IsBigO, IsBigOWith, Filter.eventually_comap, Filter.eventually_atTop]
[GOAL]
⊢ ∃ c a, ∀ (b : ℝ), b ≥ a → ∀ (a : ℂ), a.im = b → ‖jacobiTheta a - 1‖ ≤ c * ‖rexp (-π * a.im)‖
[PROOFSTEP]
refine'
⟨2 / (1 - rexp (-π)), 1, fun y hy z hz =>
(norm_jacobiTheta_sub_one_le (hz.symm ▸ zero_lt_one.trans_le hy : 0 < im z)).trans _⟩
[GOAL]
y : ℝ
hy : y ≥ 1
z : ℂ
hz : z.im = y
⊢ 2 / (1 - rexp (-π * z.im)) * rexp (-π * z.im) ≤ 2 / (1 - rexp (-π)) * ‖rexp (-π * z.im)‖
[PROOFSTEP]
rw [Real.norm_eq_abs, Real.abs_exp]
[GOAL]
y : ℝ
hy : y ≥ 1
z : ℂ
hz : z.im = y
⊢ 2 / (1 - rexp (-π * z.im)) * rexp (-π * z.im) ≤ 2 / (1 - rexp (-π)) * rexp (-π * z.im)
[PROOFSTEP]
refine' mul_le_mul_of_nonneg_right _ (exp_pos _).le
[GOAL]
y : ℝ
hy : y ≥ 1
z : ℂ
hz : z.im = y
⊢ 2 / (1 - rexp (-π * z.im)) ≤ 2 / (1 - rexp (-π))
[PROOFSTEP]
rw [div_le_div_left (zero_lt_two' ℝ), sub_le_sub_iff_left, exp_le_exp, neg_mul, neg_le_neg_iff]
[GOAL]
y : ℝ
hy : y ≥ 1
z : ℂ
hz : z.im = y
⊢ π ≤ π * z.im
[PROOFSTEP]
exact le_mul_of_one_le_right pi_pos.le (hz.symm ▸ hy)
[GOAL]
case hb
y : ℝ
hy : y ≥ 1
z : ℂ
hz : z.im = y
⊢ 0 < 1 - rexp (-π * z.im)
[PROOFSTEP]
rw [sub_pos, exp_lt_one_iff, neg_mul, neg_lt_zero]
[GOAL]
case hb
y : ℝ
hy : y ≥ 1
z : ℂ
hz : z.im = y
⊢ 0 < π * z.im
[PROOFSTEP]
exact mul_pos pi_pos (hz.symm ▸ zero_lt_one.trans_le hy)
[GOAL]
case hc
y : ℝ
hy : y ≥ 1
z : ℂ
hz : z.im = y
⊢ 0 < 1 - rexp (-π)
[PROOFSTEP]
rw [sub_pos, exp_lt_one_iff, neg_lt_zero]
[GOAL]
case hc
y : ℝ
hy : y ≥ 1
z : ℂ
hz : z.im = y
⊢ 0 < π
[PROOFSTEP]
exact pi_pos
[GOAL]
z : ℂ
hz : 0 < z.im
⊢ DifferentiableAt ℂ jacobiTheta z
[PROOFSTEP]
suffices
∀ (y : ℝ) (_ : 0 < y), DifferentiableOn ℂ (fun z => ∑' n : ℤ, cexp (π * I * (n : ℂ) ^ 2 * z)) {w : ℂ | y < im w}
by
let ⟨y, hy, hy'⟩ := exists_between hz
exact (this y hy).differentiableAt ((Complex.continuous_im.isOpen_preimage _ isOpen_Ioi).mem_nhds hy')
[GOAL]
z : ℂ
hz : 0 < z.im
this : ∀ (y : ℝ), 0 < y → DifferentiableOn ℂ (fun z => ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * z)) {w | y < w.im}
⊢ DifferentiableAt ℂ jacobiTheta z
[PROOFSTEP]
let ⟨y, hy, hy'⟩ := exists_between hz
[GOAL]
z : ℂ
hz : 0 < z.im
this : ∀ (y : ℝ), 0 < y → DifferentiableOn ℂ (fun z => ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * z)) {w | y < w.im}
y : ℝ
hy : 0 < y
hy' : y < z.im
⊢ DifferentiableAt ℂ jacobiTheta z
[PROOFSTEP]
exact (this y hy).differentiableAt ((Complex.continuous_im.isOpen_preimage _ isOpen_Ioi).mem_nhds hy')
[GOAL]
z : ℂ
hz : 0 < z.im
⊢ ∀ (y : ℝ), 0 < y → DifferentiableOn ℂ (fun z => ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * z)) {w | y < w.im}
[PROOFSTEP]
intro y hy
[GOAL]
z : ℂ
hz : 0 < z.im
y : ℝ
hy : 0 < y
⊢ DifferentiableOn ℂ (fun z => ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * z)) {w | y < w.im}
[PROOFSTEP]
have h1 :
∀ (n : ℤ) (w : ℂ) (_ : y < im w),
DifferentiableWithinAt ℂ (fun v : ℂ => cexp (π * I * (n : ℂ) ^ 2 * v)) {z : ℂ | y < im z} w :=
fun n w _ => (differentiableAt_id.const_mul _).cexp.differentiableWithinAt
[GOAL]
z : ℂ
hz : 0 < z.im
y : ℝ
hy : 0 < y
h1 : ∀ (n : ℤ) (w : ℂ), y < w.im → DifferentiableWithinAt ℂ (fun v => cexp (↑π * I * ↑n ^ 2 * v)) {z | y < z.im} w
⊢ DifferentiableOn ℂ (fun z => ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * z)) {w | y < w.im}
[PROOFSTEP]
have h2 : IsOpen {w : ℂ | y < im w} := continuous_im.isOpen_preimage _ isOpen_Ioi
[GOAL]
z : ℂ
hz : 0 < z.im
y : ℝ
hy : 0 < y
h1 : ∀ (n : ℤ) (w : ℂ), y < w.im → DifferentiableWithinAt ℂ (fun v => cexp (↑π * I * ↑n ^ 2 * v)) {z | y < z.im} w
h2 : IsOpen {w | y < w.im}
⊢ DifferentiableOn ℂ (fun z => ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * z)) {w | y < w.im}
[PROOFSTEP]
obtain ⟨bd, bd_s, le_bd⟩ := exists_summable_bound_exp_mul_sq hy
[GOAL]
case intro.intro
z : ℂ
hz : 0 < z.im
y : ℝ
hy : 0 < y
h1 : ∀ (n : ℤ) (w : ℂ), y < w.im → DifferentiableWithinAt ℂ (fun v => cexp (↑π * I * ↑n ^ 2 * v)) {z | y < z.im} w
h2 : IsOpen {w | y < w.im}
bd : ℤ → ℝ
bd_s : Summable bd
le_bd : ∀ {τ : ℂ}, y ≤ τ.im → ∀ (n : ℤ), ‖cexp (↑π * I * ↑n ^ 2 * τ)‖ ≤ bd n
⊢ DifferentiableOn ℂ (fun z => ∑' (n : ℤ), cexp (↑π * I * ↑n ^ 2 * z)) {w | y < w.im}
[PROOFSTEP]
exact differentiableOn_tsum_of_summable_norm bd_s h1 h2 fun i w hw => le_bd (le_of_lt hw) i
|
<p align="center">
</p>
## Interactive Confidence Interval Demonstration
### Bootstrap and Analytical Confidence Intervals
* we calculate the confidence interval in the sample proportion with bootstrap and compare to the analytical expression
* with this workflow we also provide an interactive plot demonstration with the matplotlib and ipywidgets packages
#### Michael Pyrcz, Associate Professor, University of Texas at Austin
##### [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1) | [GeostatsPy](https://github.com/GeostatsGuy/GeostatsPy)
#### Confidence Intervals
Confidence intervals express the uncertainty in a sample statistic or model parameter
* for uncertainty in the sample proportion we have:
* center on the sample proportion, $\hat{p}$
* the standard error in the proportion for the dispersion (spread)
* Student's t distributed for small samples and Gaussian distributed for large sample sizes
The analytical form for small samples ($n \lt 30$) is:
\begin{equation}
CI: \hat{p} \pm t_{\frac{\alpha}{2},n-1} \times \frac {\sqrt{p(1-p)}}{\sqrt{n}}
\end{equation}
where the sampling distribution of the proportion is Student's t distributed with $n - 1$ degrees of freedom (number of samples minus one) and $\alpha$ is the significance level, divided by 2 for the two tails.
When the number of samples is large ($n \ge 30$) the analytical form converges to the Gaussian distribution:
\begin{equation}
CI: \hat{p} \pm N_{\frac{\alpha}{2}} \times \frac {\sqrt{p(1-p)}}{\sqrt{n}}
\end{equation}
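As a quick, minimal sketch of how these two analytical forms can be evaluated with scipy.stats (the counts $n_{red}$ and $n_{green}$ below are assumed example values, not part of the workflow above):
```python
# minimal sketch: analytical confidence interval for a sample proportion
# n_red and n_green are assumed example counts for illustration only
from scipy.stats import t, norm
import math

n_red, n_green = 20, 30
n = n_red + n_green
p = n_red / n                                 # proportion of red balls
se = math.sqrt(p * (1.0 - p) / n)             # standard error of the proportion
alpha = 0.05                                  # significance level

if n < 30:                                    # small sample: Student's t with n-1 dof
    crit = t.ppf(1 - alpha / 2, df=n - 1)
else:                                         # large sample: Gaussian
    crit = norm.ppf(1 - alpha / 2)

print(f'{(1 - alpha) * 100:.0f}% CI for the proportion: '
      f'[{p - crit * se:.3f}, {p + crit * se:.3f}]')
```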
#### Bootstrap
Uncertainty in the sample statistics
* one source of uncertainty is the paucity of data.
* do 200 or even fewer wells provide a precise (and accurate) estimate of the mean? standard deviation? skew? P13?
Would it be useful to know the uncertainty in these statistics due to limited sampling?
* what is the impact of uncertainty in the mean porosity e.g. 20%+/-2%?
**Bootstrap** is a method to assess the uncertainty in a sample statistic by repeated random sampling with replacement.
Assumptions
* sufficient, representative sampling; identical, independent samples
Limitations
1. assumes the samples are representative
2. assumes stationarity
3. only accounts for uncertainty due to too few samples, e.g. no uncertainty due to changes away from data
4. does not account for boundary of area of interest
5. assumes the samples are independent
6. does not account for other local information sources
The Bootstrap Approach (Efron, 1982)
Statistical resampling procedure to calculate uncertainty in a calculated statistic from the data itself.
* Does this work? Prove it to yourself; for uncertainty in the mean, the solution is the standard error (see the sketch after this list):
\begin{equation}
\sigma^2_\overline{x} = \frac{\sigma^2_s}{n}
\end{equation}
Extremely powerful - could calculate uncertainty in any statistic! e.g. P13, skew etc.
* It would not be possible to access uncertainty in a general statistic without the bootstrap.
* Advanced forms account for spatial information and sampling strategy (game theory and Journel’s spatial bootstrap, 1993).
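The following is a minimal sketch of that check for uncertainty in the mean; the synthetic sample, its parameters and the seed are assumptions for illustration only:
```python
# minimal sketch: bootstrap uncertainty in the mean vs. the analytical standard error
# the synthetic sample below is an assumption for illustration only
import numpy as np

rng = np.random.default_rng(seed=73073)
sample = rng.normal(loc=20.0, scale=2.0, size=50)   # synthetic sample, n = 50
n, L = len(sample), 10000                           # data count and bootstrap realizations

boot_means = np.array([rng.choice(sample, size=n, replace=True).mean()
                       for _ in range(L)])

print('bootstrap std. dev. of the mean:', round(float(boot_means.std()), 4))
print('analytical standard error      :', round(float(sample.std(ddof=1) / np.sqrt(n)), 4))
```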
Steps:
1. assemble a sample set, must be representative, reasonable to assume independence between samples
2. optional: build a cumulative distribution function (CDF)
* may account for declustering weights, tail extrapolation
* could use analogous data to support
3. For $\ell = 1, \ldots, L$ realizations, do the following:
* For $i = 1, \ldots, n$ data, do the following:
* Draw a random sample with replacement from the sample set or Monte Carlo simulate from the CDF (if available).
4. Calculate a realization of the summary statistic of interest from the $n$ samples, e.g. $m^\ell$, $\sigma^2_{\ell}$. Return to step 3 for another realization.
5. Compile and summarize the $L$ realizations of the statistic of interest.
This is a very powerful method. Let's try it out and compare the result to the analytical form of the confidence interval for the sample proportion.
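Before we build the interactive version, here is a minimal non-interactive sketch of the steps above applied to the proportion of red balls (the counts and seed are assumed example values):
```python
# minimal sketch of the bootstrap steps above for a sample proportion
# n_red, n_green, L and the seed are assumed example values
import numpy as np

n_red, n_green, L = 20, 30, 1000
sample = np.array([1] * n_red + [0] * n_green)        # step 1: sample set, 1 = red, 0 = green
n = len(sample)

rng = np.random.default_rng(seed=73073)
boot_props = np.empty(L)
for ell in range(L):                                  # step 3: L bootstrap realizations
    draw = rng.choice(sample, size=n, replace=True)   #   n draws with replacement
    boot_props[ell] = draw.mean()                     # step 4: summary statistic (proportion of red)

# step 5: compile and summarize the L realizations of the statistic
print('P10, P50, P90 of the proportion of red balls:',
      np.percentile(boot_props, [10, 50, 90]))
```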
#### Objective
Provide an example and demonstration for:
1. interactive plotting in Jupyter Notebooks with Python packages matplotlib and ipywidgets
2. provide an intuitive hands-on example of confidence intervals and compare to the statistical bootstrap
#### Getting Started
Here are the steps to get set up in Python with the GeostatsPy package:
1. Install Anaconda 3 on your machine (https://www.anaconda.com/download/).
2. Open Jupyter and in the top block get started by copying and pasting the code block below from this Jupyter Notebook to start using the geostatspy functionality.
#### Load the Required Libraries
The following code loads the required libraries.
```python
%matplotlib inline
from ipywidgets import interactive # widgets and interactivity
from ipywidgets import widgets
from ipywidgets import Layout
from ipywidgets import Label
from ipywidgets import VBox, HBox
import matplotlib.pyplot as plt # plotting
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator) # control of axes ticks
plt.rc('axes', axisbelow=True) # set axes and grids in the background for all plots
import numpy as np # working with arrays
import pandas as pd # working with DataFrames
import seaborn as sns # for matrix scatter plots
from scipy.stats import triang # parametric distributions
from scipy.stats import binom
from scipy.stats import norm
from scipy.stats import uniform
from scipy.stats import triang
from scipy.stats import t
from scipy import stats # statistical calculations
import random # random drawing / bootstrap realizations of the data
from matplotlib.gridspec import GridSpec # nonstandard subplots
import math # square root operator
```
#### Make a Synthetic Dataset
This is an interactive method to:
* select a parametric distribution
* select the distribution parameters
* select the number of samples and visualize the synthetic dataset distribution
```python
# parameters for the synthetic dataset
bins = np.linspace(0,1000,1000)
# interactive calculation of the sample set (control of source parametric distribution and number of samples)
l = widgets.Text(value=' Simple Boostrap Demonstration, Michael Pyrcz, Associate Professor, The University of Texas at Austin',layout=Layout(width='950px', height='30px'))
a = widgets.IntSlider(min=0, max = 100, value = 2, step = 1, description = '$n_{red}$',orientation='horizontal',layout=Layout(width='400px', height='20px'),continuous_update=False)
a.style.handle_color = 'red'
b = widgets.IntSlider(min=0, max = 100, value = 3, step = 1, description = '$n_{green}$',orientation='horizontal',layout=Layout(width='400px', height='20px'),continuous_update=False)
b.style.handle_color = 'green'
c = widgets.IntSlider(min=1, max = 16, value = 3, step = 1, description = '$L$',orientation='horizontal',layout=Layout(width='400px', height='20px'),continuous_update=False)
c.style.handle_color = 'gray'
ui = widgets.HBox([a,b,c],) # basic widget formatting
ui2 = widgets.VBox([l,ui],)
def f_make(a, b, c): # function to take parameters, make sample and plot
red_freq = make_data(a, b, c)
labels = ['Red', 'Green']
nrows = np.round(np.sqrt(c)+0.4,0); ncols = np.round(c / nrows + 0.4,0)
plt.clf()
for i in range(0, c):
plt.subplot(ncols,nrows,i + 1)
draw = [red_freq[i],a + b - red_freq[i]]
plt.grid(zorder=0, color='black', axis = 'y', alpha = 0.2); plt.ylim(0,a + b);
plt.ylabel('Frequency'); plt.xlabel('Balls Drawn')
plt.yticks(np.arange(0,a + b + 1,max(1,round((a+b)/10))))
barlist = plt.bar(labels,draw,edgecolor = "black",linewidth = 1,alpha = 0.8); plt.title('Realization #' + str(i+1),zorder = 1)
barlist[0].set_color('r'); barlist[1].set_color('g')
plt.subplots_adjust(left=0.0, bottom=0.0, right=2.0, top=1.2 * nrows, wspace=0.2, hspace=0.2)
plt.show()
def make_data(a, b, c): # function to check parameters and make sample
prop_red = np.zeros(c)
for i in range(0, c):
prop_red[i] = np.random.multinomial(a+b,[a/(a+b),b/(a+b)], size = 1)[0][0]
return prop_red
# connect the function to make the samples and plot to the widgets
interactive_plot = widgets.interactive_output(f_make, {'a': a, 'b': b, 'c': c})
interactive_plot.clear_output(wait = True) # reduce flickering by delaying plot updating
```
### Simple Bootstrap Demonstration - Drawing Red and Green Balls from a Virtual Cowboy Hat
* drawing red and green balls from a hat with replacement to assess uncertainty in the proportion
* interactive plot demonstration with ipywidget, matplotlib packages
#### Michael Pyrcz, Associate Professor, University of Texas at Austin
##### [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1) | [GeostatsPy](https://github.com/GeostatsGuy/GeostatsPy)
### The Problem
Let's simulate bootstrap, resampling with replacement from a hat with $n_{red}$ and $n_{green}$ balls
* **$n_{red}$**: number of red balls in the sample (placed in the hat)
* **$n_{green}$**: number of green balls in the sample (placed in the hat)
* **$L$**: number of bootstrap realizations
```python
display(ui2, interactive_plot) # display the interactive plot
```
VBox(children=(Text(value=' Simple Boostrap Demonstration, Michael Pyrcz, Assoc…
Output()
#### Summarizing Bootstrap Uncertainty
* Run more bootstrap realizations and evaluate the uncertainty model
Now instead of looking at each bootstrap result, let's make many and summarize with:
* **box and whisker plot** of the red and green ball frequencies
* **histograms** of the red and green ball frequencies.
```python
# parameters for the synthetic dataset
bins = np.linspace(0,1000,1000)
# interactive calculation of the sample set (control of source parametric distribution and number of samples)
l2 = widgets.Text(value=' Confidence Interval for Proportions, Analytical and Bootstrap Demonstration, Michael Pyrcz, Associate Professor, The University of Texas at Austin',layout=Layout(width='950px', height='30px'))
a2 = widgets.IntSlider(min=0, max = 100, value = 20, step = 1, description = '$n_{red}$',orientation='horizontal',layout=Layout(width='400px', height='20px'),continuous_update=False)
a2.style.handle_color = 'red'
b2 = widgets.IntSlider(min=0, max = 100, value = 30, step = 1, description = '$n_{green}$',orientation='horizontal',layout=Layout(width='400px', height='20px'),continuous_update=False)
b2.style.handle_color = 'green'
c2 = widgets.IntSlider(min=5, max = 1000, value = 1000, step = 1, description = '$L$',orientation='horizontal',layout=Layout(width='400px', height='20px'),continuous_update=False)
c2.style.handle_color = 'gray'
alpha = widgets.FloatSlider(min=0.01, max = 0.40, value = 0.05, step = 0.01, description = r'$\alpha$',orientation='horizontal',layout=Layout(width='400px', height='20px'),continuous_update=False)
alpha.style.handle_color = 'gray'
uib = widgets.HBox([a2,b2,c2,alpha],) # basic widget formatting
uib2 = widgets.VBox([l2,uib],)
def s_make(a, b, c, alpha): # function to take parameters, make sample and plot
dof = a + b - 1
red_freq = make_data(a, b, c)
pred = red_freq/(a+b)
red_prop = (a / (a+b))
red_SE = math.sqrt((red_prop * (1.0 - red_prop)) / (a+b))
green_freq = (a + b) - red_freq
pgreen = green_freq/(a+b)
green_prop = (b / (a+b))
green_SE = math.sqrt((green_prop * (1.0 - green_prop)) / (a+b))
prop_red = red_freq / (a + b)
prop_green = green_freq / (a + b)
labels = ['Red Balls', 'Green Balls']
bins = np.linspace(0,a + b, a + b)
fig = plt.figure(constrained_layout=False)
gs = GridSpec(3, 2, figure=fig)
ax1 = fig.add_subplot(gs[:, 0])
boxplot = ax1.boxplot([pred,pgreen],labels = labels, notch = True, sym = '+',patch_artist=True)
colors = ['red','green']
for patch, color in zip(boxplot['boxes'], colors):
patch.set_facecolor(color)
for patch, color in zip(boxplot['medians'], colors):
patch.set_color('black')
ax1.set_ylim([0,1])
ax1.grid(zorder=0, color='black', axis = 'y', alpha = 0.2)
ax1.set_ylabel('Proportion of Balls'); ax1.set_xlabel('Ball Color');ax1.set_title('Bootstrap Uncertainty - Proportion Distributions')
ax1.grid(True, which='major',axis='y',linewidth = 1.0); ax1.grid(True, which='minor',axis='y',linewidth = 0.2) # add y grids
ax1.tick_params(which='major',length=7); ax1.tick_params(which='minor', length=4)
ax1.xaxis.set_minor_locator(AutoMinorLocator()); ax1.yaxis.set_minor_locator(AutoMinorLocator()) # turn on minor ticks
cumul_prob = np.linspace(0.0,1.0,100)
if a <= 30 or b <= 30:
red_prop_values = t.ppf(cumul_prob, dof)
red_lower = t.ppf(alpha/2, dof); red_upper = t.ppf(1-alpha/2, dof)
else:
red_prop_values = norm.ppf(cumul_prob)
red_lower = norm.ppf(alpha/2); red_upper = norm.ppf(1-alpha/2)
red_prop_values = red_prop_values * red_SE + red_prop
red_lower = red_lower * red_SE + red_prop
red_upper = red_upper * red_SE + red_prop
cumul_prob = np.linspace(0.01,0.99,100)
if a <= 30 or b <= 30:
green_prop_values = t.ppf(cumul_prob, dof)
green_lower = t.ppf(alpha/2, dof); green_upper = t.ppf(1-alpha/2, dof)
else:
green_prop_values = norm.ppf(cumul_prob)
green_lower = norm.ppf(alpha/2); green_upper = norm.ppf(1-alpha/2)
green_prop_values = green_prop_values * green_SE + green_prop
green_lower = green_lower * green_SE + green_prop
green_upper = green_upper * green_SE + green_prop
ax2 = fig.add_subplot(gs[0, 1])
ax2.hist(prop_red,cumulative = True, density = True, alpha=0.7,color="red",edgecolor="black",linewidth=2,bins = np.linspace(0,1,50), label = 'Bootstrap')
ax2.plot([red_lower,red_lower],[0,1],color='black',linewidth=2,linestyle='--',label='Lower/Upper')
ax2.plot([red_upper,red_upper],[0,1],color='black',linewidth=2,linestyle='--')
ax2.plot([red_prop,red_prop],[0,1],color='black',linewidth=3,label='Exp.')
ax2.set_title('Uncertainty in Proportion of Red Balls'); ax2.set_xlabel('Proportion of Red Balls'); ax2.set_ylabel('Cumulative Probability')
ax2.set_xlim([0,1]); ax2.set_ylim([0,1])
ax2.plot(red_prop_values, cumul_prob, color = 'black', linewidth = 2, label = 'Analytical')
ax2.legend()
ax3 = fig.add_subplot(gs[1, 1])
ax3.hist(prop_green,cumulative = True, density = True, alpha=0.7,color="green",edgecolor="black",linewidth=2,bins = np.linspace(0,1,50), label = 'Bootstrap')
ax3.plot([green_lower,green_lower],[0,1],color='black',linewidth=2,linestyle='--',label='Lower/Upper')
ax3.plot([green_upper,green_upper],[0,1],color='black',linewidth=2,linestyle='--')
ax3.plot([green_prop,green_prop],[0,1],color='black',linewidth=3,label='Exp.')
ax3.set_title('Uncertainty in Proportion of Green Balls'); ax3.set_xlabel('Proportion of Green Balls'); ax3.set_ylabel('Cumulative Probability')
ax3.set_xlim([0,1]); ax3.set_ylim([0,1])
ax3.plot(green_prop_values, cumul_prob, color = 'black', linewidth = 2, label = 'Analytical')
ax3.legend()
ax4 = fig.add_subplot(gs[2, 1])
ax4.hist(prop_green,cumulative = False, density = True, alpha=0.7,color="green",edgecolor="black",linewidth=2, bins = np.linspace(0,1,50), label = 'Bootstrap Prop. Green')
ax4.hist(prop_red,cumulative = False, density = True, alpha=0.7,color="red",edgecolor="black",linewidth=2, bins = np.linspace(0,1,50), label = 'Bootstrap Prop. Red')
ax4.set_title('Confidence Interval in Proportion of Red and Green Balls (Alpha = ' + str(alpha) + ')'); ax3.set_xlabel('Proportion of Green Balls')
ax4.set_xlabel('Proportion of Red and Green Balls'); ax4.set_ylabel('Frequency')
ax4.set_xlim([0,1])
prop_values = np.linspace(0.0,1.0,100)
if a <= 30 and b <= 30:
green_density = t.pdf(prop_values,loc = green_prop, df = dof, scale = green_SE)
else:
green_density = norm.pdf(prop_values,loc = green_prop, scale = green_SE)
ax4.plot(prop_values, green_density, color = 'black', linewidth = 5,zorder=99)
ax4.plot(prop_values, green_density, color = 'green', linewidth = 3, label = 'Analytical Prop. Green',zorder=100)
if a <= 30 and b <= 30:
red_density = t.pdf(prop_values,loc = red_prop, df = dof, scale = red_SE)
else:
red_density = norm.pdf(prop_values,loc = red_prop, scale = red_SE)
ax4.plot(prop_values, red_density, color = 'black', linewidth = 5,zorder=99)
ax4.plot(prop_values, red_density, color = 'red', linewidth = 3, label = 'Analytical Prop. Red',zorder=100)
ax4.fill_between(prop_values, 0, green_density, where = prop_values <= green_lower, facecolor='green', interpolate=True, alpha = 0.9,zorder=101)
ax4.fill_between(prop_values, 0, green_density, where = prop_values >= green_upper, facecolor='green', interpolate=True, alpha = 0.9,zorder=101)
ax4.fill_between(prop_values, 0, red_density, where = prop_values <= red_lower, facecolor='darkred', interpolate=True, alpha = 0.9,zorder=101)
ax4.fill_between(prop_values, 0, red_density, where = prop_values >= red_upper, facecolor='darkred', interpolate=True, alpha = 0.9,zorder=101)
ax4.legend()
plt.subplots_adjust(left=0.0, bottom=0.0, right=2.5, top=3.0, wspace=0.2, hspace=0.3)
plt.show()
# connect the function to make the samples and plot to the widgets
interactive_plot = widgets.interactive_output(s_make, {'a': a2, 'b': b2, 'c': c2, 'alpha': alpha})
interactive_plot.clear_output(wait = True) # reduce flickering by delaying plot updating
```
### Simple Bootstrap and Analytical Confidence Interval Demonstration for Sample Proportions
* drawing red and green balls from a hat with replacement to assess uncertainty in the proportion
* run many bootstrap realizations and summarize the results and compare to the analytical sampling distribution for the proportion
* interactive plot demonstration with ipywidget, matplotlib packages
#### Michael Pyrcz, Associate Professor, The University of Texas at Austin
##### [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1) | [GeostatsPy](https://github.com/GeostatsGuy/GeostatsPy)
### The Problem
Let's simulate bootstrap, resampling with replacement from a hat with $n_{red}$ and $n_{green}$ balls
* **$n_{red}$**: number of red balls in the sample (placed in the hat)
* **$n_{green}$**: number of green balls in the sample (placed in the hat)
* **$L$**: number of bootstrap realizations
* **$\alpha$**: alpha level for the confidence interval (significance level)
and then compare the uncertainty in the proportion of balls to the analytical expression.
### Confidence Interval Demonstration for Sample Proportions, Analytical and Bootstrap
#### Michael Pyrcz, Associate Professor, University of Texas at Austin
##### [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1) | [GeostatsPy](https://github.com/GeostatsGuy/GeostatsPy)
```python
display(uib2, interactive_plot) # display the interactive plot
```
VBox(children=(Text(value=' Confidence Interval for Proportions, Analytical and Bootstrap Demonstration, M…
Output()
#### Observations
Some observations:
* the sampling distribution for proportions becomes discrete with too few samples, as only $n + 1$ cases are possible
* enough bootstrap realizations are required for stable statistics
* the analytical sampling distribution for the uncertainty in the sample proportion matches the results from bootstrap
#### Comments
This was a simple demonstration of interactive plots in Jupyter Notebook Python with the ipywidgets and matplotlib packages.
I have many other demonstrations on data analytics and machine learning, e.g. on the basics of working with DataFrames, ndarrays, univariate statistics, plotting data, declustering, data transformations, trend modeling and many other workflows available at https://github.com/GeostatsGuy/PythonNumericalDemos and https://github.com/GeostatsGuy/GeostatsPy.
I hope this was helpful,
*Michael*
#### The Author:
### Michael Pyrcz, Associate Professor, University of Texas at Austin
*Novel Data Analytics, Geostatistics and Machine Learning Subsurface Solutions*
With over 17 years of experience in subsurface consulting, research and development, Michael has returned to academia driven by his passion for teaching and enthusiasm for enhancing engineers' and geoscientists' impact in subsurface resource development.
For more about Michael check out these links:
#### [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1)
#### Want to Work Together?
I hope this content is helpful to those that want to learn more about subsurface modeling, data analytics and machine learning. Students and working professionals are welcome to participate.
* Want to invite me to visit your company for training, mentoring, project review, workflow design and / or consulting? I'd be happy to drop by and work with you!
* Interested in partnering, supporting my graduate student research or my Subsurface Data Analytics and Machine Learning consortium (co-PIs including Profs. Foster, Torres-Verdin and van Oort)? My research combines data analytics, stochastic modeling and machine learning theory with practice to develop novel methods and workflows to add value. We are solving challenging subsurface problems!
* I can be reached at [email protected].
I'm always happy to discuss,
*Michael*
Michael Pyrcz, Ph.D., P.Eng. Associate Professor The Hildebrand Department of Petroleum and Geosystems Engineering, Bureau of Economic Geology, The Jackson School of Geosciences, The University of Texas at Austin
#### More Resources Available at: [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1)
|
/-
Copyright (c) 2020 Fox Thomson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Fox Thomson
-/
import set_theory.game.winner
import tactic.nth_rewrite.default
import tactic.equiv_rw
/-!
# Basic definitions about impartial (pre-)games
We will define an impartial game, one in which left and right can make exactly the same moves.
Our definition differs slightly by saying that the game is always equivalent to its negative,
no matter what moves are played. This allows for games such as poker-nim to be classified as
impartial.
-/
universe u
namespace pgame
local infix ` ≈ ` := equiv
/-- The definition of an impartial game, defined using Conway induction. -/
def impartial_aux : pgame → Prop
| G := G ≈ -G ∧ (∀ i, impartial_aux (G.move_left i)) ∧ (∀ j, impartial_aux (G.move_right j))
using_well_founded { dec_tac := pgame_wf_tac }
lemma impartial_aux_def {G : pgame} : G.impartial_aux ↔ G ≈ -G ∧
(∀ i, impartial_aux (G.move_left i)) ∧ (∀ j, impartial_aux (G.move_right j)) :=
begin
split,
{ intro hi,
unfold1 impartial_aux at hi,
exact hi },
{ intro hi,
unfold1 impartial_aux,
exact hi }
end
/-- A typeclass on impartial games. -/
class impartial (G : pgame) : Prop := (out : impartial_aux G)
lemma impartial_iff_aux {G : pgame} : G.impartial ↔ G.impartial_aux :=
⟨λ h, h.1, λ h, ⟨h⟩⟩
lemma impartial_def {G : pgame} : G.impartial ↔ G ≈ -G ∧
(∀ i, impartial (G.move_left i)) ∧ (∀ j, impartial (G.move_right j)) :=
by simpa only [impartial_iff_aux] using impartial_aux_def
namespace impartial
instance impartial_zero : impartial 0 :=
by { rw impartial_def, dsimp, simp }
lemma neg_equiv_self (G : pgame) [h : G.impartial] : G ≈ -G := (impartial_def.1 h).1
instance move_left_impartial {G : pgame} [h : G.impartial] (i : G.left_moves) :
(G.move_left i).impartial :=
(impartial_def.1 h).2.1 i
instance move_right_impartial {G : pgame} [h : G.impartial] (j : G.right_moves) :
(G.move_right j).impartial :=
(impartial_def.1 h).2.2 j
instance impartial_add : ∀ (G H : pgame) [G.impartial] [H.impartial], (G + H).impartial
| G H :=
begin
introsI hG hH,
rw impartial_def,
split,
{ apply equiv_trans _ (neg_add_relabelling G H).equiv.symm,
exact add_congr (neg_equiv_self _) (neg_equiv_self _) },
split,
all_goals
{ intro i,
equiv_rw pgame.left_moves_add G H at i <|> equiv_rw pgame.right_moves_add G H at i,
cases i },
all_goals
{ simp only [add_move_left_inl, add_move_right_inl, add_move_left_inr, add_move_right_inr],
exact impartial_add _ _ }
end
using_well_founded { dec_tac := pgame_wf_tac }
instance impartial_neg : ∀ (G : pgame) [G.impartial], (-G).impartial
| G :=
begin
introI hG,
rw impartial_def,
split,
{ rw neg_neg,
symmetry,
exact neg_equiv_self G },
split,
all_goals
{ intro i,
equiv_rw G.left_moves_neg at i <|> equiv_rw G.right_moves_neg at i,
simp only [move_left_left_moves_neg_symm, move_right_right_moves_neg_symm],
exact impartial_neg _ }
end
using_well_founded { dec_tac := pgame_wf_tac }
lemma winner_cases (G : pgame) [G.impartial] : G.first_loses ∨ G.first_wins :=
begin
rcases G.winner_cases with hl | hr | hp | hn,
{ cases hl with hpos hnonneg,
rw ←not_lt at hnonneg,
have hneg := lt_of_lt_of_equiv hpos (neg_equiv_self G),
rw [lt_iff_neg_gt, neg_neg, neg_zero] at hneg,
contradiction },
{ cases hr with hnonpos hneg,
rw ←not_lt at hnonpos,
have hpos := lt_of_equiv_of_lt (neg_equiv_self G).symm hneg,
rw [lt_iff_neg_gt, neg_neg, neg_zero] at hpos,
contradiction },
{ left, assumption },
{ right, assumption }
end
lemma not_first_wins (G : pgame) [G.impartial] : ¬G.first_wins ↔ G.first_loses :=
by cases winner_cases G; finish using [not_first_loses_of_first_wins]
lemma not_first_loses (G : pgame) [G.impartial] : ¬G.first_loses ↔ G.first_wins :=
iff.symm $ iff_not_comm.1 $ iff.symm $ not_first_wins G
lemma add_self (G : pgame) [G.impartial] : (G + G).first_loses :=
first_loses_is_zero.2 $ equiv_trans (add_congr (neg_equiv_self G) G.equiv_refl)
add_left_neg_equiv
lemma equiv_iff_sum_first_loses (G H : pgame) [G.impartial] [H.impartial] :
G ≈ H ↔ (G + H).first_loses :=
begin
split,
{ intro heq,
exact first_loses_of_equiv (add_congr (equiv_refl _) heq) (add_self G) },
{ intro hGHp,
split,
{ rw le_iff_sub_nonneg,
exact le_trans hGHp.2
(le_trans add_comm_le $ le_of_le_of_equiv (le_refl _) $ add_congr (equiv_refl _)
(neg_equiv_self G)) },
{ rw le_iff_sub_nonneg,
exact le_trans hGHp.2
(le_of_le_of_equiv (le_refl _) $ add_congr (equiv_refl _) (neg_equiv_self H)) } }
end
lemma le_zero_iff {G : pgame} [G.impartial] : G ≤ 0 ↔ 0 ≤ G :=
by rw [le_zero_iff_zero_le_neg, le_congr (equiv_refl 0) (neg_equiv_self G)]
lemma lt_zero_iff {G : pgame} [G.impartial] : G < 0 ↔ 0 < G :=
by rw [lt_iff_neg_gt, neg_zero, lt_congr (equiv_refl 0) (neg_equiv_self G)]
lemma first_loses_symm (G : pgame) [G.impartial] : G.first_loses ↔ G ≤ 0 :=
⟨and.left, λ h, ⟨h, le_zero_iff.1 h⟩⟩
lemma first_wins_symm (G : pgame) [G.impartial] : G.first_wins ↔ G < 0 :=
⟨and.right, λ h, ⟨lt_zero_iff.1 h, h⟩⟩
lemma first_loses_symm' (G : pgame) [G.impartial] : G.first_loses ↔ 0 ≤ G :=
⟨and.right, λ h, ⟨le_zero_iff.2 h, h⟩⟩
lemma first_wins_symm' (G : pgame) [G.impartial] : G.first_wins ↔ 0 < G :=
⟨and.left, λ h, ⟨h, lt_zero_iff.2 h⟩⟩
lemma no_good_left_moves_iff_first_loses (G : pgame) [G.impartial] :
(∀ (i : G.left_moves), (G.move_left i).first_wins) ↔ G.first_loses :=
begin
split,
{ intro hbad,
rw [first_loses_symm G, le_def_lt],
split,
{ intro i,
specialize hbad i,
exact hbad.2 },
{ intro j,
exact pempty.elim j } },
{ intros hp i,
rw first_wins_symm,
exact (le_def_lt.1 $ (first_loses_symm G).1 hp).1 i }
end
lemma no_good_right_moves_iff_first_loses (G : pgame) [G.impartial] :
(∀ (j : G.right_moves), (G.move_right j).first_wins) ↔ G.first_loses :=
begin
rw [first_loses_of_equiv_iff (neg_equiv_self G), ←no_good_left_moves_iff_first_loses],
refine ⟨λ h i, _, λ h i, _⟩,
{ simpa [first_wins_of_equiv_iff (neg_equiv_self ((-G).move_left i))]
using h (left_moves_neg _ i) },
{ simpa [first_wins_of_equiv_iff (neg_equiv_self (G.move_right i))]
using h ((left_moves_neg _).symm i) }
end
lemma good_left_move_iff_first_wins (G : pgame) [G.impartial] :
(∃ (i : G.left_moves), (G.move_left i).first_loses) ↔ G.first_wins :=
begin
refine ⟨λ ⟨i, hi⟩, (first_wins_symm' G).2 (lt_def_le.2 $ or.inl ⟨i, hi.2⟩), λ hn, _⟩,
rw [first_wins_symm' G, lt_def_le] at hn,
rcases hn with ⟨i, hi⟩ | ⟨j, _⟩,
{ exact ⟨i, (first_loses_symm' _).2 hi⟩ },
{ exact pempty.elim j }
end
lemma good_right_move_iff_first_wins (G : pgame) [G.impartial] :
(∃ j : G.right_moves, (G.move_right j).first_loses) ↔ G.first_wins :=
begin
refine ⟨λ ⟨j, hj⟩, (first_wins_symm G).2 (lt_def_le.2 $ or.inr ⟨j, hj.1⟩), λ hn, _⟩,
rw [first_wins_symm G, lt_def_le] at hn,
rcases hn with ⟨i, _⟩ | ⟨j, hj⟩,
{ exact pempty.elim i },
{ exact ⟨j, (first_loses_symm _).2 hj⟩ }
end
end impartial
end pgame
|
module TyTTP.HTTP.Consumer
import Control.Monad.Trans
import Control.Monad.Either
import Data.Buffer
import Data.List
import Data.List.Quantifiers
import TyTTP
import TyTTP.HTTP
import TyTTP.HTTP.Combinators
public export
interface Accept t where
contentType : (ty : Type) -> { auto p : ty = t } -> List String
public export
data IsAccept : (t : Type) -> Type where
ItIsAccept : Accept t => IsAccept t
public export
ConsumerError : Type
ConsumerError = String
public export
interface Accept t => Consumer a t where
consumeRaw : (ty : Type) -> { auto p : ty = t } -> (ct : String) -> (raw : Buffer) -> Either ConsumerError a
public export
data IsConsumer : (a : Type) -> (t : Type) -> Type where
ItIsConsumer : Consumer a t => IsConsumer a t
consumePayload :
(t : Type)
-> (isConsumer : IsConsumer a t)
-> (ct : String)
-> (raw : Buffer)
-> Either ConsumerError a
consumePayload t ItIsConsumer ct raw =
consumeRaw t ct raw
safeConsume :
Error e
=> MonadTrans m
=> Alternative (m (Promise e IO))
=> HasContentType h1
=> (list: List Type)
-> (areAccepts : All IsAccept list)
-> (areConsumers : All (IsConsumer a) list)
-> (ct : String)
-> (
Step Method u h1 s h2 (Either ConsumerError a) b
-> Promise e IO $ Step Method u' h1' s' h2' a' b'
)
-> Step Method u h1 s h2 (Publisher IO e Buffer) b
-> m (Promise e IO) $
Step Method u' h1' s' h2' (Publisher IO e Buffer) b'
safeConsume [] _ _ _ _ _ = empty
safeConsume (t::ts) (ItIsAccept::as) (c::cs) ct handler step =
if elem ct (contentType t)
then lift $ flip unsafeConsumeBody step $ \s => MkPromise $ \cb => do
let raw = s.request.body
result = handler $ { request.body := consumePayload t c ct raw } s
result.continuation $ MkCallbacks
{ onSucceded = \r => cb.onSucceded $ { request.body := singleton raw } r
, onFailed = \err => cb.onFailed err }
else safeConsume ts as cs ct handler step
export
consumes :
Error e
=> MonadTrans m
=> Alternative (m (Promise e IO))
=> HasContentType h1
=> (list: List Type)
-> {auto isNonEmpty : NonEmpty list}
-> {auto areAccepts : All IsAccept list}
-> {auto areConsumers : All (IsConsumer a) list}
-> (
Step Method u h1 s h2 (Either ConsumerError a) b
-> Promise e IO $ Step Method u' h1' s' h2' a' b'
)
-> Step Method u h1 s h2 (Publisher IO e Buffer) b
-> m (Promise e IO) $
Step Method u' h1' s' h2' (Publisher IO e Buffer) b'
consumes list {isNonEmpty} {areAccepts} {areConsumers} handler step = do
let Just ct = getContentType step.request.headers
| _ => empty
safeConsume list areAccepts areConsumers ct handler step
export
consumes' :
Error e
=> MonadTrans m
=> Alternative (m (Promise e IO))
=> HasContentType h1
=> (list: List Type)
-> {auto isNonEmpty : NonEmpty list}
-> {auto areAccepts : All IsAccept list}
-> {auto areConsumers : All (IsConsumer a) list}
-> (
Step Method u h1 s h2 ConsumerError b
-> Promise e IO $ Step Method u' h1' s' h2' a' b'
)
-> (
Step Method u h1 s h2 a b
-> Promise e IO $ Step Method u' h1' s' h2' a'' b'
)
-> Step Method u h1 s h2 (Publisher IO e Buffer) b
-> m (Promise e IO) $
Step Method u' h1' s' h2' (Publisher IO e Buffer) b'
consumes' list {isNonEmpty} {areAccepts} {areConsumers} errHandler handler step =
let handler' :
Step Method u h1 s h2 (Either ConsumerError a) b
-> Promise e IO $ Step Method u' h1' s' h2' () b'
handler' s =
case s.request.body of
Right r => do
result <- handler $ { request.body := r } s
pure $ { request.body := () } result
Left l => do
result <- errHandler $ { request.body := l } s
pure $ { request.body := () } result
in consumes list handler' step
|
State Before: m : Type u_1 → Type u_2
α : Type u_1
p : α → Prop
inst✝¹ : Applicative m
inst✝ : LawfulApplicative m
x : m α
h : ∀ (a : α), p a
⊢ Subtype.val <$> (fun a => { val := a, property := (_ : p a) }) <$> x = x State After: no goals Tactic: simp [← comp_map, Function.comp] |
{-# OPTIONS --rewriting #-}
module Properties where
import Properties.Contradiction
import Properties.Dec
import Properties.Equality
import Properties.Functions
import Properties.Remember
import Properties.Step
import Properties.StrictMode
import Properties.Subtyping
import Properties.TypeCheck
|
function this = set_from_affine_geometry(this, affineTransformation, nVoxels, TR_s)
% Sets dimInfo from an affine transformation, assuming 4D nifti data
%
% Y = MrDimInfo()
% Y.set_from_affine_geometry(affineTransformation, nVoxels, TR_s)
%
% This is a method of class MrDimInfo.
%
% IN
%
% OUT
%
% EXAMPLE
% set_from_affine_geometry
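%   % hypothetical call: assumes a 64x64x36 volume with 100 time points,
%   % a TR of 2.5 s, and an affineTransformation object providing a
%   % .scaling property (as used in the implementation below)
%   dimInfo = MrDimInfo();
%   dimInfo.set_from_affine_geometry(affineTransformation, [64 64 36 100], 2.5);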
%
% See also MrDimInfo
% Author: Lars Kasper
% Created: 2017-11-07
% Copyright (C) 2017 Institute for Biomedical Engineering
% University of Zurich and ETH Zurich
%
% This file is part of the TAPAS UniQC Toolbox, which is released
% under the terms of the GNU General Public License (GPL), version 3.
% You can redistribute it and/or modify it under the terms of the GPL
% (either version 3 or, at your option, any later version).
% For further details, see the file COPYING or
% <http://www.gnu.org/licenses/>.
if nargin < 3
nVoxels = [1 1 1 1];
end
if nargin < 4
TR_s = 1;
end
% define the geometric dimension labels/units; update existing dims below or add them, if missing
dimLabelsGeom = {'x','y','z', 't'};
units = {'mm', 'mm', 'mm', 's'};
iDimGeom = 1:4;
% update existing geom dimensions, add new ones for
% non-existing
iValidDimLabels = this.get_dim_index(dimLabelsGeom);
iDimGeomExisting = find(iValidDimLabels);
iDimGeomAdd = setdiff(iDimGeom, iDimGeomExisting);
% need nifti to reference first sampling point as offcenter
resolutions = [affineTransformation.scaling TR_s];
% voxel position by voxel center, time starts at 0
firstSamplingPoint = [affineTransformation.scaling 0]/2;
% if dimension labels exist, just update values
this.set_dims(dimLabelsGeom(iDimGeomExisting), ...
'resolutions', resolutions(iDimGeomExisting), ...
'nSamples', nVoxels(iDimGeomExisting), ...
'firstSamplingPoint', firstSamplingPoint(iDimGeomExisting), ...
'units', units(iDimGeomExisting));
% if they do not exist, create dims
this.add_dims(dimLabelsGeom(iDimGeomAdd), ...
'resolutions', resolutions(iDimGeomAdd), ...
'nSamples', nVoxels(iDimGeomAdd), ...
'firstSamplingPoint', firstSamplingPoint(iDimGeomAdd), ...
'units', units(iDimGeomAdd)); |
[GOAL]
α : Type u_1
s✝ t s : Set α
⊢ encard univ = encard s
[PROOFSTEP]
rw [encard, encard, PartENat.card_congr (Equiv.Set.univ ↑s)]
[GOAL]
α✝ : Type ?u.625
s t : Set α✝
α : Type u_1
⊢ encard univ = ↑PartENat.withTopEquiv (PartENat.card α)
[PROOFSTEP]
rw [encard, PartENat.card_congr (Equiv.Set.univ α)]
[GOAL]
α : Type u_1
s t : Set α
h : Set.Finite s
⊢ encard s = ↑(Finset.card (Finite.toFinset h))
[PROOFSTEP]
have := h.fintype
[GOAL]
α : Type u_1
s t : Set α
h : Set.Finite s
this : Fintype ↑s
⊢ encard s = ↑(Finset.card (Finite.toFinset h))
[PROOFSTEP]
rw [encard, PartENat.card_eq_coe_fintype_card, PartENat.withTopEquiv_natCast, toFinite_toFinset, toFinset_card]
[GOAL]
α : Type u_1
s✝ t s : Set α
inst✝ : Fintype ↑s
⊢ encard s = ↑(Finset.card (toFinset s))
[PROOFSTEP]
have h := toFinite s
[GOAL]
α : Type u_1
s✝ t s : Set α
inst✝ : Fintype ↑s
h : Set.Finite s
⊢ encard s = ↑(Finset.card (toFinset s))
[PROOFSTEP]
rw [h.encard_eq_coe_toFinset_card, toFinite_toFinset, toFinset_card]
[GOAL]
α : Type u_1
s✝ t : Set α
s : Finset α
⊢ encard ↑s = ↑(Finset.card s)
[PROOFSTEP]
rw [Finite.encard_eq_coe_toFinset_card (Finset.finite_toSet s)]
[GOAL]
α : Type u_1
s✝ t : Set α
s : Finset α
⊢ ↑(Finset.card (Finite.toFinset (_ : Set.Finite ↑s))) = ↑(Finset.card s)
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
s✝ t s : Set α
h : Set.Infinite s
⊢ encard s = ⊤
[PROOFSTEP]
have := h.to_subtype
[GOAL]
α : Type u_1
s✝ t s : Set α
h : Set.Infinite s
this : Infinite ↑s
⊢ encard s = ⊤
[PROOFSTEP]
rw [encard, ← PartENat.withTopEquiv.symm.injective.eq_iff, Equiv.symm_apply_apply, PartENat.withTopEquiv_symm_top,
PartENat.card_eq_top_of_infinite]
[GOAL]
α : Type u_1
s t : Set α
⊢ encard s = 0 ↔ s = ∅
[PROOFSTEP]
rw [encard, ← PartENat.withTopEquiv.symm.injective.eq_iff, Equiv.symm_apply_apply, PartENat.withTopEquiv_symm_zero,
PartENat.card_eq_zero_iff_empty, isEmpty_subtype, eq_empty_iff_forall_not_mem]
[GOAL]
α : Type u_1
s t : Set α
⊢ encard ∅ = 0
[PROOFSTEP]
rw [encard_eq_zero]
[GOAL]
α : Type u_1
s t : Set α
h : encard s ≠ 0
⊢ Set.Nonempty s
[PROOFSTEP]
rwa [nonempty_iff_ne_empty, Ne.def, ← encard_eq_zero]
[GOAL]
α : Type u_1
s t : Set α
⊢ encard s ≠ 0 ↔ Set.Nonempty s
[PROOFSTEP]
rw [ne_eq, encard_eq_zero, nonempty_iff_ne_empty]
[GOAL]
α : Type u_1
s t : Set α
⊢ 0 < encard s ↔ Set.Nonempty s
[PROOFSTEP]
rw [pos_iff_ne_zero, encard_ne_zero]
[GOAL]
α : Type u_1
s t : Set α
e : α
⊢ encard {e} = 1
[PROOFSTEP]
rw [encard, ← PartENat.withTopEquiv.symm.injective.eq_iff, Equiv.symm_apply_apply, PartENat.card_eq_coe_fintype_card,
Fintype.card_ofSubsingleton, Nat.cast_one]
[GOAL]
α : Type u_1
s t : Set α
e : α
⊢ 1 = ↑PartENat.withTopEquiv.symm 1
[PROOFSTEP]
rfl
[GOAL]
α : Type u_1
s t : Set α
h : Disjoint s t
⊢ encard (s ∪ t) = encard s + encard t
[PROOFSTEP]
classical
have e := (Equiv.Set.union (by rwa [subset_empty_iff, ← disjoint_iff_inter_eq_empty])).symm
simp [encard, ← PartENat.card_congr e, PartENat.card_sum, PartENat.withTopEquiv]
[GOAL]
α : Type u_1
s t : Set α
h : Disjoint s t
⊢ encard (s ∪ t) = encard s + encard t
[PROOFSTEP]
have e := (Equiv.Set.union (by rwa [subset_empty_iff, ← disjoint_iff_inter_eq_empty])).symm
[GOAL]
α : Type u_1
s t : Set α
h : Disjoint s t
⊢ ?m.6436 ∩ ?m.6437 ⊆ ∅
[PROOFSTEP]
rwa [subset_empty_iff, ← disjoint_iff_inter_eq_empty]
[GOAL]
α : Type u_1
s t : Set α
h : Disjoint s t
e : ↑s ⊕ ↑t ≃ ↑(s ∪ t)
⊢ encard (s ∪ t) = encard s + encard t
[PROOFSTEP]
simp [encard, ← PartENat.card_congr e, PartENat.card_sum, PartENat.withTopEquiv]
[GOAL]
α : Type u_1
s t : Set α
a : α
has : ¬a ∈ s
⊢ encard (insert a s) = encard s + 1
[PROOFSTEP]
rw [← union_singleton, encard_union_eq (by simpa), encard_singleton]
[GOAL]
α : Type u_1
s t : Set α
a : α
has : ¬a ∈ s
⊢ Disjoint s {a}
[PROOFSTEP]
simpa
[GOAL]
α : Type u_1
s t : Set α
h : Set.Finite s
⊢ encard s < ⊤
[PROOFSTEP]
refine' h.induction_on (by simpa using WithTop.zero_lt_top) _
[GOAL]
α : Type u_1
s t : Set α
h : Set.Finite s
⊢ encard ∅ < ⊤
[PROOFSTEP]
simpa using WithTop.zero_lt_top
[GOAL]
α : Type u_1
s t : Set α
h : Set.Finite s
⊢ ∀ {a : α} {s : Set α}, ¬a ∈ s → Set.Finite s → encard s < ⊤ → encard (insert a s) < ⊤
[PROOFSTEP]
rintro a t hat _ ht'
[GOAL]
α : Type u_1
s t✝ : Set α
h : Set.Finite s
a : α
t : Set α
hat : ¬a ∈ t
a✝ : Set.Finite t
ht' : encard t < ⊤
⊢ encard (insert a t) < ⊤
[PROOFSTEP]
rw [encard_insert_of_not_mem hat]
[GOAL]
α : Type u_1
s t✝ : Set α
h : Set.Finite s
a : α
t : Set α
hat : ¬a ∈ t
a✝ : Set.Finite t
ht' : encard t < ⊤
⊢ encard t + 1 < ⊤
[PROOFSTEP]
exact lt_tsub_iff_right.1 ht'
[GOAL]
α : Type u_1
s t : Set α
⊢ encard s = ⊤ ↔ Set.Infinite s
[PROOFSTEP]
rw [← not_iff_not, ← Ne.def, ← lt_top_iff_ne_top, encard_lt_top_iff, not_infinite]
[GOAL]
α : Type u_1
s t : Set α
⊢ encard s ≠ ⊤ ↔ Set.Finite s
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
s t : Set α
k : ℕ
h : encard s ≤ ↑k
⊢ Set.Finite s
[PROOFSTEP]
rw [← encard_lt_top_iff]
[GOAL]
α : Type u_1
s t : Set α
k : ℕ
h : encard s ≤ ↑k
⊢ encard s < ⊤
[PROOFSTEP]
exact h.trans_lt (WithTop.coe_lt_top _)
[GOAL]
α : Type u_1
s t : Set α
k : ℕ
h : encard s ≤ ↑k
⊢ ∃ n₀, encard s = ↑n₀ ∧ n₀ ≤ k
[PROOFSTEP]
rwa [ENat.le_coe_iff] at h
[GOAL]
α : Type u_1
s t : Set α
k : ℕ
x✝ : Set.Finite s ∧ ∃ n₀, encard s = ↑n₀ ∧ n₀ ≤ k
left✝ : Set.Finite s
n₀ : ℕ
hs : encard s = ↑n₀
hle : n₀ ≤ k
⊢ encard s ≤ ↑k
[PROOFSTEP]
rwa [hs, Nat.cast_le]
[GOAL]
α : Type u_1
s t : Set α
h : s ⊆ t
⊢ encard s ≤ encard t
[PROOFSTEP]
rw [← union_diff_cancel h, encard_union_eq disjoint_sdiff_right]
[GOAL]
α : Type u_1
s t : Set α
h : s ⊆ t
⊢ encard s ≤ encard s + encard (t \ s)
[PROOFSTEP]
exact le_self_add
[GOAL]
α : Type u_1
s t : Set α
h : s ⊆ t
⊢ encard (t \ s) + encard s = encard t
[PROOFSTEP]
rw [← encard_union_eq disjoint_sdiff_left, diff_union_self, union_eq_self_of_subset_right h]
[GOAL]
α : Type u_1
s t : Set α
⊢ 1 ≤ encard s ↔ Set.Nonempty s
[PROOFSTEP]
rw [nonempty_iff_ne_empty, Ne.def, ← encard_eq_zero, ENat.one_le_iff_ne_zero]
[GOAL]
α : Type u_1
s✝ t✝ s t : Set α
⊢ encard (s \ t) + encard (s ∩ t) = encard s
[PROOFSTEP]
rw [← encard_union_eq (disjoint_of_subset_right (inter_subset_right _ _) disjoint_sdiff_left), diff_union_inter]
[GOAL]
α : Type u_1
s✝ t✝ s t : Set α
⊢ encard (s ∪ t) + encard (s ∩ t) = encard s + encard t
[PROOFSTEP]
rw [← diff_union_self, encard_union_eq disjoint_sdiff_left, add_right_comm, encard_diff_add_encard_inter]
[GOAL]
α : Type u_1
s t : Set α
h : Set.Finite (s ∩ t)
⊢ encard s = encard t ↔ encard (s \ t) = encard (t \ s)
[PROOFSTEP]
rw [← encard_diff_add_encard_inter s t, ← encard_diff_add_encard_inter t s, inter_comm t s,
WithTop.add_right_cancel_iff h.encard_lt_top.ne]
[GOAL]
α : Type u_1
s t : Set α
h : Set.Finite (s ∩ t)
⊢ encard s ≤ encard t ↔ encard (s \ t) ≤ encard (t \ s)
[PROOFSTEP]
rw [← encard_diff_add_encard_inter s t, ← encard_diff_add_encard_inter t s, inter_comm t s,
WithTop.add_le_add_iff_right h.encard_lt_top.ne]
[GOAL]
α : Type u_1
s t : Set α
h : Set.Finite (s ∩ t)
⊢ encard s < encard t ↔ encard (s \ t) < encard (t \ s)
[PROOFSTEP]
rw [← encard_diff_add_encard_inter s t, ← encard_diff_add_encard_inter t s, inter_comm t s,
WithTop.add_lt_add_iff_right h.encard_lt_top.ne]
[GOAL]
α : Type u_1
s✝ t✝ s t : Set α
⊢ encard (s ∪ t) ≤ encard s + encard t
[PROOFSTEP]
rw [← encard_union_add_encard_inter]
[GOAL]
α : Type u_1
s✝ t✝ s t : Set α
⊢ encard (s ∪ t) ≤ encard (s ∪ t) + encard (s ∩ t)
[PROOFSTEP]
exact le_self_add
[GOAL]
α : Type u_1
s t : Set α
h : encard s = encard t
⊢ Set.Finite s ↔ Set.Finite t
[PROOFSTEP]
rw [← encard_lt_top_iff, ← encard_lt_top_iff, h]
[GOAL]
α : Type u_1
s t : Set α
h : encard s = encard t
⊢ Set.Infinite s ↔ Set.Infinite t
[PROOFSTEP]
rw [← encard_eq_top_iff, h, encard_eq_top_iff]
[GOAL]
α : Type u_1
s t : Set α
ht : Set.Finite t
hst : s ⊆ t
hts : encard t ≤ encard s
⊢ s = t
[PROOFSTEP]
rw [← zero_add (a := encard s), ← encard_diff_add_encard_of_subset hst] at hts
[GOAL]
α : Type u_1
s t : Set α
ht : Set.Finite t
hst : s ⊆ t
hts : encard (t \ s) + encard s ≤ 0 + encard s
⊢ s = t
[PROOFSTEP]
have hdiff := WithTop.le_of_add_le_add_right (ht.subset hst).encard_lt_top.ne hts
[GOAL]
α : Type u_1
s t : Set α
ht : Set.Finite t
hst : s ⊆ t
hts : encard (t \ s) + encard s ≤ 0 + encard s
hdiff : encard (t \ s) ≤ 0
⊢ s = t
[PROOFSTEP]
rw [nonpos_iff_eq_zero, encard_eq_zero, diff_eq_empty] at hdiff
[GOAL]
α : Type u_1
s t : Set α
ht : Set.Finite t
hst : s ⊆ t
hts : encard (t \ s) + encard s ≤ 0 + encard s
hdiff : t ⊆ s
⊢ s = t
[PROOFSTEP]
exact hst.antisymm hdiff
[GOAL]
α : Type u_1
s✝ t✝ s t : Set α
⊢ encard (s \ t) + encard t = encard (s ∪ t)
[PROOFSTEP]
rw [← encard_union_eq disjoint_sdiff_left, diff_union_self]
[GOAL]
α : Type u_1
s✝ t✝ s t : Set α
⊢ encard s - encard t ≤ encard (s \ t)
[PROOFSTEP]
rw [tsub_le_iff_left, add_comm]
[GOAL]
α : Type u_1
s✝ t✝ s t : Set α
⊢ encard s ≤ encard (s \ t) + encard t
[PROOFSTEP]
apply encard_le_encard_diff_add_encard
[GOAL]
α : Type u_1
s✝ t s : Set α
⊢ encard s + encard sᶜ = encard univ
[PROOFSTEP]
rw [← encard_union_eq disjoint_compl_right, union_compl_self]
[GOAL]
α : Type u_1
s✝ t s : Set α
x : α
⊢ encard (insert x s) ≤ encard s + 1
[PROOFSTEP]
rw [← union_singleton, ← encard_singleton x]
[GOAL]
α : Type u_1
s✝ t s : Set α
x : α
⊢ encard (s ∪ {x}) ≤ encard s + encard {x}
[PROOFSTEP]
apply encard_union_le
[GOAL]
α : Type u_1
s✝ t s : Set α
x : α
⊢ encard ({x} ∩ s) ≤ 1
[PROOFSTEP]
rw [← encard_singleton x]
[GOAL]
α : Type u_1
s✝ t s : Set α
x : α
⊢ encard ({x} ∩ s) ≤ encard {x}
[PROOFSTEP]
exact encard_le_of_subset (inter_subset_left _ _)
[GOAL]
α : Type u_1
s t : Set α
a : α
h : a ∈ s
⊢ encard (s \ {a}) + 1 = encard s
[PROOFSTEP]
rw [← encard_insert_of_not_mem (fun h ↦ h.2 rfl), insert_diff_singleton, insert_eq_of_mem h]
[GOAL]
α : Type u_1
s t : Set α
a : α
h : a ∈ s
⊢ encard (s \ {a}) = encard s - 1
[PROOFSTEP]
rw [← encard_diff_singleton_add_one h, ← WithTop.add_right_cancel_iff WithTop.one_ne_top,
tsub_add_cancel_of_le (self_le_add_left _ _)]
[GOAL]
α : Type u_1
s✝ t s : Set α
x : α
⊢ encard s - 1 ≤ encard (s \ {x})
[PROOFSTEP]
rw [← encard_singleton x]
[GOAL]
α : Type u_1
s✝ t s : Set α
x : α
⊢ encard s - encard {x} ≤ encard (s \ {x})
[PROOFSTEP]
apply tsub_encard_le_encard_diff
[GOAL]
α : Type u_1
s t : Set α
a b : α
ha : ¬a ∈ s
hb : b ∈ s
⊢ encard (insert a (s \ {b})) = encard s
[PROOFSTEP]
rw [encard_insert_of_not_mem, encard_diff_singleton_add_one hb]
[GOAL]
α : Type u_1
s t : Set α
a b : α
ha : ¬a ∈ s
hb : b ∈ s
⊢ ¬a ∈ s \ {b}
[PROOFSTEP]
simp_all only [not_true, mem_diff, mem_singleton_iff, false_and, not_false_eq_true]
[GOAL]
α : Type u_1
s t : Set α
a b : α
ha : ¬a ∈ s
hb : b ∈ s
⊢ encard (insert a s \ {b}) = encard s
[PROOFSTEP]
rw [← insert_diff_singleton_comm (by rintro rfl; exact ha hb), encard_exchange ha hb]
[GOAL]
α : Type u_1
s t : Set α
a b : α
ha : ¬a ∈ s
hb : b ∈ s
⊢ a ≠ b
[PROOFSTEP]
rintro rfl
[GOAL]
α : Type u_1
s t : Set α
a : α
ha : ¬a ∈ s
hb : a ∈ s
⊢ False
[PROOFSTEP]
exact ha hb
[GOAL]
α : Type u_1
s t : Set α
k : ℕ∞
⊢ encard s = k + 1 ↔ ∃ a t, ¬a ∈ t ∧ insert a t = s ∧ encard t = k
[PROOFSTEP]
refine' ⟨fun h ↦ _, _⟩
[GOAL]
case refine'_1
α : Type u_1
s t : Set α
k : ℕ∞
h : encard s = k + 1
⊢ ∃ a t, ¬a ∈ t ∧ insert a t = s ∧ encard t = k
[PROOFSTEP]
obtain ⟨a, ha⟩ := nonempty_of_encard_ne_zero (s := s) (by simp [h])
[GOAL]
α : Type u_1
s t : Set α
k : ℕ∞
h : encard s = k + 1
⊢ encard s ≠ 0
[PROOFSTEP]
simp [h]
[GOAL]
case refine'_1.intro
α : Type u_1
s t : Set α
k : ℕ∞
h : encard s = k + 1
a : α
ha : a ∈ s
⊢ ∃ a t, ¬a ∈ t ∧ insert a t = s ∧ encard t = k
[PROOFSTEP]
refine' ⟨a, s \ { a }, fun h ↦ h.2 rfl, by rwa [insert_diff_singleton, insert_eq_of_mem], _⟩
[GOAL]
α : Type u_1
s t : Set α
k : ℕ∞
h : encard s = k + 1
a : α
ha : a ∈ s
⊢ insert a (s \ {a}) = s
[PROOFSTEP]
rwa [insert_diff_singleton, insert_eq_of_mem]
[GOAL]
case refine'_1.intro
α : Type u_1
s t : Set α
k : ℕ∞
h : encard s = k + 1
a : α
ha : a ∈ s
⊢ encard (s \ {a}) = k
[PROOFSTEP]
rw [← WithTop.add_right_cancel_iff WithTop.one_ne_top, ← h, encard_diff_singleton_add_one ha]
[GOAL]
case refine'_2
α : Type u_1
s t : Set α
k : ℕ∞
⊢ (∃ a t, ¬a ∈ t ∧ insert a t = s ∧ encard t = k) → encard s = k + 1
[PROOFSTEP]
rintro ⟨a, t, h, rfl, rfl⟩
[GOAL]
case refine'_2.intro.intro.intro.intro
α : Type u_1
t✝ : Set α
a : α
t : Set α
h : ¬a ∈ t
⊢ encard (insert a t) = encard t + 1
[PROOFSTEP]
rw [encard_insert_of_not_mem h]
[GOAL]
α : Type u_1
s✝ t s : Set α
⊢ s = ∅ ∨ encard s = ⊤ ∨ ∃ a, a ∈ s ∧ encard (s \ {a}) < encard s
[PROOFSTEP]
refine'
s.eq_empty_or_nonempty.elim Or.inl
(Or.inr ∘ fun ⟨a, ha⟩ ↦ (s.finite_or_infinite.elim (fun hfin ↦ Or.inr ⟨a, ha, _⟩) (Or.inl ∘ Infinite.encard_eq)))
[GOAL]
α : Type u_1
s✝ t s : Set α
x✝ : Set.Nonempty s
a : α
ha : a ∈ s
hfin : Set.Finite s
⊢ encard (s \ {a}) < encard s
[PROOFSTEP]
rw [← encard_diff_singleton_add_one ha]
[GOAL]
α : Type u_1
s✝ t s : Set α
x✝ : Set.Nonempty s
a : α
ha : a ∈ s
hfin : Set.Finite s
⊢ encard (s \ {a}) < encard (s \ {a}) + 1
[PROOFSTEP]
nth_rw 1 [← add_zero (encard _)]
[GOAL]
α : Type u_1
s✝ t s : Set α
x✝ : Set.Nonempty s
a : α
ha : a ∈ s
hfin : Set.Finite s
⊢ encard (s \ {a}) + 0 < encard (s \ {a}) + 1
[PROOFSTEP]
exact WithTop.add_lt_add_left (hfin.diff _).encard_lt_top.ne zero_lt_one
[GOAL]
α : Type u_1
s t : Set α
x y : α
hne : x ≠ y
⊢ encard {x, y} = 2
[PROOFSTEP]
rw [encard_insert_of_not_mem (by simpa), ← one_add_one_eq_two, WithTop.add_right_cancel_iff WithTop.one_ne_top,
encard_singleton]
[GOAL]
α : Type u_1
s t : Set α
x y : α
hne : x ≠ y
⊢ ¬x ∈ {y}
[PROOFSTEP]
simpa
[GOAL]
α : Type u_1
s t : Set α
⊢ encard s = 1 ↔ ∃ x, s = {x}
[PROOFSTEP]
refine' ⟨fun h ↦ _, fun ⟨x, hx⟩ ↦ by rw [hx, encard_singleton]⟩
[GOAL]
α : Type u_1
s t : Set α
x✝ : ∃ x, s = {x}
x : α
hx : s = {x}
⊢ encard s = 1
[PROOFSTEP]
rw [hx, encard_singleton]
[GOAL]
α : Type u_1
s t : Set α
h : encard s = 1
⊢ ∃ x, s = {x}
[PROOFSTEP]
obtain ⟨x, hx⟩ := nonempty_of_encard_ne_zero (s := s) (by rw [h]; simp)
[GOAL]
α : Type u_1
s t : Set α
h : encard s = 1
⊢ encard s ≠ 0
[PROOFSTEP]
rw [h]
[GOAL]
α : Type u_1
s t : Set α
h : encard s = 1
⊢ 1 ≠ 0
[PROOFSTEP]
simp
[GOAL]
case intro
α : Type u_1
s t : Set α
h : encard s = 1
x : α
hx : x ∈ s
⊢ ∃ x, s = {x}
[PROOFSTEP]
exact ⟨x, ((finite_singleton x).eq_of_subset_of_encard_le' (by simpa) (by simp [h])).symm⟩
[GOAL]
α : Type u_1
s t : Set α
h : encard s = 1
x : α
hx : x ∈ s
⊢ {x} ⊆ s
[PROOFSTEP]
simpa
[GOAL]
α : Type u_1
s t : Set α
h : encard s = 1
x : α
hx : x ∈ s
⊢ encard s ≤ encard {x}
[PROOFSTEP]
simp [h]
[GOAL]
α : Type u_1
s t : Set α
⊢ encard s ≤ 1 ↔ s = ∅ ∨ ∃ x, s = {x}
[PROOFSTEP]
rw [le_iff_lt_or_eq, lt_iff_not_le, ENat.one_le_iff_ne_zero, not_not, encard_eq_zero, encard_eq_one]
[GOAL]
α : Type u_1
s t : Set α
⊢ encard s ≤ 1 ↔ ∀ (a b : α), a ∈ s → b ∈ s → a = b
[PROOFSTEP]
rw [encard_le_one_iff_eq, or_iff_not_imp_left, ← Ne.def, ← nonempty_iff_ne_empty]
[GOAL]
α : Type u_1
s t : Set α
⊢ (Set.Nonempty s → ∃ x, s = {x}) ↔ ∀ (a b : α), a ∈ s → b ∈ s → a = b
[PROOFSTEP]
refine' ⟨fun h a b has hbs ↦ _, fun h ⟨x, hx⟩ ↦ ⟨x, ((singleton_subset_iff.2 hx).antisymm' (fun y hy ↦ h _ _ hy hx))⟩⟩
[GOAL]
α : Type u_1
s t : Set α
h : Set.Nonempty s → ∃ x, s = {x}
a b : α
has : a ∈ s
hbs : b ∈ s
⊢ a = b
[PROOFSTEP]
obtain ⟨x, rfl⟩ := h ⟨_, has⟩
[GOAL]
case intro
α : Type u_1
t : Set α
a b x : α
h : Set.Nonempty {x} → ∃ x_1, {x} = {x_1}
has : a ∈ {x}
hbs : b ∈ {x}
⊢ a = b
[PROOFSTEP]
rw [(has : a = x), (hbs : b = x)]
[GOAL]
α : Type u_1
s t : Set α
⊢ 1 < encard s ↔ ∃ a b, a ∈ s ∧ b ∈ s ∧ a ≠ b
[PROOFSTEP]
rw [← not_iff_not, not_exists, not_lt, encard_le_one_iff]
[GOAL]
α : Type u_1
s t : Set α
⊢ (∀ (a b : α), a ∈ s → b ∈ s → a = b) ↔ ∀ (x : α), ¬∃ b, x ∈ s ∧ b ∈ s ∧ x ≠ b
[PROOFSTEP]
aesop
[GOAL]
α : Type u_1
s t : Set α
h : 1 < encard s
a : α
⊢ ∃ b, b ∈ s ∧ b ≠ a
[PROOFSTEP]
by_contra' h'
[GOAL]
α : Type u_1
s t : Set α
h : 1 < encard s
a : α
h' : ∀ (b : α), b ∈ s → b = a
⊢ False
[PROOFSTEP]
obtain ⟨b, b', hb, hb', hne⟩ := one_lt_encard_iff.1 h
[GOAL]
case intro.intro.intro.intro
α : Type u_1
s t : Set α
h : 1 < encard s
a : α
h' : ∀ (b : α), b ∈ s → b = a
b b' : α
hb : b ∈ s
hb' : b' ∈ s
hne : b ≠ b'
⊢ False
[PROOFSTEP]
apply hne
[GOAL]
case intro.intro.intro.intro
α : Type u_1
s t : Set α
h : 1 < encard s
a : α
h' : ∀ (b : α), b ∈ s → b = a
b b' : α
hb : b ∈ s
hb' : b' ∈ s
hne : b ≠ b'
⊢ b = b'
[PROOFSTEP]
rw [h' b hb, h' b' hb']
[GOAL]
α : Type u_1
s t : Set α
⊢ encard s = 2 ↔ ∃ x y, x ≠ y ∧ s = {x, y}
[PROOFSTEP]
refine' ⟨fun h ↦ _, fun ⟨x, y, hne, hs⟩ ↦ by rw [hs, encard_pair hne]⟩
[GOAL]
α : Type u_1
s t : Set α
x✝ : ∃ x y, x ≠ y ∧ s = {x, y}
x y : α
hne : x ≠ y
hs : s = {x, y}
⊢ encard s = 2
[PROOFSTEP]
rw [hs, encard_pair hne]
[GOAL]
α : Type u_1
s t : Set α
h : encard s = 2
⊢ ∃ x y, x ≠ y ∧ s = {x, y}
[PROOFSTEP]
obtain ⟨x, hx⟩ := nonempty_of_encard_ne_zero (s := s) (by rw [h]; simp)
[GOAL]
α : Type u_1
s t : Set α
h : encard s = 2
⊢ encard s ≠ 0
[PROOFSTEP]
rw [h]
[GOAL]
α : Type u_1
s t : Set α
h : encard s = 2
⊢ 2 ≠ 0
[PROOFSTEP]
simp
[GOAL]
case intro
α : Type u_1
s t : Set α
h : encard s = 2
x : α
hx : x ∈ s
⊢ ∃ x y, x ≠ y ∧ s = {x, y}
[PROOFSTEP]
rw [← insert_eq_of_mem hx, ← insert_diff_singleton, encard_insert_of_not_mem (fun h ↦ h.2 rfl), ← one_add_one_eq_two,
WithTop.add_right_cancel_iff (WithTop.one_ne_top), encard_eq_one] at h
[GOAL]
case intro
α : Type u_1
s t : Set α
x : α
h : ∃ x_1, s \ {x} = {x_1}
hx : x ∈ s
⊢ ∃ x y, x ≠ y ∧ s = {x, y}
[PROOFSTEP]
obtain ⟨y, h⟩ := h
[GOAL]
case intro.intro
α : Type u_1
s t : Set α
x : α
hx : x ∈ s
y : α
h : s \ {x} = {y}
⊢ ∃ x y, x ≠ y ∧ s = {x, y}
[PROOFSTEP]
refine' ⟨x, y, by rintro rfl; exact (h.symm.subset rfl).2 rfl, _⟩
[GOAL]
α : Type u_1
s t : Set α
x : α
hx : x ∈ s
y : α
h : s \ {x} = {y}
⊢ x ≠ y
[PROOFSTEP]
rintro rfl
[GOAL]
α : Type u_1
s t : Set α
x : α
hx : x ∈ s
h : s \ {x} = {x}
⊢ False
[PROOFSTEP]
exact (h.symm.subset rfl).2 rfl
[GOAL]
case intro.intro
α : Type u_1
s t : Set α
x : α
hx : x ∈ s
y : α
h : s \ {x} = {y}
⊢ s = {x, y}
[PROOFSTEP]
rw [← h, insert_diff_singleton, insert_eq_of_mem hx]
[GOAL]
α✝ : Type ?u.68373
s✝ t : Set α✝
α : Type u_1
s : Set α
⊢ encard s = 3 ↔ ∃ x y z, x ≠ y ∧ x ≠ z ∧ y ≠ z ∧ s = {x, y, z}
[PROOFSTEP]
refine' ⟨fun h ↦ _, fun ⟨x, y, z, hxy, hyz, hxz, hs⟩ ↦ _⟩
[GOAL]
case refine'_1
α✝ : Type ?u.68373
s✝ t : Set α✝
α : Type u_1
s : Set α
h : encard s = 3
⊢ ∃ x y z, x ≠ y ∧ x ≠ z ∧ y ≠ z ∧ s = {x, y, z}
[PROOFSTEP]
obtain ⟨x, hx⟩ := nonempty_of_encard_ne_zero (s := s) (by rw [h]; simp)
[GOAL]
α✝ : Type ?u.68373
s✝ t : Set α✝
α : Type u_1
s : Set α
h : encard s = 3
⊢ encard s ≠ 0
[PROOFSTEP]
rw [h]
[GOAL]
α✝ : Type ?u.68373
s✝ t : Set α✝
α : Type u_1
s : Set α
h : encard s = 3
⊢ 3 ≠ 0
[PROOFSTEP]
simp
[GOAL]
case refine'_1.intro
α✝ : Type ?u.68373
s✝ t : Set α✝
α : Type u_1
s : Set α
h : encard s = 3
x : α
hx : x ∈ s
⊢ ∃ x y z, x ≠ y ∧ x ≠ z ∧ y ≠ z ∧ s = {x, y, z}
[PROOFSTEP]
rw [← insert_eq_of_mem hx, ← insert_diff_singleton, encard_insert_of_not_mem (fun h ↦ h.2 rfl),
(by exact rfl : (3 : ℕ∞) = 2 + 1), WithTop.add_right_cancel_iff WithTop.one_ne_top, encard_eq_two] at h
[GOAL]
α✝ : Type ?u.68373
s✝ t : Set α✝
α : Type u_1
s : Set α
x : α
h : encard (s \ {x}) + 1 = 3
hx : x ∈ s
⊢ 3 = 2 + 1
[PROOFSTEP]
exact rfl
[GOAL]
case refine'_1.intro
α✝ : Type ?u.68373
s✝ t : Set α✝
α : Type u_1
s : Set α
x : α
h : ∃ x_1 y, x_1 ≠ y ∧ s \ {x} = {x_1, y}
hx : x ∈ s
⊢ ∃ x y z, x ≠ y ∧ x ≠ z ∧ y ≠ z ∧ s = {x, y, z}
[PROOFSTEP]
obtain ⟨y, z, hne, hs⟩ := h
[GOAL]
case refine'_1.intro.intro.intro.intro
α✝ : Type ?u.68373
s✝ t : Set α✝
α : Type u_1
s : Set α
x : α
hx : x ∈ s
y z : α
hne : y ≠ z
hs : s \ {x} = {y, z}
⊢ ∃ x y z, x ≠ y ∧ x ≠ z ∧ y ≠ z ∧ s = {x, y, z}
[PROOFSTEP]
refine' ⟨x, y, z, _, _, hne, _⟩
[GOAL]
case refine'_1.intro.intro.intro.intro.refine'_1
α✝ : Type ?u.68373
s✝ t : Set α✝
α : Type u_1
s : Set α
x : α
hx : x ∈ s
y z : α
hne : y ≠ z
hs : s \ {x} = {y, z}
⊢ x ≠ y
[PROOFSTEP]
rintro rfl
[GOAL]
case refine'_1.intro.intro.intro.intro.refine'_1
α✝ : Type ?u.68373
s✝ t : Set α✝
α : Type u_1
s : Set α
x : α
hx : x ∈ s
z : α
hne : x ≠ z
hs : s \ {x} = {x, z}
⊢ False
[PROOFSTEP]
exact (hs.symm.subset (Or.inl rfl)).2 rfl
[GOAL]
case refine'_1.intro.intro.intro.intro.refine'_2
α✝ : Type ?u.68373
s✝ t : Set α✝
α : Type u_1
s : Set α
x : α
hx : x ∈ s
y z : α
hne : y ≠ z
hs : s \ {x} = {y, z}
⊢ x ≠ z
[PROOFSTEP]
rintro rfl
[GOAL]
case refine'_1.intro.intro.intro.intro.refine'_2
α✝ : Type ?u.68373
s✝ t : Set α✝
α : Type u_1
s : Set α
x : α
hx : x ∈ s
y : α
hne : y ≠ x
hs : s \ {x} = {y, x}
⊢ False
[PROOFSTEP]
exact (hs.symm.subset (Or.inr rfl)).2 rfl
[GOAL]
case refine'_1.intro.intro.intro.intro.refine'_3
α✝ : Type ?u.68373
s✝ t : Set α✝
α : Type u_1
s : Set α
x : α
hx : x ∈ s
y z : α
hne : y ≠ z
hs : s \ {x} = {y, z}
⊢ s = {x, y, z}
[PROOFSTEP]
rw [← hs, insert_diff_singleton, insert_eq_of_mem hx]
[GOAL]
case refine'_2
α✝ : Type ?u.68373
s✝ t : Set α✝
α : Type u_1
s : Set α
x✝ : ∃ x y z, x ≠ y ∧ x ≠ z ∧ y ≠ z ∧ s = {x, y, z}
x y z : α
hxy : x ≠ y
hyz : x ≠ z
hxz : y ≠ z
hs : s = {x, y, z}
⊢ encard s = 3
[PROOFSTEP]
rw [hs, encard_insert_of_not_mem, encard_insert_of_not_mem, encard_singleton]
[GOAL]
case refine'_2
α✝ : Type ?u.68373
s✝ t : Set α✝
α : Type u_1
s : Set α
x✝ : ∃ x y z, x ≠ y ∧ x ≠ z ∧ y ≠ z ∧ s = {x, y, z}
x y z : α
hxy : x ≠ y
hyz : x ≠ z
hxz : y ≠ z
hs : s = {x, y, z}
⊢ 1 + 1 + 1 = 3
[PROOFSTEP]
aesop
[GOAL]
case refine'_2
α✝ : Type ?u.68373
s✝ t : Set α✝
α : Type u_1
s : Set α
x✝ : ∃ x y z, x ≠ y ∧ x ≠ z ∧ y ≠ z ∧ s = {x, y, z}
x y z : α
hxy : x ≠ y
hyz : x ≠ z
hxz : y ≠ z
hs : s = {x, y, z}
⊢ ¬y ∈ {z}
[PROOFSTEP]
aesop
[GOAL]
case refine'_2
α✝ : Type ?u.68373
s✝ t : Set α✝
α : Type u_1
s : Set α
x✝ : ∃ x y z, x ≠ y ∧ x ≠ z ∧ y ≠ z ∧ s = {x, y, z}
x y z : α
hxy : x ≠ y
hyz : x ≠ z
hxz : y ≠ z
hs : s = {x, y, z}
⊢ ¬x ∈ {y, z}
[PROOFSTEP]
aesop
[GOAL]
α : Type ?u.102702
s t : Set α
k : ℕ
⊢ encard {i | i < k} = ↑k
[PROOFSTEP]
convert encard_coe_eq_coe_finsetCard (Finset.range k) using 1
[GOAL]
case h.e'_2
α : Type ?u.102702
s t : Set α
k : ℕ
⊢ encard {i | i < k} = encard ↑(Finset.range k)
[PROOFSTEP]
rw [Finset.coe_range, Iio_def]
[GOAL]
case h.e'_3
α : Type ?u.102702
s t : Set α
k : ℕ
⊢ ↑k = ↑(Finset.card (Finset.range k))
[PROOFSTEP]
rw [Finset.card_range]
[GOAL]
α : Type u_1
s t : Set α
hs : Set.Finite s
h : s ⊆ t
hst : encard t = encard s + 1
⊢ ∃ a, t = insert a s
[PROOFSTEP]
rw [← encard_diff_add_encard_of_subset h, add_comm, WithTop.add_left_cancel_iff hs.encard_lt_top.ne, encard_eq_one] at
hst
[GOAL]
α : Type u_1
s t : Set α
hs : Set.Finite s
h : s ⊆ t
hst : ∃ x, t \ s = {x}
⊢ ∃ a, t = insert a s
[PROOFSTEP]
obtain ⟨x, hx⟩ := hst
[GOAL]
case intro
α : Type u_1
s t : Set α
hs : Set.Finite s
h : s ⊆ t
x : α
hx : t \ s = {x}
⊢ ∃ a, t = insert a s
[PROOFSTEP]
use x
[GOAL]
case h
α : Type u_1
s t : Set α
hs : Set.Finite s
h : s ⊆ t
x : α
hx : t \ s = {x}
⊢ t = insert x s
[PROOFSTEP]
rw [← diff_union_of_subset h, hx, singleton_union]
[GOAL]
α : Type u_1
s t : Set α
k : (fun x => ℕ∞) (PartENat.card ↑s)
hk : k ≤ encard s
⊢ ∃ t, t ⊆ s ∧ encard t = k
[PROOFSTEP]
revert hk
[GOAL]
α : Type u_1
s t : Set α
k : (fun x => ℕ∞) (PartENat.card ↑s)
⊢ k ≤ encard s → ∃ t, t ⊆ s ∧ encard t = k
[PROOFSTEP]
refine' ENat.nat_induction k (fun _ ↦ ⟨∅, empty_subset _, by simp⟩) (fun n IH hle ↦ _) _
[GOAL]
α : Type u_1
s t : Set α
k : (fun x => ℕ∞) (PartENat.card ↑s)
x✝ : 0 ≤ encard s
⊢ encard ∅ = 0
[PROOFSTEP]
simp
[GOAL]
case refine'_1
α : Type u_1
s t : Set α
k : (fun x => ℕ∞) (PartENat.card ↑s)
n : ℕ
IH : ↑n ≤ encard s → ∃ t, t ⊆ s ∧ encard t = ↑n
hle : ↑(Nat.succ n) ≤ encard s
⊢ ∃ t, t ⊆ s ∧ encard t = ↑(Nat.succ n)
[PROOFSTEP]
obtain ⟨t₀, ht₀s, ht₀⟩ := IH (le_trans (by simp) hle)
[GOAL]
α : Type u_1
s t : Set α
k : (fun x => ℕ∞) (PartENat.card ↑s)
n : ℕ
IH : ↑n ≤ encard s → ∃ t, t ⊆ s ∧ encard t = ↑n
hle : ↑(Nat.succ n) ≤ encard s
⊢ ↑n ≤ ↑(Nat.succ n)
[PROOFSTEP]
simp
[GOAL]
case refine'_1.intro.intro
α : Type u_1
s t : Set α
k : (fun x => ℕ∞) (PartENat.card ↑s)
n : ℕ
IH : ↑n ≤ encard s → ∃ t, t ⊆ s ∧ encard t = ↑n
hle : ↑(Nat.succ n) ≤ encard s
t₀ : Set α
ht₀s : t₀ ⊆ s
ht₀ : encard t₀ = ↑n
⊢ ∃ t, t ⊆ s ∧ encard t = ↑(Nat.succ n)
[PROOFSTEP]
simp only [Nat.cast_succ] at *
[GOAL]
case refine'_1.intro.intro
α : Type u_1
s t : Set α
k : (fun x => ℕ∞) (PartENat.card ↑s)
n : ℕ
IH : ↑n ≤ encard s → ∃ t, t ⊆ s ∧ encard t = ↑n
t₀ : Set α
ht₀s : t₀ ⊆ s
ht₀ : encard t₀ = ↑n
hle : ↑n + 1 ≤ encard s
⊢ ∃ t, t ⊆ s ∧ encard t = ↑n + 1
[PROOFSTEP]
have hne : t₀ ≠ s
[GOAL]
case hne
α : Type u_1
s t : Set α
k : (fun x => ℕ∞) (PartENat.card ↑s)
n : ℕ
IH : ↑n ≤ encard s → ∃ t, t ⊆ s ∧ encard t = ↑n
t₀ : Set α
ht₀s : t₀ ⊆ s
ht₀ : encard t₀ = ↑n
hle : ↑n + 1 ≤ encard s
⊢ t₀ ≠ s
[PROOFSTEP]
rintro rfl
[GOAL]
case hne
α : Type u_1
t : Set α
n : ℕ
t₀ : Set α
ht₀ : encard t₀ = ↑n
k : ℕ∞
IH : ↑n ≤ encard t₀ → ∃ t, t ⊆ t₀ ∧ encard t = ↑n
ht₀s : t₀ ⊆ t₀
hle : ↑n + 1 ≤ encard t₀
⊢ False
[PROOFSTEP]
rw [ht₀, ← Nat.cast_one, ← Nat.cast_add, Nat.cast_le] at hle
[GOAL]
case hne
α : Type u_1
t : Set α
n : ℕ
t₀ : Set α
ht₀ : encard t₀ = ↑n
k : ℕ∞
IH : ↑n ≤ encard t₀ → ∃ t, t ⊆ t₀ ∧ encard t = ↑n
ht₀s : t₀ ⊆ t₀
hle : n + 1 ≤ n
⊢ False
[PROOFSTEP]
simp at hle
[GOAL]
case refine'_1.intro.intro
α : Type u_1
s t : Set α
k : (fun x => ℕ∞) (PartENat.card ↑s)
n : ℕ
IH : ↑n ≤ encard s → ∃ t, t ⊆ s ∧ encard t = ↑n
t₀ : Set α
ht₀s : t₀ ⊆ s
ht₀ : encard t₀ = ↑n
hle : ↑n + 1 ≤ encard s
hne : t₀ ≠ s
⊢ ∃ t, t ⊆ s ∧ encard t = ↑n + 1
[PROOFSTEP]
obtain ⟨x, hx⟩ := exists_of_ssubset (ht₀s.ssubset_of_ne hne)
[GOAL]
case refine'_1.intro.intro.intro
α : Type u_1
s t : Set α
k : (fun x => ℕ∞) (PartENat.card ↑s)
n : ℕ
IH : ↑n ≤ encard s → ∃ t, t ⊆ s ∧ encard t = ↑n
t₀ : Set α
ht₀s : t₀ ⊆ s
ht₀ : encard t₀ = ↑n
hle : ↑n + 1 ≤ encard s
hne : t₀ ≠ s
x : α
hx : x ∈ s ∧ ¬x ∈ t₀
⊢ ∃ t, t ⊆ s ∧ encard t = ↑n + 1
[PROOFSTEP]
exact ⟨insert x t₀, insert_subset hx.1 ht₀s, by rw [encard_insert_of_not_mem hx.2, ht₀]⟩
[GOAL]
α : Type u_1
s t : Set α
k : (fun x => ℕ∞) (PartENat.card ↑s)
n : ℕ
IH : ↑n ≤ encard s → ∃ t, t ⊆ s ∧ encard t = ↑n
t₀ : Set α
ht₀s : t₀ ⊆ s
ht₀ : encard t₀ = ↑n
hle : ↑n + 1 ≤ encard s
hne : t₀ ≠ s
x : α
hx : x ∈ s ∧ ¬x ∈ t₀
⊢ encard (insert x t₀) = ↑n + 1
[PROOFSTEP]
rw [encard_insert_of_not_mem hx.2, ht₀]
[GOAL]
case refine'_2
α : Type u_1
s t : Set α
k : (fun x => ℕ∞) (PartENat.card ↑s)
⊢ (∀ (n : ℕ), ↑n ≤ encard s → ∃ t, t ⊆ s ∧ encard t = ↑n) → ⊤ ≤ encard s → ∃ t, t ⊆ s ∧ encard t = ⊤
[PROOFSTEP]
simp only [top_le_iff, encard_eq_top_iff]
[GOAL]
case refine'_2
α : Type u_1
s t : Set α
k : (fun x => ℕ∞) (PartENat.card ↑s)
⊢ (∀ (n : ℕ), ↑n ≤ encard s → ∃ t, t ⊆ s ∧ encard t = ↑n) → Set.Infinite s → ∃ t, t ⊆ s ∧ Set.Infinite t
[PROOFSTEP]
exact fun _ hi ↦ ⟨s, Subset.rfl, hi⟩
[GOAL]
α : Type u_1
s t : Set α
k : (fun x => ℕ∞) (PartENat.card ↑s)
hst : s ⊆ t
hsk : encard s ≤ k
hkt : k ≤ encard t
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ encard r = k
[PROOFSTEP]
obtain (hs | hs) := eq_or_ne s.encard ⊤
[GOAL]
case inl
α : Type u_1
s t : Set α
k : (fun x => ℕ∞) (PartENat.card ↑s)
hst : s ⊆ t
hsk : encard s ≤ k
hkt : k ≤ encard t
hs : encard s = ⊤
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ encard r = k
[PROOFSTEP]
rw [hs, top_le_iff] at hsk
[GOAL]
case inl
α : Type u_1
s t : Set α
k : (fun x => ℕ∞) (PartENat.card ↑s)
hst : s ⊆ t
hsk : k = ⊤
hkt : k ≤ encard t
hs : encard s = ⊤
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ encard r = k
[PROOFSTEP]
subst hsk
[GOAL]
case inl
α : Type u_1
s t : Set α
hst : s ⊆ t
hs : encard s = ⊤
hkt : ⊤ ≤ encard t
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ encard r = ⊤
[PROOFSTEP]
exact ⟨s, Subset.rfl, hst, hs⟩
[GOAL]
case inr
α : Type u_1
s t : Set α
k : (fun x => ℕ∞) (PartENat.card ↑s)
hst : s ⊆ t
hsk : encard s ≤ k
hkt : k ≤ encard t
hs : encard s ≠ ⊤
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ encard r = k
[PROOFSTEP]
obtain ⟨k, rfl⟩ := exists_add_of_le hsk
[GOAL]
case inr.intro
α : Type u_1
s t : Set α
hst : s ⊆ t
hs : encard s ≠ ⊤
k : ℕ∞
hsk : encard s ≤ encard s + k
hkt : encard s + k ≤ encard t
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ encard r = encard s + k
[PROOFSTEP]
obtain ⟨k', hk'⟩ := exists_add_of_le hkt
[GOAL]
case inr.intro.intro
α : Type u_1
s t : Set α
hst : s ⊆ t
hs : encard s ≠ ⊤
k : ℕ∞
hsk : encard s ≤ encard s + k
hkt : encard s + k ≤ encard t
k' : ℕ∞
hk' : encard t = encard s + k + k'
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ encard r = encard s + k
[PROOFSTEP]
have hk : k ≤ encard (t \ s)
[GOAL]
case hk
α : Type u_1
s t : Set α
hst : s ⊆ t
hs : encard s ≠ ⊤
k : ℕ∞
hsk : encard s ≤ encard s + k
hkt : encard s + k ≤ encard t
k' : ℕ∞
hk' : encard t = encard s + k + k'
⊢ k ≤ encard (t \ s)
[PROOFSTEP]
rw [← encard_diff_add_encard_of_subset hst, add_comm] at hkt
[GOAL]
case hk
α : Type u_1
s t : Set α
hst : s ⊆ t
hs : encard s ≠ ⊤
k : ℕ∞
hsk : encard s ≤ encard s + k
hkt : k + encard s ≤ encard (t \ s) + encard s
k' : ℕ∞
hk' : encard t = encard s + k + k'
⊢ k ≤ encard (t \ s)
[PROOFSTEP]
exact WithTop.le_of_add_le_add_right hs hkt
[GOAL]
case inr.intro.intro
α : Type u_1
s t : Set α
hst : s ⊆ t
hs : encard s ≠ ⊤
k : ℕ∞
hsk : encard s ≤ encard s + k
hkt : encard s + k ≤ encard t
k' : ℕ∞
hk' : encard t = encard s + k + k'
hk : k ≤ encard (t \ s)
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ encard r = encard s + k
[PROOFSTEP]
obtain ⟨r', hr', rfl⟩ := exists_subset_encard_eq hk
[GOAL]
case inr.intro.intro.intro.intro
α : Type u_1
s t : Set α
hst : s ⊆ t
hs : encard s ≠ ⊤
k' : ℕ∞
r' : Set α
hr' : r' ⊆ t \ s
hsk : encard s ≤ encard s + encard r'
hkt : encard s + encard r' ≤ encard t
hk' : encard t = encard s + encard r' + k'
hk : encard r' ≤ encard (t \ s)
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ encard r = encard s + encard r'
[PROOFSTEP]
refine' ⟨s ∪ r', subset_union_left _ _, union_subset hst (hr'.trans (diff_subset _ _)), _⟩
[GOAL]
case inr.intro.intro.intro.intro
α : Type u_1
s t : Set α
hst : s ⊆ t
hs : encard s ≠ ⊤
k' : ℕ∞
r' : Set α
hr' : r' ⊆ t \ s
hsk : encard s ≤ encard s + encard r'
hkt : encard s + encard r' ≤ encard t
hk' : encard t = encard s + encard r' + k'
hk : encard r' ≤ encard (t \ s)
⊢ encard (s ∪ r') = encard s + encard r'
[PROOFSTEP]
rw [encard_union_eq (disjoint_of_subset_right hr' disjoint_sdiff_right)]
[GOAL]
α : Type u_2
β : Type u_1
s✝ t✝ s : Set α
t : Set β
f : α → β
h : InjOn f s
⊢ encard (f '' s) = encard s
[PROOFSTEP]
rw [encard, PartENat.card_image_of_injOn h, encard]
[GOAL]
α : Type u_1
β : Type u_2
s✝ t✝ s : Set α
t : Set β
f : α → β
e : ↑s ≃ ↑t
⊢ encard s = encard t
[PROOFSTEP]
rw [← encard_univ_coe, ← encard_univ_coe t, encard_univ, encard_univ, PartENat.card_congr e]
[GOAL]
α : Type u_2
β : Type u_1
s✝ t✝ s : Set α
t : Set β
f : α → β
e : ↑s ↪ ↑t
⊢ encard s ≤ encard t
[PROOFSTEP]
rw [← encard_univ_coe, ← e.injective.encard_image, ← Subtype.coe_injective.encard_image]
[GOAL]
α : Type u_2
β : Type u_1
s✝ t✝ s : Set α
t : Set β
f : α → β
e : ↑s ↪ ↑t
⊢ encard ((fun a => ↑a) '' (↑e '' univ)) ≤ encard t
[PROOFSTEP]
exact encard_mono (by simp)
[GOAL]
α : Type u_2
β : Type u_1
s✝ t✝ s : Set α
t : Set β
f : α → β
e : ↑s ↪ ↑t
⊢ (fun a => ↑a) '' (↑e '' univ) ≤ t
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
s✝¹ t✝ s✝ : Set α
t : Set β
f✝ f : α → β
s : Set α
⊢ encard (f '' s) ≤ encard s
[PROOFSTEP]
obtain (h | h) := isEmpty_or_nonempty α
[GOAL]
case inl
α : Type u_1
β : Type u_2
s✝¹ t✝ s✝ : Set α
t : Set β
f✝ f : α → β
s : Set α
h : IsEmpty α
⊢ encard (f '' s) ≤ encard s
[PROOFSTEP]
rw [s.eq_empty_of_isEmpty]
[GOAL]
case inl
α : Type u_1
β : Type u_2
s✝¹ t✝ s✝ : Set α
t : Set β
f✝ f : α → β
s : Set α
h : IsEmpty α
⊢ encard (f '' ∅) ≤ encard ∅
[PROOFSTEP]
simp
[GOAL]
case inr
α : Type u_1
β : Type u_2
s✝¹ t✝ s✝ : Set α
t : Set β
f✝ f : α → β
s : Set α
h : Nonempty α
⊢ encard (f '' s) ≤ encard s
[PROOFSTEP]
rw [← (f.invFunOn_injOn_image s).encard_image]
[GOAL]
case inr
α : Type u_1
β : Type u_2
s✝¹ t✝ s✝ : Set α
t : Set β
f✝ f : α → β
s : Set α
h : Nonempty α
⊢ encard (Function.invFunOn f s '' (f '' s)) ≤ encard s
[PROOFSTEP]
apply encard_le_of_subset
[GOAL]
case inr.h
α : Type u_1
β : Type u_2
s✝¹ t✝ s✝ : Set α
t : Set β
f✝ f : α → β
s : Set α
h : Nonempty α
⊢ Function.invFunOn f s '' (f '' s) ⊆ s
[PROOFSTEP]
exact f.invFunOn_image_image_subset s
[GOAL]
α : Type u_1
β : Type u_2
s✝ t✝ s : Set α
t : Set β
f : α → β
hs : Set.Finite s
h : encard (f '' s) = encard s
⊢ InjOn f s
[PROOFSTEP]
obtain (h' | hne) := isEmpty_or_nonempty α
[GOAL]
case inl
α : Type u_1
β : Type u_2
s✝ t✝ s : Set α
t : Set β
f : α → β
hs : Set.Finite s
h : encard (f '' s) = encard s
h' : IsEmpty α
⊢ InjOn f s
[PROOFSTEP]
rw [s.eq_empty_of_isEmpty]
[GOAL]
case inl
α : Type u_1
β : Type u_2
s✝ t✝ s : Set α
t : Set β
f : α → β
hs : Set.Finite s
h : encard (f '' s) = encard s
h' : IsEmpty α
⊢ InjOn f ∅
[PROOFSTEP]
simp
[GOAL]
case inr
α : Type u_1
β : Type u_2
s✝ t✝ s : Set α
t : Set β
f : α → β
hs : Set.Finite s
h : encard (f '' s) = encard s
hne : Nonempty α
⊢ InjOn f s
[PROOFSTEP]
rw [← (f.invFunOn_injOn_image s).encard_image] at h
[GOAL]
case inr
α : Type u_1
β : Type u_2
s✝ t✝ s : Set α
t : Set β
f : α → β
hs : Set.Finite s
hne : Nonempty α
h : encard (Function.invFunOn f s '' (f '' s)) = encard s
⊢ InjOn f s
[PROOFSTEP]
rw [injOn_iff_invFunOn_image_image_eq_self]
[GOAL]
case inr
α : Type u_1
β : Type u_2
s✝ t✝ s : Set α
t : Set β
f : α → β
hs : Set.Finite s
hne : Nonempty α
h : encard (Function.invFunOn f s '' (f '' s)) = encard s
⊢ Function.invFunOn f s '' (f '' s) = s
[PROOFSTEP]
exact hs.eq_of_subset_of_encard_le (f.invFunOn_image_image_subset s) h.symm.le
[GOAL]
α : Type u_1
β : Type u_2
s✝ t✝ s : Set α
t : Set β
f : α → β
hf : Function.Injective f
ht : t ⊆ range f
⊢ encard (f ⁻¹' t) = encard t
[PROOFSTEP]
rw [← hf.encard_image, image_preimage_eq_inter_range, inter_eq_self_of_subset_left ht]
[GOAL]
α : Type u_2
β : Type u_1
s✝ t✝ s : Set α
t : Set β
f : α → β
hf : MapsTo f s t
f_inj : InjOn f s
⊢ encard s ≤ encard t
[PROOFSTEP]
rw [← f_inj.encard_image]
[GOAL]
α : Type u_2
β : Type u_1
s✝ t✝ s : Set α
t : Set β
f : α → β
hf : MapsTo f s t
f_inj : InjOn f s
⊢ encard (f '' s) ≤ encard t
[PROOFSTEP]
apply encard_le_of_subset
[GOAL]
case h
α : Type u_2
β : Type u_1
s✝ t✝ s : Set α
t : Set β
f : α → β
hf : MapsTo f s t
f_inj : InjOn f s
⊢ f '' s ⊆ t
[PROOFSTEP]
rintro _ ⟨x, hx, rfl⟩
[GOAL]
case h.intro.intro
α : Type u_2
β : Type u_1
s✝ t✝ s : Set α
t : Set β
f : α → β
hf : MapsTo f s t
f_inj : InjOn f s
x : α
hx : x ∈ s
⊢ f x ∈ t
[PROOFSTEP]
exact hf hx
[GOAL]
α : Type u_2
β : Type u_1
s✝¹ t✝¹ s✝ : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
s : Set α
t : Set β
hs : Set.Finite s
hle : encard s ≤ encard t
⊢ ∃ f, s ⊆ f ⁻¹' t ∧ InjOn f s
[PROOFSTEP]
classical
obtain (rfl | h | ⟨a, has, -⟩) := s.eq_empty_or_encard_eq_top_or_encard_diff_singleton_lt
· simp
· exact (encard_ne_top_iff.mpr hs h).elim
obtain ⟨b, hbt⟩ := encard_pos.1 ((encard_pos.2 ⟨_, has⟩).trans_le hle)
have hle' : (s \ { a }).encard ≤ (t \ { b }).encard
·
rwa [← WithTop.add_le_add_iff_right WithTop.one_ne_top, encard_diff_singleton_add_one has,
encard_diff_singleton_add_one hbt]
obtain ⟨f₀, hf₀s, hinj⟩ := exists_injOn_of_encard_le (hs.diff { a }) hle'
simp only [preimage_diff, subset_def, mem_diff, mem_singleton_iff, mem_preimage, and_imp] at hf₀s
use Function.update f₀ a b
rw [← insert_eq_of_mem has, ← insert_diff_singleton, injOn_insert (fun h ↦ h.2 rfl)]
simp only [mem_diff, mem_singleton_iff, not_true, and_false, insert_diff_singleton, subset_def, mem_insert_iff,
mem_preimage, ne_eq, Function.update_apply, forall_eq_or_imp, ite_true, and_imp, mem_image, ite_eq_left_iff,
not_exists, not_and, not_forall, exists_prop, and_iff_right hbt]
refine ⟨?_, ?_, fun x hxs hxa ↦ ⟨hxa, (hf₀s x hxs hxa).2⟩⟩
· rintro x hx; split_ifs with h; assumption; exact (hf₀s x hx h).1
exact InjOn.congr hinj (fun x ⟨_, hxa⟩ ↦ by rwa [Function.update_noteq])
[GOAL]
α : Type u_2
β : Type u_1
s✝¹ t✝¹ s✝ : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
s : Set α
t : Set β
hs : Set.Finite s
hle : encard s ≤ encard t
⊢ ∃ f, s ⊆ f ⁻¹' t ∧ InjOn f s
[PROOFSTEP]
obtain (rfl | h | ⟨a, has, -⟩) := s.eq_empty_or_encard_eq_top_or_encard_diff_singleton_lt
[GOAL]
case inl
α : Type u_2
β : Type u_1
s✝ t✝¹ s : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
t : Set β
hs : Set.Finite ∅
hle : encard ∅ ≤ encard t
⊢ ∃ f, ∅ ⊆ f ⁻¹' t ∧ InjOn f ∅
[PROOFSTEP]
simp
[GOAL]
case inr.inl
α : Type u_2
β : Type u_1
s✝¹ t✝¹ s✝ : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
s : Set α
t : Set β
hs : Set.Finite s
hle : encard s ≤ encard t
h : encard s = ⊤
⊢ ∃ f, s ⊆ f ⁻¹' t ∧ InjOn f s
[PROOFSTEP]
exact (encard_ne_top_iff.mpr hs h).elim
[GOAL]
case inr.inr.intro.intro
α : Type u_2
β : Type u_1
s✝¹ t✝¹ s✝ : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
s : Set α
t : Set β
hs : Set.Finite s
hle : encard s ≤ encard t
a : α
has : a ∈ s
⊢ ∃ f, s ⊆ f ⁻¹' t ∧ InjOn f s
[PROOFSTEP]
obtain ⟨b, hbt⟩ := encard_pos.1 ((encard_pos.2 ⟨_, has⟩).trans_le hle)
[GOAL]
case inr.inr.intro.intro.intro
α : Type u_2
β : Type u_1
s✝¹ t✝¹ s✝ : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
s : Set α
t : Set β
hs : Set.Finite s
hle : encard s ≤ encard t
a : α
has : a ∈ s
b : β
hbt : b ∈ t
⊢ ∃ f, s ⊆ f ⁻¹' t ∧ InjOn f s
[PROOFSTEP]
have hle' : (s \ { a }).encard ≤ (t \ { b }).encard
[GOAL]
case hle'
α : Type u_2
β : Type u_1
s✝¹ t✝¹ s✝ : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
s : Set α
t : Set β
hs : Set.Finite s
hle : encard s ≤ encard t
a : α
has : a ∈ s
b : β
hbt : b ∈ t
⊢ encard (s \ {a}) ≤ encard (t \ {b})
[PROOFSTEP]
rwa [← WithTop.add_le_add_iff_right WithTop.one_ne_top, encard_diff_singleton_add_one has,
encard_diff_singleton_add_one hbt]
[GOAL]
case inr.inr.intro.intro.intro
α : Type u_2
β : Type u_1
s✝¹ t✝¹ s✝ : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
s : Set α
t : Set β
hs : Set.Finite s
hle : encard s ≤ encard t
a : α
has : a ∈ s
b : β
hbt : b ∈ t
hle' : encard (s \ {a}) ≤ encard (t \ {b})
⊢ ∃ f, s ⊆ f ⁻¹' t ∧ InjOn f s
[PROOFSTEP]
obtain ⟨f₀, hf₀s, hinj⟩ := exists_injOn_of_encard_le (hs.diff { a }) hle'
[GOAL]
case inr.inr.intro.intro.intro.intro.intro
α : Type u_2
β : Type u_1
s✝¹ t✝¹ s✝ : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
s : Set α
t : Set β
hs : Set.Finite s
hle : encard s ≤ encard t
a : α
has : a ∈ s
b : β
hbt : b ∈ t
hle' : encard (s \ {a}) ≤ encard (t \ {b})
f₀ : α → β
hf₀s : s \ {a} ⊆ f₀ ⁻¹' (t \ {b})
hinj : InjOn f₀ (s \ {a})
⊢ ∃ f, s ⊆ f ⁻¹' t ∧ InjOn f s
[PROOFSTEP]
simp only [preimage_diff, subset_def, mem_diff, mem_singleton_iff, mem_preimage, and_imp] at hf₀s
[GOAL]
case inr.inr.intro.intro.intro.intro.intro
α : Type u_2
β : Type u_1
s✝¹ t✝¹ s✝ : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
s : Set α
t : Set β
hs : Set.Finite s
hle : encard s ≤ encard t
a : α
has : a ∈ s
b : β
hbt : b ∈ t
hle' : encard (s \ {a}) ≤ encard (t \ {b})
f₀ : α → β
hinj : InjOn f₀ (s \ {a})
hf₀s : ∀ (x : α), x ∈ s → ¬x = a → f₀ x ∈ t ∧ ¬f₀ x = b
⊢ ∃ f, s ⊆ f ⁻¹' t ∧ InjOn f s
[PROOFSTEP]
use Function.update f₀ a b
[GOAL]
case h
α : Type u_2
β : Type u_1
s✝¹ t✝¹ s✝ : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
s : Set α
t : Set β
hs : Set.Finite s
hle : encard s ≤ encard t
a : α
has : a ∈ s
b : β
hbt : b ∈ t
hle' : encard (s \ {a}) ≤ encard (t \ {b})
f₀ : α → β
hinj : InjOn f₀ (s \ {a})
hf₀s : ∀ (x : α), x ∈ s → ¬x = a → f₀ x ∈ t ∧ ¬f₀ x = b
⊢ s ⊆ Function.update f₀ a b ⁻¹' t ∧ InjOn (Function.update f₀ a b) s
[PROOFSTEP]
rw [← insert_eq_of_mem has, ← insert_diff_singleton, injOn_insert (fun h ↦ h.2 rfl)]
[GOAL]
case h
α : Type u_2
β : Type u_1
s✝¹ t✝¹ s✝ : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
s : Set α
t : Set β
hs : Set.Finite s
hle : encard s ≤ encard t
a : α
has : a ∈ s
b : β
hbt : b ∈ t
hle' : encard (s \ {a}) ≤ encard (t \ {b})
f₀ : α → β
hinj : InjOn f₀ (s \ {a})
hf₀s : ∀ (x : α), x ∈ s → ¬x = a → f₀ x ∈ t ∧ ¬f₀ x = b
⊢ insert a (s \ {a}) ⊆ Function.update f₀ a b ⁻¹' t ∧
InjOn (Function.update f₀ a b) (s \ {a}) ∧ ¬Function.update f₀ a b a ∈ Function.update f₀ a b '' (s \ {a})
[PROOFSTEP]
simp only [mem_diff, mem_singleton_iff, not_true, and_false, insert_diff_singleton, subset_def, mem_insert_iff,
mem_preimage, ne_eq, Function.update_apply, forall_eq_or_imp, ite_true, and_imp, mem_image, ite_eq_left_iff,
not_exists, not_and, not_forall, exists_prop, and_iff_right hbt]
[GOAL]
case h
α : Type u_2
β : Type u_1
s✝¹ t✝¹ s✝ : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
s : Set α
t : Set β
hs : Set.Finite s
hle : encard s ≤ encard t
a : α
has : a ∈ s
b : β
hbt : b ∈ t
hle' : encard (s \ {a}) ≤ encard (t \ {b})
f₀ : α → β
hinj : InjOn f₀ (s \ {a})
hf₀s : ∀ (x : α), x ∈ s → ¬x = a → f₀ x ∈ t ∧ ¬f₀ x = b
⊢ (∀ (a_1 : α), a_1 ∈ s → (if a_1 = a then b else f₀ a_1) ∈ t) ∧
InjOn (Function.update f₀ a b) (s \ {a}) ∧ ∀ (x : α), x ∈ s → ¬x = a → ¬x = a ∧ ¬f₀ x = b
[PROOFSTEP]
refine ⟨?_, ?_, fun x hxs hxa ↦ ⟨hxa, (hf₀s x hxs hxa).2⟩⟩
[GOAL]
case h.refine_1
α : Type u_2
β : Type u_1
s✝¹ t✝¹ s✝ : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
s : Set α
t : Set β
hs : Set.Finite s
hle : encard s ≤ encard t
a : α
has : a ∈ s
b : β
hbt : b ∈ t
hle' : encard (s \ {a}) ≤ encard (t \ {b})
f₀ : α → β
hinj : InjOn f₀ (s \ {a})
hf₀s : ∀ (x : α), x ∈ s → ¬x = a → f₀ x ∈ t ∧ ¬f₀ x = b
⊢ ∀ (a_1 : α), a_1 ∈ s → (if a_1 = a then b else f₀ a_1) ∈ t
[PROOFSTEP]
rintro x hx
[GOAL]
case h.refine_1
α : Type u_2
β : Type u_1
s✝¹ t✝¹ s✝ : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
s : Set α
t : Set β
hs : Set.Finite s
hle : encard s ≤ encard t
a : α
has : a ∈ s
b : β
hbt : b ∈ t
hle' : encard (s \ {a}) ≤ encard (t \ {b})
f₀ : α → β
hinj : InjOn f₀ (s \ {a})
hf₀s : ∀ (x : α), x ∈ s → ¬x = a → f₀ x ∈ t ∧ ¬f₀ x = b
x : α
hx : x ∈ s
⊢ (if x = a then b else f₀ x) ∈ t
[PROOFSTEP]
split_ifs with h
[GOAL]
case pos
α : Type u_2
β : Type u_1
s✝¹ t✝¹ s✝ : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
s : Set α
t : Set β
hs : Set.Finite s
hle : encard s ≤ encard t
a : α
has : a ∈ s
b : β
hbt : b ∈ t
hle' : encard (s \ {a}) ≤ encard (t \ {b})
f₀ : α → β
hinj : InjOn f₀ (s \ {a})
hf₀s : ∀ (x : α), x ∈ s → ¬x = a → f₀ x ∈ t ∧ ¬f₀ x = b
x : α
hx : x ∈ s
h : x = a
⊢ b ∈ t
case neg
α : Type u_2
β : Type u_1
s✝¹ t✝¹ s✝ : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
s : Set α
t : Set β
hs : Set.Finite s
hle : encard s ≤ encard t
a : α
has : a ∈ s
b : β
hbt : b ∈ t
hle' : encard (s \ {a}) ≤ encard (t \ {b})
f₀ : α → β
hinj : InjOn f₀ (s \ {a})
hf₀s : ∀ (x : α), x ∈ s → ¬x = a → f₀ x ∈ t ∧ ¬f₀ x = b
x : α
hx : x ∈ s
h : ¬x = a
⊢ f₀ x ∈ t
[PROOFSTEP]
assumption
[GOAL]
case neg
α : Type u_2
β : Type u_1
s✝¹ t✝¹ s✝ : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
s : Set α
t : Set β
hs : Set.Finite s
hle : encard s ≤ encard t
a : α
has : a ∈ s
b : β
hbt : b ∈ t
hle' : encard (s \ {a}) ≤ encard (t \ {b})
f₀ : α → β
hinj : InjOn f₀ (s \ {a})
hf₀s : ∀ (x : α), x ∈ s → ¬x = a → f₀ x ∈ t ∧ ¬f₀ x = b
x : α
hx : x ∈ s
h : ¬x = a
⊢ f₀ x ∈ t
[PROOFSTEP]
exact (hf₀s x hx h).1
[GOAL]
case h.refine_2
α : Type u_2
β : Type u_1
s✝¹ t✝¹ s✝ : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
s : Set α
t : Set β
hs : Set.Finite s
hle : encard s ≤ encard t
a : α
has : a ∈ s
b : β
hbt : b ∈ t
hle' : encard (s \ {a}) ≤ encard (t \ {b})
f₀ : α → β
hinj : InjOn f₀ (s \ {a})
hf₀s : ∀ (x : α), x ∈ s → ¬x = a → f₀ x ∈ t ∧ ¬f₀ x = b
⊢ InjOn (Function.update f₀ a b) (s \ {a})
[PROOFSTEP]
exact InjOn.congr hinj (fun x ⟨_, hxa⟩ ↦ by rwa [Function.update_noteq])
[GOAL]
α : Type u_2
β : Type u_1
s✝¹ t✝¹ s✝ : Set α
t✝ : Set β
f : α → β
inst✝ : Nonempty β
s : Set α
t : Set β
hs : Set.Finite s
hle : encard s ≤ encard t
a : α
has : a ∈ s
b : β
hbt : b ∈ t
hle' : encard (s \ {a}) ≤ encard (t \ {b})
f₀ : α → β
hinj : InjOn f₀ (s \ {a})
hf₀s : ∀ (x : α), x ∈ s → ¬x = a → f₀ x ∈ t ∧ ¬f₀ x = b
x : α
x✝ : x ∈ s \ {a}
left✝ : x ∈ s
hxa : ¬x ∈ {a}
⊢ f₀ x = Function.update f₀ a b x
[PROOFSTEP]
rwa [Function.update_noteq]
[GOAL]
α : Type u_2
β : Type u_1
s✝ t✝ s : Set α
t : Set β
f : α → β
inst✝ : Nonempty β
hs : Set.Finite s
h : encard s = encard t
⊢ ∃ f, BijOn f s t
[PROOFSTEP]
obtain ⟨f, hf, hinj⟩ := hs.exists_injOn_of_encard_le h.le
[GOAL]
case intro.intro
α : Type u_2
β : Type u_1
s✝ t✝ s : Set α
t : Set β
f✝ : α → β
inst✝ : Nonempty β
hs : Set.Finite s
h : encard s = encard t
f : α → β
hf : s ⊆ f ⁻¹' t
hinj : InjOn f s
⊢ ∃ f, BijOn f s t
[PROOFSTEP]
use f
[GOAL]
case h
α : Type u_2
β : Type u_1
s✝ t✝ s : Set α
t : Set β
f✝ : α → β
inst✝ : Nonempty β
hs : Set.Finite s
h : encard s = encard t
f : α → β
hf : s ⊆ f ⁻¹' t
hinj : InjOn f s
⊢ BijOn f s t
[PROOFSTEP]
convert hinj.bijOn_image
[GOAL]
case h.e'_5
α : Type u_2
β : Type u_1
s✝ t✝ s : Set α
t : Set β
f✝ : α → β
inst✝ : Nonempty β
hs : Set.Finite s
h : encard s = encard t
f : α → β
hf : s ⊆ f ⁻¹' t
hinj : InjOn f s
⊢ t = f '' s
[PROOFSTEP]
rw [(hs.image f).eq_of_subset_of_encard_le' (image_subset_iff.mpr hf) (h.symm.trans hinj.encard_image.symm).le]
[GOAL]
α : Type u_1
s t : Set α
hs : Set.Finite s
⊢ ↑(ncard s) = encard s
[PROOFSTEP]
rwa [ncard, ENat.coe_toNat_eq_self, ne_eq, encard_eq_top_iff, Set.Infinite, not_not]
[GOAL]
α : Type u_1
s✝ t s : Set α
⊢ Nat.card ↑s = ncard s
[PROOFSTEP]
obtain (h | h) := s.finite_or_infinite
[GOAL]
case inl
α : Type u_1
s✝ t s : Set α
h : Set.Finite s
⊢ Nat.card ↑s = ncard s
[PROOFSTEP]
have := h.fintype
[GOAL]
case inl
α : Type u_1
s✝ t s : Set α
h : Set.Finite s
this : Fintype ↑s
⊢ Nat.card ↑s = ncard s
[PROOFSTEP]
rw [ncard, h.encard_eq_coe_toFinset_card, Nat.card_eq_fintype_card, toFinite_toFinset, toFinset_card, ENat.toNat_coe]
[GOAL]
case inr
α : Type u_1
s✝ t s : Set α
h : Set.Infinite s
⊢ Nat.card ↑s = ncard s
[PROOFSTEP]
have := infinite_coe_iff.2 h
[GOAL]
case inr
α : Type u_1
s✝ t s : Set α
h : Set.Infinite s
this : Infinite ↑s
⊢ Nat.card ↑s = ncard s
[PROOFSTEP]
rw [ncard, h.encard_eq, Nat.card_eq_zero_of_infinite, ENat.toNat_top]
[GOAL]
α : Type u_1
s✝ t s : Set α
hs : autoParam (Set.Finite s) _auto✝
⊢ ncard s = Finset.card (Finite.toFinset hs)
[PROOFSTEP]
rw [← Nat.card_coe_set_eq, @Nat.card_eq_fintype_card _ hs.fintype, @Finite.card_toFinset _ _ hs.fintype hs]
[GOAL]
α : Type u_1
s✝ t s : Set α
inst✝ : Fintype ↑s
⊢ ncard s = Finset.card (toFinset s)
[PROOFSTEP]
simp [← Nat.card_coe_set_eq, Nat.card_eq_fintype_card]
[GOAL]
α : Type u_1
s t : Set α
k : ℕ
⊢ encard s ≤ ↑k ↔ Set.Finite s ∧ ncard s ≤ k
[PROOFSTEP]
rw [encard_le_coe_iff, and_congr_right_iff]
[GOAL]
α : Type u_1
s t : Set α
k : ℕ
⊢ Set.Finite s → ((∃ n₀, encard s = ↑n₀ ∧ n₀ ≤ k) ↔ ncard s ≤ k)
[PROOFSTEP]
exact fun hfin ↦
⟨fun ⟨n₀, hn₀, hle⟩ ↦ by rwa [ncard_def, hn₀, ENat.toNat_coe], fun h ↦ ⟨s.ncard, by rw [hfin.cast_ncard_eq], h⟩⟩
[GOAL]
α : Type u_1
s t : Set α
k : ℕ
hfin : Set.Finite s
x✝ : ∃ n₀, encard s = ↑n₀ ∧ n₀ ≤ k
n₀ : ℕ
hn₀ : encard s = ↑n₀
hle : n₀ ≤ k
⊢ ncard s ≤ k
[PROOFSTEP]
rwa [ncard_def, hn₀, ENat.toNat_coe]
[GOAL]
α : Type u_1
s t : Set α
k : ℕ
hfin : Set.Finite s
h : ncard s ≤ k
⊢ encard s = ↑(ncard s)
[PROOFSTEP]
rw [hfin.cast_ncard_eq]
[GOAL]
α : Type u_1
s t : Set α
hs : Set.Infinite s
⊢ Set.ncard s = 0
[PROOFSTEP]
rw [← Nat.card_coe_set_eq, @Nat.card_eq_zero_of_infinite _ hs.to_subtype]
[GOAL]
α : Type u_1
s t : Set α
hst : s ⊆ t
ht : autoParam (Set.Finite t) _auto✝
⊢ ncard s ≤ ncard t
[PROOFSTEP]
rw [← Nat.cast_le (α := ℕ∞), ht.cast_ncard_eq, (ht.subset hst).cast_ncard_eq]
[GOAL]
α : Type u_1
s t : Set α
hst : s ⊆ t
ht : autoParam (Set.Finite t) _auto✝
⊢ encard s ≤ encard t
[PROOFSTEP]
exact encard_mono hst
[GOAL]
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
⊢ ncard s = 0 ↔ s = ∅
[PROOFSTEP]
rw [← Nat.cast_inj (R := ℕ∞), hs.cast_ncard_eq, Nat.cast_zero, encard_eq_zero]
[GOAL]
α : Type u_1
s✝ t : Set α
s : Finset α
⊢ ncard ↑s = Finset.card s
[PROOFSTEP]
rw [ncard_eq_toFinset_card _, Finset.finite_toSet_toFinset]
[GOAL]
α✝ : Type ?u.175384
s t : Set α✝
α : Type u_1
⊢ ncard univ = Nat.card α
[PROOFSTEP]
cases' finite_or_infinite α with h h
[GOAL]
case inl
α✝ : Type ?u.175384
s t : Set α✝
α : Type u_1
h : Finite α
⊢ ncard univ = Nat.card α
[PROOFSTEP]
have hft := Fintype.ofFinite α
[GOAL]
case inl
α✝ : Type ?u.175384
s t : Set α✝
α : Type u_1
h : Finite α
hft : Fintype α
⊢ ncard univ = Nat.card α
[PROOFSTEP]
rw [ncard_eq_toFinset_card, Finite.toFinset_univ, Finset.card_univ, Nat.card_eq_fintype_card]
[GOAL]
case inr
α✝ : Type ?u.175384
s t : Set α✝
α : Type u_1
h : Infinite α
⊢ ncard univ = Nat.card α
[PROOFSTEP]
rw [Nat.card_eq_zero_of_infinite, Infinite.ncard]
[GOAL]
case inr
α✝ : Type ?u.175384
s t : Set α✝
α : Type u_1
h : Infinite α
⊢ Set.Infinite univ
[PROOFSTEP]
exact infinite_univ
[GOAL]
α✝ : Type ?u.176860
s t : Set α✝
α : Type u_1
⊢ ncard ∅ = 0
[PROOFSTEP]
rw [ncard_eq_zero]
[GOAL]
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
⊢ 0 < ncard s ↔ Set.Nonempty s
[PROOFSTEP]
rw [pos_iff_ne_zero, Ne.def, ncard_eq_zero hs, nonempty_iff_ne_empty]
[GOAL]
α : Type u_1
s t : Set α
hs : ncard s ≠ 0
⊢ Set.Nonempty s
[PROOFSTEP]
rw [nonempty_iff_ne_empty]
[GOAL]
α : Type u_1
s t : Set α
hs : ncard s ≠ 0
⊢ s ≠ ∅
[PROOFSTEP]
rintro rfl
[GOAL]
α : Type u_1
t : Set α
hs : ncard ∅ ≠ 0
⊢ False
[PROOFSTEP]
simp at hs
[GOAL]
α : Type u_1
s t : Set α
a : α
⊢ ncard {a} = 1
[PROOFSTEP]
simp [ncard_eq_toFinset_card]
[GOAL]
α : Type u_1
s✝ t : Set α
a : α
s : Set α
⊢ ncard ({a} ∩ s) ≤ 1
[PROOFSTEP]
rw [← Nat.cast_le (α := ℕ∞), (toFinite _).cast_ncard_eq, Nat.cast_one]
[GOAL]
α : Type u_1
s✝ t : Set α
a : α
s : Set α
⊢ encard ({a} ∩ s) ≤ 1
[PROOFSTEP]
apply encard_singleton_inter
[GOAL]
α : Type u_1
s t : Set α
a : α
h : ¬a ∈ s
hs : autoParam (Set.Finite s) _auto✝
⊢ ncard (insert a s) = ncard s + 1
[PROOFSTEP]
rw [← Nat.cast_inj (R := ℕ∞), (hs.insert a).cast_ncard_eq, Nat.cast_add, Nat.cast_one, hs.cast_ncard_eq,
encard_insert_of_not_mem h]
[GOAL]
α : Type u_1
s t : Set α
a : α
h : a ∈ s
⊢ ncard (insert a s) = ncard s
[PROOFSTEP]
rw [insert_eq_of_mem h]
[GOAL]
α : Type u_1
s✝ t : Set α
a : α
s : Set α
⊢ ncard (insert a s) ≤ ncard s + 1
[PROOFSTEP]
obtain hs | hs := s.finite_or_infinite
[GOAL]
case inl
α : Type u_1
s✝ t : Set α
a : α
s : Set α
hs : Set.Finite s
⊢ ncard (insert a s) ≤ ncard s + 1
[PROOFSTEP]
to_encard_tac
[GOAL]
case inl
α : Type u_1
s✝ t : Set α
a : α
s : Set α
hs : Set.Finite s
⊢ ↑(ncard (insert a s)) ≤ ↑(ncard s) + 1
[PROOFSTEP]
rw [hs.cast_ncard_eq, (hs.insert _).cast_ncard_eq]
[GOAL]
case inl
α : Type u_1
s✝ t : Set α
a : α
s : Set α
hs : Set.Finite s
⊢ encard (insert a s) ≤ encard s + 1
[PROOFSTEP]
apply encard_insert_le
[GOAL]
case inr
α : Type u_1
s✝ t : Set α
a : α
s : Set α
hs : Set.Infinite s
⊢ ncard (insert a s) ≤ ncard s + 1
[PROOFSTEP]
rw [(hs.mono (subset_insert a s)).ncard]
[GOAL]
case inr
α : Type u_1
s✝ t : Set α
a : α
s : Set α
hs : Set.Infinite s
⊢ 0 ≤ ncard s + 1
[PROOFSTEP]
exact Nat.zero_le _
[GOAL]
α : Type u_1
s t : Set α
a : α
inst✝ : Decidable (a ∈ s)
hs : autoParam (Set.Finite s) _auto✝
⊢ ncard (insert a s) = if a ∈ s then ncard s else ncard s + 1
[PROOFSTEP]
by_cases h : a ∈ s
[GOAL]
case pos
α : Type u_1
s t : Set α
a : α
inst✝ : Decidable (a ∈ s)
hs : autoParam (Set.Finite s) _auto✝
h : a ∈ s
⊢ ncard (insert a s) = if a ∈ s then ncard s else ncard s + 1
[PROOFSTEP]
rw [ncard_insert_of_mem h, if_pos h]
[GOAL]
case neg
α : Type u_1
s t : Set α
a : α
inst✝ : Decidable (a ∈ s)
hs : autoParam (Set.Finite s) _auto✝
h : ¬a ∈ s
⊢ ncard (insert a s) = if a ∈ s then ncard s else ncard s + 1
[PROOFSTEP]
rw [ncard_insert_of_not_mem h hs, if_neg h]
[GOAL]
α : Type u_1
s✝ t : Set α
a : α
s : Set α
⊢ ncard s ≤ ncard (insert a s)
[PROOFSTEP]
classical
refine' s.finite_or_infinite.elim (fun h ↦ _) (fun h ↦ by (rw [h.ncard]; exact Nat.zero_le _))
rw [ncard_insert_eq_ite h]; split_ifs <;> simp
[GOAL]
α : Type u_1
s✝ t : Set α
a : α
s : Set α
⊢ ncard s ≤ ncard (insert a s)
[PROOFSTEP]
refine' s.finite_or_infinite.elim (fun h ↦ _) (fun h ↦ by (rw [h.ncard]; exact Nat.zero_le _))
[GOAL]
α : Type u_1
s✝ t : Set α
a : α
s : Set α
h : Set.Infinite s
⊢ ncard s ≤ ncard (insert a s)
[PROOFSTEP]
rw [h.ncard]
[GOAL]
α : Type u_1
s✝ t : Set α
a : α
s : Set α
h : Set.Infinite s
⊢ 0 ≤ ncard (insert a s)
[PROOFSTEP]
exact Nat.zero_le _
[GOAL]
α : Type u_1
s✝ t : Set α
a : α
s : Set α
h : Set.Finite s
⊢ ncard s ≤ ncard (insert a s)
[PROOFSTEP]
rw [ncard_insert_eq_ite h]
[GOAL]
α : Type u_1
s✝ t : Set α
a : α
s : Set α
h : Set.Finite s
⊢ ncard s ≤ if a ∈ s then ncard s else ncard s + 1
[PROOFSTEP]
split_ifs
[GOAL]
case pos
α : Type u_1
s✝ t : Set α
a : α
s : Set α
h : Set.Finite s
h✝ : a ∈ s
⊢ ncard s ≤ ncard s
[PROOFSTEP]
simp
[GOAL]
case neg
α : Type u_1
s✝ t : Set α
a : α
s : Set α
h : Set.Finite s
h✝ : ¬a ∈ s
⊢ ncard s ≤ ncard s + 1
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
s t : Set α
a b : α
h : a ≠ b
⊢ ncard {a, b} = 2
[PROOFSTEP]
rw [ncard_insert_of_not_mem, ncard_singleton]
[GOAL]
α : Type u_1
s t : Set α
a b : α
h : a ≠ b
⊢ ¬a ∈ {b}
[PROOFSTEP]
simpa
[GOAL]
α : Type u_1
s t : Set α
a : α
h : a ∈ s
hs : autoParam (Set.Finite s) _auto✝
⊢ ncard (s \ {a}) + 1 = ncard s
[PROOFSTEP]
to_encard_tac
[GOAL]
α : Type u_1
s t : Set α
a : α
h : a ∈ s
hs : autoParam (Set.Finite s) _auto✝
⊢ ↑(ncard (s \ {a})) + 1 = ↑(ncard s)
[PROOFSTEP]
rw [hs.cast_ncard_eq, (hs.diff _).cast_ncard_eq, encard_diff_singleton_add_one h]
[GOAL]
α : Type u_1
s t : Set α
a : α
h : a ∈ s
hs : autoParam (Set.Finite s) _auto✝
⊢ ncard (s \ {a}) < ncard s
[PROOFSTEP]
rw [← ncard_diff_singleton_add_one h hs]
[GOAL]
α : Type u_1
s t : Set α
a : α
h : a ∈ s
hs : autoParam (Set.Finite s) _auto✝
⊢ ncard (s \ {a}) < ncard (s \ {a}) + 1
[PROOFSTEP]
apply lt_add_one
[GOAL]
α : Type u_1
s✝ t s : Set α
a : α
⊢ ncard (s \ {a}) ≤ ncard s
[PROOFSTEP]
obtain hs | hs := s.finite_or_infinite
[GOAL]
case inl
α : Type u_1
s✝ t s : Set α
a : α
hs : Set.Finite s
⊢ ncard (s \ {a}) ≤ ncard s
[PROOFSTEP]
apply ncard_le_of_subset (diff_subset _ _) hs
[GOAL]
case inr
α : Type u_1
s✝ t s : Set α
a : α
hs : Set.Infinite s
⊢ ncard (s \ {a}) ≤ ncard s
[PROOFSTEP]
convert @zero_le ℕ _ _
[GOAL]
case h.e'_3
α : Type u_1
s✝ t s : Set α
a : α
hs : Set.Infinite s
⊢ ncard (s \ {a}) = 0
[PROOFSTEP]
exact (hs.diff (by simp : Set.Finite { a })).ncard
[GOAL]
α : Type u_1
s✝ t s : Set α
a : α
hs : Set.Infinite s
⊢ Set.Finite {a}
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
s✝ t s : Set α
a : α
⊢ ncard s - 1 ≤ ncard (s \ {a})
[PROOFSTEP]
cases' s.finite_or_infinite with hs hs
[GOAL]
case inl
α : Type u_1
s✝ t s : Set α
a : α
hs : Set.Finite s
⊢ ncard s - 1 ≤ ncard (s \ {a})
[PROOFSTEP]
by_cases h : a ∈ s
[GOAL]
case pos
α : Type u_1
s✝ t s : Set α
a : α
hs : Set.Finite s
h : a ∈ s
⊢ ncard s - 1 ≤ ncard (s \ {a})
[PROOFSTEP]
rw [ncard_diff_singleton_of_mem h hs]
[GOAL]
case neg
α : Type u_1
s✝ t s : Set α
a : α
hs : Set.Finite s
h : ¬a ∈ s
⊢ ncard s - 1 ≤ ncard (s \ {a})
[PROOFSTEP]
rw [diff_singleton_eq_self h]
[GOAL]
case neg
α : Type u_1
s✝ t s : Set α
a : α
hs : Set.Finite s
h : ¬a ∈ s
⊢ ncard s - 1 ≤ ncard s
[PROOFSTEP]
apply Nat.pred_le
[GOAL]
case inr
α : Type u_1
s✝ t s : Set α
a : α
hs : Set.Infinite s
⊢ ncard s - 1 ≤ ncard (s \ {a})
[PROOFSTEP]
convert Nat.zero_le _
[GOAL]
case h.e'_3
α : Type u_1
s✝ t s : Set α
a : α
hs : Set.Infinite s
⊢ ncard s - 1 = 0
[PROOFSTEP]
rw [hs.ncard]
[GOAL]
α : Type u_1
s t : Set α
a b : α
ha : ¬a ∈ s
hb : b ∈ s
⊢ ncard (insert a s \ {b}) = ncard s
[PROOFSTEP]
rw [← ncard_exchange ha hb, ← singleton_union, ← singleton_union, union_diff_distrib,
@diff_singleton_eq_self _ b { a } fun h ↦ ha (by rwa [← mem_singleton_iff.mp h])]
[GOAL]
α : Type u_1
s t : Set α
a b : α
ha : ¬a ∈ s
hb : b ∈ s
h : b ∈ {a}
⊢ a ∈ s
[PROOFSTEP]
rwa [← mem_singleton_iff.mp h]
[GOAL]
α : Type u_2
s t : Set α
α✝ : Type u_1
f : α → α✝
hs : autoParam (Set.Finite s) _auto✝
⊢ ncard (f '' s) ≤ ncard s
[PROOFSTEP]
to_encard_tac
[GOAL]
α : Type u_2
s t : Set α
α✝ : Type u_1
f : α → α✝
hs : autoParam (Set.Finite s) _auto✝
⊢ ↑(ncard (f '' s)) ≤ ↑(ncard s)
[PROOFSTEP]
rw [hs.cast_ncard_eq, (hs.image _).cast_ncard_eq]
[GOAL]
α : Type u_2
s t : Set α
α✝ : Type u_1
f : α → α✝
hs : autoParam (Set.Finite s) _auto✝
⊢ encard (f '' s) ≤ encard s
[PROOFSTEP]
apply encard_image_le
[GOAL]
α : Type u_2
s t : Set α
α✝ : Type u_1
f : α → α✝
h : ncard (f '' s) = ncard s
hs : autoParam (Set.Finite s) _auto✝
⊢ InjOn f s
[PROOFSTEP]
rw [← Nat.cast_inj (R := ℕ∞), hs.cast_ncard_eq, (hs.image _).cast_ncard_eq] at h
[GOAL]
α : Type u_2
s t : Set α
α✝ : Type u_1
f : α → α✝
h : encard (f '' s) = encard s
hs : autoParam (Set.Finite s) _auto✝
⊢ InjOn f s
[PROOFSTEP]
exact hs.injOn_of_encard_image_eq h
[GOAL]
α : Type ?u.212510
s✝ t : Set α
β : Type u_1
α✝ : Type u_2
f : α✝ → β
s : Set β
H : Function.Injective f
hs : s ⊆ range f
⊢ ncard (f ⁻¹' s) = ncard s
[PROOFSTEP]
rw [← ncard_image_of_injective _ H, image_preimage_eq_iff.mpr hs]
[GOAL]
α : Type u_2
s t : Set α
β : Type u_1
f : α → β
y : β
hs : autoParam (Set.Finite s) _auto✝
⊢ ncard {x | x ∈ s ∧ f x = y} ≠ 0 ↔ y ∈ f '' s
[PROOFSTEP]
refine' ⟨nonempty_of_ncard_ne_zero, _⟩
[GOAL]
α : Type u_2
s t : Set α
β : Type u_1
f : α → β
y : β
hs : autoParam (Set.Finite s) _auto✝
⊢ y ∈ f '' s → ncard {x | x ∈ s ∧ f x = y} ≠ 0
[PROOFSTEP]
rintro ⟨z, hz, rfl⟩
[GOAL]
case intro.intro
α : Type u_2
s t : Set α
β : Type u_1
f : α → β
hs : autoParam (Set.Finite s) _auto✝
z : α
hz : z ∈ s
⊢ ncard {x | x ∈ s ∧ f x = f z} ≠ 0
[PROOFSTEP]
exact @ncard_ne_zero_of_mem _ ({x ∈ s | f x = f z}) z (mem_sep hz rfl) (hs.subset (sep_subset _ _))
[GOAL]
α : Type u_1
s✝ t : Set α
P : α → Prop
s : Set α
⊢ ncard {x | ↑x ∈ s} = ncard (s ∩ setOf P)
[PROOFSTEP]
convert (ncard_image_of_injective _ (@Subtype.coe_injective _ P)).symm
[GOAL]
case h.e'_3.h.e'_2
α : Type u_1
s✝ t : Set α
P : α → Prop
s : Set α
⊢ s ∩ setOf P = (fun a => ↑a) '' {x | ↑x ∈ s}
[PROOFSTEP]
ext x
[GOAL]
case h.e'_3.h.e'_2.h
α : Type u_1
s✝ t : Set α
P : α → Prop
s : Set α
x : α
⊢ x ∈ s ∩ setOf P ↔ x ∈ (fun a => ↑a) '' {x | ↑x ∈ s}
[PROOFSTEP]
simp [← and_assoc, exists_eq_right]
[GOAL]
α : Type u_1
s t : Set α
h : s ⊆ t
h' : ncard t ≤ ncard s
ht : autoParam (Set.Finite t) _auto✝
⊢ encard t ≤ encard s
[PROOFSTEP]
rwa [← Nat.cast_le (α := ℕ∞), ht.cast_ncard_eq, (ht.subset h).cast_ncard_eq] at h'
[GOAL]
α : Type u_1
s t : Set α
a : α
P : α → Prop
h : ncard {x | x ∈ s ∧ P x} = ncard s
ha : a ∈ s
hs : autoParam (Set.Finite s) _auto✝
⊢ {x | x ∈ s ∧ P x} ⊆ s
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
s t : Set α
h : s ⊂ t
ht : autoParam (Set.Finite t) _auto✝
⊢ ncard s < ncard t
[PROOFSTEP]
rw [← Nat.cast_lt (α := ℕ∞), ht.cast_ncard_eq, (ht.subset h.subset).cast_ncard_eq]
[GOAL]
α : Type u_1
s t : Set α
h : s ⊂ t
ht : autoParam (Set.Finite t) _auto✝
⊢ encard s < encard t
[PROOFSTEP]
exact ht.encard_lt_encard h
[GOAL]
α : Type u_1
s t : Set α
n : ℕ
f : (i : ℕ) → i < n → α
hf : ∀ (a : α), a ∈ s → ∃ i h, f i h = a
hf' : ∀ (i : ℕ) (h : i < n), f i h ∈ s
f_inj : ∀ (i j : ℕ) (hi : i < n) (hj : j < n), f i hi = f j hj → i = j
hs : autoParam (Set.Finite s) _auto✝
⊢ ncard s = n
[PROOFSTEP]
rw [ncard_eq_toFinset_card _ hs]
[GOAL]
α : Type u_1
s t : Set α
n : ℕ
f : (i : ℕ) → i < n → α
hf : ∀ (a : α), a ∈ s → ∃ i h, f i h = a
hf' : ∀ (i : ℕ) (h : i < n), f i h ∈ s
f_inj : ∀ (i j : ℕ) (hi : i < n) (hj : j < n), f i hi = f j hj → i = j
hs : autoParam (Set.Finite s) _auto✝
⊢ Finset.card (Finite.toFinset hs) = n
[PROOFSTEP]
apply Finset.card_eq_of_bijective
[GOAL]
case hf
α : Type u_1
s t : Set α
n : ℕ
f : (i : ℕ) → i < n → α
hf : ∀ (a : α), a ∈ s → ∃ i h, f i h = a
hf' : ∀ (i : ℕ) (h : i < n), f i h ∈ s
f_inj : ∀ (i j : ℕ) (hi : i < n) (hj : j < n), f i hi = f j hj → i = j
hs : autoParam (Set.Finite s) _auto✝
⊢ ∀ (a : α), a ∈ Finite.toFinset hs → ∃ i h, ?f i h = a
case hf'
α : Type u_1
s t : Set α
n : ℕ
f : (i : ℕ) → i < n → α
hf : ∀ (a : α), a ∈ s → ∃ i h, f i h = a
hf' : ∀ (i : ℕ) (h : i < n), f i h ∈ s
f_inj : ∀ (i j : ℕ) (hi : i < n) (hj : j < n), f i hi = f j hj → i = j
hs : autoParam (Set.Finite s) _auto✝
⊢ ∀ (i : ℕ) (h : i < n), ?f i h ∈ Finite.toFinset hs
case f_inj
α : Type u_1
s t : Set α
n : ℕ
f : (i : ℕ) → i < n → α
hf : ∀ (a : α), a ∈ s → ∃ i h, f i h = a
hf' : ∀ (i : ℕ) (h : i < n), f i h ∈ s
f_inj : ∀ (i j : ℕ) (hi : i < n) (hj : j < n), f i hi = f j hj → i = j
hs : autoParam (Set.Finite s) _auto✝
⊢ ∀ (i j : ℕ) (hi : i < n) (hj : j < n), ?f i hi = ?f j hj → i = j
case f
α : Type u_1
s t : Set α
n : ℕ
f : (i : ℕ) → i < n → α
hf : ∀ (a : α), a ∈ s → ∃ i h, f i h = a
hf' : ∀ (i : ℕ) (h : i < n), f i h ∈ s
f_inj : ∀ (i j : ℕ) (hi : i < n) (hj : j < n), f i hi = f j hj → i = j
hs : autoParam (Set.Finite s) _auto✝
⊢ (i : ℕ) → i < n → α
[PROOFSTEP]
all_goals simpa
[GOAL]
case hf
α : Type u_1
s t : Set α
n : ℕ
f : (i : ℕ) → i < n → α
hf : ∀ (a : α), a ∈ s → ∃ i h, f i h = a
hf' : ∀ (i : ℕ) (h : i < n), f i h ∈ s
f_inj : ∀ (i j : ℕ) (hi : i < n) (hj : j < n), f i hi = f j hj → i = j
hs : autoParam (Set.Finite s) _auto✝
⊢ ∀ (a : α), a ∈ Finite.toFinset hs → ∃ i h, ?f i h = a
[PROOFSTEP]
simpa
[GOAL]
case hf'
α : Type u_1
s t : Set α
n : ℕ
f : (i : ℕ) → i < n → α
hf : ∀ (a : α), a ∈ s → ∃ i h, f i h = a
hf' : ∀ (i : ℕ) (h : i < n), f i h ∈ s
f_inj : ∀ (i j : ℕ) (hi : i < n) (hj : j < n), f i hi = f j hj → i = j
hs : autoParam (Set.Finite s) _auto✝
⊢ ∀ (i : ℕ) (h : i < n), f i h ∈ Finite.toFinset hs
[PROOFSTEP]
simpa
[GOAL]
case f_inj
α : Type u_1
s t : Set α
n : ℕ
f : (i : ℕ) → i < n → α
hf : ∀ (a : α), a ∈ s → ∃ i h, f i h = a
hf' : ∀ (i : ℕ) (h : i < n), f i h ∈ s
f_inj : ∀ (i j : ℕ) (hi : i < n) (hj : j < n), f i hi = f j hj → i = j
hs : autoParam (Set.Finite s) _auto✝
⊢ ∀ (i j : ℕ) (hi : i < n) (hj : j < n), f i hi = f j hj → i = j
[PROOFSTEP]
simpa
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
h₁ : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
h₂ : ∀ (a b : α) (ha : a ∈ s) (hb : b ∈ s), f a ha = f b hb → a = b
h₃ : ∀ (b : β), b ∈ t → ∃ a ha, f a ha = b
⊢ ncard s = ncard t
[PROOFSTEP]
set f' : s → t := fun x ↦ ⟨f x.1 x.2, h₁ _ _⟩
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
h₁ : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
h₂ : ∀ (a b : α) (ha : a ∈ s) (hb : b ∈ s), f a ha = f b hb → a = b
h₃ : ∀ (b : β), b ∈ t → ∃ a ha, f a ha = b
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
⊢ ncard s = ncard t
[PROOFSTEP]
have hbij : f'.Bijective := by
constructor
· rintro ⟨x, hx⟩ ⟨y, hy⟩ hxy
simp only [Subtype.mk.injEq] at hxy ⊢
exact h₂ _ _ hx hy hxy
rintro ⟨y, hy⟩
obtain ⟨a, ha, rfl⟩ := h₃ y hy
simp only [Subtype.mk.injEq, Subtype.exists]
exact ⟨_, ha, rfl⟩
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
h₁ : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
h₂ : ∀ (a b : α) (ha : a ∈ s) (hb : b ∈ s), f a ha = f b hb → a = b
h₃ : ∀ (b : β), b ∈ t → ∃ a ha, f a ha = b
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
⊢ Function.Bijective f'
[PROOFSTEP]
constructor
[GOAL]
case left
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
h₁ : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
h₂ : ∀ (a b : α) (ha : a ∈ s) (hb : b ∈ s), f a ha = f b hb → a = b
h₃ : ∀ (b : β), b ∈ t → ∃ a ha, f a ha = b
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
⊢ Function.Injective f'
[PROOFSTEP]
rintro ⟨x, hx⟩ ⟨y, hy⟩ hxy
[GOAL]
case left.mk.mk
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
h₁ : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
h₂ : ∀ (a b : α) (ha : a ∈ s) (hb : b ∈ s), f a ha = f b hb → a = b
h₃ : ∀ (b : β), b ∈ t → ∃ a ha, f a ha = b
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
x : α
hx : x ∈ s
y : α
hy : y ∈ s
hxy : f' { val := x, property := hx } = f' { val := y, property := hy }
⊢ { val := x, property := hx } = { val := y, property := hy }
[PROOFSTEP]
simp only [Subtype.mk.injEq] at hxy ⊢
[GOAL]
case left.mk.mk
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
h₁ : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
h₂ : ∀ (a b : α) (ha : a ∈ s) (hb : b ∈ s), f a ha = f b hb → a = b
h₃ : ∀ (b : β), b ∈ t → ∃ a ha, f a ha = b
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
x : α
hx : x ∈ s
y : α
hy : y ∈ s
hxy : f x (_ : ↑{ val := x, property := hx } ∈ s) = f y (_ : ↑{ val := y, property := hy } ∈ s)
⊢ x = y
[PROOFSTEP]
exact h₂ _ _ hx hy hxy
[GOAL]
case right
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
h₁ : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
h₂ : ∀ (a b : α) (ha : a ∈ s) (hb : b ∈ s), f a ha = f b hb → a = b
h₃ : ∀ (b : β), b ∈ t → ∃ a ha, f a ha = b
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
⊢ Function.Surjective f'
[PROOFSTEP]
rintro ⟨y, hy⟩
[GOAL]
case right.mk
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
h₁ : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
h₂ : ∀ (a b : α) (ha : a ∈ s) (hb : b ∈ s), f a ha = f b hb → a = b
h₃ : ∀ (b : β), b ∈ t → ∃ a ha, f a ha = b
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
y : β
hy : y ∈ t
⊢ ∃ a, f' a = { val := y, property := hy }
[PROOFSTEP]
obtain ⟨a, ha, rfl⟩ := h₃ y hy
[GOAL]
case right.mk.intro.intro
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
h₁ : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
h₂ : ∀ (a b : α) (ha : a ∈ s) (hb : b ∈ s), f a ha = f b hb → a = b
h₃ : ∀ (b : β), b ∈ t → ∃ a ha, f a ha = b
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
a : α
ha : a ∈ s
hy : f a ha ∈ t
⊢ ∃ a_1, f' a_1 = { val := f a ha, property := hy }
[PROOFSTEP]
simp only [Subtype.mk.injEq, Subtype.exists]
[GOAL]
case right.mk.intro.intro
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
h₁ : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
h₂ : ∀ (a b : α) (ha : a ∈ s) (hb : b ∈ s), f a ha = f b hb → a = b
h₃ : ∀ (b : β), b ∈ t → ∃ a ha, f a ha = b
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
a : α
ha : a ∈ s
hy : f a ha ∈ t
⊢ ∃ a_1 h, f a_1 (_ : ↑{ val := a_1, property := (_ : a_1 ∈ s) } ∈ s) = f a ha
[PROOFSTEP]
exact ⟨_, ha, rfl⟩
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
h₁ : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
h₂ : ∀ (a b : α) (ha : a ∈ s) (hb : b ∈ s), f a ha = f b hb → a = b
h₃ : ∀ (b : β), b ∈ t → ∃ a ha, f a ha = b
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
hbij : Function.Bijective f'
⊢ ncard s = ncard t
[PROOFSTEP]
simp_rw [← Nat.card_coe_set_eq]
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
h₁ : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
h₂ : ∀ (a b : α) (ha : a ∈ s) (hb : b ∈ s), f a ha = f b hb → a = b
h₃ : ∀ (b : β), b ∈ t → ∃ a ha, f a ha = b
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
hbij : Function.Bijective f'
⊢ Nat.card ↑s = Nat.card ↑t
[PROOFSTEP]
exact Nat.card_congr (Equiv.ofBijective f' hbij)
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : α → β
hf : ∀ (a : α), a ∈ s → f a ∈ t
f_inj : InjOn f s
ht : autoParam (Set.Finite t) _auto✝
⊢ ncard s ≤ ncard t
[PROOFSTEP]
have hle := encard_le_encard_of_injOn hf f_inj
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : α → β
hf : ∀ (a : α), a ∈ s → f a ∈ t
f_inj : InjOn f s
ht : autoParam (Set.Finite t) _auto✝
hle : encard s ≤ encard t
⊢ ncard s ≤ ncard t
[PROOFSTEP]
to_encard_tac
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : α → β
hf : ∀ (a : α), a ∈ s → f a ∈ t
f_inj : InjOn f s
ht : autoParam (Set.Finite t) _auto✝
hle : encard s ≤ encard t
⊢ ↑(ncard s) ≤ ↑(ncard t)
[PROOFSTEP]
rwa [ht.cast_ncard_eq, (ht.finite_of_encard_le hle).cast_ncard_eq]
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
hc : ncard t < ncard s
f : α → β
hf : ∀ (a : α), a ∈ s → f a ∈ t
ht : autoParam (Set.Finite t) _auto✝
⊢ ∃ x, x ∈ s ∧ ∃ y, y ∈ s ∧ x ≠ y ∧ f x = f y
[PROOFSTEP]
by_contra h'
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
hc : ncard t < ncard s
f : α → β
hf : ∀ (a : α), a ∈ s → f a ∈ t
ht : autoParam (Set.Finite t) _auto✝
h' : ¬∃ x, x ∈ s ∧ ∃ y, y ∈ s ∧ x ≠ y ∧ f x = f y
⊢ False
[PROOFSTEP]
simp only [Ne.def, exists_prop, not_exists, not_and, not_imp_not] at h'
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
hc : ncard t < ncard s
f : α → β
hf : ∀ (a : α), a ∈ s → f a ∈ t
ht : autoParam (Set.Finite t) _auto✝
h' : ∀ (x : α), x ∈ s → ∀ (x_1 : α), x_1 ∈ s → f x = f x_1 → x = x_1
⊢ False
[PROOFSTEP]
exact (ncard_le_ncard_of_injOn f hf h' ht).not_lt hc
[GOAL]
α : Type u_1
s t : Set α
n : ℕ
f : ℕ → α
hf : ∀ (i : ℕ), i < n → f i ∈ s
f_inj : ∀ (i : ℕ), i < n → ∀ (j : ℕ), j < n → f i = f j → i = j
hs : autoParam (Set.Finite s) _auto✝
⊢ n ≤ ncard s
[PROOFSTEP]
rw [ncard_eq_toFinset_card _ hs]
[GOAL]
α : Type u_1
s t : Set α
n : ℕ
f : ℕ → α
hf : ∀ (i : ℕ), i < n → f i ∈ s
f_inj : ∀ (i : ℕ), i < n → ∀ (j : ℕ), j < n → f i = f j → i = j
hs : autoParam (Set.Finite s) _auto✝
⊢ n ≤ Finset.card (Finite.toFinset hs)
[PROOFSTEP]
apply Finset.le_card_of_inj_on_range
[GOAL]
case hf
α : Type u_1
s t : Set α
n : ℕ
f : ℕ → α
hf : ∀ (i : ℕ), i < n → f i ∈ s
f_inj : ∀ (i : ℕ), i < n → ∀ (j : ℕ), j < n → f i = f j → i = j
hs : autoParam (Set.Finite s) _auto✝
⊢ ∀ (i : ℕ), i < n → ?f i ∈ Finite.toFinset hs
[PROOFSTEP]
simpa
[GOAL]
case f_inj
α : Type u_1
s t : Set α
n : ℕ
f : ℕ → α
hf : ∀ (i : ℕ), i < n → f i ∈ s
f_inj : ∀ (i : ℕ), i < n → ∀ (j : ℕ), j < n → f i = f j → i = j
hs : autoParam (Set.Finite s) _auto✝
⊢ ∀ (i : ℕ), i < n → ∀ (j : ℕ), j < n → f i = f j → i = j
[PROOFSTEP]
simpa
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hinj : ∀ (a₁ a₂ : α) (ha₁ : a₁ ∈ s) (ha₂ : a₂ ∈ s), f a₁ ha₁ = f a₂ ha₂ → a₁ = a₂
hst : ncard t ≤ ncard s
ht : autoParam (Set.Finite t) _auto✝
⊢ ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
[PROOFSTEP]
intro b hb
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hinj : ∀ (a₁ a₂ : α) (ha₁ : a₁ ∈ s) (ha₂ : a₂ ∈ s), f a₁ ha₁ = f a₂ ha₂ → a₁ = a₂
hst : ncard t ≤ ncard s
ht : autoParam (Set.Finite t) _auto✝
b : β
hb : b ∈ t
⊢ ∃ a ha, b = f a ha
[PROOFSTEP]
set f' : s → t := fun x ↦ ⟨f x.1 x.2, hf _ _⟩
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hinj : ∀ (a₁ a₂ : α) (ha₁ : a₁ ∈ s) (ha₂ : a₂ ∈ s), f a₁ ha₁ = f a₂ ha₂ → a₁ = a₂
hst : ncard t ≤ ncard s
ht : autoParam (Set.Finite t) _auto✝
b : β
hb : b ∈ t
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
⊢ ∃ a ha, b = f a ha
[PROOFSTEP]
have finj : f'.Injective := by
rintro ⟨x, hx⟩ ⟨y, hy⟩ hxy
simp only [Subtype.mk.injEq] at hxy ⊢
apply hinj _ _ hx hy hxy
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hinj : ∀ (a₁ a₂ : α) (ha₁ : a₁ ∈ s) (ha₂ : a₂ ∈ s), f a₁ ha₁ = f a₂ ha₂ → a₁ = a₂
hst : ncard t ≤ ncard s
ht : autoParam (Set.Finite t) _auto✝
b : β
hb : b ∈ t
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
⊢ Function.Injective f'
[PROOFSTEP]
rintro ⟨x, hx⟩ ⟨y, hy⟩ hxy
[GOAL]
case mk.mk
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hinj : ∀ (a₁ a₂ : α) (ha₁ : a₁ ∈ s) (ha₂ : a₂ ∈ s), f a₁ ha₁ = f a₂ ha₂ → a₁ = a₂
hst : ncard t ≤ ncard s
ht : autoParam (Set.Finite t) _auto✝
b : β
hb : b ∈ t
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
x : α
hx : x ∈ s
y : α
hy : y ∈ s
hxy : f' { val := x, property := hx } = f' { val := y, property := hy }
⊢ { val := x, property := hx } = { val := y, property := hy }
[PROOFSTEP]
simp only [Subtype.mk.injEq] at hxy ⊢
[GOAL]
case mk.mk
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hinj : ∀ (a₁ a₂ : α) (ha₁ : a₁ ∈ s) (ha₂ : a₂ ∈ s), f a₁ ha₁ = f a₂ ha₂ → a₁ = a₂
hst : ncard t ≤ ncard s
ht : autoParam (Set.Finite t) _auto✝
b : β
hb : b ∈ t
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
x : α
hx : x ∈ s
y : α
hy : y ∈ s
hxy : f x (_ : ↑{ val := x, property := hx } ∈ s) = f y (_ : ↑{ val := y, property := hy } ∈ s)
⊢ x = y
[PROOFSTEP]
apply hinj _ _ hx hy hxy
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hinj : ∀ (a₁ a₂ : α) (ha₁ : a₁ ∈ s) (ha₂ : a₂ ∈ s), f a₁ ha₁ = f a₂ ha₂ → a₁ = a₂
hst : ncard t ≤ ncard s
ht : autoParam (Set.Finite t) _auto✝
b : β
hb : b ∈ t
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
finj : Function.Injective f'
⊢ ∃ a ha, b = f a ha
[PROOFSTEP]
have hft := ht.fintype
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hinj : ∀ (a₁ a₂ : α) (ha₁ : a₁ ∈ s) (ha₂ : a₂ ∈ s), f a₁ ha₁ = f a₂ ha₂ → a₁ = a₂
hst : ncard t ≤ ncard s
ht : autoParam (Set.Finite t) _auto✝
b : β
hb : b ∈ t
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
finj : Function.Injective f'
hft : Fintype ↑t
⊢ ∃ a ha, b = f a ha
[PROOFSTEP]
have hft' := Fintype.ofInjective f' finj
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hinj : ∀ (a₁ a₂ : α) (ha₁ : a₁ ∈ s) (ha₂ : a₂ ∈ s), f a₁ ha₁ = f a₂ ha₂ → a₁ = a₂
hst : ncard t ≤ ncard s
ht : autoParam (Set.Finite t) _auto✝
b : β
hb : b ∈ t
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
finj : Function.Injective f'
hft : Fintype ↑t
hft' : Fintype ↑s
⊢ ∃ a ha, b = f a ha
[PROOFSTEP]
set f'' : ∀ a, a ∈ s.toFinset → β := fun a h ↦ f a (by simpa using h)
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hinj : ∀ (a₁ a₂ : α) (ha₁ : a₁ ∈ s) (ha₂ : a₂ ∈ s), f a₁ ha₁ = f a₂ ha₂ → a₁ = a₂
hst : ncard t ≤ ncard s
ht : autoParam (Set.Finite t) _auto✝
b : β
hb : b ∈ t
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
finj : Function.Injective f'
hft : Fintype ↑t
hft' : Fintype ↑s
a : α
h : a ∈ toFinset s
⊢ a ∈ s
[PROOFSTEP]
simpa using h
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hinj : ∀ (a₁ a₂ : α) (ha₁ : a₁ ∈ s) (ha₂ : a₂ ∈ s), f a₁ ha₁ = f a₂ ha₂ → a₁ = a₂
hst : ncard t ≤ ncard s
ht : autoParam (Set.Finite t) _auto✝
b : β
hb : b ∈ t
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
finj : Function.Injective f'
hft : Fintype ↑t
hft' : Fintype ↑s
f'' : (a : α) → a ∈ toFinset s → β := fun a h => f a (_ : a ∈ s)
⊢ ∃ a ha, b = f a ha
[PROOFSTEP]
convert @Finset.surj_on_of_inj_on_of_card_le _ _ _ t.toFinset f'' _ _ _ _ (by simpa)
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hinj : ∀ (a₁ a₂ : α) (ha₁ : a₁ ∈ s) (ha₂ : a₂ ∈ s), f a₁ ha₁ = f a₂ ha₂ → a₁ = a₂
hst : ncard t ≤ ncard s
ht : autoParam (Set.Finite t) _auto✝
b : β
hb : b ∈ t
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
finj : Function.Injective f'
hft : Fintype ↑t
hft' : Fintype ↑s
f'' : (a : α) → a ∈ toFinset s → β := fun a h => f a (_ : a ∈ s)
⊢ ?m.261721 ∈ toFinset t
[PROOFSTEP]
simpa
[GOAL]
case h.e'_2.h.h.e'_1.a
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hinj : ∀ (a₁ a₂ : α) (ha₁ : a₁ ∈ s) (ha₂ : a₂ ∈ s), f a₁ ha₁ = f a₂ ha₂ → a₁ = a₂
hst : ncard t ≤ ncard s
ht : autoParam (Set.Finite t) _auto✝
b : β
hb : b ∈ t
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
finj : Function.Injective f'
hft : Fintype ↑t
hft' : Fintype ↑s
f'' : (a : α) → a ∈ toFinset s → β := fun a h => f a (_ : a ∈ s)
x✝ : α
⊢ x✝ ∈ s ↔ x✝ ∈ toFinset s
[PROOFSTEP]
simp
[GOAL]
case convert_1
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hinj : ∀ (a₁ a₂ : α) (ha₁ : a₁ ∈ s) (ha₂ : a₂ ∈ s), f a₁ ha₁ = f a₂ ha₂ → a₁ = a₂
hst : ncard t ≤ ncard s
ht : autoParam (Set.Finite t) _auto✝
b : β
hb : b ∈ t
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
finj : Function.Injective f'
hft : Fintype ↑t
hft' : Fintype ↑s
f'' : (a : α) → a ∈ toFinset s → β := fun a h => f a (_ : a ∈ s)
⊢ ∀ (a : α) (ha : a ∈ toFinset s), f'' a ha ∈ toFinset t
[PROOFSTEP]
simp [hf]
[GOAL]
case convert_2
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hinj : ∀ (a₁ a₂ : α) (ha₁ : a₁ ∈ s) (ha₂ : a₂ ∈ s), f a₁ ha₁ = f a₂ ha₂ → a₁ = a₂
hst : ncard t ≤ ncard s
ht : autoParam (Set.Finite t) _auto✝
b : β
hb : b ∈ t
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
finj : Function.Injective f'
hft : Fintype ↑t
hft' : Fintype ↑s
f'' : (a : α) → a ∈ toFinset s → β := fun a h => f a (_ : a ∈ s)
⊢ ∀ (a₁ a₂ : α) (ha₁ : a₁ ∈ toFinset s) (ha₂ : a₂ ∈ toFinset s), f'' a₁ ha₁ = f'' a₂ ha₂ → a₁ = a₂
[PROOFSTEP]
intros a₁ a₂ ha₁ ha₂ h
[GOAL]
case convert_2
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hinj : ∀ (a₁ a₂ : α) (ha₁ : a₁ ∈ s) (ha₂ : a₂ ∈ s), f a₁ ha₁ = f a₂ ha₂ → a₁ = a₂
hst : ncard t ≤ ncard s
ht : autoParam (Set.Finite t) _auto✝
b : β
hb : b ∈ t
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
finj : Function.Injective f'
hft : Fintype ↑t
hft' : Fintype ↑s
f'' : (a : α) → a ∈ toFinset s → β := fun a h => f a (_ : a ∈ s)
a₁ a₂ : α
ha₁ : a₁ ∈ toFinset s
ha₂ : a₂ ∈ toFinset s
h : f'' a₁ ha₁ = f'' a₂ ha₂
⊢ a₁ = a₂
[PROOFSTEP]
rw [mem_toFinset] at ha₁ ha₂
[GOAL]
case convert_2
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hinj : ∀ (a₁ a₂ : α) (ha₁ : a₁ ∈ s) (ha₂ : a₂ ∈ s), f a₁ ha₁ = f a₂ ha₂ → a₁ = a₂
hst : ncard t ≤ ncard s
ht : autoParam (Set.Finite t) _auto✝
b : β
hb : b ∈ t
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
finj : Function.Injective f'
hft : Fintype ↑t
hft' : Fintype ↑s
f'' : (a : α) → a ∈ toFinset s → β := fun a h => f a (_ : a ∈ s)
a₁ a₂ : α
ha₁✝ : a₁ ∈ toFinset s
ha₁ : a₁ ∈ s
ha₂✝ : a₂ ∈ toFinset s
ha₂ : a₂ ∈ s
h : f'' a₁ ha₁✝ = f'' a₂ ha₂✝
⊢ a₁ = a₂
[PROOFSTEP]
exact hinj _ _ ha₁ ha₂ h
[GOAL]
case convert_3
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hinj : ∀ (a₁ a₂ : α) (ha₁ : a₁ ∈ s) (ha₂ : a₂ ∈ s), f a₁ ha₁ = f a₂ ha₂ → a₁ = a₂
hst : ncard t ≤ ncard s
ht : autoParam (Set.Finite t) _auto✝
b : β
hb : b ∈ t
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
finj : Function.Injective f'
hft : Fintype ↑t
hft' : Fintype ↑s
f'' : (a : α) → a ∈ toFinset s → β := fun a h => f a (_ : a ∈ s)
⊢ Finset.card (toFinset t) ≤ Finset.card (toFinset s)
[PROOFSTEP]
rwa [← ncard_eq_toFinset_card', ← ncard_eq_toFinset_card']
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
⊢ a₁ = a₂
[PROOFSTEP]
classical
set f' : s → t := fun x ↦ ⟨f x.1 x.2, hf _ _⟩
have hsurj : f'.Surjective := by
rintro ⟨y, hy⟩
obtain ⟨a, ha, rfl⟩ := hsurj y hy
simp only [Subtype.mk.injEq, Subtype.exists]
exact ⟨_, ha, rfl⟩
haveI := hs.fintype
haveI := Fintype.ofSurjective _ hsurj
set f'' : ∀ a, a ∈ s.toFinset → β := fun a h ↦ f a (by simpa using h)
exact
@Finset.inj_on_of_surj_on_of_card_le _ _ _ t.toFinset f''
(fun a ha ↦ by {rw [mem_toFinset] at ha ⊢; exact hf a ha
})
(by simpa)
(by {rwa [← ncard_eq_toFinset_card', ← ncard_eq_toFinset_card']
})
a₁ a₂ (by simpa) (by simpa) (by simpa)
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
⊢ a₁ = a₂
[PROOFSTEP]
set f' : s → t := fun x ↦ ⟨f x.1 x.2, hf _ _⟩
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
⊢ a₁ = a₂
[PROOFSTEP]
have hsurj : f'.Surjective := by
rintro ⟨y, hy⟩
obtain ⟨a, ha, rfl⟩ := hsurj y hy
simp only [Subtype.mk.injEq, Subtype.exists]
exact ⟨_, ha, rfl⟩
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
⊢ Function.Surjective f'
[PROOFSTEP]
rintro ⟨y, hy⟩
[GOAL]
case mk
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
y : β
hy : y ∈ t
⊢ ∃ a, f' a = { val := y, property := hy }
[PROOFSTEP]
obtain ⟨a, ha, rfl⟩ := hsurj y hy
[GOAL]
case mk.intro.intro
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
a : α
ha : a ∈ s
hy : f a ha ∈ t
⊢ ∃ a_1, f' a_1 = { val := f a ha, property := hy }
[PROOFSTEP]
simp only [Subtype.mk.injEq, Subtype.exists]
[GOAL]
case mk.intro.intro
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
a : α
ha : a ∈ s
hy : f a ha ∈ t
⊢ ∃ a_1 h, f a_1 (_ : ↑{ val := a_1, property := (_ : a_1 ∈ s) } ∈ s) = f a ha
[PROOFSTEP]
exact ⟨_, ha, rfl⟩
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj✝ : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
hsurj : Function.Surjective f'
⊢ a₁ = a₂
[PROOFSTEP]
haveI := hs.fintype
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj✝ : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
hsurj : Function.Surjective f'
this : Fintype ↑s
⊢ a₁ = a₂
[PROOFSTEP]
haveI := Fintype.ofSurjective _ hsurj
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj✝ : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
hsurj : Function.Surjective f'
this✝ : Fintype ↑s
this : Fintype ↑t
⊢ a₁ = a₂
[PROOFSTEP]
set f'' : ∀ a, a ∈ s.toFinset → β := fun a h ↦ f a (by simpa using h)
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj✝ : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
hsurj : Function.Surjective f'
this✝ : Fintype ↑s
this : Fintype ↑t
a : α
h : a ∈ toFinset s
⊢ a ∈ s
[PROOFSTEP]
simpa using h
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj✝ : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
hsurj : Function.Surjective f'
this✝ : Fintype ↑s
this : Fintype ↑t
f'' : (a : α) → a ∈ toFinset s → β := fun a h => f a (_ : a ∈ s)
⊢ a₁ = a₂
[PROOFSTEP]
exact
@Finset.inj_on_of_surj_on_of_card_le _ _ _ t.toFinset f''
(fun a ha ↦ by {rw [mem_toFinset] at ha ⊢; exact hf a ha
})
(by simpa)
(by {rwa [← ncard_eq_toFinset_card', ← ncard_eq_toFinset_card']
})
a₁ a₂ (by simpa) (by simpa) (by simpa)
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj✝ : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
hsurj : Function.Surjective f'
this✝ : Fintype ↑s
this : Fintype ↑t
f'' : (a : α) → a ∈ toFinset s → β := fun a h => f a (_ : a ∈ s)
a : α
ha : a ∈ toFinset s
⊢ f'' a ha ∈ toFinset t
[PROOFSTEP]
{rw [mem_toFinset] at ha ⊢; exact hf a ha
}
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj✝ : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
hsurj : Function.Surjective f'
this✝ : Fintype ↑s
this : Fintype ↑t
f'' : (a : α) → a ∈ toFinset s → β := fun a h => f a (_ : a ∈ s)
a : α
ha : a ∈ toFinset s
⊢ f'' a ha ∈ toFinset t
[PROOFSTEP]
rw [mem_toFinset] at ha ⊢
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj✝ : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
hsurj : Function.Surjective f'
this✝ : Fintype ↑s
this : Fintype ↑t
f'' : (a : α) → a ∈ toFinset s → β := fun a h => f a (_ : a ∈ s)
a : α
ha✝ : a ∈ toFinset s
ha : a ∈ s
⊢ f'' a ha✝ ∈ t
[PROOFSTEP]
exact hf a ha
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj✝ : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
hsurj : Function.Surjective f'
this✝ : Fintype ↑s
this : Fintype ↑t
f'' : (a : α) → a ∈ toFinset s → β := fun a h => f a (_ : a ∈ s)
⊢ ∀ (b : β), b ∈ toFinset t → ∃ a ha, b = f'' a ha
[PROOFSTEP]
simpa
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj✝ : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
hsurj : Function.Surjective f'
this✝ : Fintype ↑s
this : Fintype ↑t
f'' : (a : α) → a ∈ toFinset s → β := fun a h => f a (_ : a ∈ s)
⊢ Finset.card (toFinset s) ≤ Finset.card (toFinset t)
[PROOFSTEP]
{rwa [← ncard_eq_toFinset_card', ← ncard_eq_toFinset_card']
}
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj✝ : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
hsurj : Function.Surjective f'
this✝ : Fintype ↑s
this : Fintype ↑t
f'' : (a : α) → a ∈ toFinset s → β := fun a h => f a (_ : a ∈ s)
⊢ Finset.card (toFinset s) ≤ Finset.card (toFinset t)
[PROOFSTEP]
rwa [← ncard_eq_toFinset_card', ← ncard_eq_toFinset_card']
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj✝ : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
hsurj : Function.Surjective f'
this✝ : Fintype ↑s
this : Fintype ↑t
f'' : (a : α) → a ∈ toFinset s → β := fun a h => f a (_ : a ∈ s)
⊢ a₁ ∈ toFinset s
[PROOFSTEP]
simpa
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj✝ : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
hsurj : Function.Surjective f'
this✝ : Fintype ↑s
this : Fintype ↑t
f'' : (a : α) → a ∈ toFinset s → β := fun a h => f a (_ : a ∈ s)
⊢ a₂ ∈ toFinset s
[PROOFSTEP]
simpa
[GOAL]
α : Type u_2
s t✝ : Set α
β : Type u_1
t : Set β
f : (a : α) → a ∈ s → β
hf : ∀ (a : α) (ha : a ∈ s), f a ha ∈ t
hsurj✝ : ∀ (b : β), b ∈ t → ∃ a ha, b = f a ha
hst : ncard s ≤ ncard t
a₁ a₂ : α
ha₁ : a₁ ∈ s
ha₂ : a₂ ∈ s
ha₁a₂ : f a₁ ha₁ = f a₂ ha₂
hs : autoParam (Set.Finite s) _auto✝
f' : ↑s → ↑t := fun x => { val := f ↑x (_ : ↑x ∈ s), property := (_ : f ↑x (_ : ↑x ∈ s) ∈ t) }
hsurj : Function.Surjective f'
this✝ : Fintype ↑s
this : Fintype ↑t
f'' : (a : α) → a ∈ toFinset s → β := fun a h => f a (_ : a ∈ s)
⊢ f'' a₁ (_ : a₁ ∈ toFinset s) = f'' a₂ (_ : a₂ ∈ toFinset s)
[PROOFSTEP]
simpa
[GOAL]
α : Type u_1
s✝ t✝ s t : Set α
hs : autoParam (Set.Finite s) _auto✝
ht : autoParam (Set.Finite t) _auto✝
⊢ ncard (s ∪ t) + ncard (s ∩ t) = ncard s + ncard t
[PROOFSTEP]
to_encard_tac
[GOAL]
α : Type u_1
s✝ t✝ s t : Set α
hs : autoParam (Set.Finite s) _auto✝
ht : autoParam (Set.Finite t) _auto✝
⊢ ↑(ncard (s ∪ t)) + ↑(ncard (s ∩ t)) = ↑(ncard s) + ↑(ncard t)
[PROOFSTEP]
rw [hs.cast_ncard_eq, ht.cast_ncard_eq, (hs.union ht).cast_ncard_eq, (hs.subset (inter_subset_left _ _)).cast_ncard_eq,
encard_union_add_encard_inter]
[GOAL]
α : Type u_1
s✝ t✝ s t : Set α
hs : autoParam (Set.Finite s) _auto✝
ht : autoParam (Set.Finite t) _auto✝
⊢ ncard (s ∩ t) + ncard (s ∪ t) = ncard s + ncard t
[PROOFSTEP]
rw [add_comm, ncard_union_add_ncard_inter _ _ hs ht]
[GOAL]
α : Type u_1
s✝ t✝ s t : Set α
⊢ ncard (s ∪ t) ≤ ncard s + ncard t
[PROOFSTEP]
obtain (h | h) := (s ∪ t).finite_or_infinite
[GOAL]
case inl
α : Type u_1
s✝ t✝ s t : Set α
h : Set.Finite (s ∪ t)
⊢ ncard (s ∪ t) ≤ ncard s + ncard t
[PROOFSTEP]
to_encard_tac
[GOAL]
case inl
α : Type u_1
s✝ t✝ s t : Set α
h : Set.Finite (s ∪ t)
⊢ ↑(ncard (s ∪ t)) ≤ ↑(ncard s) + ↑(ncard t)
[PROOFSTEP]
rw [h.cast_ncard_eq, (h.subset (subset_union_left _ _)).cast_ncard_eq,
(h.subset (subset_union_right _ _)).cast_ncard_eq]
[GOAL]
case inl
α : Type u_1
s✝ t✝ s t : Set α
h : Set.Finite (s ∪ t)
⊢ encard (s ∪ t) ≤ encard s + encard t
[PROOFSTEP]
apply encard_union_le
[GOAL]
case inr
α : Type u_1
s✝ t✝ s t : Set α
h : Set.Infinite (s ∪ t)
⊢ ncard (s ∪ t) ≤ ncard s + ncard t
[PROOFSTEP]
rw [h.ncard]
[GOAL]
case inr
α : Type u_1
s✝ t✝ s t : Set α
h : Set.Infinite (s ∪ t)
⊢ 0 ≤ ncard s + ncard t
[PROOFSTEP]
apply zero_le
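The union bound proved above needs no finiteness hypotheses, since the infinite case collapses to `0 ≤ _`. A sketch assuming the declaration is `Set.ncard_union_le` with both sets as explicit arguments (the name, the explicitness, and the module `Mathlib.Data.Set.Card` are assumptions):

import Mathlib.Data.Set.Card

-- Subadditivity of `ncard` under union, with no finiteness side conditions.
example (s t : Set ℕ) : (s ∪ t).ncard ≤ s.ncard + t.ncard :=
  Set.ncard_union_le s t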
[GOAL]
α : Type u_1
s t : Set α
h : Disjoint s t
hs : autoParam (Set.Finite s) _auto✝
ht : autoParam (Set.Finite t) _auto✝
⊢ ncard (s ∪ t) = ncard s + ncard t
[PROOFSTEP]
to_encard_tac
[GOAL]
α : Type u_1
s t : Set α
h : Disjoint s t
hs : autoParam (Set.Finite s) _auto✝
ht : autoParam (Set.Finite t) _auto✝
⊢ ↑(ncard (s ∪ t)) = ↑(ncard s) + ↑(ncard t)
[PROOFSTEP]
rw [hs.cast_ncard_eq, ht.cast_ncard_eq, (hs.union ht).cast_ncard_eq, encard_union_eq h]
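For disjoint finite sets the union is exactly additive. A usage sketch; the name `ncard_union_eq` and its argument order are taken from a later proof step in this trace, while the module `Mathlib.Data.Set.Card` is an assumption:

import Mathlib.Data.Set.Card

-- Both finiteness autoParams are supplied explicitly here.
example {s t : Set ℕ} (h : Disjoint s t) (hs : s.Finite) (ht : t.Finite) :
    (s ∪ t).ncard = s.ncard + t.ncard :=
  Set.ncard_union_eq h hs ht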
[GOAL]
α : Type u_1
s t : Set α
h : s ⊆ t
ht : autoParam (Set.Finite t) _auto✝
⊢ ncard (t \ s) + ncard s = ncard t
[PROOFSTEP]
to_encard_tac
[GOAL]
α : Type u_1
s t : Set α
h : s ⊆ t
ht : autoParam (Set.Finite t) _auto✝
⊢ ↑(ncard (t \ s)) + ↑(ncard s) = ↑(ncard t)
[PROOFSTEP]
rw [ht.cast_ncard_eq, (ht.subset h).cast_ncard_eq, (ht.diff _).cast_ncard_eq, encard_diff_add_encard_of_subset h]
[GOAL]
α : Type u_1
s t : Set α
h : s ⊆ t
ht : autoParam (Set.Finite t) _auto✝
⊢ ncard (t \ s) = ncard t - ncard s
[PROOFSTEP]
rw [← ncard_diff_add_ncard_of_subset h ht, add_tsub_cancel_right]
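The subtraction form above is derived from the additive lemma it rewrites with. A sketch of that additive form, `ncard_diff_add_ncard_of_subset`, whose name and argument order appear in the proof step above (the module `Mathlib.Data.Set.Card` is an assumption):

import Mathlib.Data.Set.Card

-- Removing a subset and adding its size back recovers the size of the ambient finite set.
example {s t : Set ℕ} (h : s ⊆ t) (ht : t.Finite) :
    (t \ s).ncard + s.ncard = t.ncard :=
  Set.ncard_diff_add_ncard_of_subset h ht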
[GOAL]
α : Type u_1
s✝ t✝ s t : Set α
ht : autoParam (Set.Finite t) _auto✝
⊢ ncard s ≤ ncard (s \ t) + ncard t
[PROOFSTEP]
cases' s.finite_or_infinite with hs hs
[GOAL]
case inl
α : Type u_1
s✝ t✝ s t : Set α
ht : autoParam (Set.Finite t) _auto✝
hs : Set.Finite s
⊢ ncard s ≤ ncard (s \ t) + ncard t
[PROOFSTEP]
to_encard_tac
[GOAL]
case inl
α : Type u_1
s✝ t✝ s t : Set α
ht : autoParam (Set.Finite t) _auto✝
hs : Set.Finite s
⊢ ↑(ncard s) ≤ ↑(ncard (s \ t)) + ↑(ncard t)
[PROOFSTEP]
rw [ht.cast_ncard_eq, hs.cast_ncard_eq, (hs.diff _).cast_ncard_eq]
[GOAL]
case inl
α : Type u_1
s✝ t✝ s t : Set α
ht : autoParam (Set.Finite t) _auto✝
hs : Set.Finite s
⊢ encard s ≤ encard (s \ t) + encard t
[PROOFSTEP]
apply encard_le_encard_diff_add_encard
[GOAL]
case inr
α : Type u_1
s✝ t✝ s t : Set α
ht : autoParam (Set.Finite t) _auto✝
hs : Set.Infinite s
⊢ ncard s ≤ ncard (s \ t) + ncard t
[PROOFSTEP]
convert Nat.zero_le _
[GOAL]
case h.e'_3
α : Type u_1
s✝ t✝ s t : Set α
ht : autoParam (Set.Finite t) _auto✝
hs : Set.Infinite s
⊢ ncard s = 0
[PROOFSTEP]
rw [hs.ncard]
[GOAL]
α : Type u_1
s✝ t✝ s t : Set α
hs : autoParam (Set.Finite s) _auto✝
⊢ ncard t ≤ ncard s + ncard (t \ s)
[PROOFSTEP]
rw [add_comm]
[GOAL]
α : Type u_1
s✝ t✝ s t : Set α
hs : autoParam (Set.Finite s) _auto✝
⊢ ncard t ≤ ncard (t \ s) + ncard s
[PROOFSTEP]
apply ncard_le_ncard_diff_add_ncard _ _ hs
[GOAL]
α : Type u_1
s✝ t✝ s t : Set α
hs : autoParam (Set.Finite s) _auto✝
ht : autoParam (Set.Finite t) _auto✝
⊢ ncard (s \ t) + ncard t = ncard (s ∪ t)
[PROOFSTEP]
rw [← ncard_union_eq disjoint_sdiff_left (hs.diff _) ht, diff_union_self]
[GOAL]
α : Type u_1
s t : Set α
h : ncard s < ncard t
hs : autoParam (Set.Finite s) _auto✝
⊢ Set.Nonempty (t \ s)
[PROOFSTEP]
rw [Set.nonempty_iff_ne_empty, Ne.def, diff_eq_empty]
[GOAL]
α : Type u_1
s t : Set α
h : ncard s < ncard t
hs : autoParam (Set.Finite s) _auto✝
⊢ ¬t ⊆ s
[PROOFSTEP]
exact fun h' ↦ h.not_le (ncard_le_of_subset h' hs)
[GOAL]
α : Type u_1
s✝ t✝ s t : Set α
hs : autoParam (Set.Finite s) _auto✝
⊢ ncard (s ∩ t) + ncard (s \ t) = ncard s
[PROOFSTEP]
rw [←
ncard_union_eq (disjoint_of_subset_left (inter_subset_right _ _) disjoint_sdiff_right) (hs.inter_of_left _)
(hs.diff _),
union_comm, diff_union_inter]
[GOAL]
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
ht : autoParam (Set.Finite t) _auto✝
⊢ ncard s = ncard t ↔ ncard (s \ t) = ncard (t \ s)
[PROOFSTEP]
rw [← ncard_inter_add_ncard_diff_eq_ncard s t hs, ← ncard_inter_add_ncard_diff_eq_ncard t s ht, inter_comm,
add_right_inj]
[GOAL]
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
ht : autoParam (Set.Finite t) _auto✝
⊢ ncard s ≤ ncard t ↔ ncard (s \ t) ≤ ncard (t \ s)
[PROOFSTEP]
rw [← ncard_inter_add_ncard_diff_eq_ncard s t hs, ← ncard_inter_add_ncard_diff_eq_ncard t s ht, inter_comm,
add_le_add_iff_left]
[GOAL]
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
ht : autoParam (Set.Finite t) _auto✝
⊢ ncard s < ncard t ↔ ncard (s \ t) < ncard (t \ s)
[PROOFSTEP]
rw [← ncard_inter_add_ncard_diff_eq_ncard s t hs, ← ncard_inter_add_ncard_diff_eq_ncard t s ht, inter_comm,
add_lt_add_iff_left]
[GOAL]
α : Type u_1
s✝ t s : Set α
hs : autoParam (Set.Finite s) _auto✝
hsc : autoParam (Set.Finite sᶜ) _auto✝
⊢ ncard s + ncard sᶜ = Nat.card α
[PROOFSTEP]
rw [← ncard_univ, ← ncard_union_eq (@disjoint_compl_right _ _ s) hs hsc, union_compl_self]
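The complement identity above rewrites with `ncard_univ`. A standalone sketch of that rewrite; since the trace does not show whether the type argument is explicit, a `rw`-based proof is used, and the module `Mathlib.Data.Set.Card` is an assumption:

import Mathlib.Data.Set.Card

-- `Set.univ` over a type has `Nat.card α` elements (which is 0 when `α` is infinite).
example (α : Type*) : (Set.univ : Set α).ncard = Nat.card α := by
  rw [Set.ncard_univ]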
[GOAL]
α : Type u_1
s t : Set α
i : ℕ
h₁ : i + ncard s ≤ ncard t
h₂ : s ⊆ t
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ ncard r = i + ncard s
[PROOFSTEP]
cases' t.finite_or_infinite with ht ht
[GOAL]
case inl
α : Type u_1
s t : Set α
i : ℕ
h₁ : i + ncard s ≤ ncard t
h₂ : s ⊆ t
ht : Set.Finite t
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ ncard r = i + ncard s
[PROOFSTEP]
rw [ncard_eq_toFinset_card _ (ht.subset h₂)] at h₁ ⊢
[GOAL]
case inl
α : Type u_1
s t : Set α
i : ℕ
h₂ : s ⊆ t
ht : Set.Finite t
h₁ : i + Finset.card (Finite.toFinset (_ : Set.Finite s)) ≤ ncard t
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ ncard r = i + Finset.card (Finite.toFinset (_ : Set.Finite s))
[PROOFSTEP]
rw [ncard_eq_toFinset_card t ht] at h₁
[GOAL]
case inl
α : Type u_1
s t : Set α
i : ℕ
h₂ : s ⊆ t
ht : Set.Finite t
h₁ : i + Finset.card (Finite.toFinset (_ : Set.Finite s)) ≤ Finset.card (Finite.toFinset ht)
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ ncard r = i + Finset.card (Finite.toFinset (_ : Set.Finite s))
[PROOFSTEP]
obtain ⟨r', hsr', hr't, hr'⟩ := Finset.exists_intermediate_set _ h₁ (by simpa)
[GOAL]
α : Type u_1
s t : Set α
i : ℕ
h₂ : s ⊆ t
ht : Set.Finite t
h₁ : i + Finset.card (Finite.toFinset (_ : Set.Finite s)) ≤ Finset.card (Finite.toFinset ht)
⊢ Finite.toFinset (_ : Set.Finite s) ⊆ Finite.toFinset ht
[PROOFSTEP]
simpa
[GOAL]
case inl.intro.intro.intro
α : Type u_1
s t : Set α
i : ℕ
h₂ : s ⊆ t
ht : Set.Finite t
h₁ : i + Finset.card (Finite.toFinset (_ : Set.Finite s)) ≤ Finset.card (Finite.toFinset ht)
r' : Finset α
hsr' : Finite.toFinset (_ : Set.Finite s) ⊆ r'
hr't : r' ⊆ Finite.toFinset ht
hr' : Finset.card r' = i + Finset.card (Finite.toFinset (_ : Set.Finite s))
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ ncard r = i + Finset.card (Finite.toFinset (_ : Set.Finite s))
[PROOFSTEP]
exact ⟨r', by simpa using hsr', by simpa using hr't, by rw [← hr', ncard_coe_Finset]⟩
[GOAL]
α : Type u_1
s t : Set α
i : ℕ
h₂ : s ⊆ t
ht : Set.Finite t
h₁ : i + Finset.card (Finite.toFinset (_ : Set.Finite s)) ≤ Finset.card (Finite.toFinset ht)
r' : Finset α
hsr' : Finite.toFinset (_ : Set.Finite s) ⊆ r'
hr't : r' ⊆ Finite.toFinset ht
hr' : Finset.card r' = i + Finset.card (Finite.toFinset (_ : Set.Finite s))
⊢ s ⊆ ↑r'
[PROOFSTEP]
simpa using hsr'
[GOAL]
α : Type u_1
s t : Set α
i : ℕ
h₂ : s ⊆ t
ht : Set.Finite t
h₁ : i + Finset.card (Finite.toFinset (_ : Set.Finite s)) ≤ Finset.card (Finite.toFinset ht)
r' : Finset α
hsr' : Finite.toFinset (_ : Set.Finite s) ⊆ r'
hr't : r' ⊆ Finite.toFinset ht
hr' : Finset.card r' = i + Finset.card (Finite.toFinset (_ : Set.Finite s))
⊢ ↑r' ⊆ t
[PROOFSTEP]
simpa using hr't
[GOAL]
α : Type u_1
s t : Set α
i : ℕ
h₂ : s ⊆ t
ht : Set.Finite t
h₁ : i + Finset.card (Finite.toFinset (_ : Set.Finite s)) ≤ Finset.card (Finite.toFinset ht)
r' : Finset α
hsr' : Finite.toFinset (_ : Set.Finite s) ⊆ r'
hr't : r' ⊆ Finite.toFinset ht
hr' : Finset.card r' = i + Finset.card (Finite.toFinset (_ : Set.Finite s))
⊢ ncard ↑r' = i + Finset.card (Finite.toFinset (_ : Set.Finite s))
[PROOFSTEP]
rw [← hr', ncard_coe_Finset]
[GOAL]
case inr
α : Type u_1
s t : Set α
i : ℕ
h₁ : i + ncard s ≤ ncard t
h₂ : s ⊆ t
ht : Set.Infinite t
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ ncard r = i + ncard s
[PROOFSTEP]
rw [ht.ncard] at h₁
[GOAL]
case inr
α : Type u_1
s t : Set α
i : ℕ
h₁ : i + ncard s ≤ 0
h₂ : s ⊆ t
ht : Set.Infinite t
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ ncard r = i + ncard s
[PROOFSTEP]
have h₁' := Nat.eq_zero_of_le_zero h₁
[GOAL]
case inr
α : Type u_1
s t : Set α
i : ℕ
h₁ : i + ncard s ≤ 0
h₂ : s ⊆ t
ht : Set.Infinite t
h₁' : i + ncard s = 0
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ ncard r = i + ncard s
[PROOFSTEP]
rw [add_eq_zero_iff] at h₁'
[GOAL]
case inr
α : Type u_1
s t : Set α
i : ℕ
h₁ : i + ncard s ≤ 0
h₂ : s ⊆ t
ht : Set.Infinite t
h₁' : i = 0 ∧ ncard s = 0
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ ncard r = i + ncard s
[PROOFSTEP]
refine' ⟨t, h₂, rfl.subset, _⟩
[GOAL]
case inr
α : Type u_1
s t : Set α
i : ℕ
h₁ : i + ncard s ≤ 0
h₂ : s ⊆ t
ht : Set.Infinite t
h₁' : i = 0 ∧ ncard s = 0
⊢ ncard t = i + ncard s
[PROOFSTEP]
rw [h₁'.2, h₁'.1, ht.ncard, add_zero]
[GOAL]
α : Type u_1
s t : Set α
m : ℕ
hs : ncard s ≤ m
ht : m ≤ ncard t
h : s ⊆ t
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ ncard r = m
[PROOFSTEP]
obtain ⟨r, hsr, hrt, hc⟩ := exists_intermediate_Set (m - s.ncard) (by rwa [tsub_add_cancel_of_le hs]) h
[GOAL]
α : Type u_1
s t : Set α
m : ℕ
hs : ncard s ≤ m
ht : m ≤ ncard t
h : s ⊆ t
⊢ m - ncard s + ncard s ≤ ncard t
[PROOFSTEP]
rwa [tsub_add_cancel_of_le hs]
[GOAL]
case intro.intro.intro
α : Type u_1
s t : Set α
m : ℕ
hs : ncard s ≤ m
ht : m ≤ ncard t
h : s ⊆ t
r : Set α
hsr : s ⊆ r
hrt : r ⊆ t
hc : ncard r = m - ncard s + ncard s
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ ncard r = m
[PROOFSTEP]
rw [tsub_add_cancel_of_le hs] at hc
[GOAL]
case intro.intro.intro
α : Type u_1
s t : Set α
m : ℕ
hs : ncard s ≤ m
ht : m ≤ ncard t
h : s ⊆ t
r : Set α
hsr : s ⊆ r
hrt : r ⊆ t
hc : ncard r = m
⊢ ∃ r, s ⊆ r ∧ r ⊆ t ∧ ncard r = m
[PROOFSTEP]
exact ⟨r, hsr, hrt, hc⟩
[GOAL]
α : Type u_1
s✝ t s : Set α
i : ℕ
h₁ : i ≤ ncard s
⊢ i + ncard ∅ ≤ ncard s
[PROOFSTEP]
simpa
[GOAL]
α : Type u_1
s✝ t✝ s : Set α
i : ℕ
h₁ : i ≤ ncard s
t : Set α
ht : ∅ ⊆ t ∧ t ⊆ s ∧ ncard t = i + ncard ∅
⊢ ncard t = i
[PROOFSTEP]
simpa using ht.2.2
[GOAL]
α : Type u_1
s✝ t s : Set α
hs : Set.Infinite s
k : ℕ
⊢ ∃ t, t ⊆ s ∧ Set.Finite t ∧ Set.ncard t = k
[PROOFSTEP]
have := hs.to_subtype
[GOAL]
α : Type u_1
s✝ t s : Set α
hs : Set.Infinite s
k : ℕ
this : Infinite ↑s
⊢ ∃ t, t ⊆ s ∧ Set.Finite t ∧ Set.ncard t = k
[PROOFSTEP]
obtain ⟨t', -, rfl⟩ := @Infinite.exists_subset_card_eq s univ infinite_univ k
[GOAL]
case intro.intro
α : Type u_1
s✝ t s : Set α
hs : Set.Infinite s
this : Infinite ↑s
t' : Finset ↑s
⊢ ∃ t, t ⊆ s ∧ Set.Finite t ∧ Set.ncard t = Finset.card t'
[PROOFSTEP]
refine' ⟨Subtype.val '' (t' : Set s), by simp, Finite.image _ (by simp), _⟩
[GOAL]
α : Type u_1
s✝ t s : Set α
hs : Set.Infinite s
this : Infinite ↑s
t' : Finset ↑s
⊢ Subtype.val '' ↑t' ⊆ s
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
s✝ t s : Set α
hs : Set.Infinite s
this : Infinite ↑s
t' : Finset ↑s
⊢ Set.Finite ↑t'
[PROOFSTEP]
simp
[GOAL]
case intro.intro
α : Type u_1
s✝ t s : Set α
hs : Set.Infinite s
this : Infinite ↑s
t' : Finset ↑s
⊢ Set.ncard (Subtype.val '' ↑t') = Finset.card t'
[PROOFSTEP]
rw [ncard_image_of_injective _ Subtype.coe_injective]
[GOAL]
case intro.intro
α : Type u_1
s✝ t s : Set α
hs : Set.Infinite s
this : Infinite ↑s
t' : Finset ↑s
⊢ Set.ncard ↑t' = Finset.card t'
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
s✝ t✝ s t : Set α
ht : Set.Infinite t
hst : s ⊆ t
hs : Set.Finite s
k : ℕ
hsk : Set.ncard s ≤ k
⊢ ∃ s', s ⊆ s' ∧ s' ⊆ t ∧ Set.ncard s' = k
[PROOFSTEP]
obtain ⟨s₁, hs₁, hs₁fin, hs₁card⟩ := (ht.diff hs).exists_subset_ncard_eq (k - s.ncard)
[GOAL]
case intro.intro.intro
α : Type u_1
s✝ t✝ s t : Set α
ht : Set.Infinite t
hst : s ⊆ t
hs : Set.Finite s
k : ℕ
hsk : Set.ncard s ≤ k
s₁ : Set α
hs₁ : s₁ ⊆ t \ s
hs₁fin : Set.Finite s₁
hs₁card : Set.ncard s₁ = k - Set.ncard s
⊢ ∃ s', s ⊆ s' ∧ s' ⊆ t ∧ Set.ncard s' = k
[PROOFSTEP]
refine' ⟨s ∪ s₁, subset_union_left _ _, union_subset hst (hs₁.trans (diff_subset _ _)), _⟩
[GOAL]
case intro.intro.intro
α : Type u_1
s✝ t✝ s t : Set α
ht : Set.Infinite t
hst : s ⊆ t
hs : Set.Finite s
k : ℕ
hsk : Set.ncard s ≤ k
s₁ : Set α
hs₁ : s₁ ⊆ t \ s
hs₁fin : Set.Finite s₁
hs₁card : Set.ncard s₁ = k - Set.ncard s
⊢ Set.ncard (s ∪ s₁) = k
[PROOFSTEP]
rwa [ncard_union_eq (disjoint_of_subset_right hs₁ disjoint_sdiff_right) hs hs₁fin, hs₁card, add_tsub_cancel_of_le]
[GOAL]
α : Type u_1
s t : Set α
n : ℕ
hst : 2 * n < ncard (s ∪ t)
⊢ ∃ r, n < ncard r ∧ (r ⊆ s ∨ r ⊆ t)
[PROOFSTEP]
classical
have hu := finite_of_ncard_ne_zero ((Nat.zero_le _).trans_lt hst).ne.symm
rw [ncard_eq_toFinset_card _ hu,
Finite.toFinset_union (hu.subset (subset_union_left _ _)) (hu.subset (subset_union_right _ _))] at hst
obtain ⟨r', hnr', hr'⟩ := Finset.exists_subset_or_subset_of_two_mul_lt_card hst
exact ⟨r', by simpa, by simpa using hr'⟩
[GOAL]
α : Type u_1
s t : Set α
n : ℕ
hst : 2 * n < ncard (s ∪ t)
⊢ ∃ r, n < ncard r ∧ (r ⊆ s ∨ r ⊆ t)
[PROOFSTEP]
have hu := finite_of_ncard_ne_zero ((Nat.zero_le _).trans_lt hst).ne.symm
[GOAL]
α : Type u_1
s t : Set α
n : ℕ
hst : 2 * n < ncard (s ∪ t)
hu : Set.Finite (s ∪ t)
⊢ ∃ r, n < ncard r ∧ (r ⊆ s ∨ r ⊆ t)
[PROOFSTEP]
rw [ncard_eq_toFinset_card _ hu,
Finite.toFinset_union (hu.subset (subset_union_left _ _)) (hu.subset (subset_union_right _ _))] at hst
[GOAL]
α : Type u_1
s t : Set α
n : ℕ
hu : Set.Finite (s ∪ t)
hst : 2 * n < Finset.card (Finite.toFinset (_ : Set.Finite s) ∪ Finite.toFinset (_ : Set.Finite t))
⊢ ∃ r, n < ncard r ∧ (r ⊆ s ∨ r ⊆ t)
[PROOFSTEP]
obtain ⟨r', hnr', hr'⟩ := Finset.exists_subset_or_subset_of_two_mul_lt_card hst
[GOAL]
case intro.intro
α : Type u_1
s t : Set α
n : ℕ
hu : Set.Finite (s ∪ t)
hst : 2 * n < Finset.card (Finite.toFinset (_ : Set.Finite s) ∪ Finite.toFinset (_ : Set.Finite t))
r' : Finset α
hnr' : n < Finset.card r'
hr' : r' ⊆ Finite.toFinset (_ : Set.Finite s) ∨ r' ⊆ Finite.toFinset (_ : Set.Finite t)
⊢ ∃ r, n < ncard r ∧ (r ⊆ s ∨ r ⊆ t)
[PROOFSTEP]
exact ⟨r', by simpa, by simpa using hr'⟩
[GOAL]
α : Type u_1
s t : Set α
n : ℕ
hu : Set.Finite (s ∪ t)
hst : 2 * n < Finset.card (Finite.toFinset (_ : Set.Finite s) ∪ Finite.toFinset (_ : Set.Finite t))
r' : Finset α
hnr' : n < Finset.card r'
hr' : r' ⊆ Finite.toFinset (_ : Set.Finite s) ∨ r' ⊆ Finite.toFinset (_ : Set.Finite t)
⊢ n < ncard ↑r'
[PROOFSTEP]
simpa
[GOAL]
α : Type u_1
s t : Set α
n : ℕ
hu : Set.Finite (s ∪ t)
hst : 2 * n < Finset.card (Finite.toFinset (_ : Set.Finite s) ∪ Finite.toFinset (_ : Set.Finite t))
r' : Finset α
hnr' : n < Finset.card r'
hr' : r' ⊆ Finite.toFinset (_ : Set.Finite s) ∨ r' ⊆ Finite.toFinset (_ : Set.Finite t)
⊢ ↑r' ⊆ s ∨ ↑r' ⊆ t
[PROOFSTEP]
simpa using hr'
[GOAL]
α : Type u_1
s t : Set α
⊢ ncard s = 1 ↔ ∃ a, s = {a}
[PROOFSTEP]
refine' ⟨fun h ↦ _, by rintro ⟨a, rfl⟩; rw [ncard_singleton]⟩
[GOAL]
α : Type u_1
s t : Set α
⊢ (∃ a, s = {a}) → ncard s = 1
[PROOFSTEP]
rintro ⟨a, rfl⟩
[GOAL]
case intro
α : Type u_1
t : Set α
a : α
⊢ ncard {a} = 1
[PROOFSTEP]
rw [ncard_singleton]
[GOAL]
α : Type u_1
s t : Set α
h : ncard s = 1
⊢ ∃ a, s = {a}
[PROOFSTEP]
have hft := (finite_of_ncard_ne_zero (ne_zero_of_eq_one h)).fintype
[GOAL]
α : Type u_1
s t : Set α
h : ncard s = 1
hft : Fintype ↑s
⊢ ∃ a, s = {a}
[PROOFSTEP]
simp_rw [ncard_eq_toFinset_card', @Finset.card_eq_one _ (toFinset s)] at h
[GOAL]
α : Type u_1
s t : Set α
hft : Fintype ↑s
h : ∃ a, toFinset s = {a}
⊢ ∃ a, s = {a}
[PROOFSTEP]
refine' h.imp fun a ha ↦ _
[GOAL]
α : Type u_1
s t : Set α
hft : Fintype ↑s
h : ∃ a, toFinset s = {a}
a : α
ha : toFinset s = {a}
⊢ s = {a}
[PROOFSTEP]
simp_rw [Set.ext_iff, mem_singleton_iff]
[GOAL]
α : Type u_1
s t : Set α
hft : Fintype ↑s
h : ∃ a, toFinset s = {a}
a : α
ha : toFinset s = {a}
⊢ ∀ (x : α), x ∈ s ↔ x = a
[PROOFSTEP]
simp only [Finset.ext_iff, mem_toFinset, Finset.mem_singleton] at ha
[GOAL]
α : Type u_1
s t : Set α
hft : Fintype ↑s
h : ∃ a, toFinset s = {a}
a : α
ha : ∀ (a_1 : α), a_1 ∈ s ↔ a_1 = a
⊢ ∀ (x : α), x ∈ s ↔ x = a
[PROOFSTEP]
exact ha
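The characterisation just proved is usable in either direction; a sketch assuming the declaration is `Set.ncard_eq_one` (both the name and the module `Mathlib.Data.Set.Card` are assumptions):

import Mathlib.Data.Set.Card

-- A set of size one is a singleton; no finiteness hypothesis is needed,
-- since `ncard s = 1` already rules out the infinite case.
example {α : Type*} {s : Set α} (h : s.ncard = 1) : ∃ a, s = {a} :=
  Set.ncard_eq_one.mp h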
[GOAL]
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
⊢ (∃ a x, insert a s = t) ↔ s ⊆ t ∧ ncard s + 1 = ncard t
[PROOFSTEP]
classical
cases' t.finite_or_infinite with ht ht
· rw [ncard_eq_toFinset_card _ hs, ncard_eq_toFinset_card _ ht, ← @Finite.toFinset_subset_toFinset _ _ _ hs ht, ←
Finset.exists_eq_insert_iff]
convert Iff.rfl using 2; simp
ext x
simp [Finset.ext_iff, Set.ext_iff]
simp only [ht.ncard, exists_prop, add_eq_zero, and_false, iff_false, not_exists, not_and]
rintro x - rfl
exact ht (hs.insert x)
[GOAL]
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
⊢ (∃ a x, insert a s = t) ↔ s ⊆ t ∧ ncard s + 1 = ncard t
[PROOFSTEP]
cases' t.finite_or_infinite with ht ht
[GOAL]
case inl
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
ht : Set.Finite t
⊢ (∃ a x, insert a s = t) ↔ s ⊆ t ∧ ncard s + 1 = ncard t
[PROOFSTEP]
rw [ncard_eq_toFinset_card _ hs, ncard_eq_toFinset_card _ ht, ← @Finite.toFinset_subset_toFinset _ _ _ hs ht, ←
Finset.exists_eq_insert_iff]
[GOAL]
case inl
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
ht : Set.Finite t
⊢ (∃ a x, insert a s = t) ↔ ∃ a x, insert a (Finite.toFinset hs) = Finite.toFinset ht
[PROOFSTEP]
convert Iff.rfl using 2
[GOAL]
case h.e'_2.h.e'_2
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
ht : Set.Finite t
⊢ (fun a => ∃ x, insert a (Finite.toFinset hs) = Finite.toFinset ht) = fun a => ∃ x, insert a s = t
[PROOFSTEP]
simp
[GOAL]
case h.e'_2.h.e'_2
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
ht : Set.Finite t
⊢ (fun a => ¬a ∈ s ∧ insert a (Finite.toFinset hs) = Finite.toFinset ht) = fun a => ¬a ∈ s ∧ insert a s = t
[PROOFSTEP]
ext x
[GOAL]
case h.e'_2.h.e'_2.h.a
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
ht : Set.Finite t
x : α
⊢ ¬x ∈ s ∧ insert x (Finite.toFinset hs) = Finite.toFinset ht ↔ ¬x ∈ s ∧ insert x s = t
[PROOFSTEP]
simp [Finset.ext_iff, Set.ext_iff]
[GOAL]
case inr
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
ht : Set.Infinite t
⊢ (∃ a x, insert a s = t) ↔ s ⊆ t ∧ ncard s + 1 = ncard t
[PROOFSTEP]
simp only [ht.ncard, exists_prop, add_eq_zero, and_false, iff_false, not_exists, not_and]
[GOAL]
case inr
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
ht : Set.Infinite t
⊢ ∀ (x : α), ¬x ∈ s → ¬insert x s = t
[PROOFSTEP]
rintro x - rfl
[GOAL]
case inr
α : Type u_1
s : Set α
hs : autoParam (Set.Finite s) _auto✝
x : α
ht : Set.Infinite (insert x s)
⊢ False
[PROOFSTEP]
exact ht (hs.insert x)
[GOAL]
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
⊢ ncard s ≤ 1 ↔ ∀ (a : α), a ∈ s → ∀ (b : α), b ∈ s → a = b
[PROOFSTEP]
simp_rw [ncard_eq_toFinset_card _ hs, Finset.card_le_one, Finite.mem_toFinset]
[GOAL]
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
⊢ ncard s ≤ 1 ↔ ∀ {a b : α}, a ∈ s → b ∈ s → a = b
[PROOFSTEP]
rw [ncard_le_one hs]
[GOAL]
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
⊢ (∀ (a : α), a ∈ s → ∀ (b : α), b ∈ s → a = b) ↔ ∀ {a b : α}, a ∈ s → b ∈ s → a = b
[PROOFSTEP]
tauto
[GOAL]
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
⊢ ncard s ≤ 1 ↔ s = ∅ ∨ ∃ a, s = {a}
[PROOFSTEP]
obtain rfl | ⟨x, hx⟩ := s.eq_empty_or_nonempty
[GOAL]
case inl
α : Type u_1
t : Set α
hs : autoParam (Set.Finite ∅) _auto✝
⊢ ncard ∅ ≤ 1 ↔ ∅ = ∅ ∨ ∃ a, ∅ = {a}
[PROOFSTEP]
exact iff_of_true (by simp) (Or.inl rfl)
[GOAL]
α : Type u_1
t : Set α
hs : autoParam (Set.Finite ∅) _auto✝
⊢ ncard ∅ ≤ 1
[PROOFSTEP]
simp
[GOAL]
case inr.intro
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
x : α
hx : x ∈ s
⊢ ncard s ≤ 1 ↔ s = ∅ ∨ ∃ a, s = {a}
[PROOFSTEP]
rw [ncard_le_one_iff hs]
[GOAL]
case inr.intro
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
x : α
hx : x ∈ s
⊢ (∀ {a b : α}, a ∈ s → b ∈ s → a = b) ↔ s = ∅ ∨ ∃ a, s = {a}
[PROOFSTEP]
refine' ⟨fun h ↦ Or.inr ⟨x, (singleton_subset_iff.mpr hx).antisymm' fun y hy ↦ h hy hx⟩, _⟩
[GOAL]
case inr.intro
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
x : α
hx : x ∈ s
⊢ (s = ∅ ∨ ∃ a, s = {a}) → ∀ {a b : α}, a ∈ s → b ∈ s → a = b
[PROOFSTEP]
rintro (rfl | ⟨a, rfl⟩)
[GOAL]
case inr.intro.inl
α : Type u_1
t : Set α
x : α
hs : autoParam (Set.Finite ∅) _auto✝
hx : x ∈ ∅
⊢ ∀ {a b : α}, a ∈ ∅ → b ∈ ∅ → a = b
[PROOFSTEP]
exact (not_mem_empty _ hx).elim
[GOAL]
case inr.intro.inr.intro
α : Type u_1
t : Set α
x a : α
hs : autoParam (Set.Finite {a}) _auto✝
hx : x ∈ {a}
⊢ ∀ {a_1 b : α}, a_1 ∈ {a} → b ∈ {a} → a_1 = b
[PROOFSTEP]
simp_rw [mem_singleton_iff] at hx ⊢
[GOAL]
case inr.intro.inr.intro
α : Type u_1
t : Set α
x a : α
hs : autoParam (Set.Finite {a}) _auto✝
hx : x = a
⊢ ∀ {a_1 b : α}, a_1 = a → b = a → a_1 = b
[PROOFSTEP]
subst hx
[GOAL]
case inr.intro.inr.intro
α : Type u_1
t : Set α
x : α
hs : autoParam (Set.Finite {x}) _auto✝
⊢ ∀ {a b : α}, a = x → b = x → a = b
[PROOFSTEP]
simp only [forall_eq_apply_imp_iff', imp_self, implies_true]
[GOAL]
α : Type u_1
s t : Set α
inst✝ : Nonempty α
hs : autoParam (Set.Finite s) _auto✝
⊢ ncard s ≤ 1 ↔ ∃ x, s ⊆ {x}
[PROOFSTEP]
simp_rw [ncard_eq_toFinset_card _ hs, Finset.card_le_one_iff_subset_singleton, Finite.toFinset_subset,
Finset.coe_singleton]
[GOAL]
α : Type u_1
s✝ t : Set α
inst✝ : Subsingleton α
s : Set α
⊢ ncard s ≤ 1
[PROOFSTEP]
rw [ncard_eq_toFinset_card]
[GOAL]
α : Type u_1
s✝ t : Set α
inst✝ : Subsingleton α
s : Set α
⊢ Finset.card (Finite.toFinset (_ : Set.Finite s)) ≤ 1
[PROOFSTEP]
exact Finset.card_le_one_of_subsingleton _
[GOAL]
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
⊢ 1 < ncard s ↔ ∃ a, a ∈ s ∧ ∃ b, b ∈ s ∧ a ≠ b
[PROOFSTEP]
simp_rw [ncard_eq_toFinset_card _ hs, Finset.one_lt_card, Finite.mem_toFinset]
[GOAL]
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
⊢ 1 < ncard s ↔ ∃ a b, a ∈ s ∧ b ∈ s ∧ a ≠ b
[PROOFSTEP]
rw [one_lt_ncard hs]
[GOAL]
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
⊢ (∃ a, a ∈ s ∧ ∃ b, b ∈ s ∧ a ≠ b) ↔ ∃ a b, a ∈ s ∧ b ∈ s ∧ a ≠ b
[PROOFSTEP]
simp only [exists_prop, exists_and_left]
[GOAL]
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
⊢ 2 < ncard s ↔ ∃ a b c, a ∈ s ∧ b ∈ s ∧ c ∈ s ∧ a ≠ b ∧ a ≠ c ∧ b ≠ c
[PROOFSTEP]
simp_rw [ncard_eq_toFinset_card _ hs, Finset.two_lt_card_iff, Finite.mem_toFinset]
[GOAL]
α : Type u_1
s t : Set α
hs : autoParam (Set.Finite s) _auto✝
⊢ 2 < ncard s ↔ ∃ a, a ∈ s ∧ ∃ b, b ∈ s ∧ ∃ c, c ∈ s ∧ a ≠ b ∧ a ≠ c ∧ b ≠ c
[PROOFSTEP]
simp only [two_lt_ncard_iff hs, exists_and_left, exists_prop]
[GOAL]
α : Type u_1
s t : Set α
hs : 1 < ncard s
a : α
⊢ ∃ b, b ∈ s ∧ b ≠ a
[PROOFSTEP]
have hsf := (finite_of_ncard_ne_zero (zero_lt_one.trans hs).ne.symm)
[GOAL]
α : Type u_1
s t : Set α
hs : 1 < ncard s
a : α
hsf : Set.Finite s
⊢ ∃ b, b ∈ s ∧ b ≠ a
[PROOFSTEP]
rw [ncard_eq_toFinset_card _ hsf] at hs
[GOAL]
α : Type u_1
s t : Set α
a : α
hsf : Set.Finite s
hs : 1 < Finset.card (Finite.toFinset hsf)
⊢ ∃ b, b ∈ s ∧ b ≠ a
[PROOFSTEP]
simpa only [Finite.mem_toFinset] using Finset.exists_ne_of_one_lt_card hs a
[GOAL]
α : Type u_1
s t : Set α
n : ℕ
h : ncard s = n + 1
⊢ ∃ a t, ¬a ∈ t ∧ insert a t = s ∧ ncard t = n
[PROOFSTEP]
classical
have hsf := finite_of_ncard_pos (n.zero_lt_succ.trans_eq h.symm)
rw [ncard_eq_toFinset_card _ hsf, Finset.card_eq_succ] at h
obtain ⟨a, t, hat, hts, rfl⟩ := h
simp only [Finset.ext_iff, Finset.mem_insert, Finite.mem_toFinset] at hts
refine' ⟨a, t, hat, _, _⟩
· simp only [Finset.mem_coe, ext_iff, mem_insert_iff]
tauto
simp
[GOAL]
α : Type u_1
s t : Set α
n : ℕ
h : ncard s = n + 1
⊢ ∃ a t, ¬a ∈ t ∧ insert a t = s ∧ ncard t = n
[PROOFSTEP]
have hsf := finite_of_ncard_pos (n.zero_lt_succ.trans_eq h.symm)
[GOAL]
α : Type u_1
s t : Set α
n : ℕ
h : ncard s = n + 1
hsf : Set.Finite s
⊢ ∃ a t, ¬a ∈ t ∧ insert a t = s ∧ ncard t = n
[PROOFSTEP]
rw [ncard_eq_toFinset_card _ hsf, Finset.card_eq_succ] at h
[GOAL]
α : Type u_1
s t : Set α
n : ℕ
hsf : Set.Finite s
h✝ : Finset.card (Finite.toFinset hsf) = n + 1
h : ∃ a t, ¬a ∈ t ∧ insert a t = Finite.toFinset hsf ∧ Finset.card t = n
⊢ ∃ a t, ¬a ∈ t ∧ insert a t = s ∧ ncard t = n
[PROOFSTEP]
obtain ⟨a, t, hat, hts, rfl⟩ := h
[GOAL]
case intro.intro.intro.intro
α : Type u_1
s t✝ : Set α
hsf : Set.Finite s
a : α
t : Finset α
hat : ¬a ∈ t
hts : insert a t = Finite.toFinset hsf
h : Finset.card (Finite.toFinset hsf) = Finset.card t + 1
⊢ ∃ a t_1, ¬a ∈ t_1 ∧ insert a t_1 = s ∧ ncard t_1 = Finset.card t
[PROOFSTEP]
simp only [Finset.ext_iff, Finset.mem_insert, Finite.mem_toFinset] at hts
[GOAL]
case intro.intro.intro.intro
α : Type u_1
s t✝ : Set α
hsf : Set.Finite s
a : α
t : Finset α
hat : ¬a ∈ t
h : Finset.card (Finite.toFinset hsf) = Finset.card t + 1
hts : ∀ (a_1 : α), a_1 = a ∨ a_1 ∈ t ↔ a_1 ∈ s
⊢ ∃ a t_1, ¬a ∈ t_1 ∧ insert a t_1 = s ∧ ncard t_1 = Finset.card t
[PROOFSTEP]
refine' ⟨a, t, hat, _, _⟩
[GOAL]
case intro.intro.intro.intro.refine'_1
α : Type u_1
s t✝ : Set α
hsf : Set.Finite s
a : α
t : Finset α
hat : ¬a ∈ t
h : Finset.card (Finite.toFinset hsf) = Finset.card t + 1
hts : ∀ (a_1 : α), a_1 = a ∨ a_1 ∈ t ↔ a_1 ∈ s
⊢ insert a ↑t = s
[PROOFSTEP]
simp only [Finset.mem_coe, ext_iff, mem_insert_iff]
[GOAL]
case intro.intro.intro.intro.refine'_1
α : Type u_1
s t✝ : Set α
hsf : Set.Finite s
a : α
t : Finset α
hat : ¬a ∈ t
h : Finset.card (Finite.toFinset hsf) = Finset.card t + 1
hts : ∀ (a_1 : α), a_1 = a ∨ a_1 ∈ t ↔ a_1 ∈ s
⊢ ∀ (x : α), x = a ∨ x ∈ t ↔ x ∈ s
[PROOFSTEP]
tauto
[GOAL]
case intro.intro.intro.intro.refine'_2
α : Type u_1
s t✝ : Set α
hsf : Set.Finite s
a : α
t : Finset α
hat : ¬a ∈ t
h : Finset.card (Finite.toFinset hsf) = Finset.card t + 1
hts : ∀ (a_1 : α), a_1 = a ∨ a_1 ∈ t ↔ a_1 ∈ s
⊢ ncard ↑t = Finset.card t
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
s t : Set α
n : ℕ
hs : autoParam (Set.Finite s) _auto✝
⊢ ncard s = n + 1 ↔ ∃ a t, ¬a ∈ t ∧ insert a t = s ∧ ncard t = n
[PROOFSTEP]
refine' ⟨eq_insert_of_ncard_eq_succ, _⟩
[GOAL]
α : Type u_1
s t : Set α
n : ℕ
hs : autoParam (Set.Finite s) _auto✝
⊢ (∃ a t, ¬a ∈ t ∧ insert a t = s ∧ ncard t = n) → ncard s = n + 1
[PROOFSTEP]
rintro ⟨a, t, hat, h, rfl⟩
[GOAL]
case intro.intro.intro.intro
α : Type u_1
s t✝ : Set α
hs : autoParam (Set.Finite s) _auto✝
a : α
t : Set α
hat : ¬a ∈ t
h : insert a t = s
⊢ ncard s = ncard t + 1
[PROOFSTEP]
rw [← h, ncard_insert_of_not_mem hat (hs.subset ((subset_insert a t).trans_eq h))]
[GOAL]
α : Type u_1
s t : Set α
⊢ ncard s = 2 ↔ ∃ x y, x ≠ y ∧ s = {x, y}
[PROOFSTEP]
rw [← encard_eq_two, ncard_def, ← Nat.cast_inj (R := ℕ∞), Nat.cast_ofNat]
[GOAL]
α : Type u_1
s t : Set α
⊢ ↑(↑ENat.toNat (encard s)) = 2 ↔ encard s = 2
[PROOFSTEP]
refine' ⟨fun h ↦ _, fun h ↦ _⟩
[GOAL]
case refine'_1
α : Type u_1
s t : Set α
h : ↑(↑ENat.toNat (encard s)) = 2
⊢ encard s = 2
[PROOFSTEP]
rwa [ENat.coe_toNat] at h
[GOAL]
case refine'_1
α : Type u_1
s t : Set α
h : ↑(↑ENat.toNat (encard s)) = 2
⊢ encard s ≠ ⊤
[PROOFSTEP]
rintro h'
[GOAL]
case refine'_1
α : Type u_1
s t : Set α
h : ↑(↑ENat.toNat (encard s)) = 2
h' : encard s = ⊤
⊢ False
[PROOFSTEP]
simp [h'] at h
[GOAL]
case refine'_2
α : Type u_1
s t : Set α
h : encard s = 2
⊢ ↑(↑ENat.toNat (encard s)) = 2
[PROOFSTEP]
simp [h]
[GOAL]
case refine'_2
α : Type u_1
s t : Set α
h : encard s = 2
⊢ ¬2 = ⊤
[PROOFSTEP]
exact Iff.mp ENat.coe_toNat_eq_self rfl
[GOAL]
α : Type u_1
s t : Set α
⊢ ncard s = 3 ↔ ∃ x y z, x ≠ y ∧ x ≠ z ∧ y ≠ z ∧ s = {x, y, z}
[PROOFSTEP]
rw [← encard_eq_three, ncard_def, ← Nat.cast_inj (R := ℕ∞), Nat.cast_ofNat]
[GOAL]
α : Type u_1
s t : Set α
⊢ ↑(↑ENat.toNat (encard s)) = 3 ↔ encard s = 3
[PROOFSTEP]
refine' ⟨fun h ↦ _, fun h ↦ _⟩
[GOAL]
case refine'_1
α : Type u_1
s t : Set α
h : ↑(↑ENat.toNat (encard s)) = 3
⊢ encard s = 3
[PROOFSTEP]
rwa [ENat.coe_toNat] at h
[GOAL]
case refine'_1
α : Type u_1
s t : Set α
h : ↑(↑ENat.toNat (encard s)) = 3
⊢ encard s ≠ ⊤
[PROOFSTEP]
rintro h'
[GOAL]
case refine'_1
α : Type u_1
s t : Set α
h : ↑(↑ENat.toNat (encard s)) = 3
h' : encard s = ⊤
⊢ False
[PROOFSTEP]
simp [h'] at h
[GOAL]
case refine'_2
α : Type u_1
s t : Set α
h : encard s = 3
⊢ ↑(↑ENat.toNat (encard s)) = 3
[PROOFSTEP]
simp [h]
[GOAL]
case refine'_2
α : Type u_1
s t : Set α
h : encard s = 3
⊢ ¬3 = ⊤
[PROOFSTEP]
exact Iff.mp ENat.coe_toNat_eq_self rfl
|
lemma pred_intros_finite[measurable (raw)]: "finite I \<Longrightarrow> (\<And>i. i \<in> I \<Longrightarrow> pred M (\<lambda>x. x \<in> N x i)) \<Longrightarrow> pred M (\<lambda>x. x \<in> (\<Inter>i\<in>I. N x i))" "finite I \<Longrightarrow> (\<And>i. i \<in> I \<Longrightarrow> pred M (\<lambda>x. x \<in> N x i)) \<Longrightarrow> pred M (\<lambda>x. x \<in> (\<Union>i\<in>I. N x i))" "finite I \<Longrightarrow> (\<And>i. i \<in> I \<Longrightarrow> pred M (\<lambda>x. P x i)) \<Longrightarrow> pred M (\<lambda>x. \<forall>i\<in>I. P x i)" "finite I \<Longrightarrow> (\<And>i. i \<in> I \<Longrightarrow> pred M (\<lambda>x. P x i)) \<Longrightarrow> pred M (\<lambda>x. \<exists>i\<in>I. P x i)" |
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow.contrib.learn as skflow
import pandas as pd
from sklearn.utils import shuffle
df = pd.read_csv('../data/boston.csv', header=0)
print(df.describe())
f, ax1 = plt.subplots()
plt.figure()
y = df['MEDV']
for i in range(1, 8):
number = 420 + i
ax1.locator_params(nbins=3)
ax1 = plt.subplot(number)
plt.title(list(df)[i])
# print a scatter draw of datapoints
ax1.scatter(df[df.columns[i]], y)
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
plt.show()
X = tf.placeholder("float", name="X")
Y = tf.placeholder("float", name="Y")
with tf.name_scope("Model"):
    w = tf.Variable(tf.random_normal([2], stddev=0.01), name="b1")  # feature weights
    b = tf.Variable(tf.random_normal([2], stddev=0.01), name="b0")  # intercepts
    def model(x, w, b):
        # linear model: y = w * x + b (elementwise over the two input features)
        return tf.multiply(x, w) + b
y_model = model(X, w, b)
with tf.name_scope("CostFunction"):
# use square error for cost func
cost = tf.reduce_mean(tf.pow(Y - y_model, 2))
train_op = tf.train.AdamOptimizer(0.001).minimize(cost)
sess = tf.Session()
init = tf.global_variables_initializer()
# tf.summary.FileWriter(sess.graph, './graphs', 'graph.pbtxt')
cost_op = tf.summary.scalar("loss", cost)
merged = tf.summary.merge_all()
sess.run(init)
# writer = tf.summary.FileWriter('./graphs', sess.graph)
x_vals = df[[df.columns[2], df.columns[4]]].values.astype(float)
y_vals = df[df.columns[12]].values.astype(float)
b0_temp = b.eval(session=sess)
b1_temp = w.eval(session=sess)
for a in range(1, 10):
cost_1 = 0.0
for i, j in zip(x_vals, y_vals):
sess.run(train_op, feed_dict={X: i, Y: j})
cost_1 += sess.run(cost, feed_dict={X: i, Y: j})/506.00
x_vals, y_vals = shuffle(x_vals,y_vals)
print(cost_1)
b0_temp = b.eval(session=sess)
b1_temp = w.eval(session=sess)
print(b0_temp)
print(b1_temp)
|
import linear_algebra.linear_independent
import ..dual
noncomputable theory
open_locale classical
variables {E 𝔽 ι : Type*} [field 𝔽] [finite E] {M : matroid E}
namespace matroid
/-- A matroid representation -/
structure rep (𝔽 : Type*) [field 𝔽] (M : matroid E) (ι : Type*):=
(to_fun : E → (ι → 𝔽))
(valid : ∀ (I : set E), linear_independent 𝔽 (λ (e : I), to_fun (e : E)) ↔ M.indep I)
instance : has_coe_to_fun (rep 𝔽 M ι) (λ _, E → (ι → 𝔽)) := ⟨λ φ, φ.to_fun⟩
/-- `M` is `𝔽`-representable if it has an `𝔽`-representation. -/
def is_representable (M : matroid E) (𝔽 : Type*) [field 𝔽] : Prop :=
∃ ι, nonempty (rep 𝔽 M ι)
lemma of_base (φ : rep 𝔽 M ι) {B : set E} (hB : M.base B) (e : E) :
φ e ∈ submodule.span 𝔽 (φ '' B) :=
begin
by_cases e ∈ B,
{ have h2 := @submodule.subset_span 𝔽 _ _ _ _ (φ.to_fun '' B),
have h3 : φ.to_fun e ∈ (φ.to_fun '' B),
apply (set.mem_image φ.to_fun B (φ.to_fun e)).2,
use e,
use h,
have h4 := set.mem_of_subset_of_mem h2 h3,
simp at h4,
exact h4 },
have h2 : ¬ linear_independent 𝔽 (λ f : insert e B, φ.to_fun (f : E)),
{ rw rep.valid,
apply base.dep_of_insert hB h },
by_contra h3,
apply h2,
rw linear_independent_insert' h,
refine ⟨_, h3⟩,
rw rep.valid,
apply base.indep hB,
end
lemma foo (h : M.is_representable 𝔽) :
nonempty (rep 𝔽 M (fin M.rk)) :=
begin
obtain ⟨ι, ⟨φ⟩⟩ := h,
obtain ⟨B, hB⟩ := M.exists_base,
have := of_base φ hB,
end
-- lemma foo (e f : E) (hne : e ≠ f) (h : M.r {e,f} = 1) :
end matroid
|
Relocation section '.rela.text' at offset .* contains 6 entries:
Offset Info Type Sym.Value Sym. Name \+ Addend
00080006 .* R_PPC_ADDR16_HA 00080000 .text \+ 4000020
0008000a .* R_PPC_ADDR16_LO 00080000 .text \+ 4000020
00080012 .* R_PPC_ADDR16_HA 00080000 .text \+ 4000020
00080016 .* R_PPC_ADDR16_LO 00080000 .text \+ 4000020
0408002a .* R_PPC_ADDR16_HA 00080000 _start \+ 0
0408002e .* R_PPC_ADDR16_LO 00080000 _start \+ 0
|
Before we start, let's make sure we have access to a GPU:
Edit -> Notebook settings -> Hardware accelerator
```
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
```
# ML Coding example #2: Atomization Energies with PyTorch (Feed-Forward Neural Network)
The QM7 dataset contains XYZ structures for 7101 small molecules, with up to 7 atoms of type CNO, saturated with H.
Some of them look like this:
The dataset is an attempt to map all organic molecules with up to 7 CNO atoms.
For each molecule you will be given the atomization energy calculated using a QM method (PBE0/def2-TZVP).
* Instead of the raw 7101 XYZ files, we will use the Bag-of-Bonds features for each molecule, along with the atomization energies.
References:
1. Rupp et al. (2012) Phys Rev Lett https://doi.org/10.1103/PhysRevLett.108.058301
2. Hansen et al. (2015) J Phys Chem Lett https://doi.org/10.1021/acs.jpclett.5b00831
```
!wget -O bob.npy https://www.dropbox.com/s/vyiwza2uy4jkczg/bob.npy
!wget -O hof.npy https://www.dropbox.com/s/zy717f8mwxaegff/hof.npy
```
Load the data into numpy arrays and plot the distribution of energies:
```
import numpy as np
Xall = np.load("bob.npy")
Yall = np.load("hof.npy")
import matplotlib.pyplot as plt
plt.hist(Yall)
plt.xlabel("Atomization Energy [kcal/mol]")
plt.ylabel("Counts")
plt.show()
```
The values are very large and negative, which can be a problem for neural networks. To avoid this, we can normalize the data or scale all the target values by a factor.
Scale the values by the mean of the dataset and divide into Training and Test sets:
```
n_train = 4000
n_valid = 1000
n_test = 2000
Yall = np.load("hof.npy")
y_mean = np.mean(Yall)
print(y_mean)
Yall /= y_mean
Xtrain = Xall[:n_train]
Xvalid = Xall[n_train:n_valid+n_train]
Xtest = Xall[-n_test:]
Ytrain = Yall[:n_train]
Yvalid = Yall[n_train:n_valid+n_train]
Ytest = Yall[-n_test:]
```
Plotting again!
```
plt.hist(Yall, label="All")
plt.hist(Ytrain, label="Training")
plt.hist(Ytest, label="Test")
plt.hist(Yvalid, label="Validation")
plt.xlabel("Target value")
plt.ylabel("Counts")
plt.legend()
plt.show()
```
Wondering what our representations look like? Print a row in `Xtrain`!
```
print(Xtrain[1])
print(len(Xtrain[1]))
```
## Defining our Architecture:
Four layers (including input and output), so this qualifies for the buzzword "Deep Learning":
A fully-connected neural network with two hidden layers in PyTorch:
```
import torch
import torch.nn as nn
class NeuralNet(nn.Module):
def __init__(self, input_size, hidden1_size, hidden2_size):
super(NeuralNet, self).__init__()
# Define the operations in the layers
# Here "Linear", i.e: y = a*X.T + b
self.fc1 = nn.Linear(input_size,hidden1_size)
self.fc2 = nn.Linear(hidden1_size,hidden2_size)
self.fc3 = nn.Linear(hidden2_size, 1)
def forward(self, x):
# Define the activation functions,
# i.e. the "forward" passes
x = torch.sigmoid(self.fc1(x))
x = torch.sigmoid(self.fc2(x))
x = self.fc3(x)
return x
```
Sigmoid functions again:
\begin{equation}
\sigma\left(x\right) = \frac{1}{1 + \exp\left(-x\right)}
\end{equation}
Linear connections between the layers:
\begin{equation}
y = b + \sum_i x_i w_i
\end{equation}
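To make these two formulas concrete, here is a small sanity check (not part of the original notebook, just an illustrative sketch) showing that an `nn.Linear` layer followed by `torch.sigmoid` computes exactly the expressions above:
```
import torch
import torch.nn as nn

# One Linear layer followed by a sigmoid is y = sigmoid(b + sum_i x_i * w_i)
torch.manual_seed(0)
layer = nn.Linear(3, 2)   # 3 inputs, 2 outputs
x = torch.randn(5, 3)     # batch of 5 samples

out_layer = torch.sigmoid(layer(x))
out_manual = torch.sigmoid(x @ layer.weight.T + layer.bias)

print(torch.allclose(out_layer, out_manual))  # should print True
```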
Since we are using a GPU, all the training and test data must be moved to the GPU:
```
dtype = torch.float
device = torch.device("cuda:0")
x = torch.from_numpy(Xtrain).to(device, dtype)
y = torch.from_numpy(Ytrain.reshape((n_train,1))).to(device, dtype)
xv = torch.from_numpy(Xvalid).to(device, dtype)
yv = torch.from_numpy(Yvalid.reshape((n_valid,1))).to(device, dtype)
xs = torch.from_numpy(Xtest).to(device, dtype)
ys = torch.from_numpy(Ytest.reshape((n_test,1))).to(device, dtype)
```
## Optimizing the Neural Network weights:
First, let's define the neural network and the sizes of the hidden layers. Second, we have to send it to the GPU.
```
input_size = 465
# 30 & 5 are good values
# 64 & 50 also worth a try
hidden1_size = 30
hidden2_size = 5
# Define our model
model = NeuralNet(input_size, hidden1_size, hidden2_size)
# Send it to the GPU
model = model.to(device)
```
Check parameters:
```
for param in model.parameters():
print(param.shape)
print(param)
```
Next, we have to select a loss function. For $y = \sin(x)$ we minimized the squared error:
\begin{equation}
L_2\left(\mathbf{w}\right) = \sum_{i=1}^{N} \left(y_i^\mathrm{true} - y_i^\mathrm{predicted} \right)^{2}
\end{equation}
From experience, the L1-loss function (mean absolute error) works better for this experiment:
\begin{equation}
L_1\left(\mathbf{w}\right) = \frac{1}{N}\sum_{i=1}^{N} | y_i^\mathrm{true} - y_i^\mathrm{predicted} |
\end{equation}
Lastly, we also need to define the optimizer. In this case we choose the "Adam" optimizer, which is a variant of gradient-descent optimization.
* Kingma & Ba (2014) *arXiv*, https://arxiv.org/abs/1412.6980
```
# L2 loss function (Mean Squared Error)
# loss_fn = nn.MSELoss()
# L1 loss (Mean Absolute Error)
loss_fn = nn.L1Loss()
# Optimizer, lr = learning rate
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
```
Now we write the optimization procedure:
1. Make prediction
2. Calculate the loss
3. Calculate the gradient of the loss function
4. Take a step in the direction suggested by the gradient.
Run, for example, 30000 epochs (about 30-60 seconds on Google Colab):
```
for epoch in range(30000):
# Make a prediction with the model
y_predicted = model.forward(x)
# Calculate the Loss-function
loss = loss_fn(y_predicted, y)
# Set gradient to zero
optimizer.zero_grad()
# Take the gradient of the loss function
loss.backward()
# Take a step with the optimizer
optimizer.step()
    # Print additional output
if (epoch % 1000 == 0):
mse =(y - y_predicted).pow(2).sum()
rmsd = torch.sqrt(mse/n_train) * abs(y_mean)
mae = (y - y_predicted).abs().sum() * abs(y_mean) / n_train
yvs = model.forward(xv)
rmsd_v = torch.sqrt((yvs - yv).pow(2).sum()/n_valid) * abs(y_mean)
mae_v = (yvs - yv).abs().sum() * abs(y_mean) / n_valid
mae = mae.to("cpu").detach().numpy()
rmsd = rmsd.to("cpu").detach().numpy()
mae_v = mae_v.to("cpu").detach().numpy()
rmsd_v = rmsd_v.to("cpu").detach().numpy()
print("EPOCH %7i MAE(train) = %7.2f RMSE(train) = %7.2f MAE(valid) = %7.2f RMSE(valid) = %7.2f [kcal/mol]" %
(epoch, mae, rmsd, mae_v, rmsd_v))
```
### Calculate test errors:
With the optimized neural network, we can now predict the error on the test set.
Because everything is stored on the GPU, we also have to copy the results back to the CPU and convert them to NumPy arrays for convenience:
```
# Calculate test, copy back to CPU and convert to numpy
y_pred = model(xs).to("cpu").detach().numpy().flatten() * y_mean
# Copy true test values back to CPU and convert to numpy
y_true = ys.to("cpu").detach().numpy().flatten() * y_mean
print(y_pred)
print(y_true)
mae = np.mean(np.abs(y_pred - y_true))
rmse = np.sqrt(np.mean(np.square(y_pred - y_true)))
print("MAE:", mae)
print("RMSE:", rmse)
```
Lastly, let's make a correlation plot of the true versus predicted atomization energies:
```
import matplotlib.pyplot as plt
plt.scatter(y_true,y_pred)
plt.xlabel("True Atomization Energy [kcal/mol]")
plt.ylabel("Predicted Atomization Energy [kcal/mol]")
plt.show()
```
### Note on hyperparameters and optimization
In order to have the best performing neural network, we could have changed a lot of things:
* Number of hidden layers
* Hidden layer sizes
* Type of loss function
* Optimizer + settings
* Regularization of parameters
If you are wondering how we could have added regularization to the loss function, here is an example for L2 regularization:
```
l2reg = 1e-4
for param in model.parameters():
loss += torch.norm(param) * l2reg
```
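Equivalently, most PyTorch optimizers expose a `weight_decay` argument that applies an L2 (squared-norm) penalty for you; a minimal sketch, assuming the same `model` as above (this is not part of the original notebook, and `1e-4` is an arbitrary example value):
```
# weight_decay couples an L2 (squared-norm) penalty into the gradient update
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-4)
```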
|
If $p$ and $q$ are continuous maps from a compact set $T$ to itself such that $p \circ q = q \circ p = \text{id}_T$, then $q$ is continuous. |
#include <CGAL/Exact_predicates_inexact_constructions_kernel.h>
#include <CGAL/Delaunay_triangulation_2.h>
#include <CGAL/boost/graph/graph_traits_Delaunay_triangulation_2.h>
#include <boost/graph/kruskal_min_spanning_tree.hpp>
#include <fstream>
#include <iostream>
#include <map>
typedef CGAL::Exact_predicates_inexact_constructions_kernel K;
typedef K::Point_2 Point;
typedef CGAL::Delaunay_triangulation_2<K> Triangulation;
typedef boost::graph_traits<Triangulation>::vertex_descriptor vertex_descriptor;
typedef boost::graph_traits<Triangulation>::vertex_iterator vertex_iterator;
typedef boost::graph_traits<Triangulation>::edge_descriptor edge_descriptor;
// The BGL makes use of indices associated to the vertices
// We use a std::map to store the index
typedef std::map<vertex_descriptor,int> VertexIndexMap;
// A std::map is not a property map, because it is not lightweight
typedef boost::associative_property_map<VertexIndexMap> VertexIdPropertyMap;
int main(int argc,char* argv[])
{
const char* filename = (argc > 1) ? argv[1] : "data/points.xy";
std::ifstream input(filename);
Triangulation tr;
Point p;
while(input >> p)
tr.insert(p);
// Associate indices to the vertices
VertexIndexMap vertex_id_map;
VertexIdPropertyMap vertex_index_pmap(vertex_id_map);
int index = 0;
for(vertex_descriptor vd : vertices(tr))
vertex_id_map[vd] = index++;
// We use the default edge weight which is the squared length of the edge
// This property map is defined in graph_traits_Triangulation_2.h
// In the function call you can see a named parameter: vertex_index_map
std::list<edge_descriptor> mst;
boost::kruskal_minimum_spanning_tree(tr, std::back_inserter(mst),
vertex_index_map(vertex_index_pmap));
std::cout << "The edges of the Euclidean mimimum spanning tree:" << std::endl;
for(edge_descriptor ed : mst)
{
vertex_descriptor svd = source(ed, tr);
vertex_descriptor tvd = target(ed, tr);
Triangulation::Vertex_handle sv = svd;
Triangulation::Vertex_handle tv = tvd;
std::cout << "[ " << sv->point() << " | " << tv->point() << " ] " << std::endl;
}
return EXIT_SUCCESS;
}
|
import os
import io
import datetime
import json
import numpy as np
import nextcord
import matplotlib.pyplot as plt
plt.style.use("dark_background")
from requests import Request, Session
from dotenv import load_dotenv
load_dotenv()
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
#################
# API CALL HACK #
#################
def do_req(url):
headers = {"Accepts": "application/json"}
session = Session()
session.headers.update(headers)
try:
response = session.get(url)
data = json.loads(response.text)
except Exception as e:
print(e)
data = None
return data
##################
# COINGECKO PART #
##################
# get historical data for one coin
def get_coin_historic_prix_gecko(coin, days):
print(coin)
data = do_req("https://api.coingecko.com/api/v3/coins/" + coin + "/market_chart?vs_currency=usd&days=" + str(days))["prices"]
data_arr = np.flip(np.array(data), 0)
prixes = data_arr[:, 1]
dates = data_arr[:, 0]
readable_dates = []
for date in dates:
readable_dates.append(datetime.datetime.utcfromtimestamp(int(date)/1000).strftime('%Y-%m-%d %H:%M:%S') + " UTC")
readable = np.array(readable_dates)
return prixes, readable, dates
###################
# MATPLOTLIB PART #
###################
async def print_graph(coins, data, timestamps, longreadable, longstamp, colors, linestyle, linewidth, discord_channel, days, title_addon):
# threshold for number of datapoints for displaying avax price and showing dates on x axis labels
thresh = 35
# rounding
rounding = 1
# plot size
plt.figure(figsize=(15, 6))
# define ticks (hacky)
days_num = longstamp.shape[0] if days == "max" else int(days)
tikz = np.int32(np.linspace(0, longstamp.shape[0] - 1, 25 if days_num == 1 else (thresh if days_num > thresh else (days_num + 1))))
mini = 1
# for each currency in data
for i, d in enumerate(data):
# normalize
normalized = d / d.max()
# plot line for one currency using defined color, linestyle, etc.
plt.plot(
timestamps[i],
normalized,
color = "#" + colors[i],
linestyle = linestyle[i],
linewidth = linewidth[i]
)
# if avax plot price
if coins[i] == "avalanche-2":
tikzava = np.int32(np.linspace(0, timestamps[i].shape[0] - 1, 25 if days_num == 1 else (thresh if days_num > thresh else (days_num + 1))))
if timestamps[i].shape[0] * 2 > longstamp.shape[0]:
for x, y, z in zip(timestamps[i][tikzava], np.array(normalized)[tikzava], np.array(d)[tikzava]):
plt.text(
x,
y,
str(round(z, rounding)),
color = "white",
weight = "bold",
size = 10,
rotation = 90,
)
# find global minimum
norm_mini = normalized.min()
if norm_mini < mini:
mini = norm_mini
# print full date instead of number of points in the past
plt.gca().set_xticks(longstamp[tikz])
plt.gca().set_xticklabels(longreadable[tikz], rotation = 45, ha = "right")
# other matplotlib stuff
plt.gca().set_yticks(np.linspace(0, 1, 11))
plt.gca().set_ylim(mini - 0.05, 1.05)
plt.grid(color = "#595959", linestyle = '--')
plt.legend(coins, bbox_to_anchor = (1.04, 1), loc = "upper left")
plt.xlabel("time")
plt.ylabel("$$$")
plt.title(title_addon + ": " + ("last day" if days_num == 1 else days + " days") + " crypto price comparison")
    # print image to discord channel
buf = io.BytesIO()
plt.savefig(buf, format="png", bbox_inches="tight")
buf.seek(0)
await discord_channel.send(file=nextcord.File(buf, "image.png"))
buf.close()
plt.close()
async def print_all_graphs(discord_channel, days):
coins = ["avalanche-2", "avalaunch", "roco-finance", "lydia-finance", "penguin-finance", "avaware", "yay-games"]
colors = ["ff0000", "ffff00", "f55307", "9c5bfc", "e314a8", "1864d6", "00ff00"]
linestyle = ["-", "-", "--", "--", "-", "-", "-"]
linewidth = [2, 2, 1, 1, 1, 1, 1]
title_addon = "Launchpads"
await call_print_graph(discord_channel, days, coins, colors, linestyle, linewidth, title_addon)
coins = ["avalanche-2", "pangolin", "benqi", "snowball-token", "hurricaneswap-token", "elk-finance", "yield-yak", "oh-finance", "beefy-finance", "chainlink"]
colors = ["ff0000", "ed8c0c", "065f9e" , "0cdcf7", "8a07e8", "1f6e07", "36c70a", "ed05d6", "ffffff", "3271fa"]
linestyle = ["-", "-", "-", "--", "--", "--", "--", "-", "-", "--"]
linewidth = [2, 2, 2, 1, 1, 1, 1, 1, 1, 1]
title_addon = "Defi"
await call_print_graph(discord_channel, days, coins, colors, linestyle, linewidth, title_addon)
coins = ["avalanche-2", "crabada", "treasure-under-sea", "talecraft"]
colors = ["ff0000", "25c4fa", "ffff00", "f20ca9"]
linestyle = ["-", "-", "-", "-"]
linewidth = [2, 2, 2, 2]
title_addon = "Game-Fi"
await call_print_graph(discord_channel, days, coins, colors, linestyle, linewidth, title_addon)
coins = ["avalanche-2", "baguette", "snowbank", "hoopoe", "gondola-finance", "sherpa"]
colors = ["ff0000", "ffdb6e", "858585", "ffbf00", "ff0000", "00ff00"]
linestyle = ["-", "-", "-", "-", "--", "-"]
linewidth = [2, 2, 2, 2, 2, 2]
title_addon = "Gems #1"
await call_print_graph(discord_channel, days, coins, colors, linestyle, linewidth, title_addon)
coins = ["bitcoin", "ethereum", "monero", "avalanche-2", "avalaunch"]
colors = ["ffffff", "9e9e9e", "ff8800", "ff0000", "ffff00"]
linestyle = [":", ":", ":", "-", "-"]
linewidth = [2, 2, 2, 2, 2]
title_addon = "Old coins"
await call_print_graph(discord_channel, days, coins, colors, linestyle, linewidth, title_addon)
coins = ["shiba-inu", "dogecoin", "polkadot", "cardano", "ripple", "binancecoin", "solana", "avalanche-2", "bitcoin", "ethereum", "monero"]
colors = ["f58b00", "f0e19e", "e60bba", "2240e6", "fffef0", "f5dd42", "429ef5", "ff0000", "ffffff", "9e9e9e", "ff8800"]
linestyle = ["-", "-", "--", "--", "--", "--", "--", "-","-", "-", "-"]
linewidth = [1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2]
title_addon = "Top coins"
await call_print_graph(discord_channel, days, coins, colors, linestyle, linewidth, title_addon)
async def call_print_graph(discord_channel, days, coins, colors, linestyle, linewidth, title_addon):
data = []
timestamps = []
longreadable = None
longstamp = None
age = np.Inf
for i, coin in enumerate(coins):
history, datescoin, ts = get_coin_historic_prix_gecko(coin, days)
# find oldest coin
if age > ts[-1]:
longreadable = datescoin
longstamp = ts
age = ts[-1]
# append one coin to data
data.append(history)
timestamps.append(ts)
await print_graph(coins, data, timestamps, longreadable, longstamp, colors, linestyle, linewidth, discord_channel, days, title_addon)
###############
# DISCORD PART #
###############
discord_client = nextcord.Client()
@discord_client.event
async def on_ready():
print(str(discord_client.user) + " reporting for duty!")
@discord_client.event
async def on_message(message):
if message.author == discord_client.user:
return
command_one = "!kekw"
if message.content[: len(command_one)] == command_one:
await message.channel.send("Roger, roger!")
await print_all_graphs(message.channel, message.content.split()[1])
discord_client.run(DISCORD_TOKEN)
|
Continue reading "The Daily 3: What Makes Me Happy"
I dig the design you picked! I’m excited to read more. You know how I feel about Maribel the writer!
Thanks Pearl, the reader! I’m excited to share and hear what people have to say. So PLEASE post whenever you want!
module 14-implicitConfigurations where
postulate
Integral : Set → Set
add : ∀ {A} {{ intA : Integral A }} → A → A → A
mul : ∀ {A} {{ intA : Integral A }} → A → A → A
mod : ∀ {A} {{ intA : Integral A }} → A → A → A
N : Set
zero one two three : N
nInt : Integral N
private postulate Token : Set
record Modulus (s : Token) (A : Set) : Set where
field modulus : A
data M (s : Token) (A : Set) : Set where
MkM : A → M s A
unMkM : ∀ {A s} → M s A → A
unMkM (MkM a) = a
private postulate theOnlyToken : Token
withModulus :
∀ {A} → {{ intA : Integral A }} → (modulus : A) →
(∀ {s} → {{ mod : Modulus s A }} → M s A) → A
withModulus modulus f = unMkM
(f {theOnlyToken} {{ record { modulus = modulus } }})
open Modulus {{...}}
normalize : ∀ {s A} {{intA : Integral A}} {{mod : Modulus s A}} →
A → M s A
normalize a = MkM (mod modulus a)
_+_ : ∀ {s A} {{intA : Integral A}} {{mod : Modulus s A}} →
M s A → M s A → M s A
(MkM a) + (MkM b) = normalize (add a b)
_*_ : ∀ {s A} → {{intA : Integral A}} → {{mod : Modulus s A}} →
M s A → M s A → M s A
(MkM a) * (MkM b) = normalize (mul a b)
test₁ : N
test₁ = withModulus two (let o = MkM one in (o + o)*(o + o))
testExpr : ∀ {s} → {{mod : Modulus s N}} → M s N
testExpr = let o = MkM one ; t = MkM two in
(o + t) * t
test₂ : N
test₂ = withModulus three testExpr
|
Formal statement is: lemma measurable_top[measurable]: "top \<in> measurable M (count_space UNIV)" Informal statement is: The constant function $f(x) = 1$ is measurable. |
# Example : 2.5B Chapter : 2.5 Pageno : 88
# Inverse of the given matrices
B<-matrix(c(4,8,3,7),ncol=2)
C<-matrix(c(6,6,6,0),ncol=2)
S<-matrix(c(1,1,1,0,1,1,0,0,1),ncol=3)
B1<-solve(B)
C1<-solve(C)
S1<-solve(S)
print("Inverses of given matrices ")
print(B1)
print(C1)
print(S1) |
theory Submission
imports Defs
begin
fun compl' :: "intervals \<Rightarrow> intervals" where
"compl' [[l,\<infinity>)] = []"
| "compl' [[l,r)] = [[r, \<infinity>)]"
| "compl' ([l1,r1)#[l2,r2)#is) = [r1, l2)#compl' ([l2,r2)#is)"
| "compl' [] = []"
fun compl where
"compl [] = [[0, \<infinity>)]"
| "compl ([l, r)#is) = (if l = 0 then [] else [[0, l)]) @ compl' ([l, r)#is)"
\<comment> \<open>Simon: Needed 15 min for reading & def.\<close>
lemma inv'_anti_mono:
"inv' k' ins" if "inv' k ins" "k \<ge> k'"
using that
apply (induction ins rule: inv'.induct)
apply auto
done
lemma inv'_tighten:
"inv' l ([l, r)#ins)" if "inv' k ([l, r)#ins)"
using that
using inv'.elims(3) by fastforce
lemma compl'_inv':
assumes "inv' k ins"
shows "inv' (Suc k) (compl' ins)"
using assms
apply (induction ins arbitrary: k rule: compl'.induct)
apply (simp_all add: inv_def)
apply (auto simp add: inv_def)
subgoal
using inv'.elims(2) by fastforce
(*
subgoal premises prems
using prems(2-) apply -
apply (frule prems(1))
apply (rule inv'_anti_mono, assumption)
using inv'_anti_mono
oops
*)
by (smt (verit) Suc_le_eq compl'.elims interval.inject inv'.elims(3) inv'.simps(3) inv'.simps(4) list.discI list.inject)
(*
by (smt (z3) Suc_le_eq compl'.elims interval.inject inv'.simps(1) inv'.simps(2) inv'.simps(3) inv'.simps(4) list.inject)
*)
(*
by (smt (z3) Suc_le_eq compl'.elims compl'.simps(1) enat.inject interval.inject inv'.elims(1) inv'.simps(1) inv'.simps(2) inv'.simps(4) list.inject zero_le)
*)
(*
lemma compl'_inv:
assumes "inv ins"
shows "inv (compl' ins)"
using assms compl'_inv' by (simp add: inv_def)
*)
lemma [simp]:
"set_of [i] = set_of_i i"
unfolding set_of_def by simp
lemma set_of_cons':
"fold (\<union>) (map set_of_i xs) S = S \<union> fold (\<union>) (map set_of_i xs) {}"
unfolding set_of_def
apply (induction xs arbitrary: S)
apply (simp add: set_of_def)
by (smt (z3) Un_assoc Un_left_commute fold_simps(2) list.simps(9))
lemma set_of_cons:
"set_of (a # xs) = set_of_i a \<union> set_of xs"
unfolding set_of_def
apply simp
apply (rule set_of_cons')
done
lemma Un_Diff_R:
assumes "A \<inter> C = {}"
shows "A \<union> B - C = A \<union> (B - C)"
using assms
by blast
lemma inv'_inf:
assumes "inv' k ([l,\<infinity>) # ins)"
shows "ins = []"
using assms
apply (induction "[l,\<infinity>)#ins" arbitrary: l ins rule: inv'.induct)
apply auto
done
lemma compl'_lb':
assumes "inv' k ([l,r)#ins)"
shows "set_of (compl' ([l,r)#ins)) \<subseteq> {r..}"
using assms
apply (induction "[l,r)#ins" arbitrary: k l r ins rule: compl'.induct)
apply (auto simp add: set_of_def; fail)
apply (simp add: set_of_cons)
apply rule
apply force
subgoal for l1 r1 l2 r2 "is" k
apply (cases r2)
apply simp
apply force
apply simp
apply (auto dest!: inv'_inf simp: set_of_def)
done
done
lemma compl'_lb:
assumes "inv' k ([l,r)#ins)"
shows "set_of (compl' ([l,r)#ins)) \<subseteq> (case r of \<infinity> \<Rightarrow> {} | enat r \<Rightarrow> {r..})"
using assms
apply (cases r)
apply simp
apply (rule compl'_lb')
apply force
apply simp
apply (drule inv'_inf)
apply simp
done
lemma interval_aux:
fixes l r1 r2 :: nat
assumes "l < r1" "r1 < l2"
shows "{0..<l} \<union> {r1..<l2} = {0..<l2} - {l..<r1}"
using assms by auto
lemma compl'_correct:
assumes "inv' k ([l,r)#ins)"
shows "set_of (compl' ([l,r)#ins)) \<union> {0..<l} = -set_of ([l,r)#ins)"
using assms
apply (induction ins arbitrary: k l r)
subgoal for k l r
apply (cases "[[l,r)]" rule: compl'.cases)
apply (auto simp add: set_of_def; fail)
apply (auto simp add: set_of_def; fail)
apply (auto simp add: set_of_def; fail)
apply (auto simp add: set_of_def; fail)
apply (auto simp add: set_of_def; fail)
done
subgoal for a ins k l r
apply (cases "[l,r) # a # ins" rule: compl'.cases)
apply (auto simp add: set_of_def; fail)
apply (auto simp add: set_of_def; fail)
defer
apply (auto simp add: set_of_def; fail)
apply (auto simp add: set_of_def; fail)
apply simp
subgoal premises prems for r1 l2 r2
using prems(1)[of "Suc r1" "l2" "r2"] prems(2-)
apply simp
apply (subst set_of_cons)
apply simp
apply (subgoal_tac "
{r1..<l2} \<union> set_of (compl' ([l2,r2) # ins)) \<union> {0..<l} = (set_of (compl' ([l2,r2) # ins)) \<union> {0..<l2}) - {l..<r1}")
apply simp
apply (simp add: set_of_cons)
apply blast
apply (subgoal_tac "
{r1..<l2} \<union> {0..<l} = {0..<l2} - {l..<r1}")
apply (subst Un_commute)
apply (subst Un_assoc)
apply (subst Un_Diff_R)
subgoal
apply (elim conjE)
thm compl'_lb
apply (frule compl'_lb)
apply (auto split: enat.split_asm)
done
apply simp
apply (subst Un_commute)
apply (rule interval_aux)
apply auto
thm inv'.cases
apply (cases r2)
apply simp
apply simp
apply (frule inv'_inf)
apply auto
done
done
done
lemma compl_set_of:
assumes "inv ins"
shows "set_of (compl ins) = -set_of ins"
apply (cases ins rule: compl.cases)
apply simp
apply simp
using assms[unfolded inv_def]
apply simp
apply (frule compl'_correct)
apply (auto simp: set_of_cons)
done
\<comment> \<open>Simon: Needed ~2h including def.\<close>
theorem compl_inv:
assumes "inv ins"
shows "inv (compl ins)"
apply -
subgoal
using assms
unfolding inv_def
apply (cases ins rule: compl.cases)
apply simp
apply (auto intro: compl'_inv')
defer
thm compl'_inv'
apply (rule compl'_inv')
thm inv'_tighten
apply (erule inv'_tighten)
apply (drule compl'_inv')
apply (erule inv'_anti_mono)
apply rule
done
done
end |
[STATEMENT]
lemma RepFun_ecut: "y \<le> z \<Longrightarrow> RepFun y (ecut f z) = RepFun y f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. y \<le> z \<Longrightarrow> RepFun y (ecut f z) = RepFun y f
[PROOF STEP]
apply (auto simp: hf_ext)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x u. \<lbrakk>y \<le> z; u \<^bold>\<in> y; \<forall>xa. (xa \<^bold>\<in> x) = (xa \<^bold>\<in> ecut f z u)\<rbrakk> \<Longrightarrow> \<exists>ua. ua \<^bold>\<in> y \<and> (\<forall>x. (x \<^bold>\<in> ecut f z u) = (x \<^bold>\<in> f ua))
2. \<And>x u. \<lbrakk>y \<le> z; u \<^bold>\<in> y; \<forall>xa. (xa \<^bold>\<in> x) = (xa \<^bold>\<in> f u)\<rbrakk> \<Longrightarrow> \<exists>ua. ua \<^bold>\<in> y \<and> (\<forall>x. (x \<^bold>\<in> f u) = (x \<^bold>\<in> ecut f z ua))
[PROOF STEP]
apply (metis ecut_def hsubsetD le_eclose)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x u. \<lbrakk>y \<le> z; u \<^bold>\<in> y; \<forall>xa. (xa \<^bold>\<in> x) = (xa \<^bold>\<in> f u)\<rbrakk> \<Longrightarrow> \<exists>ua. ua \<^bold>\<in> y \<and> (\<forall>x. (x \<^bold>\<in> f u) = (x \<^bold>\<in> ecut f z ua))
[PROOF STEP]
apply (metis ecut_apply le_eclose hsubsetD)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
D L
B G
N Z
V Y
G R
E L
O W
Q X
Y J
W R
H P
L P
X T
S K
U R
P Z
A Z
M F
Z K
J K
I C
K C
R C
T F
F C
E C
H S
X K
E Y
I T
T C
R T
X U
I R
I K
L A
P C
D S
Q A
N G
W P
B E
M T
L C
E F
M I
M Z
U F
W K
E W
B W
A I
M K
X F
O Z
W C
Q M
K R
Y C
A T
G L
G A
J I
N R
D W
L R
Y P
S M
O I
Z J
H M
L K
U M
G T
O L
Z R
N E
U I
G Q
H R
U C
L U
H Z
P F
I F
B N
J R
O S
Y T
G S
N O
Y X
B X
A F
Z I
K F
J C
D G
P M
K T
X M
|
lemma bounded_linear_componentwise_iff: "(bounded_linear f') \<longleftrightarrow> (\<forall>i\<in>Basis. bounded_linear (\<lambda>x. f' x \<bullet> i))" (is "?lhs = ?rhs") |
active component C {
internal port p priority "abc"
}
|
{-# OPTIONS --without-K --safe #-}
open import Categories.Category
module Categories.Category.Construction.Properties.Presheaves.Cartesian {o ℓ e} (C : Category o ℓ e) where
open import Level
open import Data.Unit
open import Data.Product using (_,_)
open import Data.Product.Relation.Binary.Pointwise.NonDependent
open import Function.Equality using (Π) renaming (_∘_ to _∙_)
open import Relation.Binary
open import Categories.Category.Cartesian
open import Categories.Category.Construction.Presheaves
open import Categories.Category.Instance.Setoids
open import Categories.Functor
open import Categories.Functor.Properties
open import Categories.Functor.Presheaf
open import Categories.NaturalTransformation
import Categories.Object.Product as Prod
import Categories.Morphism.Reasoning as MR
open Π using (_⟨$⟩_)
module _ {o′ ℓ′ o″ ℓ″} where
  Presheaves× : ∀ (A : Presheaf C (Setoids o′ ℓ′)) (B : Presheaf C (Setoids o″ ℓ″)) → Presheaf C (Setoids (o′ ⊔ o″) (ℓ′ ⊔ ℓ″))
  Presheaves× A B = record
{ F₀ = λ X → ×-setoid (A.₀ X) (B.₀ X)
; F₁ = λ f → record
{ _⟨$⟩_ = λ { (a , b) → A.₁ f ⟨$⟩ a , B.₁ f ⟨$⟩ b }
; cong = λ { (eq₁ , eq₂) → Π.cong (A.₁ f) eq₁ , Π.cong (B.₁ f) eq₂ }
}
; identity = λ { (eq₁ , eq₂) → A.identity eq₁ , B.identity eq₂ }
; homomorphism = λ { (eq₁ , eq₂) → A.homomorphism eq₁ , B.homomorphism eq₂ }
; F-resp-≈ = λ { eq (eq₁ , eq₂) → A.F-resp-≈ eq eq₁ , B.F-resp-≈ eq eq₂ }
}
where module A = Functor A
module B = Functor B
module IsCartesian o′ ℓ′ where
private
module C = Category C
open C
P = Presheaves′ o′ ℓ′ C
module P = Category P
S = Setoids o′ ℓ′
module S = Category S
Presheaves-Cartesian : Cartesian P
Presheaves-Cartesian = record
{ terminal = record
{ ⊤ = record
{ F₀ = λ x → record
{ Carrier = Lift o′ ⊤
; _≈_ = λ _ _ → Lift ℓ′ ⊤
; isEquivalence = _
}
}
; ! = _
; !-unique = _
}
; products = record
{ product = λ {A B} →
let module A = Functor A
module B = Functor B
in record
{ A×B = Presheaves× A B
; π₁ = ntHelper record
{ η = λ X → record
{ _⟨$⟩_ = λ { (fst , _) → fst }
; cong = λ { (eq , _) → eq }
}
; commute = λ { f (eq , _) → Π.cong (A.F₁ f) eq }
}
; π₂ = ntHelper record
{ η = λ X → record
{ _⟨$⟩_ = λ { (_ , snd) → snd }
; cong = λ { (_ , eq) → eq }
}
; commute = λ { f (_ , eq) → Π.cong (B.F₁ f) eq }
}
; ⟨_,_⟩ = λ {F} α β →
let module F = Functor F
module α = NaturalTransformation α
module β = NaturalTransformation β
in ntHelper record
{ η = λ Y → record
{ _⟨$⟩_ = λ S → α.η Y ⟨$⟩ S , β.η Y ⟨$⟩ S
; cong = λ eq → Π.cong (α.η Y) eq , Π.cong (β.η Y) eq
}
; commute = λ f eq → α.commute f eq , β.commute f eq
}
; project₁ = λ {F α β x} eq →
let module F = Functor F
module α = NaturalTransformation α
module β = NaturalTransformation β
in Π.cong (α.η x) eq
; project₂ = λ {F α β x} eq →
let module F = Functor F
module α = NaturalTransformation α
module β = NaturalTransformation β
in Π.cong (β.η x) eq
; unique = λ {F α β δ} eq₁ eq₂ {x} eq →
let module F = Functor F
module α = NaturalTransformation α
module β = NaturalTransformation β
module δ = NaturalTransformation δ
in Setoid.sym (A.₀ x) (eq₁ (Setoid.sym (F.₀ x) eq))
, Setoid.sym (B.₀ x) (eq₂ (Setoid.sym (F.₀ x) eq))
}
}
}
module Presheaves-Cartesian = Cartesian Presheaves-Cartesian
|
Formal statement is: lemma winding_number_homotopic_paths_null_explicit_eq: assumes "path p" and \<zeta>: "\<zeta> \<notin> path_image p" shows "winding_number p \<zeta> = 0 \<longleftrightarrow> homotopic_paths (-{\<zeta>}) p (linepath (pathstart p) (pathstart p))" (is "?lhs = ?rhs") Informal statement is: If $p$ is a path and $\zeta$ is not in the image of $p$, then the winding number of $p$ around $\zeta$ is zero if and only if $p$ is homotopic to the constant path. |
If $F$ is an inf-continuous function from the set of measurable functions to itself, then the greatest fixed point of $F$ is measurable. |
theory KPair
imports UPair SetRel
begin
context GZF begin
subsection \<open>Kuratowski ordered pairs\<close>
definition kpair :: "['a,'a] \<Rightarrow> 'a" where
"kpair a b \<equiv> {{a,b},{a,a}}"
definition is_kpair :: \<open>'a \<Rightarrow> bool\<close>
where "is_kpair p \<equiv> p : SetOf Set \<and> (\<exists>a b. p = kpair a b)"
lemma kpair_typ_setof : "kpair : SetMem \<rightarrow> SetMem \<rightarrow> SetOf Set" unfolding kpair_def
by (rule funI, rule funI, rule upair_setof[OF upair_set upair_set])
lemmas kpair_setof [typ_intro] = funE[OF funE[OF kpair_typ_setof]]
lemma is_kpairI :
assumes "b : SetMem" "c : SetMem"
shows "kpair b c : is_kpair"
using kpair_setof assms
unfolding is_kpair_def has_ty_def by auto
lemma is_kpairE :
assumes "p : is_kpair"
obtains b c where
"b : SetMem" "c : SetMem" "p = kpair b c"
proof -
from assms obtain b c where
"p : SetOf Set" and p_eq:"p = kpair b c"
unfolding is_kpair_def has_ty_def by blast
hence bb:"{b,b} : SetMem" and bc:"{b,c} : SetMem"
using upair_set_setmem[OF setof_set] unfolding kpair_def by auto
hence "{b,c} : Set"
using setof_mem[OF \<open>p : SetOf Set\<close>]
upairI1[OF bc bb]
unfolding \<open>p = kpair b c\<close> kpair_def by auto
hence "b : SetMem" "c : SetMem" "p = kpair b c"
using upair_set_setmem p_eq by auto
thus ?thesis ..
qed
lemma kpair_iff :
assumes "a : SetMem" "b : SetMem" "c : SetMem" "d : SetMem"
shows "kpair a b = kpair c d \<longleftrightarrow> a = c \<and> b = d"
proof -
have "upair a b : SetMem" "upair a a : SetMem" "upair c d : SetMem" "upair c c : SetMem"
using set_setmem[OF upair_set] assms by auto
thus ?thesis unfolding kpair_def using upair_eq_iff assms by fastforce
qed
lemma is_kpairD :
assumes "kpair b c : is_kpair"
shows "b : SetMem \<and> c : SetMem"
proof
from assms have
kp:"kpair b c : SetOf Set"
unfolding is_kpair_def has_ty_def by blast
hence bb:"{b,b} : SetMem" and bc:"{b,c} : SetMem"
using upair_set_setmem[OF setof_set] unfolding kpair_def by auto
hence "{b,c} : Set" using setof_mem[OF kp] upairI1[OF bc bb]
unfolding kpair_def by auto
thus "b : SetMem" "c : SetMem"
using upair_set_setmem by auto
qed
theorem GZF_OPair :
"class.OPair is_kpair kpair SetMem"
proof
show "kpair : SetMem \<rightarrow> SetMem \<rightarrow> is_kpair \<triangle> SetMem"
by (rule funI, rule funI, rule intI, rule is_kpairI, auto,
rule set_setmem[OF setof_set[OF kpair_setof]], auto)
show "\<forall>a : SetMem. \<forall>b : SetMem. \<forall>c : SetMem. \<forall>d : SetMem.
(kpair a b = kpair c d) = (a = c \<and> b = d)"
unfolding tall_def
using kpair_iff by auto
show "\<forall>p : is_kpair. \<exists>a b. p = kpair a b"
unfolding tall_def
using is_kpairE by meson
show "\<forall>b. SetMem b \<longleftrightarrow> (\<exists>p : is_kpair. \<exists>c. p = kpair b c \<or> p = kpair c b)"
proof (auto)
fix b assume "SetMem b"
hence "kpair b b : is_kpair"
using is_kpairI unfolding has_ty_def by auto
thus "\<exists>p : is_kpair. \<exists>c. p = kpair b c \<or> p = kpair c b"
by auto
next
fix b c assume "kpair b c : is_kpair"
thus "SetMem b"
using is_kpairD unfolding has_ty_def by auto
next
fix b c assume "kpair b c : is_kpair"
thus "SetMem c"
using is_kpairD unfolding has_ty_def by auto
qed
qed
sublocale OPair is_kpair kpair SetMem GZF_default
by (intro_locales, rule GZF_OPair)
section \<open>Cartesian Products of Kuratowski Pairs\<close>
(*Interpret Cartesian Product locale, using GZF for sets, and kpair for ordered pairs. *)
lemma kpair_typ_iskpair : "kpair : SetMem \<rightarrow> SetMem \<rightarrow> is_kpair"
by (rule funI, rule funI, rule is_kpairI)
lemma kpair_typ_setmem : "kpair : SetMem \<rightarrow> SetMem \<rightarrow> SetMem"
by (rule funI, rule funI, rule set_setmem[OF setof_set[OF kpair_setof]])
theorem GZF_CartProd :
"class.CartProd_axioms SetMem is_kpair kpair SetMem"
by (unfold_locales, rule kpair_typ_iskpair, rule kpair_typ_setmem)
end
end |
using PortHamiltonian
using PyPlot
using LinearAlgebra
using ForwardDiff
include("dataexperiment.jl")
# load structural parameters
pslosh, pbeam, prigid = dataexperiment(0.4);
L = pslosh["a"];
N = 20;
disc = discrete_phs_closed(N, 0, L)
Br = [Matrix(1I, N, N); zeros(N,N);zeros(1,N)];
B = blkdiag(zeros(2*N,1),[1]);
D = blkdiag(disc.D,[0]);
Jrb = [0];
invM = disc.Q[N+1:end,N+1:end]
R = 000000.00*Br*norm(invM)*Br';
J = blkdiag(disc.J,Jrb)-R;
# defining Q for saint-venant equation (no rotation)
rho = pslosh["rho"];
g = pslosh["g"];
b = pslosh["b"];
hbar = pslosh["h"];
xi,w = PortHamiltonian.lgwt(N,0,L);
Q = blkdiag(disc.Q,[0])
pfluid = PortHamiltonian.Phs(J, B, D, Q);
#PortHamiltonian.set_constraint!(pfluid, B[:,1],[0])
function Hamiltonian(X)
alpha1 = X[1:N]
alpha2 = X[N+1:2*N]
p = X[end]
M = w' * (alpha1 .* alpha2)
H = (w'*((alpha1 .* (alpha2.^2))/rho .+ rho*g*((alpha1).^2)/b)/2 .+ ((p.-M).^2)/(2*prigid["mass"]))[1]
return H
end
hess = x->ForwardDiff.hessian(Hamiltonian, x)
xeq = [ones(N,1)*b*hbar;zeros(N,1);0.001*prigid["mass"]*0][:]
Qlin = hess(xeq[:])
pfluid.Q = Qlin
pfluid.Hamiltonian = x-> Hamiltonian(x)
pfluid.GradHam = x-> ForwardDiff.gradient(Hamiltonian, x)
pfluid.hessian = x-> hess(x)
#pfluid.GradHam = x -> Q*x
function dynamics(t, X, Xd)
Xd[1:end]= pfluid.J * pfluid.GradHam(X[1:end]) + 0.2*pfluid.B[:,2] * sin(2*pi*0.5*t)
#r[end:end] = pfluid.G' * pfluid.hessian(X[1:end-1]) * (pfluid.J * pfluid.GradHam(X[1:end-1]) + pfluid.B[:,3] * sin(2*pi*t) *0.5+ pfluid.G * X[end])
end
#
using Sundials
#
t = range(0,stop = 8,length = 1000)
yout = Sundials.cvode(dynamics,xeq, collect(t))
#yout, ypout = Sundials.idasol(dynamics2, [xeq;l0;0], [xeq;0;0]*0, [t])
pfluid.GradHam = x-> Qlin*x
youtlin = Sundials.cvode(dynamics,xeq, collect(t))
#youtlin, ypoutlin = Sundials.idasol(dynamics2, [xeq;l0; 0], [xeq;0; 0]*0, [t])
#pn = PortHamiltonian.constraint_elimination(pfluid)
using PyPlot
figure(1);
surf(xi,t,yout[:,N+1:end-1])
#surf(xi,t,yout[:,1:N])
#include("anima.jl") |
\documentclass[pdftex,10pt,a4paper]{article}
%Can change the pt, papersize etc.
\usepackage{amsmath} %For both in-line and equation mode
\numberwithin{equation}{section} %Numbering of our equations per section
\usepackage{algorithm}
\usepackage{algorithmic} %Algorithm styles, need to be nested for the example shown
\usepackage{fancyhdr} %For our headers
\usepackage{graphicx} %Inserting images
\usepackage{lipsum} %Blank text fill, delete me when finished
\usepackage{setspace} %Spacing on the front page for crest and titles
\usepackage[]{fncychap} % Styles can be Sonny, Lenny, Glenn, Conny, Rejne, Bjarne and Bjornstrup
\usepackage[hyphens]{url} %Deals with hyphens in urls to make them clickable
\usepackage{xcolor} %Great if you want coloured text
\usepackage{tabularx}
\usepackage{appendix} %Take a wild guess slick
\usepackage{amsmath, amssymb, amsthm}
\usepackage{wasysym}
\usepackage[export]{adjustbox}
%KEEP THIS ONE LAST it's quite buggy, it allows you to click on links within the pdf and web links without changing the colour. The mouse cursor simply changes its icon to indicate to the user. Great tool - still awkward
\usepackage[hidelinks]{hyperref}
%This will tell the compiler to do the header style, page and spacing between the header and text
\fancyhf{}
\pagestyle{fancy}
\renewcommand{\headrulewidth}{0.2pt}
\fancyhead[L]{Group 19}
\fancyhead[C]{CISC 204: Modelling Project (FINAL)}
\fancyhead[R]{Page \thepage}
%%%%%%%%%%%%%%%%%%%%%%%%%% DOCUMENT STARTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newcommand{\groupid}{GRP\_123}
\newcommand{\projectname}{Project Name}
\newcommand{\authors}
{
Name of Teammate 1 (and their NetID)\\
Name of Teammate 2 (and their NetID)\\
Name of Teammate 3 (and their NetID)\\
}
%Lets begin the document, some chapters have examples in to give you an idea
\begin{document}
\tableofcontents
\section*{Summary}
\addcontentsline{toc}{section}{Summary}
% \vspace{2cm}
We are exploring the popular board game 'The Resistance' through a formal logic lens, where the turn-based format with 2 teams (Spies, Resistance) and hidden player identities allow the establishment of propositional models that are solvable by computer. The 7 players go on a series of 'missions' which are voted to go ahead or not by a subset of all players selected by a leader in each of the 5 rounds. A mission succeeds for the Resistance team and gains them a point if all participants submit a 'success' card in an anonymous choice procedure, which is the key game mechanic as a given player doesn't directly know the team membership of other players and is forced to infer this information as the rounds progress. Spies can submit a 'fail' card and cause the mission to fail which gives them one point, and either team wins the game with 3 points.
Implementation proved to be non-trivial due to assigning models encompassing all possible play combinations per round and evaluating their satisfiability. Critically, votes were assigned randomly to simulate player behaviour, which enabled a method of calculating the likelihood of the Resistance team winning at each stage of the game, as suggested by Prof. Muise after the proposal in order to properly expand the scope of our project.
\section*{Propositions}
\addcontentsline{toc}{section}{Propositions}
%List of the propositions used in the model, and their (English) interpretation.
$X_r$ is true when the game is in round $r$ (e.g. $X_2$ is true during round 2)\newline
$Z_s$ is true if current mission is rejected $s$ times (e.g. $Z_3$ means the 3rd consecutive rejection) \newline
Note that $r$ and $s$ both range from 1 to 5, but $X_r$ and $Z_s$ don't necessarily have the same truth value when $r=s$ due to how the game functions\newline
$Y_k$ is true if mission $k$ is approved, false if not (e.g. $\neg Y_4$ means mission 4 not approved) \newline
$P_i$ is true if player $i$ has voted to approve the current mission and false otherwise, where $i$ ranges from 1 to 6 \newline
$Q_j$ is true if player $j$ has played a success token in the currently approved mission, false if player $j$ has played a fail token (e.g. $\neg Q_1$ means player 1 has played a fail token) \newline
Note that $j$ ranges from either 1 to 3 or 1 to 4 depending on the number of players per round \newline
$S_r$ is true if the current mission is a success for team Resistance and false otherwise, and vice-versa for team Spies \newline
$V$ represents the overall game victory condition: true for the Resistance winning the game, false for the Spies emerging victorious\newline
\section*{Constraints}
\addcontentsline{toc}{section}{Constrains}
%List of constraint types used in the model and their (English) interpretation. You only need to provide one example for each constraint type: e.g., if you have constraints saying “cars have one colour assigned” in a car configuration setting, then you only need to show the constraints for a single car. Essentially, we want to see the pattern for all of the types of constraints, and not every constraint enumerated.\newline
Detailed description of the game rules:
7 players total $\rightarrow$ 4 R, 3 S\newline
\indent 5 total rounds ('missions')$\rightarrow$ a team wins if they succeed in 3/5 in any order\newline
Round structure:
\indent r1: 3 players \newline
\indent r2: 4 players \newline
\indent r3: 3 players \newline
\indent r4: 4 players \newline
\indent r5: 4 players \newline
Before the game starts (i.e. r0) each player is randomly assigned a team which none of the other players know. This assignment lasts until the game ends.
At the start of each round, one player is assigned the role of 'mission leader' \& the remaining 6 are given 1 accept and 1 reject token for voting purposes. The leader then gets to choose the corresponding number of players per round from the pool to send on the mission as above (leader not included).\newline
Mission start:\newline \newline
\indent Selected players vote to either accept or reject the current mission. \newline
\indent If a majority reject the mission, then the next counterclockwise player is \indent assigned leader. NOTE: If 5 rejections occur in a row then the S team \indent wins automatically. The vote tracker resets every round (e.g. r2 can have 1 \indent rejection, r3 0, r4 3, etc.)\newline
\indent If a majority accept the mission, then each active player is given 1 success \indent card and 1 failure card. NOTE: R players have to play success every time, \indent whereas S players can play either success or failure. Once all players turn in \indent their cards then the total number is counted. If there's $\ge$1 fail cards then \indent the current mission fails $\rightarrow$ 1 point for team S. Otherwise, 1 point for team R \newline
\indent If there's a tie of accept/reject tokens (possible on rounds w/ an even \indent number of players) then flip a coin to see whether the mission happens or \indent not (H yes, T no).\newline
Mission end\newline
At the end of any round, if a team gets 3 points then they are declared the \indent winner, unless the above win condition for team S is satisfied. \newline
As per Prof. Muise's suggestion, we've expanded the scope of the constraints by imagining that at the time of player selection for each mission the leader will have access to a computer with the Python scripts for this project, and they will attempt to calculate an estimate of their chances of winning based on the probability that the other players are on team R or team S. This is done by simply counting the number of valid models (propositional formulas evaluating to true under the selected conditions) and dividing by the total number of models:
\begin{equation}
P(win)=\frac{n_{valid}}{n_{valid} + n_{invalid}}
\end{equation} \newline
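For instance (the counts below are purely illustrative and not taken from an actual run of our scripts), if the enumeration at a given decision point yielded 96 satisfying models out of 160 total, the leader's estimate would be
\begin{equation}
P(win)=\frac{96}{160}=0.6
\end{equation} \newline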
We proceed through an example game with specific votes \& tokens to illustrate how the constraints are expressed in propositional logic.\newline
Round 1:\newline
$X_1 \land (\neg P_1 \land \neg P_2 \land P_3 \land \neg P_4 \land \neg P_5 \land P_6) \rightarrow \neg Y_1 \land Z_1$ \newline
Here 4/6 players voted to reject the mission so $Y_1, Z_1$ are updated. \newline \newline
$X_1 \land (P_1 \land \neg P_2 \land \neg P_3 \land \neg P_4 \land \neg P_5 \land \neg P_6) \rightarrow \neg Y_1 \land Z_1 \land Z_2$ \newline
Here 5/6 players voted to reject the mission, with $Z_2$ added accordingly.
...\newline
\indent Voting rounds 3 and 4 also result in rejections.\newline
\indent ...\newline
$X_1 \land (\neg P_1 \land \neg P_2 \land \neg P_3 \land \neg P_4 \land \neg P_5 \land \neg P_6) \rightarrow \neg Y_1 \land Z_1 \land Z_2 \land Z_3 \land Z_4 \land Z_5$ \newline
\indent $Z_5 \rightarrow \neg V$\newline
After 5 rejections, the Spies automatically win the game. \newline
Alternatively, let's say the 1st mission is approved:
$X_1 \land ( P_1 \land \neg P_2 \land P_3 \land P_4 \land \neg P_5 \land P_6) \rightarrow Y_1 $ \newline
Only 2/6 players voted against so now the mission can go ahead. \newline
$Y_1 \land ( Q_1 \land Q_2 \land Q_3) \rightarrow S_1 $\newline
\indent In this scenario round 1 counts as a success for the Resistance as none of the \indent players put forward a fail token.\newline
Round 2:
$X_2 \land ( P_1 \land P_2 \land P_3 \land \neg P_4 \land P_5 \land P_6) \rightarrow Y_2 $ \newline
$Y_2 \land ( \neg Q_1 \land Q_2 \land Q_3 \land \neg Q_4) \rightarrow \neg S_2 $\newline
Here the mission is approved right away, and the Spies win the round by playing \indent failure tokens.\newline
Round 3:
$X_3 \land ( \neg P_1 \land \neg P_2 \land P_3 \land \neg P_4 \land P_5 \land \neg P_6) \rightarrow \neg Y_3 \land Z_1$ \newline
$X_3 \land ( P_1 \land P_2 \land P_3 \land \neg P_4 \land P_5 \land P_6) \rightarrow Y_3 \land Z_1 \land \neg Z_2$ \newline
Here the mission is approved on the 2nd voting round; notice how the $Z$ \indent counter resets at every round, so the single rejection here sets only $Z_1$.\newline
$Y_3 \land ( Q_1 \land Q_2 \land Q_3) \rightarrow S_3 $\newline
The $Q_j$'s and $P_i$'s are also different per round. Here the Resistance wins \indent mission 3.\newline \newline
\indent...\newline
\indent Round 4 is a win for team Spies\newline
\indent...\newline
Round 5:\newline
$X_5 \land ( P_1 \land P_2 \land P_3 \land P_4 \land P_5 \land P_6) \rightarrow Y_5 $ \newline
$Y_5 \land (Q_1 \land Q_2 \land Q_3 \land Q_4) \rightarrow S_5 $\newline
The final tally is then: \newline
$S_1 \land \neg S_2 \land S_3 \land \neg S_4 \land S_5 \rightarrow V $ \newline \newline \indent RESISTANCE WINS - YAHOO! \newline
Alternatively, let's assume that the Spies win in rounds 1, 2, and 4:\newline
$\neg S_1 \land \neg S_2 \land S_3 \land \neg S_4 \rightarrow \neg V $ \newline
In this case the Spies automatically win after round 4 finishes as they have \indent the required 3/5 missions. It's also possible for the Resistance to win after \indent having played only 3 rounds, etc.\newline
It's also possible to chain together a massive series of disjunctions for all the \indent winning combinations of $S_r$'s for the Resistance:
\begin{equation}
(S_1 \land S_2 \land S_3 \land \neg S_4 \land S_5) \lor (\neg S_1 \land S_2 \land S_3 \land S_4 \land S_5) \lor ... \lor (S_1 \land S_2 \land \neg S_3 \land \neg S_4 \land S_5) \rightarrow V
\end{equation} \newline
In the same way we can list all the possibilities that lead to a victory for the \indent Spies. Bada-bing!
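Rather than writing that disjunction out by hand, the winning combinations can be generated mechanically. The sketch below enumerates every assignment of the five $S_r$ variables with at least three successes (the names are illustrative, not our exact script):
\begin{verbatim}
from itertools import product

def resistance_win_cases():
    """Yield every truth assignment to (S1, ..., S5) in which the
    Resistance wins at least 3 of the 5 missions."""
    for outcome in product([True, False], repeat=5):
        if sum(outcome) >= 3:
            yield outcome

# Each tuple is one disjunct of the big formula, e.g.
# (True, False, True, False, True) stands for S1 & ~S2 & S3 & ~S4 & S5.
cases = list(resistance_win_cases())   # 16 disjuncts in total
\end{verbatim}
The Spy version is obtained symmetrically by requiring at least three failures instead.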
\section*{Jape Proofs}
\addcontentsline{toc}{section}{Jape Proofs}
Please see the attached file proofs.jp.\newline
We had trouble assigning subscripts, so the following images are for clarification purposes (variable names are different than in the Propositions section, i.e. $R_i=Q_j, S=S_r, Q=V$):\newline
\includegraphics[scale=0.7]{im1.jpg}\newline
\includegraphics[scale=0.7]{im2.jpg}\newline
\includegraphics[scale=0.7]{im3.jpg}\newline
\includegraphics[scale=0.7]{im4.jpg}\newline
\includegraphics[scale=0.7]{im5.jpg}\newline
\section*{Model Exploration}
\addcontentsline{toc}{section}{Model Exploration}
When creating the model for the game, it was initially unclear how to proceed with adding the constraints, since the core of the game does not depend only on whether a player played a Resistance token or a Spy token; factors such as the vote tracker and which rounds were won also had to be kept in mind.
In the initial stages of the encoding there were only constraints for the tokens themselves, such as mapping whether R or S is true or false. However, as mentioned above, this proved insufficient once other variables like the vote tracker (Z) were taken into account. While it was still necessary to maintain a mapping of the Resistance and Spy tokens, ultimately a better approach was to encode the game conditions themselves per round. That way the model could be assessed based on which missions were won or lost in a game, depending both on which tokens were played and on whether the mission was rejected via the vote tracker. \newline
For example: \newline
\includegraphics[scale=0.4, center]{scr3.jpg}\newline
In this case, for mission two, it is clear that since $R_1$ \& $R_2$ \& $R_3$ are all present, mission two should map to true. However, as observed from the vote tracking, the second round’s mission was a failure, as indicated by $Z_5$ in the vote tracker; therefore mission two maps to False rather than True.\newline
Additionally, a small alteration was made to how satisfiability of the model is assessed. Because of the nature of the constraints, the models would continuously evaluate to true, despite the clear hypothesis that only certain models should map to true, namely those in which the game is won, which occurs when the Resistance wins 3 rounds. Therefore a function called ResistanceWin() was created (which assesses whether the Resistance wins a game), and conjoining that information with the is\_satisfiable() function provided an alternative way to examine whether a model is satisfiable; a sketch of this idea is given below. \newline
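A stand-alone sketch of that check is given below; it assumes the per-mission outcomes have already been read off a solution, whereas the actual ResistanceWin() in our scripts works directly on the model returned by the solver:
\begin{verbatim}
def resistance_win(mission_results):
    """mission_results maps mission number (1..5) to True when that
    mission succeeded for the Resistance in a given model. The
    Resistance wins the game exactly when 3 or more missions succeed."""
    return sum(1 for won in mission_results.values() if won) >= 3

# A model only counts as a genuine win when the theory is satisfiable
# AND this predicate holds, mirroring how ResistanceWin() is conjoined
# with is_satisfiable() in the scripts.
assert resistance_win({1: True, 2: False, 3: True, 4: False, 5: True})
\end{verbatim}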
\includegraphics[scale=0.4, center]{scr4.jpg}\newline \newline \newline \newline
After the likelihood function was implemented using the dsharp solver included with the given library, repeated tests were done to ensure that the output made sense. Initially a problem occurred: the probabilities were exclusively 1.0 or 0.0 – either the Resistance was guaranteed a cakewalk or a devastating loss! This binary (pun intended) outcome was mystifying, but was eventually traced to using the wrong variable in the denominator of the fraction. After a quick fix, the probabilities were confirmed to lie in the proper range of 0 to 1.0 via several tests (shown below). Interestingly, the odds of team R winning never went above 50\% and were frequently below that, which indicates that according to our model the game is tilted in favour of the Spies. From this result it’s evident that the game is structured to keep naively honest players on their toes, although more real-life testing is required to verify this.\newline
\includegraphics[scale=0.4, center]{scr1.jpg}\newline
\includegraphics[scale=0.4, center]{scr2.jpg}\newline
Lastly, a final way the model was exercised was through a simple kind of automation. The various mission functions were coded so that (as per Prof. Muise's excellent advice), depending on which tokens were played, the computer would make the most 'optimal' decision when choosing the team to send on the mission in the following rounds; a sketch of this selection heuristic is given after the list below. This led to some interesting observations:\newline
\begin{enumerate}
\item Since Mission Two and Mission Three have the same number of tokens in their respective rounds, then provided the vote tracker for neither round reaches $Z_5$, if Mission Two is true then in all cases Mission Three is true, because the computer sends the same tokens from Mission Two on to Mission Three.
\item If Mission One and Mission Two are false, the vote tracker for neither round is equal to $Z_5$, and tokens $S_1$, $S_2$ and $S_3$ are mapped to false, then in all cases Missions Three, Four and Five are true, and as a result the Resistance wins the entire game. This is because the model has already identified the 3 spies beforehand; hence, there is a guaranteed win for the next three rounds.
\item If Mission Five exists and Mission Four evaluates to true such that the vote tracker for round 4 is not equal to $Z_5$, it can be said that in all cases Mission Five will evaluate to true and the resistance will win the game. Hence, the model will be satisfiable and evaluates to true as well. This is because missions four and five contain the same number of player slots so the computer will automatically send all player tokens from Mission Four to Mission Five to guarantee a win for Mission Five. Additionally, the existence of Mission Five being true suggests that the game has not ended yet as none of the resistance or spy players have reached 3 points. Therefore, a guaranteed win for Mission Five (or evaluation to True) suggests that the Resistance are guaranteed to win the game and the model will be satisfiable. \newline
\end{enumerate}
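The selection heuristic mentioned before the list can be summarised in a few lines. The sketch below is a deliberately simplified, hypothetical version of what the mission functions do; it assumes we can query which players have already been cleared by a successful mission:
\begin{verbatim}
def pick_team(cleared, others, slots):
    """Choose `slots` players for the next mission, preferring players
    already cleared by a previous successful mission (a crude stand-in
    for the 'optimal' choice the automated leader makes)."""
    team = list(cleared)[:slots]
    for p in others:              # top up with unproven players if needed
        if len(team) == slots:
            break
        if p not in team:
            team.append(p)
    return team

# If mission four succeeded with players {1, 3, 4, 6}, the automated
# leader simply re-sends those four players on mission five.
team5 = pick_team(cleared=[1, 3, 4, 6], others=[2, 5, 7], slots=4)
\end{verbatim}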
%Describe how you might extend your model to a predicate logic setting, including how both the propositions and constraints would be updated. There is no need to implement this extension!
\section*{First Order Extension}
\addcontentsline{toc}{section}{First Order Extension}
Given the enumerative nature of our constraints in each voting round and mission, expanding the propositions to predicates is straightforward. \newline
For the voting constraints, the long conjunctions involving the $P_i$ are replaced with \indent$\forall x:P(x)$ for approval, while a vote against is represented by $\exists x: \neg P(x)$. \newline \newline
\indent e.g. in round 1 ($i=1$):\newline
\indent$\exists i:X(i) \land \forall x:P(x) \rightarrow \exists i:Y(i)$\newline
\indent$\exists i:X(i) \land \exists x:\neg P(x) \rightarrow \exists i:\neg Y(i)$\newline
The 5 rejections in a row victory condition for the Spies directly follows:\newline
\indent$\forall y:Z(y) \rightarrow \forall k:\neg V(k)$\newline
After mission approval, a single $\exists z: \neg Q(z)$ representing a played fail token \indent causes the current mission to fail:\newline
\indent$\exists i:Y(i)\land \exists z:\neg Q(z) \rightarrow \exists i:\neg S(i)$\newline
The final tally for the Resistance or Spies win condition is easily represented \indent with existential quantifiers ranging over three distinct successful (or failed) rounds:\newline
\indent$\exists a: \exists b: \exists c: (a \neq b \land b \neq c \land a \neq c) \land S(a) \land S(b) \land S(c) \rightarrow \forall k:V(k)$\newline
\indent$\exists d: \exists e: \exists f: (d \neq e \land e \neq f \land d \neq f) \land \neg S(d) \land \neg S(e) \land \neg S(f) \rightarrow \forall k:\neg V(k)$\newline
This radically simplifies things, as now it's no longer necessary to count every single different case where the Resistance wins 3 or more rounds to obtain an overall victory. Additional flexibility is gained for the possibility of increasing the number of rounds to an arbitrary number and subsequently changing the proportion of successes required for a given team to win the game.
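For instance, with the number of rounds and the required number of successes left as parameters $n$ and $m$, one possible way to write the generalised Resistance win condition is with $m$ pairwise-distinct witnesses (this is only a sketch of the extension, not something we have implemented):
\begin{equation}
\exists r_1 \ldots \exists r_m: \Big(\bigwedge_{1 \le i < j \le m} r_i \neq r_j\Big) \land \bigwedge_{i=1}^{m} S(r_i) \rightarrow \forall k: V(k)
\end{equation}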
\qed
Thank you for taking the time to read this lengthy report! All the best!
%\[ \wedge \hspace{4mm} \vee \hspace{4mm} \neg \hspace{4mm} \rightarrow \hspace{4mm} \forall \hspace{4mm} \exists \]
\end{document}
|
classdef MimRemapGrey < MimGuiPlugin
% MimRemapGrey. Gui Plugin for setting paint colour
%
% You should not use this class within your own code. It is intended to
% be used by the gui of the TD MIM Toolkit.
%
% MimRemapGrey is a Gui Plugin for the MIM Toolkit.
%
%
% Licence
% -------
% Part of the TD MIM Toolkit. https://github.com/tomdoel
% Author: Tom Doel, Copyright Tom Doel 2014. www.tomdoel.com
% Distributed under the MIT licence. Please see website for details.
%
properties
ButtonText = 'Multiple'
SelectedText = 'Multiple'
ToolTip = 'Marks airway as supplying multiple lobes'
Category = 'Airway label'
Visibility = 'Dataset'
Mode = 'Edit'
HidePluginInDisplay = false
PTKVersion = '1'
ButtonWidth = 5
ButtonHeight = 1
Icon = 'paint.png'
IconColour = GemMarkerPoint.DefaultColours{7}
Location = 37
end
methods (Static)
function RunGuiPlugin(gui_app)
gui_app.ImagePanel.PaintBrushColour = 7;
end
function enabled = IsEnabled(gui_app)
enabled = gui_app.IsDatasetLoaded && gui_app.ImagePanel.OverlayImage.ImageExists && ...
isequal(gui_app.ImagePanel.SelectedControl, 'Map');
end
function is_selected = IsSelected(gui_app)
is_selected = gui_app.ImagePanel.PaintBrushColour == 7;
end
end
end |
module nested_ifdef
#ifdef OUTER
use mod_1
#ifdef INNER
use mod_2
#endif
#endif
contains
subroutine sub1
print *, "in sub1"
end subroutine sub1
end module nested_ifdef
|
module Ch04.NaturalInduction
-- TODO: Figure out how to implement the function below (if it's at all possible).
--natural_induction_principle : {a,b : Type} ->
-- (size : a -> Nat) ->
-- (f : (x : a) -> Either b (x' : a ** LT (size x') (size x))) ->
-- (g : (a -> b) ** (x : a) -> (case f x of
-- Left y => (g x = y)
-- Right (y ** _) => (g x = g y)))
lemma : {x : Nat} ->
LTE x 0 ->
(x = 0)
lemma {x = Z} pf = Refl
lemma {x = (S k)} pf = absurd (succNotLTEzero pf)
public export
inductive_construction : {a,b : Type} ->
(size : a -> Nat) ->
(f : (x : a) -> Either b (x' : a ** LT (size x') (size x))) ->
a -> b
inductive_construction {a} {b} size f x = helper x (size x) lteRefl where
helper : (x : a) -> (k : Nat) -> LTE (size x) k -> b
helper x k bound = case f x of
Left y => y
Right (x' ** pf) => case k of
Z => let temp = replace (lemma bound) pf in
absurd (succNotLTEzero temp)
(S j) => helper x' j (fromLteSucc (lteTransitive pf bound))
|
Formal statement is: lemma Heine_Borel_lemma: assumes "compact S" and Ssub: "S \<subseteq> \<Union>\<G>" and opn: "\<And>G. G \<in> \<G> \<Longrightarrow> open G" obtains e where "0 < e" "\<And>x. x \<in> S \<Longrightarrow> \<exists>G \<in> \<G>. ball x e \<subseteq> G" Informal statement is: Suppose $S$ is a compact set and $\mathcal{G}$ is a collection of open sets such that $S \subseteq \bigcup \mathcal{G}$. Then there exists an $\epsilon > 0$ such that for every $x \in S$, there exists a set $G \in \mathcal{G}$ such that $B(x, \epsilon) \subseteq G$. |
[GOAL]
E : Type u_1
inst✝⁶ : NormedAddCommGroup E
inst✝⁵ : NormedSpace ℝ E
inst✝⁴ : Nontrivial E
inst✝³ : FiniteDimensional ℝ E
inst✝² : MeasurableSpace E
inst✝¹ : BorelSpace E
μ : Measure E
inst✝ : IsAddHaarMeasure μ
⊢ NoAtoms μ
[PROOFSTEP]
infer_instance
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
f : E → F
R : ℝ
⊢ ∫ (x : E), f (R • x) ∂μ = |(R ^ finrank ℝ E)⁻¹| • ∫ (x : E), f x ∂μ
[PROOFSTEP]
rcases eq_or_ne R 0 with (rfl | hR)
[GOAL]
case inl
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
f : E → F
⊢ ∫ (x : E), f (0 • x) ∂μ = |(0 ^ finrank ℝ E)⁻¹| • ∫ (x : E), f x ∂μ
[PROOFSTEP]
simp only [zero_smul, integral_const]
[GOAL]
case inl
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
f : E → F
⊢ ENNReal.toReal (↑↑μ univ) • f 0 = |(0 ^ finrank ℝ E)⁻¹| • ∫ (x : E), f x ∂μ
[PROOFSTEP]
rcases Nat.eq_zero_or_pos (finrank ℝ E) with (hE | hE)
[GOAL]
case inl.inl
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
f : E → F
hE : finrank ℝ E = 0
⊢ ENNReal.toReal (↑↑μ univ) • f 0 = |(0 ^ finrank ℝ E)⁻¹| • ∫ (x : E), f x ∂μ
[PROOFSTEP]
have : Subsingleton E := finrank_zero_iff.1 hE
[GOAL]
case inl.inl
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
f : E → F
hE : finrank ℝ E = 0
this : Subsingleton E
⊢ ENNReal.toReal (↑↑μ univ) • f 0 = |(0 ^ finrank ℝ E)⁻¹| • ∫ (x : E), f x ∂μ
[PROOFSTEP]
have : f = fun _ => f 0 := by ext x; rw [Subsingleton.elim x 0]
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
f : E → F
hE : finrank ℝ E = 0
this : Subsingleton E
⊢ f = fun x => f 0
[PROOFSTEP]
ext x
[GOAL]
case h
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
f : E → F
hE : finrank ℝ E = 0
this : Subsingleton E
x : E
⊢ f x = f 0
[PROOFSTEP]
rw [Subsingleton.elim x 0]
[GOAL]
case inl.inl
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
f : E → F
hE : finrank ℝ E = 0
this✝ : Subsingleton E
this : f = fun x => f 0
⊢ ENNReal.toReal (↑↑μ univ) • f 0 = |(0 ^ finrank ℝ E)⁻¹| • ∫ (x : E), f x ∂μ
[PROOFSTEP]
conv_rhs => rw [this]
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
f : E → F
hE : finrank ℝ E = 0
this✝ : Subsingleton E
this : f = fun x => f 0
| |(0 ^ finrank ℝ E)⁻¹| • ∫ (x : E), f x ∂μ
[PROOFSTEP]
rw [this]
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
f : E → F
hE : finrank ℝ E = 0
this✝ : Subsingleton E
this : f = fun x => f 0
| |(0 ^ finrank ℝ E)⁻¹| • ∫ (x : E), f x ∂μ
[PROOFSTEP]
rw [this]
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
f : E → F
hE : finrank ℝ E = 0
this✝ : Subsingleton E
this : f = fun x => f 0
| |(0 ^ finrank ℝ E)⁻¹| • ∫ (x : E), f x ∂μ
[PROOFSTEP]
rw [this]
[GOAL]
case inl.inl
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
f : E → F
hE : finrank ℝ E = 0
this✝ : Subsingleton E
this : f = fun x => f 0
⊢ ENNReal.toReal (↑↑μ univ) • f 0 = |(0 ^ finrank ℝ E)⁻¹| • ∫ (x : E), (fun x => f 0) x ∂μ
[PROOFSTEP]
simp only [hE, pow_zero, inv_one, abs_one, one_smul, integral_const]
[GOAL]
case inl.inr
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
f : E → F
hE : finrank ℝ E > 0
⊢ ENNReal.toReal (↑↑μ univ) • f 0 = |(0 ^ finrank ℝ E)⁻¹| • ∫ (x : E), f x ∂μ
[PROOFSTEP]
have : Nontrivial E := finrank_pos_iff.1 hE
[GOAL]
case inl.inr
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
f : E → F
hE : finrank ℝ E > 0
this : Nontrivial E
⊢ ENNReal.toReal (↑↑μ univ) • f 0 = |(0 ^ finrank ℝ E)⁻¹| • ∫ (x : E), f x ∂μ
[PROOFSTEP]
simp only [zero_pow hE, measure_univ_of_isAddLeftInvariant, ENNReal.top_toReal, zero_smul, inv_zero, abs_zero]
[GOAL]
case inr
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
f : E → F
R : ℝ
hR : R ≠ 0
⊢ ∫ (x : E), f (R • x) ∂μ = |(R ^ finrank ℝ E)⁻¹| • ∫ (x : E), f x ∂μ
[PROOFSTEP]
calc
(∫ x, f (R • x) ∂μ) = ∫ y, f y ∂Measure.map (fun x => R • x) μ :=
(integral_map_equiv (Homeomorph.smul (isUnit_iff_ne_zero.2 hR).unit).toMeasurableEquiv f).symm
_ = |(R ^ finrank ℝ E)⁻¹| • ∫ x, f x ∂μ := by
simp only [map_addHaar_smul μ hR, integral_smul_measure, ENNReal.toReal_ofReal, abs_nonneg]
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
f : E → F
R : ℝ
hR : R ≠ 0
⊢ ∫ (y : E), f y ∂map (fun x => R • x) μ = |(R ^ finrank ℝ E)⁻¹| • ∫ (x : E), f x ∂μ
[PROOFSTEP]
simp only [map_addHaar_smul μ hR, integral_smul_measure, ENNReal.toReal_ofReal, abs_nonneg]
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
f : E → F
R : ℝ
hR : 0 ≤ R
⊢ ∫ (x : E), f (R • x) ∂μ = (R ^ finrank ℝ E)⁻¹ • ∫ (x : E), f x ∂μ
[PROOFSTEP]
rw [integral_comp_smul μ f R, abs_of_nonneg (inv_nonneg.2 (pow_nonneg hR _))]
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
f : E → F
R : ℝ
⊢ ∫ (x : E), f (R⁻¹ • x) ∂μ = |R ^ finrank ℝ E| • ∫ (x : E), f x ∂μ
[PROOFSTEP]
rw [integral_comp_smul μ f R⁻¹, inv_pow, inv_inv]
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
f : E → F
R : ℝ
hR : 0 ≤ R
⊢ ∫ (x : E), f (R⁻¹ • x) ∂μ = R ^ finrank ℝ E • ∫ (x : E), f x ∂μ
[PROOFSTEP]
rw [integral_comp_inv_smul μ f R, abs_of_nonneg (pow_nonneg hR _)]
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
g : ℝ → F
a : ℝ
⊢ ∫ (x : ℝ), g (a * x) = |a⁻¹| • ∫ (y : ℝ), g y
[PROOFSTEP]
simp_rw [← smul_eq_mul, Measure.integral_comp_smul, FiniteDimensional.finrank_self, pow_one]
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
g : ℝ → F
a : ℝ
⊢ ∫ (x : ℝ), g (a⁻¹ * x) = |a| • ∫ (y : ℝ), g y
[PROOFSTEP]
simp_rw [← smul_eq_mul, Measure.integral_comp_inv_smul, FiniteDimensional.finrank_self, pow_one]
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
g : ℝ → F
a : ℝ
⊢ ∫ (x : ℝ), g (x * a) = |a⁻¹| • ∫ (y : ℝ), g y
[PROOFSTEP]
simpa only [mul_comm] using integral_comp_mul_left g a
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : MeasurableSpace E
inst✝⁵ : BorelSpace E
inst✝⁴ : FiniteDimensional ℝ E
μ : Measure E
inst✝³ : IsAddHaarMeasure μ
F : Type u_2
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
s : Set E
g : ℝ → F
a : ℝ
⊢ ∫ (x : ℝ), g (x * a⁻¹) = |a| • ∫ (y : ℝ), g y
[PROOFSTEP]
simpa only [mul_comm] using integral_comp_inv_mul_left g a
[GOAL]
F : Type u_1
inst✝⁶ : NormedAddCommGroup F
E : Type u_2
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedSpace ℝ E
inst✝³ : MeasurableSpace E
inst✝² : BorelSpace E
inst✝¹ : FiniteDimensional ℝ E
μ : Measure E
inst✝ : IsAddHaarMeasure μ
f : E → F
R : ℝ
hR : R ≠ 0
⊢ (Integrable fun x => f (R • x)) ↔ Integrable f
[PROOFSTEP]
suffices ∀ {g : E → F} (hg : Integrable g μ) {S : ℝ} (hS : S ≠ 0), Integrable (fun x => g (S • x)) μ
by
refine' ⟨fun hf => _, fun hf => this hf hR⟩
convert this hf (inv_ne_zero hR)
rw [← mul_smul, mul_inv_cancel hR, one_smul]
-- now prove
[GOAL]
F : Type u_1
inst✝⁶ : NormedAddCommGroup F
E : Type u_2
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedSpace ℝ E
inst✝³ : MeasurableSpace E
inst✝² : BorelSpace E
inst✝¹ : FiniteDimensional ℝ E
μ : Measure E
inst✝ : IsAddHaarMeasure μ
f : E → F
R : ℝ
hR : R ≠ 0
this : ∀ {g : E → F}, Integrable g → ∀ {S : ℝ}, S ≠ 0 → Integrable fun x => g (S • x)
⊢ (Integrable fun x => f (R • x)) ↔ Integrable f
[PROOFSTEP]
refine' ⟨fun hf => _, fun hf => this hf hR⟩
[GOAL]
F : Type u_1
inst✝⁶ : NormedAddCommGroup F
E : Type u_2
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedSpace ℝ E
inst✝³ : MeasurableSpace E
inst✝² : BorelSpace E
inst✝¹ : FiniteDimensional ℝ E
μ : Measure E
inst✝ : IsAddHaarMeasure μ
f : E → F
R : ℝ
hR : R ≠ 0
this : ∀ {g : E → F}, Integrable g → ∀ {S : ℝ}, S ≠ 0 → Integrable fun x => g (S • x)
hf : Integrable fun x => f (R • x)
⊢ Integrable f
[PROOFSTEP]
convert this hf (inv_ne_zero hR)
[GOAL]
case h.e'_5.h.h.e'_1
F : Type u_1
inst✝⁶ : NormedAddCommGroup F
E : Type u_2
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedSpace ℝ E
inst✝³ : MeasurableSpace E
inst✝² : BorelSpace E
inst✝¹ : FiniteDimensional ℝ E
μ : Measure E
inst✝ : IsAddHaarMeasure μ
f : E → F
R : ℝ
hR : R ≠ 0
this : ∀ {g : E → F}, Integrable g → ∀ {S : ℝ}, S ≠ 0 → Integrable fun x => g (S • x)
hf : Integrable fun x => f (R • x)
x✝ : E
⊢ x✝ = R • R⁻¹ • x✝
[PROOFSTEP]
rw [← mul_smul, mul_inv_cancel hR, one_smul]
-- now prove
[GOAL]
F : Type u_1
inst✝⁶ : NormedAddCommGroup F
E : Type u_2
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedSpace ℝ E
inst✝³ : MeasurableSpace E
inst✝² : BorelSpace E
inst✝¹ : FiniteDimensional ℝ E
μ : Measure E
inst✝ : IsAddHaarMeasure μ
f : E → F
R : ℝ
hR : R ≠ 0
⊢ ∀ {g : E → F}, Integrable g → ∀ {S : ℝ}, S ≠ 0 → Integrable fun x => g (S • x)
[PROOFSTEP]
intro g hg S hS
[GOAL]
F : Type u_1
inst✝⁶ : NormedAddCommGroup F
E : Type u_2
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedSpace ℝ E
inst✝³ : MeasurableSpace E
inst✝² : BorelSpace E
inst✝¹ : FiniteDimensional ℝ E
μ : Measure E
inst✝ : IsAddHaarMeasure μ
f : E → F
R : ℝ
hR : R ≠ 0
g : E → F
hg : Integrable g
S : ℝ
hS : S ≠ 0
⊢ Integrable fun x => g (S • x)
[PROOFSTEP]
let t := ((Homeomorph.smul (isUnit_iff_ne_zero.2 hS).unit).toMeasurableEquiv : E ≃ᵐ E)
[GOAL]
F : Type u_1
inst✝⁶ : NormedAddCommGroup F
E : Type u_2
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedSpace ℝ E
inst✝³ : MeasurableSpace E
inst✝² : BorelSpace E
inst✝¹ : FiniteDimensional ℝ E
μ : Measure E
inst✝ : IsAddHaarMeasure μ
f : E → F
R : ℝ
hR : R ≠ 0
g : E → F
hg : Integrable g
S : ℝ
hS : S ≠ 0
t : E ≃ᵐ E := Homeomorph.toMeasurableEquiv (Homeomorph.smul (IsUnit.unit (_ : IsUnit S)))
⊢ Integrable fun x => g (S • x)
[PROOFSTEP]
refine' (integrable_map_equiv t g).mp (_ : Integrable g (map (S • ·) μ))
[GOAL]
F : Type u_1
inst✝⁶ : NormedAddCommGroup F
E : Type u_2
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedSpace ℝ E
inst✝³ : MeasurableSpace E
inst✝² : BorelSpace E
inst✝¹ : FiniteDimensional ℝ E
μ : Measure E
inst✝ : IsAddHaarMeasure μ
f : E → F
R : ℝ
hR : R ≠ 0
g : E → F
hg : Integrable g
S : ℝ
hS : S ≠ 0
t : E ≃ᵐ E := Homeomorph.toMeasurableEquiv (Homeomorph.smul (IsUnit.unit (_ : IsUnit S)))
⊢ Integrable g
[PROOFSTEP]
rwa [map_addHaar_smul μ hS, integrable_smul_measure _ ENNReal.ofReal_ne_top]
[GOAL]
F : Type u_1
inst✝⁶ : NormedAddCommGroup F
E : Type u_2
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedSpace ℝ E
inst✝³ : MeasurableSpace E
inst✝² : BorelSpace E
inst✝¹ : FiniteDimensional ℝ E
μ : Measure E
inst✝ : IsAddHaarMeasure μ
f : E → F
R : ℝ
hR : R ≠ 0
g : E → F
hg : Integrable g
S : ℝ
hS : S ≠ 0
t : E ≃ᵐ E := Homeomorph.toMeasurableEquiv (Homeomorph.smul (IsUnit.unit (_ : IsUnit S)))
⊢ ENNReal.ofReal |(S ^ finrank ℝ E)⁻¹| ≠ 0
[PROOFSTEP]
simpa only [Ne.def, ENNReal.ofReal_eq_zero, not_le, abs_pos] using inv_ne_zero (pow_ne_zero _ hS)
[GOAL]
F : Type u_1
inst✝ : NormedAddCommGroup F
g : ℝ → F
R : ℝ
hR : R ≠ 0
⊢ (Integrable fun x => g (R * x)) ↔ Integrable g
[PROOFSTEP]
simpa only [smul_eq_mul] using integrable_comp_smul_iff volume g hR
[GOAL]
F : Type u_1
inst✝ : NormedAddCommGroup F
g : ℝ → F
R : ℝ
hR : R ≠ 0
⊢ (Integrable fun x => g (x * R)) ↔ Integrable g
[PROOFSTEP]
simpa only [mul_comm] using integrable_comp_mul_left_iff g hR
|
Atari Games Corp. v. Nintendo of Am., Inc.
Nintendo designed a program, the 10NES, for its Nintendo Entertainment System (NES) to prevent the NES from accepting unauthorized game cartridges. The 10NES was programmed onto chips located in the NES console and in each game cartridge. Thus, only 10NES-enabled cartridges could “unlock” access to the NES console.
After a failed attempt to replicate the 10NES program, Atari, in December 1987, licensed the technology from Nintendo under strict licensing terms. Nintendo itself placed Atari’s games in 10NES-enabled cartridges and limited Atari, as well as other licensees, to five new NES games per year.
In early 1988, Atari applied to the Copyright Office for a reproduction of the 10NES source code, which the Copyright Office provided to Atari based on a false allegation by Atari that Atari needed the copy for pending litigation. There in fact was no pending litigation.
Based on the acquired source code, Atari developed its own program, the Rabbit program, which generated signals indistinguishable from the 10NES program and gave Atari access to the NES without Nintendo’s license conditions. The line-by-line instructions of the programs vary because Atari used a different microprocessor and programming language.
Nintendo brought suit against Atari alleging copyright infringement (and patent infringement, among other allegations). Atari brought several claims against Nintendo and asserted a copyright misuse defense. The United States District Court for the Northern District of California granted Nintendo’s request for a preliminary injunction based on copyright infringement. Atari appealed.
The Federal Circuit observed that while it had jurisdiction based on patent infringement claims included in the action, the court applies the law of the regional circuits, the Ninth Circuit in this case, to resolve issues of copyright law.
The Federal Circuit stated that “[t]o prevail on its copyright infringement claim, Nintendo must show ownership of the 10NES program copyright and copying by Atari of protectable expression from the 10NES program.” Ownership of the 10NES program was not in dispute, and thus, Nintendo had to prove only the copying of protectable expression.
Next, the court held that “Nintendo is likely to show successfully that Atari infringed the 10NES copyright by obtaining and copying the source code from the Copyright Office.” Atari obtained a copy from the Copyright Office by providing false information about a pending litigation, therefore obtaining an unauthorized reproduction. Thus, Atari infringed Nintendo’s copyright based on the reproduction of an unauthorized copy from the Copyright Office.
The court then also held that “Nintendo is likely to prove substantial similarity between the Rabbit and 10NES programs sufficient to support its infringement claims.” The court primarily relied on the fact that Rabbit incorporated unnecessary features, including features that were deleted from the original 10NES program. Thus, the court concluded that “copying of fully extraneous instructions unnecessary to the 10NES program’s functionality” suggests copying, not independent creation.
Atari argued as a defense to copyright infringement that Nintendo misused its copyright. Because there was no statutory basis for the copyright misuse defense, the court determined that the defense was solely an equitable doctrine. Therefore, while copyright misuse may be a viable defense, any party seeking the defense must have clean hands. Atari was ineligible to invoke the defense because of its lie to the Copyright Office to obtain a copy of the 10NES program.
The court therefore held that the district court did not err by granting the preliminary injunction against Atari. |
[STATEMENT]
lemma RF_f: "x \<notin> RF\<^sup>\<circ> (TER f)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<notin> RF\<^sup>\<circ> (TER f)
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<in> RF\<^sup>\<circ> (TER f) \<Longrightarrow> False
[PROOF STEP]
assume *: "x \<in> RF\<^sup>\<circ> (TER f)"
[PROOF STATE]
proof (state)
this:
x \<in> RF\<^sup>\<circ> (TER f)
goal (1 subgoal):
1. x \<in> RF\<^sup>\<circ> (TER f) \<Longrightarrow> False
[PROOF STEP]
from x
[PROOF STATE]
proof (chain)
picking this:
x \<in> \<E> (TER f \<union> TER g)
[PROOF STEP]
obtain p y where p: "path \<Gamma> x p y" and y: "y \<in> B \<Gamma>"
and bypass: "\<And>z. \<lbrakk>x \<noteq> y; z \<in> set p\<rbrakk> \<Longrightarrow> z = x \<or> z \<notin> TER f \<union> TER g"
[PROOF STATE]
proof (prove)
using this:
x \<in> \<E> (TER f \<union> TER g)
goal (1 subgoal):
1. (\<And>p y. \<lbrakk>path \<Gamma> x p y; y \<in> B \<Gamma>; \<And>z. \<lbrakk>x \<noteq> y; z \<in> set p\<rbrakk> \<Longrightarrow> z = x \<or> z \<notin> TER f \<union> TER g\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(rule \<E>_E) blast
[PROOF STATE]
proof (state)
this:
path \<Gamma> x p y
y \<in> B \<Gamma>
\<lbrakk>x \<noteq> y; ?z \<in> set p\<rbrakk> \<Longrightarrow> ?z = x \<or> ?z \<notin> TER f \<union> TER g
goal (1 subgoal):
1. x \<in> RF\<^sup>\<circ> (TER f) \<Longrightarrow> False
[PROOF STEP]
from rtrancl_path_distinct[OF p]
[PROOF STATE]
proof (chain)
picking this:
(\<And>xs'. \<lbrakk>path \<Gamma> x xs' y; distinct (x # xs'); set xs' \<subseteq> set p\<rbrakk> \<Longrightarrow> ?thesis) \<Longrightarrow> ?thesis
[PROOF STEP]
obtain p'
where p: "path \<Gamma> x p' y" and p': "set p' \<subseteq> set p" and distinct: "distinct (x # p')"
[PROOF STATE]
proof (prove)
using this:
(\<And>xs'. \<lbrakk>path \<Gamma> x xs' y; distinct (x # xs'); set xs' \<subseteq> set p\<rbrakk> \<Longrightarrow> ?thesis) \<Longrightarrow> ?thesis
goal (1 subgoal):
1. (\<And>p'. \<lbrakk>path \<Gamma> x p' y; set p' \<subseteq> set p; distinct (x # p')\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
path \<Gamma> x p' y
set p' \<subseteq> set p
distinct (x # p')
goal (1 subgoal):
1. x \<in> RF\<^sup>\<circ> (TER f) \<Longrightarrow> False
[PROOF STEP]
from *
[PROOF STATE]
proof (chain)
picking this:
x \<in> RF\<^sup>\<circ> (TER f)
[PROOF STEP]
have x': "x \<in> RF (TER f)" and \<E>: "x \<notin> \<E> (TER f)"
[PROOF STATE]
proof (prove)
using this:
x \<in> RF\<^sup>\<circ> (TER f)
goal (1 subgoal):
1. x \<in> RF (TER f) &&& x \<notin> \<E> (TER f)
[PROOF STEP]
by(auto simp add: roofed_circ_def)
[PROOF STATE]
proof (state)
this:
x \<in> RF (TER f)
x \<notin> \<E> (TER f)
goal (1 subgoal):
1. x \<in> RF\<^sup>\<circ> (TER f) \<Longrightarrow> False
[PROOF STEP]
hence "x \<notin> TER f"
[PROOF STATE]
proof (prove)
using this:
x \<in> RF (TER f)
x \<notin> \<E> (TER f)
goal (1 subgoal):
1. x \<notin> TER f
[PROOF STEP]
using not_essentialD[OF _ p y] p' bypass
[PROOF STATE]
proof (prove)
using this:
x \<in> RF (TER f)
x \<notin> \<E> (TER f)
\<not> essential \<Gamma> (B \<Gamma>) ?S x \<Longrightarrow> x \<noteq> y \<and> (\<exists>z\<in>set p'. z \<noteq> x \<and> z \<in> ?S)
set p' \<subseteq> set p
\<lbrakk>x \<noteq> y; ?z \<in> set p\<rbrakk> \<Longrightarrow> ?z = x \<or> ?z \<notin> TER f \<union> TER g
goal (1 subgoal):
1. x \<notin> TER f
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x \<notin> TER f
goal (1 subgoal):
1. x \<in> RF\<^sup>\<circ> (TER f) \<Longrightarrow> False
[PROOF STEP]
with roofedD[OF x' p y]
[PROOF STATE]
proof (chain)
picking this:
(\<exists>z\<in>set p'. z \<in> TER f) \<or> x \<in> TER f
x \<notin> TER f
[PROOF STEP]
obtain z where z: "z \<in> set p'" "z \<in> TER f"
[PROOF STATE]
proof (prove)
using this:
(\<exists>z\<in>set p'. z \<in> TER f) \<or> x \<in> TER f
x \<notin> TER f
goal (1 subgoal):
1. (\<And>z. \<lbrakk>z \<in> set p'; z \<in> TER f\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
z \<in> set p'
z \<in> TER f
goal (1 subgoal):
1. x \<in> RF\<^sup>\<circ> (TER f) \<Longrightarrow> False
[PROOF STEP]
with p
[PROOF STATE]
proof (chain)
picking this:
path \<Gamma> x p' y
z \<in> set p'
z \<in> TER f
[PROOF STEP]
have "y \<in> set p'"
[PROOF STATE]
proof (prove)
using this:
path \<Gamma> x p' y
z \<in> set p'
z \<in> TER f
goal (1 subgoal):
1. y \<in> set p'
[PROOF STEP]
by(auto dest!: rtrancl_path_last intro: last_in_set)
[PROOF STATE]
proof (state)
this:
y \<in> set p'
goal (1 subgoal):
1. x \<in> RF\<^sup>\<circ> (TER f) \<Longrightarrow> False
[PROOF STEP]
with distinct
[PROOF STATE]
proof (chain)
picking this:
distinct (x # p')
y \<in> set p'
[PROOF STEP]
have "x \<noteq> y"
[PROOF STATE]
proof (prove)
using this:
distinct (x # p')
y \<in> set p'
goal (1 subgoal):
1. x \<noteq> y
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<noteq> y
goal (1 subgoal):
1. x \<in> RF\<^sup>\<circ> (TER f) \<Longrightarrow> False
[PROOF STEP]
with bypass z p' distinct
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>x \<noteq> y; ?z \<in> set p\<rbrakk> \<Longrightarrow> ?z = x \<or> ?z \<notin> TER f \<union> TER g
z \<in> set p'
z \<in> TER f
set p' \<subseteq> set p
distinct (x # p')
x \<noteq> y
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>x \<noteq> y; ?z \<in> set p\<rbrakk> \<Longrightarrow> ?z = x \<or> ?z \<notin> TER f \<union> TER g
z \<in> set p'
z \<in> TER f
set p' \<subseteq> set p
distinct (x # p')
x \<noteq> y
goal (1 subgoal):
1. False
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed |
module Finmath.Finite
--------------------------------------------------------------------------------
-- Cartesian sums and products of finite sets
--------------------------------------------------------------------------------
||| Map an element of either of two sets, into their Cartesian sum
fSetSum : Either (Fin n) (Fin m) -> Fin (n + m)
fSetSum (Left fZ) = fZ
fSetSum (Left (fS k)) = fS (fSetSum (Left k))
fSetSum {n=Z} (Right right) = right
fSetSum {n=(S k)} (Right right) = fS (fSetSum {n=k} (Right right))
||| Map a pair of elements from each of two sets, into their Cartesian product
fSetProduct : (Fin n, Fin m) -> Fin (n * m)
fSetProduct (fZ, right) = fSetSum (Left right)
fSetProduct {n=(S k)} ((fS left), right) = fSetSum (Right (fSetProduct (left, right)))
||| The inverse map of fSetSum
fSetSumInv : Fin (n + m) -> Either (Fin n) (Fin m)
fSetSumInv {n=Z} x = Right x
fSetSumInv {n=(S k)} fZ = Left fZ
fSetSumInv {n=(S k)} (fS c) with (fSetSumInv {n=k} c)
| Left a = Left (fS a)
| Right b = Right b
--------------------------------------------------------------------------------
-- Misc
--------------------------------------------------------------------------------
||| Functions respect equality
fEq : (f : a -> b) -> (left : a) -> (right : a) -> (p : left = right) -> f left = f right
fEq f left _ refl = refl
--------------------------------------------------------------------------------
-- Proofs about inequality
--------------------------------------------------------------------------------
||| Proof that LTE respects adding a constant to both sides
ltePlus : LTE m l -> (n : Nat) -> LTE (n + m) (n + l)
ltePlus p Z = p
ltePlus p (S k) = lteSucc (ltePlus p k)
||| Proof that n <= n
lteN : (n : Nat) -> LTE n n
lteN n = (rewrite (fEq (\k => LTE n k) _ _ (plusCommutative 0 n)) in
(rewrite (fEq (\k => LTE k (n + 0)) _ _ (plusCommutative 0 n)) in
(ltePlus (lteZero {right=Z}) n)))
||| Proof that if n <= m and m <= l then n <= l
lteTrans : LTE n m -> LTE m l -> LTE n l
lteTrans lteZero _ = lteZero
lteTrans (lteSucc w) (lteSucc w') = lteSucc (lteTrans w w')
||| Dichotomy for lte
lteDichotomy : LTE n m -> Either (n = m) (LT n m)
lteDichotomy (lteZero {right=Z}) = Left refl
lteDichotomy (lteZero {right=(S k)}) = Right (lteSucc lteZero)
lteDichotomy (lteSucc p) = f (lteDichotomy p)
where f : Either (l = k) (LT l k) -> Either ((S l) = (S k)) (LT (S l) (S k))
f (Left p) = Left (eqSucc _ _ p)
f (Right p) = Right (lteSucc p)
||| Dichotomy
dichotomy : (n : Nat) -> (m : Nat) -> Either (LTE n m) (LT m n)
dichotomy Z _ = Left lteZero
dichotomy (S k) Z = Right (lteSucc lteZero)
dichotomy (S l) (S k) = f (dichotomy l k)
where f : Either (LTE l k) (LT k l) -> Either (LTE (S l) (S k)) (LT (S k) (S l))
f (Left p) = Left (lteSucc p)
f (Right p) = Right (lteSucc p)
||| Trichotomy
trichotomy : (n : Nat) -> (m : Nat) -> Either (Either (n = m) (LT n m)) (LT m n)
trichotomy n m with (dichotomy n m)
| Left a = Left (lteDichotomy a)
| Right b = Right b
|
using Asteroids: main
main(keepalive=!Base.isinteractive())
|
Require Import ucos_include.
Require Import os_ucos_h.
Require Import sep_lemmas_ext.
Require Import linv_solver.
Local Open Scope code_scope.
Local Open Scope Z_scope.
Local Open Scope int_scope.
Lemma absimp_taskcre_prio_invalid:
forall P v1 v2 v3 sch,
can_change_aop P ->
Int.ltu (Int.repr OS_LOWEST_PRIO) v3 = true ->
absinfer sch ( <|| taskcrecode (v1 :: v2 :: (Vint32 v3) :: nil) ||> ** P)
( <|| END (Some (Vint32 (Int.repr PRIO_ERR))) ||> ** P).
Proof.
infer_solver 0%nat.
Qed.
Lemma absimp_taskcre_prio_already_exists:
forall P v1 v2 v3 sch mqls tls t ct,
can_change_aop P ->
absinfer sch ( <|| taskcrecode (v1 :: v2 :: (Vint32 v3) :: nil) ||> **
HECBList mqls ** HTCBList tls ** HTime t ** HCurTCB ct ** P)
( <|| END (Some (Vint32 (Int.repr OS_PRIO_EXIST))) ||> **
HECBList mqls ** HTCBList tls ** HTime t ** HCurTCB ct ** P) .
Proof.
infer_solver 1%nat.
Qed.
Lemma absimp_taskcre_no_more_tcb:
forall P v1 v2 v3 sch,
can_change_aop P ->
absinfer sch ( <|| taskcrecode (v1 :: v2 :: (Vint32 v3) :: nil) ||> ** P)
( <|| END (Some (Vint32 (Int.repr OS_NO_MORE_TCB))) ||> ** P).
Proof.
infer_solver 2%nat.
Qed.
Lemma absimp_taskcre_succ:
forall P v1 v2 v3 sch t tls mqls ct ,
can_change_aop P ->
(* Int.lt ($ 63) v3 = false ->
* (* OSAbstMod.get O abtcblsid = Some (abstcblist tls) -> *)
* ~ (exists t' st msg, TcbMod.get tls t' = Some (v3, st, msg)) ->
* (exists t', TcbMod.join tls (TcbMod.sig t' (v3, rdy, Vnull)) tls' )-> *)
absinfer sch ( <|| taskcrecode (v1 :: v2 :: (Vint32 v3) :: nil) ||> **
HECBList mqls ** HTCBList tls ** HTime t ** HCurTCB ct ** P)
( <|| scrt (v1 :: v2 :: (Vint32 v3) :: nil);;(* taskcre_succ (|(v1 :: v2 :: (Vint32 v3) :: nil)|) ;; *)isched ;; END (Some (Vint32 (Int.repr NO_ERR))) ||> ** HECBList mqls ** HTCBList tls ** HTime t ** HCurTCB ct ** P).
Proof.
intros.
unfold taskcrecode.
infer_branch 3%nat.
eapply absinfer_eq.
Qed.
Lemma retpost_tcbinitpost:
retpost OS_TCBInitPost.
Proof.
unfolds.
intros.
unfold getasrt in H.
unfold OS_TCBInitPost in H.
unfold OS_TCBInitPost' in H.
sep lift 6%nat in H.
disj_asrt_destruct H.
sep split in H.
intro.
subst .
inverts H5.
intro.
subst.
sep split in H.
inverts H0.
Qed.
Local Ltac smartunfold3 :=
match goal with
| |- ?e _ _ _ => unfold e in *
end.
Lemma struct_pv_overlap:
forall p v1 v2 s P,
s |= Astruct p OS_TCB_flag v1 **
PV p @ Int8u |-> v2 **
P ->
False.
Proof.
intros.
unfold Astruct in H.
unfold OS_TCB_flag in H.
unfold Astruct' in H.
destruct v1.
sep destroy H.
simpl in H0; tryfalse.
destruct p.
sep normal in H.
Set Printing Depth 999.
(* ** ac: Show. *)
remember ( match v1 with
| nil => Afalse
| v :: vl' =>
PV (b, i +ᵢ $ Z.of_nat (typelen STRUCT os_tcb ⋆)) @
STRUCT os_tcb ⋆ |-> v **
match vl' with
| nil => Afalse
| v0 :: vl'0 =>
PV (b,
(i +ᵢ $ Z.of_nat (typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen STRUCT os_tcb ⋆)) @
OS_EVENT ∗ |-> v0 **
match vl'0 with
| nil => Afalse
| v1 :: vl'1 =>
PV (b,
((i +ᵢ $ Z.of_nat (typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen OS_EVENT ∗)) @
(Void) ∗ |-> v1 **
match vl'1 with
| nil => Afalse
| v2 :: vl'2 =>
PV (b,
(((i +ᵢ $ Z.of_nat (typelen STRUCT os_tcb ⋆))
+ᵢ $ Z.of_nat (typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen OS_EVENT ∗)) +ᵢ
$ Z.of_nat (typelen (Void) ∗)) @
Int16u |-> v2 **
match vl'2 with
| nil => Afalse
| v3 :: vl'3 =>
PV (b,
((((i +ᵢ
$ Z.of_nat (typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen OS_EVENT ∗)) +ᵢ
$ Z.of_nat (typelen (Void) ∗)) +ᵢ
$ Z.of_nat (typelen Int16u)) @
Int8u |-> v3 **
match vl'3 with
| nil => Afalse
| v4 :: vl'4 =>
PV (b,
(((((i +ᵢ
$ Z.of_nat (typelen STRUCT os_tcb ⋆))
+ᵢ
$ Z.of_nat (typelen STRUCT os_tcb ⋆))
+ᵢ $ Z.of_nat (typelen OS_EVENT ∗))
+ᵢ $ Z.of_nat (typelen (Void) ∗)) +ᵢ
$ Z.of_nat (typelen Int16u)) +ᵢ
$ Z.of_nat (typelen Int8u)) @
Int8u |-> v4 **
match vl'4 with
| nil => Afalse
| v5 :: vl'5 =>
PV (b,
((((((i +ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen OS_EVENT ∗))
+ᵢ $ Z.of_nat (typelen (Void) ∗))
+ᵢ $ Z.of_nat (typelen Int16u))
+ᵢ $ Z.of_nat (typelen Int8u)) +ᵢ
$ Z.of_nat (typelen Int8u)) @
Int8u |-> v5 **
match vl'5 with
| nil => Afalse
| v6 :: vl'6 =>
PV (b,
(((((((i +ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen OS_EVENT ∗)) +ᵢ
$ Z.of_nat (typelen (Void) ∗))
+ᵢ
$ Z.of_nat (typelen Int16u))
+ᵢ
$ Z.of_nat (typelen Int8u)) +ᵢ
$ Z.of_nat (typelen Int8u)) +ᵢ
$ Z.of_nat (typelen Int8u)) @
Int8u |-> v6 **
match vl'6 with
| nil => Afalse
| v7 :: vl'7 =>
PV (b,
((((((((i +ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen OS_EVENT ∗)) +ᵢ
$
Z.of_nat (typelen (Void) ∗))
+ᵢ
$ Z.of_nat (typelen Int16u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u)) @
Int8u |-> v7 **
match vl'7 with
| nil => Afalse
| v8 :: vl'8 =>
PV
(b,
(((((((((i +ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen OS_EVENT ∗)) +ᵢ
$
Z.of_nat (typelen (Void) ∗))
+ᵢ
$ Z.of_nat (typelen Int16u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u)) @
Int8u |-> v8 **
match vl'8 with
| nil => Aemp
| _ :: _ => Afalse
end
end
end
end
end
end
end
end
end
end
end ).
clear Heqa.
assert ( (b,i) <> (b,i)).
eapply pv_false.
Focus 3.
instantiate (6:= s).
sep auto.
intro.
unfolds in H0.
destruct H0; simpljoin; tryfalse.
destruct H0; simpljoin; tryfalse.
intro.
unfolds in H0.
destruct H0; simpljoin; tryfalse.
destruct H0; simpljoin; tryfalse.
apply H0; auto.
Qed.
Lemma R_ECB_ETbl_P_hold_for_add_tcb:
forall x0 p v'14 v'42 v'5 v'34,
R_ECB_ETbl_P x0 p v'14 ->
TcbJoin v'34 (v'42, rdy, Vnull) v'14 v'5 ->
R_ECB_ETbl_P x0 p v'5.
Proof.
intros.
destruct p.
unfold R_ECB_ETbl_P in *.
splits.
simpljoin.
clear H1 H2.
smartunfold3.
simpljoin.
splits.
{
smartunfold3.
intros .
lets bb: H prio H4 H5.
simpljoin.
exists x.
unfold get in *; simpl in *.
erewrite TcbMod.join_get_r.
eauto.
eauto.
eauto.
}
{
smartunfold3.
intros .
lets bb: H1 prio H4 H5.
simpljoin.
exists x.
unfold get in *; simpl in *.
erewrite TcbMod.join_get_r.
eauto.
eauto.
eauto.
}
{
smartunfold3.
intros .
lets bb: H2 prio H4 H5.
simpljoin.
exists x.
unfold get in *; simpl in *.
erewrite TcbMod.join_get_r.
eauto.
eauto.
eauto.
}
{
smartunfold3.
intros .
lets bb: H3 prio H4 H5.
simpljoin.
exists x.
unfold get in *; simpl in *.
erewrite TcbMod.join_get_r.
eauto.
eauto.
eauto.
}
simpljoin.
clear H H2.
smartunfold3.
simpljoin.
splits.
{
smartunfold3.
intros.
unfold TcbJoin in H0.
unfold get, join, sig in *; simpl in *.
assert (TcbMod.get v'14 tid = Some (prio, wait (os_stat_q x0) n, m)).
eapply TcbMod.join_get_or in H4.
2:eauto.
destruct H4; intros.
assert ( v'34 = tid \/ v'34 <> tid).
tauto.
destruct H5.
subst.
rewrite TcbMod.get_a_sig_a in H4.
inverts H4.
go.
rewrite TcbMod.get_a_sig_a' in H4.
inverts H4.
go.
auto.
eapply H.
eauto.
}
{
smartunfold3.
intros.
unfold TcbJoin in H0.
unfold get, join, sig in *; simpl in *.
assert (TcbMod.get v'14 tid = Some (prio, wait (os_stat_sem x0) n, m)).
eapply TcbMod.join_get_or in H4.
2:eauto.
destruct H4; intros.
assert ( v'34 = tid \/ v'34 <> tid).
tauto.
destruct H5.
subst.
rewrite TcbMod.get_a_sig_a in H4.
inverts H4.
go.
rewrite TcbMod.get_a_sig_a' in H4.
inverts H4.
go.
auto.
eapply H1.
eauto.
}
{
smartunfold3.
intros.
unfold TcbJoin in H0.
unfold get, join, sig in *; simpl in *.
assert (TcbMod.get v'14 tid = Some (prio, wait (os_stat_mbox x0) n, m)).
eapply TcbMod.join_get_or in H4.
2:eauto.
destruct H4; intros.
assert ( v'34 = tid \/ v'34 <> tid).
tauto.
destruct H5.
subst.
rewrite TcbMod.get_a_sig_a in H4.
inverts H4.
go.
rewrite TcbMod.get_a_sig_a' in H4.
inverts H4.
go.
auto.
eapply H2.
eauto.
}
{
smartunfold3.
intros.
unfold TcbJoin in H0.
unfold get, join, sig in *; simpl in *.
assert (TcbMod.get v'14 tid = Some (prio, wait (os_stat_mutexsem x0) n, m)).
eapply TcbMod.join_get_or in H4.
2:eauto.
destruct H4; intros.
assert ( v'34 = tid \/ v'34 <> tid).
tauto.
destruct H5.
subst.
rewrite TcbMod.get_a_sig_a in H4.
inverts H4.
go.
rewrite TcbMod.get_a_sig_a' in H4.
inverts H4.
go.
auto.
eapply H3.
eauto.
}
simpljoin; auto.
Qed.
Lemma ecblist_hold_for_add_tcb :
forall v'4 x v'3 v'13 v'14 v'5 v'42 v'34,
ECBList_P x Vnull v'4 v'3 v'13 v'14 ->
TcbJoin v'34 (v'42, rdy, Vnull) v'14 v'5 ->
ECBList_P x Vnull v'4 v'3 v'13 v'5.
Proof.
induction v'4.
intros.
simpl.
simpl in H.
auto.
intros.
unfold1 ECBList_P in *.
simpljoin.
destruct v'3; tryfalse.
destruct a.
simpljoin.
eexists.
splits; eauto.
eapply R_ECB_ETbl_P_hold_for_add_tcb; eauto.
repeat tri_exists_and_solver1.
Qed.
Lemma nv'2nv:
forall vl n x,
nth_val' n vl = x ->
x <> Vundef ->
nth_val n vl = Some x.
Proof.
induction vl.
induction n.
intros.
simpl in H.
tryfalse.
intros.
simpl in H.
tryfalse.
induction n.
intros.
simpl in H.
simpl.
inverts H.
auto.
intros.
simpl.
simpl in H.
apply IHvl.
auto.
auto.
Qed.
Lemma r_priotbl_p_hold_for_add_tcb :
forall v'14 v'5 v'42 v'34 v'43 v'28,
(* Int.unsigned v'42 < 64 -> *)
v'43 <> v'34 ->
nth_val' (Z.to_nat (Int.unsigned v'42)) v'28 = Vnull ->
R_PrioTbl_P v'28 v'14 v'43 ->
TcbJoin v'34 (v'42, rdy, Vnull) v'14 v'5 ->
R_PrioTbl_P
(update_nth_val (Z.to_nat (Int.unsigned v'42)) v'28 (Vptr v'34)) v'5
v'43.
Proof.
introv HHHH.
intros.
smartunfold3.
simpljoin.
assert ( R_Prio_No_Dup v'5) as special.
{
unfold R_Prio_No_Dup in *.
intros.
assert (tid = v'34 \/ tid <> v'34) by tauto.
assert (tid' = v'34 \/ tid' <> v'34) by tauto.
destruct H7; destruct H8.
subst.
tryfalse.
{
subst v'34.
unfold get, join, sig in *; simpl in *.
erewrite TcbMod.join_get_l in H5.
2:eauto.
2:go.
Focus 2.
assert (tidspec.beq tid tid = true).
go.
rewrite H7.
eauto.
inverts H5.
eapply TcbMod.join_get_or in H6.
2: eauto.
destruct H6.
unfold get, join, sig in *; simpl in *.
rewrite TcbMod.get_a_sig_a' in H5.
inverts H5.
go.
lets bbb: H2 H5.
intro.
subst.
simpljoin.
eapply nv'2nv in H.
unfold nat_of_Z in *.
rewrite H in H6.
inverts H6.
intro; tryfalse.
}
{
subst v'34.
unfold get, join, sig in *; simpl in *.
erewrite TcbMod.join_get_l in H6.
2:eauto.
2:go.
Focus 2.
assert (tidspec.beq tid' tid' = true).
go.
rewrite H8.
eauto.
inverts H6.
eapply TcbMod.join_get_or in H1.
2: eauto.
destruct H1.
unfold get, join, sig in *; simpl in *.
rewrite TcbMod.get_a_sig_a' in H1.
inverts H1.
go.
lets bbb: H2 H1.
intro.
subst.
simpljoin.
eapply nv'2nv in H.
unfold nat_of_Z in *.
rewrite H in H6.
inverts H6.
intro; tryfalse.
}
{
unfold get, sig, join in *; simpl in *.
eapply TcbMod.join_get_or in H5; eauto.
eapply TcbMod.join_get_or in H6; eauto.
destruct H5.
unfold get, sig, join in *; simpl in *.
rewrite TcbMod.get_a_sig_a' in H5.
inverts H5.
go.
destruct H6.
unfold get, sig, join in *; simpl in *.
rewrite TcbMod.get_a_sig_a' in H6.
inverts H6.
go.
eapply H3.
2:eauto.
2:eauto.
eauto.
}
}
splits; auto.
intros.
assert ( prio = v'42 \/ prio <> v'42).
tauto.
destruct H7.
rewrite H7 in *.
unfold nat_of_Z in *.
erewrite hoare_assign.update_nth in H5.
inverts H5.
unfold TcbJoin in *.
unfold get, join, sig in *; simpl in *.
do 2 eexists.
eapply TcbMod.join_get_l.
eauto.
inverts H7.
eapply TcbMod.get_a_sig_a.
go.
(* ** ac: SearchAbout nth_val. *)
(* ** ac: Print nth_val'. *)
(* ** ac: Show. *)
eapply nv'2nv; eauto.
(* intro; tryfalse. *)
unfold nat_of_Z in *.
(* ** ac: SearchAbout nth_val. *)
assert (exists st m, get v'14 tcbid = Some (prio, st, m)).
eapply H0; eauto.
eapply nth_upd_neq.
2:eauto.
intro.
(* ** ac: SearchAbout Z.to_nat. *)
apply Z2Nat.inj in H8.
(* ** ac: SearchAbout (Int.unsigned _ = Int.unsigned _). *)
apply unsigned_inj in H8.
tryfalse.
clear; int auto.
clear; int auto.
simpljoin.
unfold TcbJoin in H1.
unfold get, join, sig in *; simpl in *.
do 2 eexists.
go.
intros.
unfold nat_of_Z in *.
eapply TcbMod.join_get_or in H4; eauto.
2:exact H1.
destruct H4.
assert (tcbid = v'34 \/ tcbid <> v'34).
tauto.
destruct H5.
subst.
erewrite TcbMod.get_a_sig_a in H4.
inverts H4.
erewrite hoare_assign.update_nth.
splits; auto.
eapply nv'2nv.
eauto.
intro; tryfalse.
go.
erewrite TcbMod.get_a_sig_a' in H4.
inverts H4.
go.
lets bb: H2 H4.
assert (prio = v'42 \/ prio <> v'42) by tauto.
destruct H5.
subst.
apply nv'2nv in H.
rewrite H in bb.
destruct bb.
tryfalse.
intro; tryfalse.
simpljoin.
splits; auto.
erewrite nth_upd_neqrev.
eauto.
intro.
2:eauto.
apply Z2Nat.inj in H8.
apply unsigned_inj in H8.
tryfalse.
clear; int auto.
clear; int auto.
Qed.
Lemma nth_upd_neqeq:
forall (vl : vallist) (n m : nat) (x : val),
n <> m ->
nth_val n (update_nth_val m vl x) = nth_val n vl.
Proof.
intros.
simpl.
auto.
intros.
remember (nth_val n vl).
destruct o.
erewrite nth_upd_neqrev.
eauto.
auto.
auto.
gen n.
gen m.
induction vl.
simpl.
auto.
intros.
gen m.
induction n.
simpl in Heqo.
inverts Heqo.
induction m.
intros.
simpl.
simpl in Heqo.
auto.
intros.
simpl.
simpl in Heqo.
apply IHvl.
auto.
auto.
Qed.
Lemma tcblist_p_hold_for_upd_1 :
forall a b ls c d e,
TCBList_P a (b::ls) c d ->
TCBList_P a (update_nth_val 1 b e :: ls) c d.
Proof.
intros.
unfold1 TCBList_P in *.
simpljoin.
repeat tri_exists_and_solver1.
unfolds.
(* ** ac: SearchAbout nth_val. *)
eapply nth_upd_neqrev.
omega.
auto.
unfold TCBNode_P in *.
destruct x2; destruct p.
simpljoin.
splits ; try (eapply nth_upd_neqrev; [omega| auto]).
unfold RL_TCBblk_P in *.
simpljoin.
unfold V_OSTCBPrio, V_OSTCBX, V_OSTCBY, V_OSTCBBitX, V_OSTCBBitY, V_OSTCBStat, V_OSTCBEventPtr .
repeat (erewrite nth_upd_neqrev; [idtac| try omega| eauto 1]).
repeat tri_exists_and_solver1.
unfold R_TCB_Status_P in *.
unfold RLH_RdyI_P, RHL_RdyI_P, RLH_TCB_Status_Wait_P, RHL_TCB_Status_Wait_P in *.
unfold RLH_Wait_P, RLH_WaitS_P, RLH_WaitQ_P, RLH_WaitMB_P, RLH_WaitMS_P,
RHL_Wait_P, RHL_WaitS_P, RHL_WaitQ_P, RHL_WaitMB_P, RHL_WaitMS_P
in *.
unfold WaitTCBblk in *.
unfold RdyTCBblk in *.
unfold V_OSTCBPrio, V_OSTCBX, V_OSTCBY, V_OSTCBBitX, V_OSTCBBitY, V_OSTCBStat, V_OSTCBEventPtr, V_OSTCBDly in *.
simpljoin.
repeat (erewrite nth_upd_neqeq; [idtac| try omega]).
splits; auto.
Qed.
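(* Or-ing the ready-table bit for priority [v'42] into row [v'42 >> 3] does not disturb [TCBList_P], provided no TCB in the abstract map already holds that priority. *)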
Lemma tcblist_p_hold_for_add_tcb_lemma :
forall v l v'26 v'23 v'42 ,
0 <= Int.unsigned v'42 < 64 ->
array_type_vallist_match Int8u v'26 ->
length v'26 = ∘ OS_RDY_TBL_SIZE ->
TCBList_P v l v'26 v'23 ->
(~ exists id t m, get v'23 id = Some (v'42, t, m) )->
TCBList_P v l
(update_nth_val (Z.to_nat (Int.unsigned (v'42 >>ᵢ $ 3))) v'26
(val_inj
(or (nth_val' (Z.to_nat (Int.unsigned (v'42 >>ᵢ $ 3))) v'26)
(nth_val' (Z.to_nat (Int.unsigned (v'42&ᵢ$ 7))) OSMapVallist))))
v'23.
Proof.
introv HHH.
Require Import protect.
protect HHH.
intros HHH2 HHH3.
gen v.
gen v'23.
gen l.
induction l.
intros.
simpl.
simpl in H.
auto.
intros.
unfold1 TCBList_P.
unfold1 TCBList_P in H.
simpljoin.
repeat tri_exists_and_solver1.
Focus 2.
eapply IHl.
auto.
intro.
simpljoin.
eapply H0.
do 3 eexists.
unfold get,sig,join in *; simpl in *.
unfold get,sig,join in *; simpl in *.
eapply TcbMod.join_get_r.
eauto.
exact H.
auto.
Require Import OSQPostPure.
(* ** ac: Check prio_in_tbl_orself . *)
(* ** ac: SearchAbout TCBNode_P. *)
cut (exists grp, nth_val ∘ (Int.unsigned (v'42 >>ᵢ $ 3)) v'26 = Some (Vint32 grp) ).
intro.
simpljoin.
lets bbb: H.
unfold nat_of_Z in bbb.
(* ** ac: SearchAbout nth_val. *)
eapply new_inv.nth_val_nth_val'_some_eq in bbb.
rewrite bbb.
(* ** ac: Check TCBNode_P_rtbl_add. *)
cut ((nth_val' (Z.to_nat (Int.unsigned (v'42&ᵢ$ 7))) OSMapVallist) = Vint32 ($ 1<<ᵢ(v'42&ᵢ$ 7)) ).
intro.
rewrite H5.
simpl.
destruct x2.
destruct p.
eapply TCBNode_P_rtbl_add; eauto.
Focus 3.
unfolds in H3.
simpljoin.
auto.
Focus 2.
intro.
apply H0.
subst v'42.
do 3 eexists.
unfold get,sig,join in *; simpl in *.
unfold get,sig,join in *; simpl in *.
eapply TcbMod.join_get_l.
eauto.
eapply TcbMod.get_a_sig_a.
go.
unfolds in H3.
simpljoin.
unfolds in H7.
simpljoin.
rewrite H6 in H7.
inverts H7.
auto.
(* ** ac: SearchAbout OSMapVallist. *)
assert ((Int.unsigned (v'42&ᵢ$ 7)) <= 7).
clear -HHH.
unprotect HHH.
mauto.
clear -H5.
remember (v'42&ᵢ$ 7) .
mauto.
eapply new_rtbl.prio_set_rdy_in_tbl_lemma_1; auto.
Qed.
Lemma tcblist_p_hold_for_add_tcb :
forall tid v'9 v'10 v'26 v'23 v'42 v'34,
0 <= Int.unsigned v'42 < 64 ->
array_type_vallist_match Int8u v'26 ->
length v'26 = ∘ OS_RDY_TBL_SIZE ->
TCBList_P (Vptr tid) (v'9 :: v'10) v'26 v'23 ->
(~ exists id t m, get v'23 id = Some (v'42, t, m) )->
TCBList_P (Vptr tid) (update_nth_val 1 v'9 (v'34) :: v'10)
(update_nth_val (Z.to_nat (Int.unsigned (v'42 >>ᵢ $ 3))) v'26
(val_inj
(or (nth_val' (Z.to_nat (Int.unsigned (v'42 >>ᵢ $ 3))) v'26)
(nth_val' (Z.to_nat (Int.unsigned (v'42&ᵢ$ 7))) OSMapVallist))))
v'23.
Proof.
introv HHH.
protect HHH.
intros HHH2 HHH3.
intros.
eapply tcblist_p_hold_for_upd_1.
remember (v'9 :: v'10).
clear Heql.
remember (Vptr tid).
clear Heqv.
gen v.
gen v'23.
gen l.
induction l.
intros.
simpl.
simpl in H.
auto.
intros.
unfold1 TCBList_P.
unfold1 TCBList_P in H.
simpljoin.
repeat tri_exists_and_solver1.
Focus 2.
eapply IHl.
auto.
intro.
simpljoin.
eapply H0.
do 3 eexists.
unfold get,sig,join in *; simpl in *.
unfold get,sig,join in *; simpl in *.
eapply TcbMod.join_get_r.
eauto.
exact H.
auto.
Require Import OSQPostPure.
(* ** ac: Check prio_in_tbl_orself . *)
(* ** ac: SearchAbout TCBNode_P. *)
cut (exists grp, nth_val ∘ (Int.unsigned (v'42 >>ᵢ $ 3)) v'26 = Some (Vint32 grp) ).
intro.
simpljoin.
lets bbb: H.
unfold nat_of_Z in bbb.
(* ** ac: SearchAbout nth_val. *)
eapply new_inv.nth_val_nth_val'_some_eq in bbb.
rewrite bbb.
(* ** ac: Check TCBNode_P_rtbl_add. *)
cut ((nth_val' (Z.to_nat (Int.unsigned (v'42&ᵢ$ 7))) OSMapVallist) = Vint32 ($ 1<<ᵢ(v'42&ᵢ$ 7)) ).
intro.
rewrite H5.
simpl.
destruct x2.
destruct p.
eapply TCBNode_P_rtbl_add; eauto.
Focus 3.
unfolds in H3.
simpljoin.
auto.
Focus 2.
intro.
apply H0.
subst v'42.
do 3 eexists.
unfold get,sig,join in *; simpl in *.
unfold get,sig,join in *; simpl in *.
eapply TcbMod.join_get_l.
eauto.
eapply TcbMod.get_a_sig_a.
go.
unfolds in H3.
simpljoin.
unfolds in H7.
simpljoin.
rewrite H6 in H7.
inverts H7.
auto.
assert ((Int.unsigned (v'42&ᵢ$ 7)) <= 7).
clear -HHH.
unprotect HHH.
mauto.
clear -H5.
remember (v'42&ᵢ$ 7) .
mauto.
eapply new_rtbl.prio_set_rdy_in_tbl_lemma_1; auto.
Qed.
(* Lemma tcblist_p_hold_for_add_tcb' :
* forall v'26 v'42 v'34 v'39 v'30,
* (* TCBList_P v'30 nil v'26 v'22 -> *)
* new_tcb_node_p v'42 Vnull v'30 v'39 ->
* TCBList_P (Vptr v'34) ((v'39 :: nil) ++ nil)
* (update_nth_val (Z.to_nat (Int.unsigned (v'42 >>ᵢ $ 3))) v'26
* (val_inj
* (or (nth_val' (Z.to_nat (Int.unsigned (v'42 >>ᵢ $ 3))) v'26)
* (nth_val' (Z.to_nat (Int.unsigned (v'42&ᵢ$ 7))) OSMapVallist))))
* (sig v'34 (v'42, rdy, Vnull)).
* Admitted. *)
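(* [TCBList_P] over an empty node list forces the abstract TCB map to be the empty map. *)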
Lemma TCBList_P_nil_empty:
forall v'30 v'26 v'22,
TCBList_P v'30 nil v'26 v'22 ->
v'22 = empenv.
Proof.
intros.
simpl in H.
auto.
Qed.
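(* [RH_TCBList_ECBList_P] is preserved when a fresh TCB in the [rdy] state (waiting on no event, message [Vnull]) is joined into the TCB map. *)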
Lemma rh_t_e_p_hold_for_add_tcb:
forall v'13 v'14 tid v'34 v'42 v'5,
RH_TCBList_ECBList_P v'13 v'14 tid ->
TcbJoin v'34 (v'42, rdy, Vnull) v'14 v'5 ->
RH_TCBList_ECBList_P v'13 v'5 tid.
Proof.
intros.
smartunfold3.
simpljoin.
splits.
{
smartunfold3.
simpljoin.
splits.
{
intros.
lets bb: H H5.
simpljoin.
do 3 eexists.
unfold get, sig, join in *; simpl in *.
go.
}
{
intros.
unfold get, sig, join in *; simpl in *.
eapply TcbMod.join_get_or in H5.
2: exact H0.
destruct H5.
assert (tid0 = v'34 \/ tid0 <> v'34).
tauto.
destruct H6.
subst.
rewrite TcbMod.get_a_sig_a in H5.
inverts H5.
go.
rewrite TcbMod.get_a_sig_a' in H5.
inverts H5.
go.
eapply H4.
eauto.
}
}
{
Local Ltac swapname H H' :=
let HH := fresh in
rename H into HH; rename H' into H; rename HH into H'.
swapname H H1.
smartunfold3.
simpljoin.
splits.
{
intros.
lets bb: H H5.
simpljoin.
do 3 eexists.
unfold get, sig, join in *; simpl in *.
go.
}
{
intros.
unfold get, sig, join in *; simpl in *.
eapply TcbMod.join_get_or in H5.
2: exact H0.
destruct H5.
assert (tid0 = v'34 \/ tid0 <> v'34).
tauto.
destruct H6.
subst.
rewrite TcbMod.get_a_sig_a in H5.
inverts H5.
go.
rewrite TcbMod.get_a_sig_a' in H5.
inverts H5.
go.
eapply H4.
eauto.
}
}
{
swapname H H2.
smartunfold3.
simpljoin.
splits.
{
intros.
lets bb: H H5.
simpljoin.
do 3 eexists.
unfold get, sig, join in *; simpl in *.
go.
}
{
intros.
unfold get, sig, join in *; simpl in *.
eapply TcbMod.join_get_or in H5.
2: exact H0.
destruct H5.
assert (tid0 = v'34 \/ tid0 <> v'34).
tauto.
destruct H6.
subst.
rewrite TcbMod.get_a_sig_a in H5.
inverts H5.
go.
rewrite TcbMod.get_a_sig_a' in H5.
inverts H5.
go.
eapply H4.
eauto.
}
}
{
swapname H H3.
smartunfold3.
simpljoin.
splits.
{
intros.
lets bb: H H6.
simpljoin.
do 3 eexists.
unfold get, sig, join in *; simpl in *.
go.
}
{
intros.
unfold get, sig, join in *; simpl in *.
eapply TcbMod.join_get_or in H6.
2: exact H0.
destruct H6.
assert (tid0 = v'34 \/ tid0 <> v'34).
tauto.
destruct H7.
subst.
rewrite TcbMod.get_a_sig_a in H6.
inverts H6.
go.
rewrite TcbMod.get_a_sig_a' in H6.
inverts H6.
go.
eapply H4.
eauto.
}
unfolds.
intros.
unfolds in H5.
lets bb: H5 H6.
simpljoin.
do 3 eexists.
unfold get, sig, join in *; simpl in *.
go.
}
Qed.
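(* Writing back the value already stored at position [n] leaves the list unchanged. *)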
Lemma update_eq :
forall ls n c,
nth_val n ls= Some c ->
ls = update_nth_val n ls c.
Proof.
induction ls.
intros.
simpl in H.
inversion H.
induction n.
intros.
simpl in H.
simpl.
inverts H.
auto.
intros.
simpl.
assert (ls = update_nth_val n ls c).
apply IHls.
simpl in H.
auto.
rewrite <- H0.
auto.
Qed.
Lemma tcblist_p_hold_for_add_tcb'' :
forall v'26 v'42 v'34 v'39 v'30 vleft x v'22,
0 <= Int.unsigned v'42 < 64 ->
array_type_vallist_match Int8u v'26 ->
length v'26 = ∘ OS_RDY_TBL_SIZE ->
~ (exists id t m, get v'22 id = Some (v'42, t, m)) ->
TCBList_P v'30 vleft v'26 v'22 ->
new_tcb_node_p v'42 Vnull v'30 v'39 ->
join (sig v'34 (v'42, rdy, Vnull)) v'22 x ->
TCBList_P (Vptr v'34) ((v'39 :: nil) ++ vleft)
(update_nth_val (Z.to_nat (Int.unsigned (v'42 >>ᵢ $ 3))) v'26
(val_inj
(or (nth_val' (Z.to_nat (Int.unsigned (v'42 >>ᵢ $ 3))) v'26)
(nth_val' (Z.to_nat (Int.unsigned (v'42&ᵢ$ 7))) OSMapVallist))))
x.
Proof.
intros.
change ((v'39::nil) ++ vleft) with (v'39 :: vleft).
unfold1 TCBList_P.
repeat tri_exists_and_solver1.
Focus 3.
eapply tcblist_p_hold_for_add_tcb_lemma; eauto.
unfolds in H4.
simpljoin; auto.
(* ** ac: SearchAbout TCBNode_P. *)
assert ((nth_val' (Z.to_nat (Int.unsigned (v'42&ᵢ$ 7))) OSMapVallist) = Vint32 ($ 1<<ᵢ(v'42&ᵢ$ 7)) ) as HH1.
assert ((Int.unsigned (v'42&ᵢ$ 7)) <= 7).
clear -H.
mauto.
clear -H6.
remember (v'42&ᵢ$ 7) .
mauto.
assert (exists grp, nth_val ∘ (Int.unsigned (v'42 >>ᵢ $ 3)) v'26 = Some (Vint32 grp) ) as HH2.
eapply new_rtbl.prio_set_rdy_in_tbl_lemma_1; auto.
destruct HH2 as (group & HH2).
unfold nat_of_Z in HH2.
lets HH2': (nth_val_nth_val'_some_eq _ _ HH2).
rewrite HH1, HH2'.
unfolds.
unfolds in H4.
splits; simpljoin; auto.
unfolds.
repeat tri_exists_and_solver1.
Focus 3.
(* ** ac: SearchAbout R_TCB_Status_P. *)
unfolds.
splits.
{
unfolds.
intros.
unfolds in H18.
simpljoin.
rewrite H6 in H18.
inverts H18.
repeat tri_exists_and_solver1.
}
{
unfolds.
intros.
inverts H18.
repeat tri_exists_and_solver1.
unfolds.
splits.
auto.
(* ** ac: Check prio_in_tbl_orself. *)
simpl.
eapply prio_in_tbl_orself.
}
{
unfolds.
splits.
{
unfolds.
intros.
unfolds in H18.
simpljoin.
rewrite H6 in H18.
inverts H18.
lets bb: prio_notin_tbl_orself H17 HH2.
simpl in H20.
tryfalse.
}
{
unfolds.
intros.
unfolds in H18.
simpljoin.
rewrite H6 in H18.
inverts H18.
lets bb: prio_notin_tbl_orself H17 HH2.
simpl in H20.
tryfalse.
}
{
unfolds.
intros.
unfolds in H18.
simpljoin.
rewrite H6 in H18.
inverts H18.
lets bb: prio_notin_tbl_orself H17 HH2.
simpl in H20.
tryfalse.
}
{
unfolds.
intros.
unfolds in H18.
simpljoin.
rewrite H6 in H18.
inverts H18.
lets bb: prio_notin_tbl_orself H17 HH2.
simpl in H20.
tryfalse.
}
{
unfolds.
intros.
unfolds in H18.
simpljoin.
rewrite H6 in H18.
inverts H18.
lets bb: prio_notin_tbl_orself H17 HH2.
simpl in H20.
tryfalse.
}
}
{
unfolds.
splits.
{
unfolds.
intros.
inverts H18.
}
{
unfolds.
intros.
inverts H18.
}
{
unfolds.
intros.
inverts H18.
}
{
unfolds.
intros.
inverts H18.
}
{
unfolds.
intros.
inverts H18.
}
}
Unfocus.
rewrite HH1 in H12.
auto.
assert ((Int.unsigned (v'42>>ᵢ$ 3)) <= 7).
clear -H17.
mauto.
assert ((nth_val' (Z.to_nat (Int.unsigned (v'42>>ᵢ$ 3))) OSMapVallist) = Vint32 ($ 1<<ᵢ(v'42>>ᵢ$ 3)) ) as HH3.
clear -H18.
remember (v'42>>ᵢ$ 3) .
mauto.
rewrite HH3 in H10.
auto.
Qed.
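(* Two separately conjoined points-to assertions for the same location are contradictory: a heap cell cannot be owned twice. *)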
Lemma mem_overlap_PV:
forall s p v0 v P,
s |= PV p @ STRUCT os_tcb ⋆ |-> v0 **
PV p @ STRUCT os_tcb ⋆ |-> v ** P ->
False.
Proof.
intros.
assert (p <> p).
(* ** ac: Check pv_false. *)
eapply pv_false.
3: eauto.
unfold array_struct.
intro.
destruct H0; simpljoin; tryfalse.
destruct H0; simpljoin; tryfalse.
unfold array_struct.
intro.
destruct H0; simpljoin; tryfalse.
destruct H0; simpljoin; tryfalse.
apply H0; auto.
Qed.
Lemma mem_overlap_struct:
forall s v1 v2 p P,
s |= Astruct p OS_TCB_flag v1 ** Astruct p OS_TCB_flag v2 ** P ->
False.
Proof.
intros.
unfold Astruct in H.
unfold OS_TCB_flag in H.
unfold Astruct' in H.
destruct v1.
sep destroy H.
simpl in H0; tryfalse.
destruct p.
destruct v2.
sep destroy H.
simpl in H1; tryfalse.
sep normal in H.
sep lift 3%nat in H.
Set Printing Depth 999.
(* ** ac: Show. *)
remember (match v1 with
| nil => Afalse
| v :: vl' =>
PV (b, i +ᵢ $ Z.of_nat (typelen STRUCT os_tcb ⋆)) @
STRUCT os_tcb ⋆ |-> v **
match vl' with
| nil => Afalse
| v0 :: vl'0 =>
PV (b,
(i +ᵢ $ Z.of_nat (typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen STRUCT os_tcb ⋆)) @
OS_EVENT ∗ |-> v0 **
match vl'0 with
| nil => Afalse
| v1 :: vl'1 =>
PV (b,
((i +ᵢ $ Z.of_nat (typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen OS_EVENT ∗)) @
(Void) ∗ |-> v1 **
match vl'1 with
| nil => Afalse
| v2 :: vl'2 =>
PV (b,
(((i +ᵢ $ Z.of_nat (typelen STRUCT os_tcb ⋆))
+ᵢ $ Z.of_nat (typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen OS_EVENT ∗)) +ᵢ
$ Z.of_nat (typelen (Void) ∗)) @
Int16u |-> v2 **
match vl'2 with
| nil => Afalse
| v3 :: vl'3 =>
PV (b,
((((i +ᵢ
$ Z.of_nat (typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen OS_EVENT ∗)) +ᵢ
$ Z.of_nat (typelen (Void) ∗)) +ᵢ
$ Z.of_nat (typelen Int16u)) @
Int8u |-> v3 **
match vl'3 with
| nil => Afalse
| v4 :: vl'4 =>
PV (b,
(((((i +ᵢ
$ Z.of_nat (typelen STRUCT os_tcb ⋆))
+ᵢ
$ Z.of_nat (typelen STRUCT os_tcb ⋆))
+ᵢ $ Z.of_nat (typelen OS_EVENT ∗))
+ᵢ $ Z.of_nat (typelen (Void) ∗)) +ᵢ
$ Z.of_nat (typelen Int16u)) +ᵢ
$ Z.of_nat (typelen Int8u)) @
Int8u |-> v4 **
match vl'4 with
| nil => Afalse
| v5 :: vl'5 =>
PV (b,
((((((i +ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen OS_EVENT ∗))
+ᵢ $ Z.of_nat (typelen (Void) ∗))
+ᵢ $ Z.of_nat (typelen Int16u))
+ᵢ $ Z.of_nat (typelen Int8u)) +ᵢ
$ Z.of_nat (typelen Int8u)) @
Int8u |-> v5 **
match vl'5 with
| nil => Afalse
| v6 :: vl'6 =>
PV (b,
(((((((i +ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen OS_EVENT ∗)) +ᵢ
$ Z.of_nat (typelen (Void) ∗))
+ᵢ
$ Z.of_nat (typelen Int16u))
+ᵢ
$ Z.of_nat (typelen Int8u)) +ᵢ
$ Z.of_nat (typelen Int8u)) +ᵢ
$ Z.of_nat (typelen Int8u)) @
Int8u |-> v6 **
match vl'6 with
| nil => Afalse
| v7 :: vl'7 =>
PV (b,
((((((((i +ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen OS_EVENT ∗)) +ᵢ
$
Z.of_nat (typelen (Void) ∗))
+ᵢ
$ Z.of_nat (typelen Int16u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u)) @
Int8u |-> v7 **
match vl'7 with
| nil => Afalse
| v8 :: vl'8 =>
PV
(b,
(((((((((i +ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen OS_EVENT ∗)) +ᵢ
$
Z.of_nat (typelen (Void) ∗))
+ᵢ
$ Z.of_nat (typelen Int16u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u)) @
Int8u |-> v8 **
match vl'8 with
| nil => Aemp
| _ :: _ => Afalse
end
end
end
end
end
end
end
end
end
end
end).
remember (
match v2 with
| nil => Afalse
| v :: vl' =>
PV (b, i +ᵢ $ Z.of_nat (typelen STRUCT os_tcb ⋆)) @
STRUCT os_tcb ⋆ |-> v **
match vl' with
| nil => Afalse
| v0 :: vl'0 =>
PV (b,
(i +ᵢ $ Z.of_nat (typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen STRUCT os_tcb ⋆)) @
OS_EVENT ∗ |-> v0 **
match vl'0 with
| nil => Afalse
| v1 :: vl'1 =>
PV (b,
((i +ᵢ $ Z.of_nat (typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen OS_EVENT ∗)) @
(Void) ∗ |-> v1 **
match vl'1 with
| nil => Afalse
| v2 :: vl'2 =>
PV (b,
(((i +ᵢ $ Z.of_nat (typelen STRUCT os_tcb ⋆))
+ᵢ $ Z.of_nat (typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen OS_EVENT ∗)) +ᵢ
$ Z.of_nat (typelen (Void) ∗)) @
Int16u |-> v2 **
match vl'2 with
| nil => Afalse
| v3 :: vl'3 =>
PV (b,
((((i +ᵢ
$ Z.of_nat (typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen OS_EVENT ∗)) +ᵢ
$ Z.of_nat (typelen (Void) ∗)) +ᵢ
$ Z.of_nat (typelen Int16u)) @
Int8u |-> v3 **
match vl'3 with
| nil => Afalse
| v4 :: vl'4 =>
PV (b,
(((((i +ᵢ
$ Z.of_nat (typelen STRUCT os_tcb ⋆))
+ᵢ
$ Z.of_nat (typelen STRUCT os_tcb ⋆))
+ᵢ $ Z.of_nat (typelen OS_EVENT ∗))
+ᵢ $ Z.of_nat (typelen (Void) ∗)) +ᵢ
$ Z.of_nat (typelen Int16u)) +ᵢ
$ Z.of_nat (typelen Int8u)) @
Int8u |-> v4 **
match vl'4 with
| nil => Afalse
| v5 :: vl'5 =>
PV (b,
((((((i +ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆)) +ᵢ
$ Z.of_nat (typelen OS_EVENT ∗))
+ᵢ $ Z.of_nat (typelen (Void) ∗))
+ᵢ $ Z.of_nat (typelen Int16u))
+ᵢ $ Z.of_nat (typelen Int8u)) +ᵢ
$ Z.of_nat (typelen Int8u)) @
Int8u |-> v5 **
match vl'5 with
| nil => Afalse
| v6 :: vl'6 =>
PV (b,
(((((((i +ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen OS_EVENT ∗)) +ᵢ
$ Z.of_nat (typelen (Void) ∗))
+ᵢ
$ Z.of_nat (typelen Int16u))
+ᵢ
$ Z.of_nat (typelen Int8u)) +ᵢ
$ Z.of_nat (typelen Int8u)) +ᵢ
$ Z.of_nat (typelen Int8u)) @
Int8u |-> v6 **
match vl'6 with
| nil => Afalse
| v7 :: vl'7 =>
PV (b,
((((((((i +ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen OS_EVENT ∗)) +ᵢ
$
Z.of_nat (typelen (Void) ∗))
+ᵢ
$ Z.of_nat (typelen Int16u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u)) @
Int8u |-> v7 **
match vl'7 with
| nil => Afalse
| v8 :: vl'8 =>
PV
(b,
(((((((((i +ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen STRUCT os_tcb ⋆))
+ᵢ
$
Z.of_nat
(typelen OS_EVENT ∗)) +ᵢ
$
Z.of_nat (typelen (Void) ∗))
+ᵢ
$ Z.of_nat (typelen Int16u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u))
+ᵢ
$ Z.of_nat (typelen Int8u)) @
Int8u |-> v8 **
match vl'8 with
| nil => Aemp
| _ :: _ => Afalse
end
end
end
end
end
end
end
end
end
end
end ).
clear -H.
eapply mem_overlap_PV.
instantiate (6:=s).
sep cancel 1%nat 1%nat.
sep cancel 1%nat 1%nat.
eauto.
Qed.
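(* A TCB node at [ptr] held apart (by [**]) from a doubly linked TCB segment cannot already occur in the abstract map that [TCBList_P] ascribes to that segment. *)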
Lemma sometcblist_lemma:
forall v v'22 s ptr vv a0 a2 a3 v'26 P a1,
s|= node (Vptr ptr) vv OS_TCB_flag ** tcbdllseg a0 a1 a2 a3 v ** P ->
TCBList_P a0 v v'26 v'22 ->
~ TcbMod.indom v'22 ptr.
Proof.
induction v.
intros.
simpl in H0.
subst.
intro.
unfolds in H0.
inverts H0.
inverts H1.
intros.
lets back: H0.
unfold1 TCBList_P in H0.
simpljoin.
unfold tcbdllseg in H.
unfold1 dllseg in H.
sep normal in H.
sep destruct H.
sep split in H.
assert (~ TcbMod.indom x1 ptr).
eapply IHv.
instantiate (7 := s).
sep cancel 3%nat 1%nat.
unfold tcbdllseg.
sep cancel 2%nat 1%nat.
eauto.
rewrite H1 in H0.
inverts H0.
eauto.
assert ( x <> ptr).
intro.
sep lift 3%nat in H.
unfold node in H.
sep normal in H.
sep destruct H.
sep split in H.
subst x.
simpljoin.
inverts H10.
inverts H8.
eapply mem_overlap_struct.
eauto.
intro.
unfolds in H9.
simpljoin.
unfolds in H2.
eapply TcbMod.join_get_or in H9.
2: exact H2.
destruct H9.
unfold sig in H9; simpl in H9.
rewrite TcbMod.get_a_sig_a' in H9.
inverts H9.
go.
apply H7.
eexists.
eauto.
Qed.
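(* If the priority-table slot for priority [v'42] is [Vnull], then no TCB in the abstract map carries that priority. *)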
Lemma not_in_priotbl_no_priotcb:
forall v'28 v'14 v'43 v'42,
R_PrioTbl_P v'28 v'14 v'43 ->
Int.unsigned v'42 < 64 ->
nth_val' (Z.to_nat (Int.unsigned v'42)) v'28 = Vnull ->
~ (exists id t m, get v'14 id = Some (v'42, t, m)).
Proof.
intros.
unfolds in H.
simpljoin.
intro.
simpljoin.
lets bb: H2 H4.
simpljoin.
apply nv'2nv in H1.
unfold nat_of_Z in H5.
rewrite H1 in H5.
inverts H5.
intro; tryfalse.
Qed.
|
After I restart MCS and load a saved Session, all trigger configuration has been lost. Is this normal?
No, please report it in the bug report forum.
I reported this a few weeks ago to the Bugs Report thread and have not gotten a response. Could you please look into this? |
(* I have not worked with any partners. *)
(** * Basics: Functional Programming in Coq *)
(* REMINDER:
#####################################################
### PLEASE DO NOT DISTRIBUTE SOLUTIONS PUBLICLY ###
#####################################################
(See the [Preface] for why.)
*)
(* ################################################################# *)
(** * Introduction *)
(** The functional programming style is founded on simple, everyday
mathematical intuition: If a procedure or method has no side
effects, then (ignoring efficiency) all we need to understand
about it is how it maps inputs to outputs -- that is, we can think
of it as just a concrete method for computing a mathematical
function. This is one sense of the word "functional" in
"functional programming." The direct connection between programs
and simple mathematical objects supports both formal correctness
proofs and sound informal reasoning about program behavior.
The other sense in which functional programming is "functional" is
that it emphasizes the use of functions (or methods) as
_first-class_ values -- i.e., values that can be passed as
arguments to other functions, returned as results, included in
data structures, etc. The recognition that functions can be
treated as data gives rise to a host of useful and powerful
programming idioms.
Other common features of functional languages include _algebraic
data types_ and _pattern matching_, which make it easy to
construct and manipulate rich data structures, and sophisticated
_polymorphic type systems_ supporting abstraction and code reuse.
Coq offers all of these features.
The first half of this chapter introduces the most essential
elements of Coq's functional programming language, called
_Gallina_. The second half introduces some basic _tactics_ that
can be used to prove properties of Coq programs. *)
(* ################################################################# *)
(** * Enumerated Types *)
(** One notable aspect of Coq is that its set of built-in
features is _extremely_ small. For example, instead of providing
the usual palette of atomic data types (booleans, integers,
strings, etc.), Coq offers a powerful mechanism for defining new
data types from scratch, with all these familiar types as
instances.
Naturally, the Coq distribution comes preloaded with an extensive
standard library providing definitions of booleans, numbers, and
many common data structures like lists and hash tables. But there
is nothing magic or primitive about these library definitions. To
illustrate this, we will explicitly recapitulate all the
definitions we need in this course, rather than just getting them
implicitly from the library. *)
(* ================================================================= *)
(** ** Days of the Week *)
(** To see how this definition mechanism works, let's start with
a very simple example. The following declaration tells Coq that
we are defining a new set of data values -- a _type_. *)
Inductive day : Type :=
| monday : day
| tuesday : day
| wednesday : day
| thursday : day
| friday : day
| saturday : day
| sunday : day.
(** The type is called [day], and its members are [monday],
[tuesday], etc. The second and following lines of the definition
can be read "[monday] is a [day], [tuesday] is a [day], etc."
Having defined [day], we can write functions that operate on
days. *)
Definition next_weekday (d:day) : day :=
match d with
| monday => tuesday
| tuesday => wednesday
| wednesday => thursday
| thursday => friday
| friday => monday
| saturday => monday
| sunday => monday
end.
(** One thing to note is that the argument and return types of
this function are explicitly declared. Like most functional
programming languages, Coq can often figure out these types for
itself when they are not given explicitly -- i.e., it can do _type
inference_ -- but we'll generally include them to make reading
easier. *)
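(** For instance (this small example is not part of the original text;
    [next_weekday'] exists only for illustration), Coq infers the omitted
    argument and return types below from the constructors used in the
    [match], and [Check] confirms the inferred type: *)
Definition next_weekday' d :=
  match d with
  | monday => tuesday
  | tuesday => wednesday
  | wednesday => thursday
  | thursday => friday
  | friday => monday
  | saturday => monday
  | sunday => monday
  end.
Check next_weekday'.
(* ===> next_weekday' : day -> day *)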
(** Having defined a function, we should check that it works on
some examples. There are actually three different ways to do this
in Coq. First, we can use the command [Compute] to evaluate a
compound expression involving [next_weekday]. *)
Compute (next_weekday friday).
(* ==> monday : day *)
Compute (next_weekday (next_weekday saturday)).
(* ==> tuesday : day *)
(** (We show Coq's responses in comments, but, if you have a
computer handy, this would be an excellent moment to fire up the
Coq interpreter under your favorite IDE -- either CoqIde or Proof
General -- and try this for yourself. Load this file, [Basics.v],
from the book's Coq sources, find the above example, submit it to
Coq, and observe the result.)
Second, we can record what we _expect_ the result to be in the
form of a Coq example: *)
Example test_next_weekday:
(next_weekday (next_weekday saturday)) = tuesday.
(** This declaration does two things: it makes an
assertion (that the second weekday after [saturday] is [tuesday]),
and it gives the assertion a name that can be used to refer to it
later. Having made the assertion, we can also ask Coq to verify
it, like this: *)
Proof. simpl. reflexivity. Qed.
(** The details are not important for now (we'll come back to
them in a bit), but essentially this can be read as "The assertion
we've just made can be proved by observing that both sides of the
equality evaluate to the same thing, after some simplification."
Third, we can ask Coq to _extract_, from our [Definition], a
program in some other, more conventional, programming
language (OCaml, Scheme, or Haskell) with a high-performance
compiler. This facility is very interesting, since it gives us a
way to go from proved-correct algorithms written in Gallina to
efficient machine code. (Of course, we are trusting the
correctness of the OCaml/Haskell/Scheme compiler, and of Coq's
extraction facility itself, but this is still a big step forward
from the way most software is developed today.) Indeed, this is
one of the main uses for which Coq was developed. We'll come back
to this topic in later chapters. *)
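(** As a rough sketch of what this looks like in practice (these commands
    are not part of the original text, and depending on your Coq version
    you may first need [Require Extraction.]):
      Extraction Language OCaml.
      Extraction next_weekday.
    Coq then prints an OCaml datatype for [day] together with an OCaml
    function [next_weekday : day -> day]. *)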
(* ================================================================= *)
(** ** Homework Submission Guidelines *)
(** If you are using Software Foundations in a course, your instructor
may use automatic scripts to help grade your homework assignments.
In order for these scripts to work correctly (so that you get full
credit for your work!), please be careful to follow these rules:
- The grading scripts work by extracting marked regions of the
.v files that you submit. It is therefore important that you
do not alter the "markup" that delimits exercises: the
Exercise header, the name of the exercise, the "empty square
bracket" marker at the end, etc. Please leave this markup
exactly as you find it.
- Do not delete exercises. If you skip an exercise (e.g.,
because it is marked Optional, or because you can't solve it),
it is OK to leave a partial proof in your .v file, but in this
case please make sure it ends with [Admitted] (not, for
example [Abort]). *)
(* ================================================================= *)
(** ** Booleans *)
(** In a similar way, we can define the standard type [bool] of
booleans, with members [true] and [false]. *)
Inductive bool : Type :=
| true : bool
| false : bool.
(** Although we are rolling our own booleans here for the sake
of building up everything from scratch, Coq does, of course,
provide a default implementation of the booleans, together with a
multitude of useful functions and lemmas. (Take a look at
[Coq.Init.Datatypes] in the Coq library documentation if you're
interested.) Whenever possible, we'll name our own definitions
and theorems so that they exactly coincide with the ones in the
standard library.
Functions over booleans can be defined in the same way as
above: *)
Definition negb (b:bool) : bool :=
match b with
| true => false
| false => true
end.
Definition andb (b1:bool) (b2:bool) : bool :=
match b1 with
| true => b2
| false => false
end.
Definition orb (b1:bool) (b2:bool) : bool :=
match b1 with
| true => true
| false => b2
end.
(** The last two of these illustrate Coq's syntax for
multi-argument function definitions. The corresponding
multi-argument application syntax is illustrated by the following
"unit tests," which constitute a complete specification -- a truth
table -- for the [orb] function: *)
Example test_orb1: (orb true false) = true.
Proof. simpl. reflexivity. Qed.
Example test_orb2: (orb false false) = false.
Proof. simpl. reflexivity. Qed.
Example test_orb3: (orb false true) = true.
Proof. simpl. reflexivity. Qed.
Example test_orb4: (orb true true) = true.
Proof. simpl. reflexivity. Qed.
(** We can also introduce some familiar syntax for the boolean
operations we have just defined. The [Infix] command defines a new
symbolic notation for an existing definition. *)
Infix "&&" := andb.
Infix "||" := orb.
Example test_orb5: false || false || true = true.
Proof. simpl. reflexivity. Qed.
(** _A note on notation_: In [.v] files, we use square brackets
to delimit fragments of Coq code within comments; this convention,
also used by the [coqdoc] documentation tool, keeps them visually
separate from the surrounding text. In the html version of the
files, these pieces of text appear in a [different font].
The command [Admitted] can be used as a placeholder for an
incomplete proof. We'll use it in exercises, to indicate the
parts that we're leaving for you -- i.e., your job is to replace
[Admitted]s with real proofs. *)
(** **** Exercise: 1 star (nandb) *)
(** Remove "[Admitted.]" and complete the definition of the following
function; then make sure that the [Example] assertions below can
each be verified by Coq. (Remove "[Admitted.]" and fill in each
proof, following the model of the [orb] tests above.) The function
should return [true] if either or both of its inputs are
[false]. *)
Definition nandb (b1:bool) (b2:bool) : bool :=
(negb (andb b1 b2)).
Example test_nandb1: (nandb true false) = true.
Proof. simpl. reflexivity. Qed.
Example test_nandb2: (nandb false false) = true.
Proof. simpl. reflexivity. Qed.
Example test_nandb3: (nandb false true) = true.
Proof. simpl. reflexivity. Qed.
Example test_nandb4: (nandb true true) = false.
Proof. simpl. reflexivity. Qed.
(** [] *)
(** **** Exercise: 1 star (andb3) *)
(** Do the same for the [andb3] function below. This function should
return [true] when all of its inputs are [true], and [false]
otherwise. *)
Definition andb3 (b1:bool) (b2:bool) (b3:bool) : bool :=
match b1 with
| true => (andb b2 b3)
| false => false
end.
Example test_andb31: (andb3 true true true) = true.
Proof. simpl. reflexivity. Qed.
Example test_andb32: (andb3 false true true) = false.
Proof. simpl. reflexivity. Qed.
Example test_andb33: (andb3 true false true) = false.
Proof. simpl. reflexivity. Qed.
Example test_andb34: (andb3 true true false) = false.
Proof. simpl. reflexivity. Qed.
(** [] *)
(* ================================================================= *)
(** ** Function Types *)
(** Every expression in Coq has a type, describing what sort of
thing it computes. The [Check] command asks Coq to print the type
of an expression. *)
Check true.
(* ===> true : bool *)
Check (negb true).
(* ===> negb true : bool *)
(** Functions like [negb] itself are also data values, just like
[true] and [false]. Their types are called _function types_, and
they are written with arrows. *)
Check negb.
(* ===> negb : bool -> bool *)
(** The type of [negb], written [bool -> bool] and pronounced
"[bool] arrow [bool]," can be read, "Given an input of type
[bool], this function produces an output of type [bool]."
Similarly, the type of [andb], written [bool -> bool -> bool], can
be read, "Given two inputs, both of type [bool], this function
produces an output of type [bool]." *)
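(** We can ask [Check] to confirm this (this check is not part of the
    original text): *)
Check andb.
(* ===> andb : bool -> bool -> bool *)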
(* ================================================================= *)
(** ** Modules *)
(** Coq provides a _module system_, to aid in organizing large
developments. In this course we won't need most of its features,
but one is useful: If we enclose a collection of declarations
between [Module X] and [End X] markers, then, in the remainder of
the file after the [End], these definitions are referred to by
names like [X.foo] instead of just [foo]. We will use this
feature to introduce the definition of the type [nat] in an inner
module so that it does not interfere with the one from the
standard library (which we want to use in the rest because it
comes with a tiny bit of convenient special notation). *)
Module NatPlayground.
(* ================================================================= *)
(** ** Numbers *)
(** The types we have defined so far are examples of "enumerated
types": their definitions explicitly enumerate a finite set of
elements. A more interesting way of defining a type is to give a
collection of _inductive rules_ describing its elements. For
example, we can define (a unary representation of) the natural
numbers as follows: *)
Inductive nat : Type :=
| O : nat
| S : nat -> nat.
(** The clauses of this definition can be read:
- [O] is a natural number (note that this is the letter "[O],"
not the numeral "[0]").
- [S] can be put in front of a natural number to yield another
one -- if [n] is a natural number, then [S n] is too. *)
(** Let's look at this in a little more detail.
Every inductively defined set ([day], [nat], [bool], etc.) is
actually a set of _expressions_ built from _constructors_
like [O], [S], [true], [false], [monday], etc. The definition of
[nat] says how expressions in the set [nat] can be built:
- [O] and [S] are constructors;
- the expression [O] belongs to the set [nat];
- if [n] is an expression belonging to the set [nat], then [S n]
is also an expression belonging to the set [nat]; and
- expressions formed in these two ways are the only ones belonging
to the set [nat]. *)
(** The same rules apply for our definitions of [day] and
[bool]. (The annotations we used for their constructors are
analogous to the one for the [O] constructor, indicating that they
don't take any arguments.)
The above conditions are the precise force of the [Inductive]
declaration. They imply that the expression [O], the expression
[S O], the expression [S (S O)], the expression [S (S (S O))], and
so on all belong to the set [nat], while other expressions built
from data constructors, like [true], [andb true false], [S (S
false)], and [O (O (O S))] do not.
A critical point here is that what we've done so far is just to
define a _representation_ of numbers: a way of writing them down.
The names [O] and [S] are arbitrary, and at this point they have
no special meaning -- they are just two different marks that we
can use to write down numbers (together with a rule that says any
[nat] will be written as some string of [S] marks followed by an
[O]). If we like, we can write essentially the same definition
this way: *)
Inductive nat' : Type :=
| stop : nat'
| tick : nat' -> nat'.
(** The _interpretation_ of these marks comes from how we use them to
compute. *)
(** We can do this by writing functions that pattern match on
representations of natural numbers just as we did above with
booleans and days -- for example, here is the predecessor
function: *)
Definition pred (n : nat) : nat :=
match n with
| O => O
| S n' => n'
end.
(** The second branch can be read: "if [n] has the form [S n']
for some [n'], then return [n']." *)
End NatPlayground.
Definition minustwo (n : nat) : nat :=
match n with
| O => O
| S O => O
| S (S n') => n'
end.
(** Because natural numbers are such a pervasive form of data,
Coq provides a tiny bit of built-in magic for parsing and printing
them: ordinary arabic numerals can be used as an alternative to
the "unary" notation defined by the constructors [S] and [O]. Coq
prints numbers in arabic form by default: *)
Check (S (S (S (S O)))).
(* ===> 4 : nat *)
Compute (minustwo 4).
(* ===> 2 : nat *)
(** The constructor [S] has the type [nat -> nat], just like the
functions [minustwo] and [pred]: *)
Check S.
Check pred.
Check minustwo.
(** These are all things that can be applied to a number to yield a
number. However, there is a fundamental difference between the
first one and the other two: functions like [pred] and [minustwo]
come with _computation rules_ -- e.g., the definition of [pred]
says that [pred 2] can be simplified to [1] -- while the
definition of [S] has no such behavior attached. Although it is
like a function in the sense that it can be applied to an
argument, it does not _do_ anything at all! It is just a way of
writing down numbers. (Think about standard arabic numerals: the
numeral [1] is not a computation; it's a piece of data. When we
write [111] to mean the number one hundred and eleven, we are
using [1], three times, to write down a concrete representation of
a number.)
For most function definitions over numbers, just pattern matching
is not enough: we also need recursion. For example, to check that
a number [n] is even, we may need to recursively check whether
[n-2] is even. To write such functions, we use the keyword
[Fixpoint]. *)
Fixpoint evenb (n:nat) : bool :=
match n with
| O => true
| S O => false
| S (S n') => evenb n'
end.
(** We can define [oddb] by a similar [Fixpoint] declaration, but here
is a simpler definition: *)
Definition oddb (n:nat) : bool := negb (evenb n).
Example test_oddb1: oddb 1 = true.
Proof. simpl. reflexivity. Qed.
Example test_oddb2: oddb 4 = false.
Proof. simpl. reflexivity. Qed.
(** (You will notice if you step through these proofs that
[simpl] actually has no effect on the goal -- all of the work is
done by [reflexivity]. We'll see more about why that is shortly.)
Naturally, we can also define multi-argument functions by
recursion. *)
Module NatPlayground2.
Fixpoint plus (n : nat) (m : nat) : nat :=
match n with
| O => m
| S n' => S (plus n' m)
end.
(** Adding three to two now gives us five, as we'd expect. *)
Compute (plus 3 2).
(** The simplification that Coq performs to reach this conclusion can
be visualized as follows: *)
(* [plus (S (S (S O))) (S (S O))]
==> [S (plus (S (S O)) (S (S O)))]
by the second clause of the [match]
==> [S (S (plus (S O) (S (S O))))]
by the second clause of the [match]
==> [S (S (S (plus O (S (S O)))))]
by the second clause of the [match]
==> [S (S (S (S (S O))))]
by the first clause of the [match]
*)
(** As a notational convenience, if two or more arguments have
the same type, they can be written together. In the following
definition, [(n m : nat)] means just the same as if we had written
[(n : nat) (m : nat)]. *)
Fixpoint mult (n m : nat) : nat :=
match n with
| O => O
| S n' => plus m (mult n' m)
end.
Example test_mult1: (mult 3 3) = 9.
Proof. simpl. reflexivity. Qed.
(** You can match two expressions at once by putting a comma
between them: *)
Fixpoint minus (n m:nat) : nat :=
match (n, m) with
| (O , _) => O
| (S _ , O) => n
| (S n', S m') => minus n' m'
end.
(** The _ in the first line is a _wildcard pattern_. Writing _ in a
pattern is the same as writing some variable that doesn't get used
on the right-hand side. This avoids the need to invent a variable
name. *)
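(** As a small illustration (this function is not part of the original
    text), here is a wildcard used to ignore the predecessor of a
    successor: *)
Definition iszero (n : nat) : bool :=
  match n with
  | O => true
  | S _ => false
  end.
Example test_iszero1: iszero 0 = true.
Proof. simpl. reflexivity. Qed.
Example test_iszero2: iszero 2 = false.
Proof. simpl. reflexivity. Qed.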
End NatPlayground2.
Fixpoint exp (base power : nat) : nat :=
match power with
| O => S O
| S p => mult base (exp base p)
end.
(** **** Exercise: 1 star (factorial) *)
(** Recall the standard mathematical factorial function:
factorial(0) = 1
factorial(n) = n * factorial(n-1) (if n>0)
Translate this into Coq. *)
Fixpoint factorial (n:nat) : nat :=
match n with
| O => S O
| S p => mult n (factorial p)
end.
Example test_factorial1: (factorial 3) = 6.
Proof. simpl. reflexivity. Qed.
Example test_factorial2: (factorial 5) = (mult 10 12).
Proof. simpl. reflexivity. Qed.
(** [] *)
(** We can make numerical expressions a little easier to read and
write by introducing _notations_ for addition, multiplication, and
subtraction. *)
Notation "x + y" := (plus x y)
(at level 50, left associativity)
: nat_scope.
Notation "x - y" := (minus x y)
(at level 50, left associativity)
: nat_scope.
Notation "x * y" := (mult x y)
(at level 40, left associativity)
: nat_scope.
Check ((0 + 1) + 1).
(** (The [level], [associativity], and [nat_scope] annotations
control how these notations are treated by Coq's parser. The
details are not important for our purposes, but interested readers
can refer to the optional "More on Notation" section at the end of
this chapter.)
Note that these do not change the definitions we've already made:
they are simply instructions to the Coq parser to accept [x + y]
in place of [plus x y] and, conversely, to the Coq pretty-printer
to display [plus x y] as [x + y]. *)
(** When we say that Coq comes with almost nothing built-in, we really
mean it: even equality testing for numbers is a user-defined
operation! We now define a function [beq_nat], which tests
[nat]ural numbers for [eq]uality, yielding a [b]oolean. Note the
use of nested [match]es (we could also have used a simultaneous
match, as we did in [minus].) *)
Fixpoint beq_nat (n m : nat) : bool :=
match n with
| O => match m with
| O => true
| S m' => false
end
| S n' => match m with
| O => false
| S m' => beq_nat n' m'
end
end.
(** The [leb] function tests whether its first argument is less than or
equal to its second argument, yielding a boolean. *)
Fixpoint leb (n m : nat) : bool :=
match n with
| O => true
| S n' =>
match m with
| O => false
| S m' => leb n' m'
end
end.
Example test_leb1: (leb 2 2) = true.
Proof. simpl. reflexivity. Qed.
Example test_leb2: (leb 2 4) = true.
Proof. simpl. reflexivity. Qed.
Example test_leb3: (leb 4 2) = false.
Proof. simpl. reflexivity. Qed.
(** **** Exercise: 1 star (blt_nat) *)
(** The [blt_nat] function tests [nat]ural numbers for [l]ess-[t]han,
yielding a [b]oolean. Instead of making up a new [Fixpoint] for
this one, define it in terms of a previously defined function. *)
Definition blt_nat (n m : nat) : bool :=
match (leb n m) with
| true => match (beq_nat n m) with
| true => false
| false => true
end
| false => false
end.
Example test_blt_nat1: (blt_nat 2 2) = false.
Proof. simpl. reflexivity. Qed.
Example test_blt_nat2: (blt_nat 2 4) = true.
Proof. simpl. reflexivity. Qed.
Example test_blt_nat3: (blt_nat 4 2) = false.
Proof. simpl. reflexivity. Qed.
(** [] *)
(* ################################################################# *)
(** * Proof by Simplification *)
(** Now that we've defined a few datatypes and functions, let's
turn to stating and proving properties of their behavior.
Actually, we've already started doing this: each [Example] in the
previous sections makes a precise claim about the behavior of some
function on some particular inputs. The proofs of these claims
were always the same: use [simpl] to simplify both sides of the
equation, then use [reflexivity] to check that both sides contain
identical values.
The same sort of "proof by simplification" can be used to prove
more interesting properties as well. For example, the fact that
[0] is a "neutral element" for [+] on the left can be proved just
by observing that [0 + n] reduces to [n] no matter what [n] is, a
fact that can be read directly off the definition of [plus].*)
Theorem plus_O_n : forall n : nat, 0 + n = n.
Proof.
intros n. simpl. reflexivity. Qed.
(** (You may notice that the above statement looks different in
the [.v] file in your IDE than it does in the HTML rendition in
your browser, if you are viewing both. In [.v] files, we write the
[forall] universal quantifier using the reserved identifier
"forall." When the [.v] files are converted to HTML, this gets
transformed into an upside-down-A symbol.)
This is a good place to mention that [reflexivity] is a bit
more powerful than we have admitted. In the examples we have seen,
the calls to [simpl] were actually not needed, because
[reflexivity] can perform some simplification automatically when
checking that two sides are equal; [simpl] was just added so that
we could see the intermediate state -- after simplification but
before finishing the proof. Here is a shorter proof of the
theorem: *)
Theorem plus_O_n' : forall n : nat, 0 + n = n.
Proof.
intros n. reflexivity. Qed.
(** Moreover, it will be useful later to know that [reflexivity]
does somewhat _more_ simplification than [simpl] does -- for
example, it tries "unfolding" defined terms, replacing them with
their right-hand sides. The reason for this difference is that,
if reflexivity succeeds, the whole goal is finished and we don't
need to look at whatever expanded expressions [reflexivity] has
created by all this simplification and unfolding; by contrast,
[simpl] is used in situations where we may have to read and
understand the new goal that it creates, so we would not want it
blindly expanding definitions and leaving the goal in a messy
state.
The form of the theorem we just stated and its proof are almost
exactly the same as the simpler examples we saw earlier; there are
just a few differences.
First, we've used the keyword [Theorem] instead of [Example].
This difference is mostly a matter of style; the keywords
[Example] and [Theorem] (and a few others, including [Lemma],
[Fact], and [Remark]) mean pretty much the same thing to Coq.
Second, we've added the quantifier [forall n:nat], so that our
theorem talks about _all_ natural numbers [n]. Informally, to
prove theorems of this form, we generally start by saying "Suppose
[n] is some number..." Formally, this is achieved in the proof by
[intros n], which moves [n] from the quantifier in the goal to a
_context_ of current assumptions.
The keywords [intros], [simpl], and [reflexivity] are examples of
_tactics_. A tactic is a command that is used between [Proof] and
[Qed] to guide the process of checking some claim we are making.
We will see several more tactics in the rest of this chapter and
yet more in future chapters.
Other similar theorems can be proved with the same pattern. *)
Theorem plus_1_l : forall n:nat, 1 + n = S n.
Proof.
intros n. reflexivity. Qed.
Theorem mult_0_l : forall n:nat, 0 * n = 0.
Proof.
intros n. reflexivity. Qed.
(** The [_l] suffix in the names of these theorems is
pronounced "on the left." *)
(** It is worth stepping through these proofs to observe how the
context and the goal change. You may want to add calls to [simpl]
before [reflexivity] to see the simplifications that Coq performs
on the terms before checking that they are equal.
Although simplification is powerful enough to prove some fairly
general facts, there are many statements that cannot be handled by
simplification alone. For instance, we cannot use it to prove
that [0] is also a neutral element for [+] _on the right_. *)
Theorem plus_n_O : forall n, n = n + 0.
Proof.
intros n. simpl. (* Doesn't do anything! *)
(** (Can you explain why this happens? Step through both proofs
with Coq and notice how the goal and context change.)
When stuck in the middle of a proof, we can use the [Abort]
command to give up on it for the moment. *)
Abort.
(** The next chapter will introduce _induction_, a powerful
technique that can be used for proving this goal. For the moment,
though, let's look at a few more simple tactics. *)
(* ################################################################# *)
(** * Proof by Rewriting *)
(** This theorem is a bit more interesting than the others we've
seen: *)
Theorem plus_id_example : forall n m:nat,
n = m ->
n + n = m + m.
(** Instead of making a universal claim about all numbers [n] and [m],
it talks about a more specialized property that only holds when [n
= m]. The arrow symbol is pronounced "implies."
As before, we need to be able to reason by assuming we are given such
numbers [n] and [m]. We also need to assume the hypothesis
[n = m]. The [intros] tactic will serve to move all three of these
from the goal into assumptions in the current context.
Since [n] and [m] are arbitrary numbers, we can't just use
simplification to prove this theorem. Instead, we prove it by
observing that, if we are assuming [n = m], then we can replace
[n] with [m] in the goal statement and obtain an equality with the
same expression on both sides. The tactic that tells Coq to
perform this replacement is called [rewrite]. *)
Proof.
(* move both quantifiers into the context: *)
intros n m.
(* move the hypothesis into the context: *)
intros H.
(* rewrite the goal using the hypothesis: *)
rewrite -> H.
reflexivity. Qed.
(** The first line of the proof moves the universally quantified
variables [n] and [m] into the context. The second moves the
hypothesis [n = m] into the context and gives it the name [H].
The third tells Coq to rewrite the current goal ([n + n = m + m])
by replacing the left side of the equality hypothesis [H] with the
right side.
(The arrow symbol in the [rewrite] has nothing to do with
implication: it tells Coq to apply the rewrite from left to right.
To rewrite from right to left, you can use [rewrite <-]. Try
making this change in the above proof and see what difference it
makes.) *)
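(** For example (this variant is not in the original text), the same
    theorem can be proved by rewriting from right to left, replacing [m]
    with [n] instead: *)
Theorem plus_id_example' : forall n m:nat,
  n = m ->
  n + n = m + m.
Proof.
  intros n m H.
  rewrite <- H.
  reflexivity. Qed.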
(** **** Exercise: 1 star (plus_id_exercise) *)
(** Remove "[Admitted.]" and fill in the proof. *)
Theorem plus_id_exercise : forall n m o : nat,
n = m -> m = o -> n + m = m + o.
Proof.
intros n m o.
intros H.
rewrite <- H.
intros H'.
rewrite -> H'.
reflexivity.
Qed.
(** The [Admitted] command tells Coq that we want to skip trying
to prove this theorem and just accept it as a given. This can be
useful for developing longer proofs, since we can state subsidiary
lemmas that we believe will be useful for making some larger
argument, use [Admitted] to accept them on faith for the moment,
and continue working on the main argument until we are sure it
makes sense; then we can go back and fill in the proofs we
skipped. Be careful, though: every time you say [Admitted] you
are leaving a door open for total nonsense to enter Coq's nice,
rigorous, formally checked world! *)
(** We can also use the [rewrite] tactic with a previously proved
theorem instead of a hypothesis from the context. If the statement
of the previously proved theorem involves quantified variables,
as in the example below, Coq tries to instantiate them
by matching with the current goal. *)
Theorem mult_0_plus : forall n m : nat,
(0 + n) * m = n * m.
Proof.
intros n m.
rewrite -> plus_O_n.
reflexivity. Qed.
(** **** Exercise: 2 stars (mult_S_1) *)
Theorem mult_S_1 : forall n m : nat,
m = S n ->
m * (1 + n) = m * m.
Proof.
intros n m.
intros H.
rewrite -> plus_1_l.
rewrite <- H.
reflexivity. Qed.
(* (N.b. This proof can actually be completed without using [rewrite],
but please do use [rewrite] for the sake of the exercise.) *)
(** [] *)
(* ################################################################# *)
(** * Proof by Case Analysis *)
(** Of course, not everything can be proved by simple
calculation and rewriting: In general, unknown, hypothetical
values (arbitrary numbers, booleans, lists, etc.) can block
simplification. For example, if we try to prove the following
fact using the [simpl] tactic as above, we get stuck. *)
Theorem plus_1_neq_0_firsttry : forall n : nat,
beq_nat (n + 1) 0 = false.
Proof.
intros n.
simpl. (* does nothing! *)
Abort.
(** The reason for this is that the definitions of both
[beq_nat] and [+] begin by performing a [match] on their first
argument. But here, the first argument to [+] is the unknown
number [n] and the argument to [beq_nat] is the compound
expression [n + 1]; neither can be simplified.
To make progress, we need to consider the possible forms of [n]
separately. If [n] is [O], then we can calculate the final result
of [beq_nat (n + 1) 0] and check that it is, indeed, [false]. And
if [n = S n'] for some [n'], then, although we don't know exactly
what number [n + 1] yields, we can calculate that, at least, it
will begin with one [S], and this is enough to calculate that,
again, [beq_nat (n + 1) 0] will yield [false].
The tactic that tells Coq to consider, separately, the cases where
[n = O] and where [n = S n'] is called [destruct]. *)
Theorem plus_1_neq_0 : forall n : nat,
beq_nat (n + 1) 0 = false.
Proof.
intros n. destruct n as [| n'].
- reflexivity.
- reflexivity. Qed.
(** The [destruct] generates _two_ subgoals, which we must then
prove, separately, in order to get Coq to accept the theorem. The
annotation "[as [| n']]" is called an _intro pattern_. It tells
Coq what variable names to introduce in each subgoal. In general,
what goes between the square brackets is a _list of lists_ of
names, separated by [|]. In this case, the first component is
empty, since the [O] constructor is nullary (it doesn't have any
arguments). The second component gives a single name, [n'], since
[S] is a unary constructor.
The [-] signs on the second and third lines are called _bullets_,
and they mark the parts of the proof that correspond to each
generated subgoal. The proof script that comes after a bullet is
the entire proof for a subgoal. In this example, each of the
subgoals is easily proved by a single use of [reflexivity], which
itself performs some simplification -- e.g., the first one
simplifies [beq_nat (S n' + 1) 0] to [false] by first rewriting
[(S n' + 1)] to [S (n' + 1)], then unfolding [beq_nat], and then
simplifying the [match].
Marking cases with bullets is entirely optional: if bullets are
not present, Coq simply asks you to prove each subgoal in
sequence, one at a time. But it is a good idea to use bullets.
For one thing, they make the structure of a proof apparent, making
it more readable. Also, bullets instruct Coq to ensure that a
subgoal is complete before trying to verify the next one,
preventing proofs for different subgoals from getting mixed
up. These issues become especially important in large
developments, where fragile proofs lead to long debugging
sessions.
There are no hard and fast rules for how proofs should be
formatted in Coq -- in particular, where lines should be broken
and how sections of the proof should be indented to indicate their
nested structure. However, if the places where multiple subgoals
are generated are marked with explicit bullets at the beginning of
lines, then the proof will be readable almost no matter what
choices are made about other aspects of layout.
This is also a good place to mention one other piece of somewhat
obvious advice about line lengths. Beginning Coq users sometimes
tend to the extremes, either writing each tactic on its own line
or writing entire proofs on one line. Good style lies somewhere
in the middle. One reasonable convention is to limit yourself to
80-character lines.
The [destruct] tactic can be used with any inductively defined
datatype. For example, we use it next to prove that boolean
negation is involutive -- i.e., that negation is its own
inverse. *)
Theorem negb_involutive : forall b : bool,
negb (negb b) = b.
Proof.
intros b. destruct b.
- reflexivity.
- reflexivity. Qed.
(** Note that the [destruct] here has no [as] clause because
none of the subcases of the [destruct] need to bind any variables,
so there is no need to specify any names. (We could also have
written [as [|]], or [as []].) In fact, we can omit the [as]
clause from _any_ [destruct] and Coq will fill in variable names
automatically. This is generally considered bad style, since Coq
often makes confusing choices of names when left to its own
devices.
It is sometimes useful to invoke [destruct] inside a subgoal,
generating yet more proof obligations. In this case, we use
different kinds of bullets to mark goals on different "levels."
For example: *)
Theorem andb_commutative : forall b c, andb b c = andb c b.
Proof.
intros b c. destruct b.
- destruct c.
+ reflexivity.
+ reflexivity.
- destruct c.
+ reflexivity.
+ reflexivity.
Qed.
(** Each pair of calls to [reflexivity] corresponds to the
subgoals that were generated after the execution of the [destruct c]
line right above it. *)
(** Besides [-] and [+], we can use [*] (asterisk) as a third kind of
bullet. We can also enclose sub-proofs in curly braces, which is
useful in case we ever encounter a proof that generates more than
three levels of subgoals: *)
Theorem andb_commutative' : forall b c, andb b c = andb c b.
Proof.
intros b c. destruct b.
{ destruct c.
{ reflexivity. }
{ reflexivity. } }
{ destruct c.
{ reflexivity. }
{ reflexivity. } }
Qed.
(** Since curly braces mark both the beginning and the end of a
proof, they can be used for multiple subgoal levels, as this
example shows. Furthermore, curly braces allow us to reuse the
same bullet shapes at multiple levels in a proof: *)
Theorem andb3_exchange :
forall b c d, andb (andb b c) d = andb (andb b d) c.
Proof.
intros b c d. destruct b.
- destruct c.
{ destruct d.
- reflexivity.
- reflexivity. }
{ destruct d.
- reflexivity.
- reflexivity. }
- destruct c.
{ destruct d.
- reflexivity.
- reflexivity. }
{ destruct d.
- reflexivity.
- reflexivity. }
Qed.
(** Before closing the chapter, let's mention one final
convenience. As you may have noticed, many proofs perform case
analysis on a variable right after introducing it:
intros x y. destruct y as [|y].
This pattern is so common that Coq provides a shorthand for it: we
can perform case analysis on a variable when introducing it by
using an intro pattern instead of a variable name. For instance,
here is a shorter proof of the [plus_1_neq_0] theorem above. *)
Theorem plus_1_neq_0' : forall n : nat,
beq_nat (n + 1) 0 = false.
Proof.
intros [|n].
- reflexivity.
- reflexivity. Qed.
(** If there are no arguments to name, we can just write [[]]. *)
Theorem andb_commutative'' :
forall b c, andb b c = andb c b.
Proof.
intros [] [].
- reflexivity.
- reflexivity.
- reflexivity.
- reflexivity.
Qed.
(** **** Exercise: 2 stars (andb_true_elim2) *)
(** Prove the following claim, marking cases (and subcases) with
bullets when you use [destruct]. *)
Theorem andb_true_elim2 : forall b c : bool,
andb b c = true -> c = true.
Proof.
intros [] [] [].
- reflexivity.
- reflexivity.
- reflexivity.
- reflexivity.
Qed.
(** **** Exercise: 1 star (zero_nbeq_plus_1) *)
Theorem zero_nbeq_plus_1 : forall n : nat,
beq_nat 0 (n + 1) = false.
Proof.
intros [|n].
- reflexivity.
- reflexivity.
Qed.
(** [] *)
(* ================================================================= *)
(** ** More on Notation (Optional) *)
(** (In general, sections marked Optional are not needed to follow the
rest of the book, except possibly other Optional sections. On a
first reading, you might want to skim these sections so that you
know what's there for future reference.)
Recall the notation definitions for infix plus and times: *)
Notation "x + y" := (plus x y)
(at level 50, left associativity)
: nat_scope.
Notation "x * y" := (mult x y)
(at level 40, left associativity)
: nat_scope.
(** For each notation symbol in Coq, we can specify its _precedence
level_ and its _associativity_. The precedence level [n] is
specified by writing [at level n]; this helps Coq parse compound
expressions. The associativity setting helps to disambiguate
expressions containing multiple occurrences of the same
symbol. For example, the parameters specified above for [+] and
[*] say that the expression [1+2*3*4] is shorthand for
[(1+((2*3)*4))]. Coq uses precedence levels from 0 to 100, and
_left_, _right_, or _no_ associativity. We will see more examples
of this later, e.g., in the [Lists]
chapter.
Each notation symbol is also associated with a _notation scope_.
Coq tries to guess what scope is meant from context, so when it
sees [S(O*O)] it guesses [nat_scope], but when it sees the
cartesian product (tuple) type [bool*bool] (which we'll see in
later chapters) it guesses [type_scope]. Occasionally, it is
necessary to help it out with percent-notation by writing
[(x*y)%nat], and sometimes in what Coq prints it will use [%nat]
to indicate what scope a notation is in.
Notation scopes also apply to numeral notation ([3], [4], [5],
etc.), so you may sometimes see [0%nat], which means [O] (the
natural number [0] that we're using in this chapter), or [0%Z],
which means the Integer zero (which comes from a different part of
the standard library).
Pro tip: Coq's notation mechanism is not especially powerful.
Don't expect too much from it! *)
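(** As a small illustration (an addition, not from the original text),
    the following [Check] commands show scope annotations in action.
    The [Z] example is left in a comment, since running it would
    require [Require Import ZArith]: *)
Check (S (O * O)).     (* parsed in [nat_scope] *)
Check 0%nat.           (* the natural number [O] *)
(* Check 0%Z.             the integer zero; needs [ZArith]. *)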
(* ================================================================= *)
(** ** Fixpoints and Structural Recursion (Optional) *)
(** Here is a copy of the definition of addition: *)
Fixpoint plus' (n : nat) (m : nat) : nat :=
match n with
| O => m
| S n' => S (plus' n' m)
end.
(** When Coq checks this definition, it notes that [plus'] is
"decreasing on 1st argument." What this means is that we are
performing a _structural recursion_ over the argument [n] -- i.e.,
that we make recursive calls only on strictly smaller values of
[n]. This implies that all calls to [plus'] will eventually
terminate. Coq demands that some argument of _every_ [Fixpoint]
definition is "decreasing."
This requirement is a fundamental feature of Coq's design: In
particular, it guarantees that every function that can be defined
in Coq will terminate on all inputs. However, because Coq's
"decreasing analysis" is not very sophisticated, it is sometimes
necessary to write functions in slightly unnatural ways. *)
(** **** Exercise: 2 stars, optional (decreasing) *)
(** To get a concrete sense of this, find a way to write a sensible
[Fixpoint] definition (of a simple function on numbers, say) that
_does_ terminate on all inputs, but that Coq will reject because
of this restriction. *)
(** Fixpoint sensible (n : nat) (m : nat) : nat :=
match n with
| O => m
| (S n') => match m with
| (S m') => (sensible n' m')
| O => (sensible m n)
end
end. *)
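(** A note on why Coq rejects the definition above even though it
    terminates: in the inner [O] branch, the recursive call
    [sensible m n] swaps the arguments, so neither the first nor the
    second argument is structurally smaller in _every_ recursive call,
    which is what the termination checker demands. (That call is
    harmless in practice: its first argument is [O], so it returns
    immediately through the outer [O] branch.) *)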
(** [] *)
(* ################################################################# *)
(** * More Exercises *)
(** **** Exercise: 2 stars (boolean_functions) *)
(** Use the tactics you have learned so far to prove the following
theorem about boolean functions. *)
Theorem identity_fn_applied_twice :
forall (f : bool -> bool),
(forall (x : bool), f x = x) ->
forall (b : bool), f (f b) = b.
Proof.
intros f. intros l. intros [].
- rewrite l. rewrite l. reflexivity.
- rewrite l. rewrite l. reflexivity.
Qed.
(** Now state and prove a theorem [negation_fn_applied_twice] similar
to the previous one but where the second hypothesis says that the
function [f] has the property that [f x = negb x].*)
Theorem negation_fn_applied_twice :
forall (f : bool -> bool),
(forall (x : bool), f x = negb x) ->
forall (b : bool), f (f b) = b.
Proof.
intros f. intros l. intros [].
- rewrite l. rewrite l. reflexivity.
- rewrite l. rewrite l. reflexivity.
Qed.
(** **** Exercise: 3 stars, optional (andb_eq_orb) *)
(** Prove the following theorem. (Hint: This one can be a bit tricky,
depending on how you approach it. You will probably need both
[destruct] and [rewrite], but destructing everything in sight is
not the best way.) *)
Lemma andb_negb : forall (b : bool), (andb b (negb b)) = false.
Proof.
intros [].
- reflexivity.
- reflexivity.
Qed.
Lemma orb_negb : forall (b : bool), (orb b (negb b)) = true.
Proof.
intros [].
- reflexivity.
- reflexivity.
Qed.
Theorem andb_eq_orb :
forall (b c : bool),
(andb b c = orb b c) ->
b = c.
Proof.
intros [] [].
- intros h. reflexivity.
- rewrite andb_negb. rewrite orb_negb. intros h. rewrite<-h. reflexivity.
- rewrite andb_negb. rewrite orb_negb. intros h. rewrite<-h. reflexivity.
- intros h. reflexivity.
Qed.
(** [] *)
(** **** Exercise: 3 stars (binary) *)
(** Consider a different, more efficient representation of natural
numbers using a binary rather than unary system. That is, instead
of saying that each natural number is either zero or the successor
of a natural number, we can say that each binary number is either
- zero,
- twice a binary number, or
- one more than twice a binary number.
(a) First, write an inductive definition of the type [bin]
corresponding to this description of binary numbers.
(Hint: Recall that the definition of [nat] above,
Inductive nat : Type := | O : nat | S : nat -> nat.
says nothing about what [O] and [S] "mean." It just says "[O] is
in the set called [nat], and if [n] is in the set then so is [S
n]." The interpretation of [O] as zero and [S] as successor/plus
one comes from the way that we _use_ [nat] values, by writing
functions to do things with them, proving things about them, and
so on. Your definition of [bin] should be correspondingly simple;
it is the functions you will write next that will give it
mathematical meaning.)
(b) Next, write an increment function [incr] for binary numbers,
and a function [bin_to_nat] to convert binary numbers to unary
numbers.
(c) Write five unit tests [test_bin_incr1], [test_bin_incr2], etc.
for your increment and binary-to-unary functions. (A "unit
test" in Coq is a specific [Example] that can be proved with
just [reflexivity], as we've done for several of our
definitions.) Notice that incrementing a binary number and
then converting it to unary should yield the same result as
first converting it to unary and then incrementing. *)
Inductive bin : Type :=
| B : bin
| T : bin -> bin
| P : bin -> bin.
Fixpoint incr (b : bin) : bin :=
match b with
| B => P B
| T b' => P b'
| P b' => T (incr b')
end.
Fixpoint bin_to_nat (b : bin) : nat :=
match b with
| B => 0
| T b' => mult (bin_to_nat b') 2
| P b' => S (mult (bin_to_nat b') 2)
end.
Example test_bin_nat_convert1: bin_to_nat B = 0.
Proof. simpl. reflexivity. Qed.
Example test_bin_nat_convert2: bin_to_nat (P (T (P B))) = 5.
Proof. simpl. reflexivity. Qed.
Example test_bin_nat_convert3: bin_to_nat (T (T (T (P B)))) = 8.
Proof. simpl. reflexivity. Qed.
Example test_bin_incr1: bin_to_nat (P (incr (P B))) = 5.
Proof. simpl. reflexivity. Qed.
Example test_bin_incr2: bin_to_nat (incr (P (incr (P B)))) =
2 * ((bin_to_nat (P B)) + 1) + 1 + 1.
Proof. simpl. reflexivity. Qed.
Example test_bin_incr3: bin_to_nat (T (P (incr (P B)))) =
bin_to_nat (T (incr (incr (incr (incr (incr (T B))))))).
Proof. simpl. reflexivity. Qed.
(** [] *)
(** $Date: 2017-08-24 17:13:02 -0400 (Thu, 24 Aug 2017) $ *)
|
function vl_setupnn()
%VL_SETUPNN Setup the MatConvNet toolbox.
% VL_SETUPNN() function adds the MatConvNet toolbox to MATLAB path.
% Copyright (C) 2014-15 Andrea Vedaldi.
% All rights reserved.
%
% This file is part of the VLFeat library and is made available under
% the terms of the BSD license (see the COPYING file).
root = vl_rootnn() ;
addpath(fullfile(root, 'matlab')) ;
addpath(fullfile(root, 'matlab', 'mex')) ;
addpath(fullfile(root, 'matlab', 'simplenn')) ;
addpath(fullfile(root, 'matlab', 'xtest')) ;
addpath(fullfile(root, 'examples')) ;
if ~exist('gather')
warning('The MATLAB Parallel Toolbox does not seem to be installed. Activating compatibility functions.') ;
addpath(fullfile(root, 'matlab', 'compatibility', 'parallel')) ;
end
if numel(dir(fullfile(root, 'matlab', 'mex', 'vl_nnconv.mex*'))) == 0
warning('MatConvNet is not compiled. Consider running `vl_compilenn`.');
end
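% Usage sketch (an addition, not part of the original file): MatConvNet's
% documented setup is to call this function once per MATLAB session,
% before compiling or using the toolbox, for example
%
%   run(fullfile('<matconvnet-root>', 'matlab', 'vl_setupnn.m')) ;
%
% where <matconvnet-root> stands for the toolbox's root directory.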
|
Require Import Contractive Sty Map Msg Tac Var.
Create HintDb tl discriminated.
CoInductive Tl :=
| Tl_unit : Tl
| Tl_send : Msg -> Tl -> Tl
| Tl_recv : Msg -> Tl -> Tl
| Tl_echoice : Tl -> Tl -> Tl
| Tl_ichoice : Tl -> Tl -> Tl
.
Hint Constructors Tl : tl.
Inductive Tl_bisim_gen Tl_bisim : Tl -> Tl -> Prop :=
| Tl_bisim_gen_unit :
Tl_bisim_gen Tl_bisim Tl_unit Tl_unit
| Tl_bisim_gen_send :
forall l l' B,
Tl_bisim l l' ->
Tl_bisim_gen Tl_bisim (Tl_send B l) (Tl_send B l')
| Tl_bisim_gen_recv :
forall l l' B,
Tl_bisim l l' ->
Tl_bisim_gen Tl_bisim (Tl_recv B l) (Tl_recv B l')
| Tl_bisim_gen_echoice :
forall l1 l1' l2 l2',
Tl_bisim l1 l1' ->
Tl_bisim l2 l2' ->
Tl_bisim_gen Tl_bisim (Tl_echoice l1 l2) (Tl_echoice l1' l2')
| Tl_bisim_gen_ichoice :
forall l1 l1' l2 l2',
Tl_bisim l1 l1' ->
Tl_bisim l2 l2' ->
Tl_bisim_gen Tl_bisim (Tl_ichoice l1 l2) (Tl_ichoice l1' l2')
.
Hint Constructors Tl_bisim_gen : tl.
CoInductive Tl_bisim : Tl -> Tl -> Prop :=
| Tl_bisim_fold : forall l l', Tl_bisim_gen Tl_bisim l l' -> Tl_bisim l l'.
Hint Constructors Tl_bisim.
Lemma Tl_bisim_coind : forall R,
(forall l l', R l l' -> Tl_bisim_gen R l l')
-> forall l l', R l l' -> Tl_bisim l l'.
Proof. cofix CIH. introv H HR. apply H in HR. inverts HR; eauto with tl. Qed.
(*
The following lemmas are used in the definition of tl below. We introduce
them in order to prevent the unfolding of contractiveness proofs used in tl,
which would otherwise lead to large and confusing goals.
*)
Ltac Contractive_inv :=
let H := fresh in
intro H; inversion H; auto
.
Lemma Contractive_inv_send {S B} :
Contractive (send B S) -> Contractive S.
Proof. Contractive_inv. Qed.
Lemma Contractive_inv_recv {S B} :
Contractive (recv B S) -> Contractive S.
Proof. Contractive_inv. Qed.
Lemma Contractive_inv_echoice1 {S1 S2} :
Contractive (echoice S1 S2) -> Contractive S1.
Proof. Contractive_inv. Qed.
Lemma Contractive_inv_echoice2 {S1 S2} :
Contractive (echoice S1 S2) -> Contractive S2.
Proof. Contractive_inv. Qed.
Lemma Contractive_inv_ichoice1 {S1 S2} :
Contractive (ichoice S1 S2) -> Contractive S1.
Proof. Contractive_inv. Qed.
Lemma Contractive_inv_ichoice2 {S1 S2} :
Contractive (ichoice S1 S2) -> Contractive S2.
Proof. Contractive_inv. Qed.
Lemma Contractive_inv_mu {S X} :
Contractive (mu X S) -> Contractive S.
Proof. Contractive_inv. Qed.
(*
The following two axioms are used exclusively in the definition of tl below. We
justify their use here:
tl_fix is used to circumvent the productivity checker, which doesn't consider
tl productive due to the case for (mu X S). This is correct insofar as tl is
only productive if the input S is contractive, therefore we require a proof
of contractiveness for every application of tl.
With S contractive, productivity follows:
For S in {unit, send _ _, recv _ _, echoice _ _, ichoice _ _}, we produce a
constructor before recursing into tl.
For S = var _, we don't recurse at all.
For S = mu X S', S' <> var _ due to S contractive. Thus, the recursive call
(tl _ S' _) produces at least one constructor before potentially recursing
further.
Thus, the use of tl_fix is safe. tl_fix_fix merely stipulates that tl_fix
behaves like a fixpoint combinator.
*)
Axiom tl_fix : (Tl -> Tl) -> Tl.
Axiom tl_fix_fix :
forall F,
tl_fix F = F (tl_fix F).
Fixpoint tl (eta : Var -> Tl) (S : Sty) (Scontr : Contractive S) : Tl :=
match S return Contractive S -> Tl with
| unit => fun _ =>
Tl_unit
| send B S' => fun c =>
Tl_send B (tl eta S' (Contractive_inv_send c))
| recv B S' => fun c =>
Tl_recv B (tl eta S' (Contractive_inv_recv c))
| echoice S1 S2 => fun c =>
Tl_echoice
(tl eta S1 (Contractive_inv_echoice1 c))
(tl eta S2 (Contractive_inv_echoice2 c))
| ichoice S1 S2 => fun c =>
Tl_ichoice
(tl eta S1 (Contractive_inv_ichoice1 c))
(tl eta S2 (Contractive_inv_ichoice2 c))
| mu X S' => fun c =>
tl_fix (fun lx => tl (eta_override eta X lx) S' (Contractive_inv_mu c))
| var X => fun _ =>
eta X
end
Scontr
.
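(*
  Illustration (an addition, not part of the original development): by
  tl_fix_fix, tl unfolds one step on a mu binder. Up to the particular
  contractiveness proofs involved, we have

    tl eta (mu X S') c
      = tl (eta_override eta X (tl eta (mu X S') c)) S' (Contractive_inv_mu c)

  which makes the productivity argument above concrete: the recursive
  occurrence of tl is applied to the body S', and since S' cannot be a
  variable when (mu X S') is contractive, that call produces a
  constructor before any further unfolding of tl_fix is needed.
*)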
|
Nouveau Newfoundland: Breathing life into our traditional cuisine.
“The cod cheek is a beautiful piece of the fish. I like to cook with all parts of the fish or the animal,” says Chef Jeremy Charles, co-owner of Raymonds Restaurant.
And so begins your food journey in St. John’s. It’s an exploration of centuries-old, locally-farmed and harvested ingredients that are now being creatively reinvented with pride and a sense of place.
As national and international food writers have attested with exuberance, the food scene in St. John’s is a beacon for food lovers, and we have the delicacies of the land and sea, and the talented chefs and kitchen brigades to thank for this.
We asked some of our renowned chefs to explore the reasons the food culture is an integral part of the Newfoundland experience, and how local ingredients are being celebrated in the dishes they create.
As thousands of coastal communities were only accessible by boat until 60 years ago (some still are), the fishery was the main source of income for most communities. Families used what was on hand. Root vegetables grow readily here; wild berries are plentiful, as are rabbit, moose, partridge, seabird, and, of course, fish. And when we say fish, we really mean cod. Cod was king, is king. Ask Chef Todd Perrin of Mallard Cottage what is the quintessential meal that represents Newfoundland and he will tell you cod.
Imagine a recipe that has been handed down from generation to generation. A yellowing page in a recipe book, dog-eared and perfectly stained with buttered thumb prints. This depicts the food scene for generations. It was familial, traditional, and unaltered.
Roary’s pick is boiled dinner or Jiggs’ Dinner (root vegetables and salt meat), in addition to his grandmother’s bread and the pure joy of eating the whole, warm loaf in one sitting.
Todd’s fond memory is that food brought people together. The ethos of the Newfoundland culture, the conviviality of the Newfoundlander, the infamous kitchen party all spawn from the gathering of family and friends at the dinner table.
While the new menus range from eclectic sophistication to rustic fare, the simple, staple ingredients of our culinary past are still the stars of this new culinary renaissance. They are just celebrated differently.
Equally exciting for the chefs is experimenting with ingredients that were once overlooked; in some cases, ingredients that were literally walked over.
Jeremy was a city boy who had the pleasure of growing up in rural Newfoundland at his grandparents’ house in the summer. His hands-on experience of pulling vegetables from the garden, catching rabbits, hauling traps and cleaning fish right on the wharf have served him well. It rooted in him a sincere appreciation for the ingredients that the sea and land provide.
“We take these beautiful ingredients and use them as background notes in a dish or they may be the star of the plate at Raymonds,” says Jeremy.
Accompanying a piece of skin-on-cod you may find some sea grass, wild flowers, and kelp. Items which normally provide nutrients to the soil or feed for the sheep are being served in ways that were never considered before.
Five years ago, you wouldn’t have found moose carpaccio or seal sliders on any menu. Having wild game readily available and having the ability to serve it commercially is certainly one thing that sets the destination apart from the rest of North America.
Knowing where your food comes from is playing an important role for many in choosing where and what to eat. Our chefs are working one-on-one with the purveyors of food.
Farmers on the west coast of the island are experimenting with Romanesco broccoli. Apiaries are growing and producing wildflower honey from the healthy bee population. Sea buckthorn berries are becoming more readily available.
Harvesting from the land and the sea, our ‘wild’ cuisine creates an uncommon and transformative dining experience that our chefs are excited to share. |
SUBROUTINE YMEAN (
*
* inputs
*
: DATA, NS, MASK, WIDTH,
*
* outputs
*
: RESULT, ISTAT)
*
* Module number:
*
* Module name:
*
* Keyphrase:
* ----------
* moving average with mask
*
* Description:
* ------------
* Output pixel value is the average of pixel values of WIDTH (must be an odd
* number) points of the input data array at the corresponding position.
* If any of the WIDTH points is masked or outside the array, the averaging
* only includes symmetric points on both sides of the pixel point before
* reaching the invalid point(s). For example, if WIDTH = 5, and if
* pixel 6 is masked, then the output pixel value of the 4th point is the
* average of input points number 3, 4, and 5.
*
* FORTRAN name: YMEAN.FOR
*
* Keywords of accessed files and tables:
* --------------------------------------
* Name I/O Description / Comments
*
* Subroutines Called:
* -------------------
* CDBS:
* None
* SDAS:
* UMSPUT
* Others:
* None
*
* History:
* --------
* Version Date Author Description
* 1 06-13-89 J.-C. HSU coding
* 1.1 23-Apr-93 H. Bushouse Declare passed arrays as (*), not (1)
*
*-------------------------------------------------------------------------------
*
*== input:
* --input data array
REAL DATA(*)
* --input array size
INTEGER NS,
* --input mask array
: MASK(*),
* --mean filter width
: WIDTH
*
*== output:
* --output array
REAL RESULT(*)
* --error status
INTEGER ISTAT
*
*== local:
* --error message
CHARACTER*130 CONTXT, MESS
REAL SUM
INTEGER I, J, K, NPTS, STATOK
*
*=========================begin hsp.inc=========================================
* --status return code
INTEGER OK, ERRNUM(20)
INTEGER DEST, PRIO
DATA OK /0/
DATA ERRNUM /701, 702, 703, 704, 705, 706, 707, 708, 709, 710,
: 711, 712, 713, 714, 715, 716, 717, 718, 719, 720/
* --message destination and priority
DATA DEST, PRIO /5, 0/
*=========================end hsp.inc===========================================
*------------------------------------------------------------------------------
*
* initialize the output result array by copying input array to it
*
DO 10 I = 1, NS
RESULT(I) = DATA(I)
10 CONTINUE
*
* if WIDTH is an even number, issue error and exit
*
IF ((WIDTH/2)*2 .EQ. WIDTH) THEN
ISTAT = ERRNUM(1)
CONTXT = 'even number is specified for average width'
GO TO 999
END IF
*
* proceed only if WIDTH is larger than 1
*
IF (WIDTH .GT. 1) THEN
DO 60 I = 1, NS
*
* continue only if the mask is OK (=0)
*
IF (MASK(I) .EQ. 0) THEN
DO 20 J = 1, WIDTH/2
*
* decide how many points to be used in the moving average
* first, for the end points
*
IF ((I-J) .LT. 1 .OR. (I+J) .GT. NS .OR.
*
* second, for the points whose adjacent points are masked out
*
: MASK(I-J) .NE. 0 .OR. MASK(I+J) .NE. 0)
: GO TO 30
20 CONTINUE
NPTS = WIDTH / 2
GO TO 40
30 NPTS = J - 1
40 SUM = 0.
DO 50 K = I-NPTS, I+NPTS
SUM = DATA(K) + SUM
50 CONTINUE
RESULT(I) = SUM / REAL(2*NPTS+1)
END IF
60 CONTINUE
END IF
*
ISTAT = OK
GO TO 1000
*
999 MESS = 'YMEAN: ' // CONTXT
CALL YMSPUT (MESS, DEST, PRIO, STATOK)
*
1000 RETURN
END
|
% Update the heading to match your theme
\section{XYZ Challenges}
\label{challenges}
Begin the section with a one paragraph overview
of the section. Outline what the challenges are that
you are going to discuss. You can start the paragraph
with something like "Although XYZ could solve world
hunger, there are a number of challenges to developing
an XYZ."
\subsection{Challenge 1: Something is Hard, Complex, etc.}
\label{challenge1}
You should start out with a one paragraph description of
the challenge. Make sure that you immediately relate the
challenge back to the theme of the paper. Do not introduce
new terminology that you have not previously defined. Make
sure that your description of the challenge is high-level
and clear.
The second paragraph of the challenge should explain how
the challenge concretely manifests in the motivating example.
The first paragraph generally describes the challenge. This
paragraph is showing a specific example of the challenge in
the context of your motivating example. Be very specific
so that the reader understands all of the details. End
the paragraph with a sentence similar to the following:
% Make sure the ref points to a specific subsection in
% the solution section.
Section~\ref{solution} describes how we address this
challenge by QRS.
\subsection{Challenge 2: Something Else is an Issue}
\label{challenge2}
You should start out with a one paragraph description of
the challenge. Make sure that you immediately relate the
challenge back to the theme of the paper. Do not introduce
new terminology that you have not previously defined. Make
sure that your description of the challenge is high-level
and clear.
The second paragraph of the challenge should explain how
the challenge concretely manifests in the motivating example.
The first paragraph generally describes the challenge. This
paragraph is showing a specific example of the challenge in
the context of your motivating example. Be very specific
so that the reader understands all of the details. End
the paragraph with a sentence similar to the following:
% Make sure the ref points to a specific subsection in
% the solution section.
Section~\ref{solution} describes how we address this
challenge by QRS.
\subsection{Challenge 3: Another Painful Issue}
\label{challenge3}
You should start out with a one paragraph description of
the challenge. Make sure that you immediately relate the
challenge back to the theme of the paper. Do not introduce
new terminology that you have not previously defined. Make
sure that your description of the challenge is high-level
and clear.
The second paragraph of the challenge should explain how
the challenge concretely manifests in the motivating example.
The first paragraph generally describes the challenge. This
paragraph is showing a specific example of the challenge in
the context of your motivating example. Be very specific
so that the reader understands all of the details. End
the paragraph with a sentence similar to the following:
% Make sure the ref points to a specific subsection in
% the solution section.
Section~\ref{solution} describes how we address this
challenge by QRS.
|
#define BOOST_TEST_MODULE fnmatch
#include <boost/test/unit_test.hpp>
#include <bunsan/fnmatch.hpp>
BOOST_AUTO_TEST_SUITE(fnmatch)
BOOST_AUTO_TEST_CASE(translate_to_regex) {
struct str_test_t {
const char *input;
const char *result;
} test_strings[] = {
{"[ab]cd?efg*", "[ab]cd.efg.*"},
{"[iI][!^]abc[", "[iI][^^]abc\\["},
{"[]abc", "\\[\\]abc"},
{"a[]]b", "a[]]b"},
{" [!]abc", "\\ \\[\\!\\]abc"},
{"*g*", ".*g.*"},
{"[ ][^abc][!abc][*.{}][\\[\\]\\]]",
"[ ][\\^abc][^abc][*.{}][\\\\[\\\\]\\\\\\]\\]"},
{"\\*", "\\\\.*"},
{"???abc", "...abc"},
{"[efghu", "\\[efghu"},
};
for (const auto &test : test_strings) {
const std::string result =
bunsan::fnmatcher::translate_to_regex(test.input);
BOOST_CHECK_EQUAL(result, test.result);
}
}
BOOST_AUTO_TEST_CASE(fnmatch) {
BOOST_CHECK(bunsan::fnmatch("", ""));
BOOST_CHECK(bunsan::fnmatch("*", ""));
BOOST_CHECK(bunsan::fnmatch("*", "abc"));
BOOST_CHECK(bunsan::fnmatch("abc", "abc"));
BOOST_CHECK(!bunsan::fnmatch("", "abc"));
BOOST_CHECK(bunsan::fnmatch("?bc", "abc"));
BOOST_CHECK(bunsan::fnmatch("a*c", "ac"));
BOOST_CHECK(bunsan::fnmatch("a*c", "abc"));
BOOST_CHECK(bunsan::fnmatch("a*c", "abbc"));
BOOST_CHECK(bunsan::fnmatch("a[bcd]e", "abe"));
BOOST_CHECK(bunsan::fnmatch("a[bcd]e", "ace"));
BOOST_CHECK(bunsan::fnmatch("a[bcd]e", "ade"));
BOOST_CHECK(!bunsan::fnmatch("a[bcd]e", "afe"));
BOOST_CHECK(bunsan::fnmatch("a[]]b", "a]b"));
}
BOOST_AUTO_TEST_CASE(border) {
BOOST_CHECK(bunsan::fnmatch("hello", "hello"));
BOOST_CHECK(!bunsan::fnmatch("hello", "hello world"));
BOOST_CHECK(!bunsan::fnmatch("world", "hello world"));
}
BOOST_AUTO_TEST_CASE(fnmatch_icase) {
BOOST_CHECK(!bunsan::fnmatch("abc", "aBc", bunsan::fnmatcher::defaults));
BOOST_CHECK(bunsan::fnmatch("abc", "aBc", bunsan::fnmatcher::icase));
}
BOOST_AUTO_TEST_SUITE_END() // fnmatch
|
Wrapped in Red became a commercial success in the United States. Prior to its release, commercial music analysts predicted that the album would likely sell at least 60,000 copies in its first week of release in the region, and foresaw it as the front-runner to be the bestselling holiday release of the season. On the week ending November 16, 2013, it debuted on the Billboard 200 chart at number 3 with 70,000 copies sold across all retailers, a 93,000-copy decrease from Stronger's first-week sales of 163,000 copies in 2011. Nielsen Music analyst Dave Bakula attributed its low performance to the falling market share of holiday music in general, which saw a 3.8 percent decrease in 2012. The album's chart debut on the Billboard 200 earned Clarkson her sixth consecutive top-three studio album as well as the highest debut for a Christmas record by a female artist since Susan Boyle's first Christmas album The Gift debuted at the top of the chart in 2010. Wrapped in Red also debuted on three other charts, most notably at the top of the Billboard Top Holiday Albums chart. On the week ending November 30, 2013, by charting at number six on the Billboard 200, the album became the lone Sony release inside the chart's top ten, with the others being Universal Music Group releases.
|
import pandas as pd
import numpy as np
import xgboost_decision_tree as xg_dt  # local helper module, not shown in this snippet

# Load the pre-cleaned training and test sets (Titanic-style survival data).
train = pd.read_csv('train_cleaned.csv')
test_data = pd.read_csv('test_cleaned.csv')

# Label to predict and the feature columns used to build the training matrix.
target = train['Survived'].values
feature_values = ['Pclass', 'Embarked', 'Fare', 'Age', 'Sex', 'Deck_Index', 'Title_Index', 'Age_Class', 'Parch', 'SibSp']
train_features = train[feature_values].values |
State Before: α : Type ?u.839566
β : Type ?u.839569
a b c d : ℝ≥0∞
r p q : ℝ≥0
ι : Sort ?u.839586
f g : ι → ℝ≥0∞
s : Set ℝ≥0∞
hs : ∀ (r : ℝ≥0∞), r ∈ s → r ≠ ⊤
⊢ ENNReal.toNNReal (sInf s) = sInf (ENNReal.toNNReal '' s) State After: α : Type ?u.839566
β : Type ?u.839569
a b c d : ℝ≥0∞
r p q : ℝ≥0
ι : Sort ?u.839586
f g : ι → ℝ≥0∞
s : Set ℝ≥0∞
hs : ∀ (r : ℝ≥0∞), r ∈ s → r ≠ ⊤
hf : ∀ (i : { x // x ∈ s }), ↑i ≠ ⊤
⊢ ENNReal.toNNReal (sInf s) = sInf (ENNReal.toNNReal '' s) Tactic: have hf : ∀ i, ((↑) : s → ℝ≥0∞) i ≠ ∞ := fun ⟨r, rs⟩ => hs r rs State Before: α : Type ?u.839566
β : Type ?u.839569
a b c d : ℝ≥0∞
r p q : ℝ≥0
ι : Sort ?u.839586
f g : ι → ℝ≥0∞
s : Set ℝ≥0∞
hs : ∀ (r : ℝ≥0∞), r ∈ s → r ≠ ⊤
hf : ∀ (i : { x // x ∈ s }), ↑i ≠ ⊤
⊢ ENNReal.toNNReal (sInf s) = sInf (ENNReal.toNNReal '' s) State After: no goals Tactic: simpa only [← sInf_range, ← image_eq_range, Subtype.range_coe_subtype] using (toNNReal_iInf hf) |
classdef MimBrushSizeSlider < MimGuiPluginSlider
% MimBrushSizeSlider. Gui Plugin for changing the size of the edit brush
%
% You should not use this class within your own code. It is intended to
% be used by the gui of the TD MIM Toolkit.
%
%
% Licence
% -------
% Part of the TD MIM Toolkit. https://github.com/tomdoel
% Author: Tom Doel, Copyright Tom Doel 2014. www.tomdoel.com
% Distributed under the MIT licence. Please see website for details.
%
properties
ButtonText = 'Brush size'
SelectedText = 'Brush size'
ToolTip = 'Change the size of the editing paint brush'
Category = 'Paint'
Visibility = 'Dataset'
Mode = 'Edit'
HidePluginInDisplay = false
PTKVersion = '1'
ButtonWidth = 6
ButtonHeight = 1
Location = 42
MinValue = 0
MaxValue = 100
SmallStep = 0.01
LargeStep = 0.1
DefaultValue = 50
EditBoxPosition = 90
EditBoxWidth = 30
StackVertically = false
end
methods (Static)
function RunGuiPlugin(gui_app)
end
function enabled = IsEnabled(gui_app)
enabled = gui_app.IsDatasetLoaded && gui_app.ImagePanel.OverlayImage.ImageExists && ...
isequal(gui_app.ImagePanel.SelectedControl, 'Paint');
end
function is_selected = IsSelected(gui_app)
is_selected = true;
end
function [value_instance_handle, value_property_name, limits_instance_handle, limits_property_name] = GetHandleAndProperty(gui_app)
value_instance_handle = gui_app.ImagePanel;
value_property_name = 'PaintBrushSize';
limits_instance_handle = [];
limits_property_name = [];
end
end
end |
The polynomial $[1]$ is equal to $1$. |
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
⊢ Irreducible (factor f)
[PROOFSTEP]
rw [factor]
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
⊢ Irreducible (if H : ∃ g, Irreducible g ∧ g ∣ f then Classical.choose H else X)
[PROOFSTEP]
split_ifs with H
[GOAL]
case pos
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
H : ∃ g, Irreducible g ∧ g ∣ f
⊢ Irreducible (Classical.choose H)
[PROOFSTEP]
exact (Classical.choose_spec H).1
[GOAL]
case neg
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
H : ¬∃ g, Irreducible g ∧ g ∣ f
⊢ Irreducible X
[PROOFSTEP]
exact irreducible_X
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
hf1 : ¬IsUnit f
⊢ factor f ∣ f
[PROOFSTEP]
by_cases hf2 : f = 0
[GOAL]
case pos
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
hf1 : ¬IsUnit f
hf2 : f = 0
⊢ factor f ∣ f
[PROOFSTEP]
rw [hf2]
[GOAL]
case pos
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
hf1 : ¬IsUnit f
hf2 : f = 0
⊢ factor 0 ∣ 0
[PROOFSTEP]
exact dvd_zero _
[GOAL]
case neg
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
hf1 : ¬IsUnit f
hf2 : ¬f = 0
⊢ factor f ∣ f
[PROOFSTEP]
rw [factor, dif_pos (WfDvdMonoid.exists_irreducible_factor hf1 hf2)]
[GOAL]
case neg
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
hf1 : ¬IsUnit f
hf2 : ¬f = 0
⊢ Classical.choose (_ : ∃ i, Irreducible i ∧ i ∣ f) ∣ f
[PROOFSTEP]
exact (Classical.choose_spec <| WfDvdMonoid.exists_irreducible_factor hf1 hf2).2
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
hf : natDegree f ≠ 0
⊢ (X - ↑C (AdjoinRoot.root (factor f))) * removeFactor f = map (AdjoinRoot.of (factor f)) f
[PROOFSTEP]
let ⟨g, hg⟩ := factor_dvd_of_natDegree_ne_zero hf
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
hf : natDegree f ≠ 0
g : K[X]
hg : f = factor f * g
⊢ (X - ↑C (AdjoinRoot.root (factor f))) * removeFactor f = map (AdjoinRoot.of (factor f)) f
[PROOFSTEP]
apply (mul_divByMonic_eq_iff_isRoot (R := AdjoinRoot f.factor) (a := AdjoinRoot.root f.factor)).mpr
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
hf : natDegree f ≠ 0
g : K[X]
hg : f = factor f * g
⊢ IsRoot (map (AdjoinRoot.of (factor f)) f) (AdjoinRoot.root (factor f))
[PROOFSTEP]
rw [IsRoot.def, eval_map, hg, eval₂_mul, ← hg, AdjoinRoot.eval₂_root, zero_mul]
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
⊢ natDegree (removeFactor f) = natDegree f - 1
[PROOFSTEP]
rw [removeFactor, natDegree_divByMonic (map (AdjoinRoot.of f.factor) f) (monic_X_sub_C _), natDegree_map,
natDegree_X_sub_C]
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
n : ℕ
hfn : natDegree f = n + 1
⊢ natDegree (removeFactor f) = n
[PROOFSTEP]
rw [natDegree_removeFactor, hfn, n.add_sub_cancel]
[GOAL]
F : Type u
K✝¹ : Type v
L : Type w
inst✝³ : Field K✝¹
inst✝² : Field L
inst✝¹ : Field F
n✝ : ℕ
K✝ : Type u
inst✝ : Field K✝
n : ℕ
ih :
(fun n =>
∀ {K : Type u} [inst : Field K] (f : K[X]), natDegree f = n → Splits (algebraMap K (SplittingFieldAux n f)) f)
n
K : Type u
x✝ : Field K
f : K[X]
hf : natDegree f = Nat.succ n
⊢ Splits (algebraMap K (SplittingFieldAux (Nat.succ n) f)) f
[PROOFSTEP]
rw [← splits_id_iff_splits, algebraMap_succ, ← map_map, splits_id_iff_splits, ←
X_sub_C_mul_removeFactor f fun h => by rw [h] at hf ; cases hf]
[GOAL]
F : Type u
K✝¹ : Type v
L : Type w
inst✝³ : Field K✝¹
inst✝² : Field L
inst✝¹ : Field F
n✝ : ℕ
K✝ : Type u
inst✝ : Field K✝
n : ℕ
ih :
(fun n =>
∀ {K : Type u} [inst : Field K] (f : K[X]), natDegree f = n → Splits (algebraMap K (SplittingFieldAux n f)) f)
n
K : Type u
x✝ : Field K
f : K[X]
hf : natDegree f = Nat.succ n
h : natDegree f = 0
⊢ False
[PROOFSTEP]
rw [h] at hf
[GOAL]
F : Type u
K✝¹ : Type v
L : Type w
inst✝³ : Field K✝¹
inst✝² : Field L
inst✝¹ : Field F
n✝ : ℕ
K✝ : Type u
inst✝ : Field K✝
n : ℕ
ih :
(fun n =>
∀ {K : Type u} [inst : Field K] (f : K[X]), natDegree f = n → Splits (algebraMap K (SplittingFieldAux n f)) f)
n
K : Type u
x✝ : Field K
f : K[X]
hf : 0 = Nat.succ n
h : natDegree f = 0
⊢ False
[PROOFSTEP]
cases hf
[GOAL]
F : Type u
K✝¹ : Type v
L : Type w
inst✝³ : Field K✝¹
inst✝² : Field L
inst✝¹ : Field F
n✝ : ℕ
K✝ : Type u
inst✝ : Field K✝
n : ℕ
ih :
(fun n =>
∀ {K : Type u} [inst : Field K] (f : K[X]), natDegree f = n → Splits (algebraMap K (SplittingFieldAux n f)) f)
n
K : Type u
x✝ : Field K
f : K[X]
hf : natDegree f = Nat.succ n
⊢ Splits (algebraMap (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f)))
((X - ↑C (AdjoinRoot.root (factor f))) * removeFactor f)
[PROOFSTEP]
exact splits_mul _ (splits_X_sub_C _) (ih _ (natDegree_removeFactor' hf))
[GOAL]
F : Type u
K✝¹ : Type v
L : Type w
inst✝³ : Field K✝¹
inst✝² : Field L
inst✝¹ : Field F
n✝ : ℕ
K✝ : Type u
inst✝ : Field K✝
n : ℕ
ih :
(fun n =>
∀ {K : Type u} [inst : Field K] (f : K[X]),
natDegree f = n →
Algebra.adjoin K ↑(Multiset.toFinset (roots (map (algebraMap K (SplittingFieldAux n f)) f))) = ⊤)
n
K : Type u
x✝ : Field K
f : K[X]
hfn : natDegree f = Nat.succ n
⊢ Algebra.adjoin K ↑(Multiset.toFinset (roots (map (algebraMap K (SplittingFieldAux (Nat.succ n) f)) f))) = ⊤
[PROOFSTEP]
have hndf : f.natDegree ≠ 0 := by intro h; rw [h] at hfn ; cases hfn
[GOAL]
F : Type u
K✝¹ : Type v
L : Type w
inst✝³ : Field K✝¹
inst✝² : Field L
inst✝¹ : Field F
n✝ : ℕ
K✝ : Type u
inst✝ : Field K✝
n : ℕ
ih :
(fun n =>
∀ {K : Type u} [inst : Field K] (f : K[X]),
natDegree f = n →
Algebra.adjoin K ↑(Multiset.toFinset (roots (map (algebraMap K (SplittingFieldAux n f)) f))) = ⊤)
n
K : Type u
x✝ : Field K
f : K[X]
hfn : natDegree f = Nat.succ n
⊢ natDegree f ≠ 0
[PROOFSTEP]
intro h
[GOAL]
F : Type u
K✝¹ : Type v
L : Type w
inst✝³ : Field K✝¹
inst✝² : Field L
inst✝¹ : Field F
n✝ : ℕ
K✝ : Type u
inst✝ : Field K✝
n : ℕ
ih :
(fun n =>
∀ {K : Type u} [inst : Field K] (f : K[X]),
natDegree f = n →
Algebra.adjoin K ↑(Multiset.toFinset (roots (map (algebraMap K (SplittingFieldAux n f)) f))) = ⊤)
n
K : Type u
x✝ : Field K
f : K[X]
hfn : natDegree f = Nat.succ n
h : natDegree f = 0
⊢ False
[PROOFSTEP]
rw [h] at hfn
[GOAL]
F : Type u
K✝¹ : Type v
L : Type w
inst✝³ : Field K✝¹
inst✝² : Field L
inst✝¹ : Field F
n✝ : ℕ
K✝ : Type u
inst✝ : Field K✝
n : ℕ
ih :
(fun n =>
∀ {K : Type u} [inst : Field K] (f : K[X]),
natDegree f = n →
Algebra.adjoin K ↑(Multiset.toFinset (roots (map (algebraMap K (SplittingFieldAux n f)) f))) = ⊤)
n
K : Type u
x✝ : Field K
f : K[X]
hfn : 0 = Nat.succ n
h : natDegree f = 0
⊢ False
[PROOFSTEP]
cases hfn
[GOAL]
F : Type u
K✝¹ : Type v
L : Type w
inst✝³ : Field K✝¹
inst✝² : Field L
inst✝¹ : Field F
n✝ : ℕ
K✝ : Type u
inst✝ : Field K✝
n : ℕ
ih :
(fun n =>
∀ {K : Type u} [inst : Field K] (f : K[X]),
natDegree f = n →
Algebra.adjoin K ↑(Multiset.toFinset (roots (map (algebraMap K (SplittingFieldAux n f)) f))) = ⊤)
n
K : Type u
x✝ : Field K
f : K[X]
hfn : natDegree f = Nat.succ n
hndf : natDegree f ≠ 0
⊢ Algebra.adjoin K ↑(Multiset.toFinset (roots (map (algebraMap K (SplittingFieldAux (Nat.succ n) f)) f))) = ⊤
[PROOFSTEP]
have hfn0 : f ≠ 0 := by intro h; rw [h] at hndf ; exact hndf rfl
[GOAL]
F : Type u
K✝¹ : Type v
L : Type w
inst✝³ : Field K✝¹
inst✝² : Field L
inst✝¹ : Field F
n✝ : ℕ
K✝ : Type u
inst✝ : Field K✝
n : ℕ
ih :
(fun n =>
∀ {K : Type u} [inst : Field K] (f : K[X]),
natDegree f = n →
Algebra.adjoin K ↑(Multiset.toFinset (roots (map (algebraMap K (SplittingFieldAux n f)) f))) = ⊤)
n
K : Type u
x✝ : Field K
f : K[X]
hfn : natDegree f = Nat.succ n
hndf : natDegree f ≠ 0
⊢ f ≠ 0
[PROOFSTEP]
intro h
[GOAL]
F : Type u
K✝¹ : Type v
L : Type w
inst✝³ : Field K✝¹
inst✝² : Field L
inst✝¹ : Field F
n✝ : ℕ
K✝ : Type u
inst✝ : Field K✝
n : ℕ
ih :
(fun n =>
∀ {K : Type u} [inst : Field K] (f : K[X]),
natDegree f = n →
Algebra.adjoin K ↑(Multiset.toFinset (roots (map (algebraMap K (SplittingFieldAux n f)) f))) = ⊤)
n
K : Type u
x✝ : Field K
f : K[X]
hfn : natDegree f = Nat.succ n
hndf : natDegree f ≠ 0
h : f = 0
⊢ False
[PROOFSTEP]
rw [h] at hndf
[GOAL]
F : Type u
K✝¹ : Type v
L : Type w
inst✝³ : Field K✝¹
inst✝² : Field L
inst✝¹ : Field F
n✝ : ℕ
K✝ : Type u
inst✝ : Field K✝
n : ℕ
ih :
(fun n =>
∀ {K : Type u} [inst : Field K] (f : K[X]),
natDegree f = n →
Algebra.adjoin K ↑(Multiset.toFinset (roots (map (algebraMap K (SplittingFieldAux n f)) f))) = ⊤)
n
K : Type u
x✝ : Field K
f : K[X]
hfn : natDegree f = Nat.succ n
hndf : natDegree 0 ≠ 0
h : f = 0
⊢ False
[PROOFSTEP]
exact hndf rfl
[GOAL]
F : Type u
K✝¹ : Type v
L : Type w
inst✝³ : Field K✝¹
inst✝² : Field L
inst✝¹ : Field F
n✝ : ℕ
K✝ : Type u
inst✝ : Field K✝
n : ℕ
ih :
(fun n =>
∀ {K : Type u} [inst : Field K] (f : K[X]),
natDegree f = n →
Algebra.adjoin K ↑(Multiset.toFinset (roots (map (algebraMap K (SplittingFieldAux n f)) f))) = ⊤)
n
K : Type u
x✝ : Field K
f : K[X]
hfn : natDegree f = Nat.succ n
hndf : natDegree f ≠ 0
hfn0 : f ≠ 0
⊢ Algebra.adjoin K ↑(Multiset.toFinset (roots (map (algebraMap K (SplittingFieldAux (Nat.succ n) f)) f))) = ⊤
[PROOFSTEP]
have hmf0 : map (algebraMap K (SplittingFieldAux n.succ f)) f ≠ 0 := map_ne_zero hfn0
[GOAL]
F : Type u
K✝¹ : Type v
L : Type w
inst✝³ : Field K✝¹
inst✝² : Field L
inst✝¹ : Field F
n✝ : ℕ
K✝ : Type u
inst✝ : Field K✝
n : ℕ
ih :
(fun n =>
∀ {K : Type u} [inst : Field K] (f : K[X]),
natDegree f = n →
Algebra.adjoin K ↑(Multiset.toFinset (roots (map (algebraMap K (SplittingFieldAux n f)) f))) = ⊤)
n
K : Type u
x✝ : Field K
f : K[X]
hfn : natDegree f = Nat.succ n
hndf : natDegree f ≠ 0
hfn0 : f ≠ 0
hmf0 : map (algebraMap K (SplittingFieldAux (Nat.succ n) f)) f ≠ 0
⊢ Algebra.adjoin K ↑(Multiset.toFinset (roots (map (algebraMap K (SplittingFieldAux (Nat.succ n) f)) f))) = ⊤
[PROOFSTEP]
rw [algebraMap_succ, ← map_map, ← X_sub_C_mul_removeFactor _ hndf, Polynomial.map_mul] at hmf0 ⊢
[GOAL]
F : Type u
K✝¹ : Type v
L : Type w
inst✝³ : Field K✝¹
inst✝² : Field L
inst✝¹ : Field F
n✝ : ℕ
K✝ : Type u
inst✝ : Field K✝
n : ℕ
ih :
(fun n =>
∀ {K : Type u} [inst : Field K] (f : K[X]),
natDegree f = n →
Algebra.adjoin K ↑(Multiset.toFinset (roots (map (algebraMap K (SplittingFieldAux n f)) f))) = ⊤)
n
K : Type u
x✝ : Field K
f : K[X]
hfn : natDegree f = Nat.succ n
hndf : natDegree f ≠ 0
hfn0 : f ≠ 0
hmf0 :
map (algebraMap (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f)))
(X - ↑C (AdjoinRoot.root (factor f))) *
map (algebraMap (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f))) (removeFactor f) ≠
0
⊢ Algebra.adjoin K
↑(Multiset.toFinset
(roots
(map (algebraMap (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f)))
(X - ↑C (AdjoinRoot.root (factor f))) *
map (algebraMap (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f))) (removeFactor f)))) =
⊤
[PROOFSTEP]
rw [roots_mul hmf0, Polynomial.map_sub, map_X, map_C, roots_X_sub_C, Multiset.toFinset_add, Finset.coe_union,
Multiset.toFinset_singleton, Finset.coe_singleton, Algebra.adjoin_union_eq_adjoin_adjoin, ← Set.image_singleton,
Algebra.adjoin_algebraMap K (AdjoinRoot f.factor) (SplittingFieldAux n f.removeFactor), AdjoinRoot.adjoinRoot_eq_top,
Algebra.map_top]
/- Porting note: was `rw [IsScalarTower.adjoin_range_toAlgHom K (AdjoinRoot f.factor)
(SplittingFieldAux n f.removeFactor)]` -/
[GOAL]
F : Type u
K✝¹ : Type v
L : Type w
inst✝³ : Field K✝¹
inst✝² : Field L
inst✝¹ : Field F
n✝ : ℕ
K✝ : Type u
inst✝ : Field K✝
n : ℕ
ih :
(fun n =>
∀ {K : Type u} [inst : Field K] (f : K[X]),
natDegree f = n →
Algebra.adjoin K ↑(Multiset.toFinset (roots (map (algebraMap K (SplittingFieldAux n f)) f))) = ⊤)
n
K : Type u
x✝ : Field K
f : K[X]
hfn : natDegree f = Nat.succ n
hndf : natDegree f ≠ 0
hfn0 : f ≠ 0
hmf0 :
map (algebraMap (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f)))
(X - ↑C (AdjoinRoot.root (factor f))) *
map (algebraMap (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f))) (removeFactor f) ≠
0
⊢ Subalgebra.restrictScalars K
(Algebra.adjoin
{ x //
x ∈ AlgHom.range (IsScalarTower.toAlgHom K (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f))) }
↑(Multiset.toFinset
(roots
(map (algebraMap (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f))) (removeFactor f))))) =
⊤
[PROOFSTEP]
have :=
IsScalarTower.adjoin_range_toAlgHom K (AdjoinRoot f.factor) (SplittingFieldAux n f.removeFactor)
(↑(f.removeFactor.map <| algebraMap (AdjoinRoot f.factor) <| SplittingFieldAux n f.removeFactor).roots.toFinset :
Set (SplittingFieldAux n f.removeFactor))
[GOAL]
F : Type u
K✝¹ : Type v
L : Type w
inst✝³ : Field K✝¹
inst✝² : Field L
inst✝¹ : Field F
n✝ : ℕ
K✝ : Type u
inst✝ : Field K✝
n : ℕ
ih :
(fun n =>
∀ {K : Type u} [inst : Field K] (f : K[X]),
natDegree f = n →
Algebra.adjoin K ↑(Multiset.toFinset (roots (map (algebraMap K (SplittingFieldAux n f)) f))) = ⊤)
n
K : Type u
x✝ : Field K
f : K[X]
hfn : natDegree f = Nat.succ n
hndf : natDegree f ≠ 0
hfn0 : f ≠ 0
hmf0 :
map (algebraMap (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f)))
(X - ↑C (AdjoinRoot.root (factor f))) *
map (algebraMap (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f))) (removeFactor f) ≠
0
this :
Subalgebra.restrictScalars K
(Algebra.adjoin
{ x //
x ∈ AlgHom.range (IsScalarTower.toAlgHom K (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f))) }
↑(Multiset.toFinset
(roots
(map (algebraMap (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f))) (removeFactor f))))) =
Subalgebra.restrictScalars K
(Algebra.adjoin (AdjoinRoot (factor f))
↑(Multiset.toFinset
(roots (map (algebraMap (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f))) (removeFactor f)))))
⊢ Subalgebra.restrictScalars K
(Algebra.adjoin
{ x //
x ∈ AlgHom.range (IsScalarTower.toAlgHom K (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f))) }
↑(Multiset.toFinset
(roots
(map (algebraMap (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f))) (removeFactor f))))) =
⊤
[PROOFSTEP]
refine this.trans ?_
[GOAL]
F : Type u
K✝¹ : Type v
L : Type w
inst✝³ : Field K✝¹
inst✝² : Field L
inst✝¹ : Field F
n✝ : ℕ
K✝ : Type u
inst✝ : Field K✝
n : ℕ
ih :
(fun n =>
∀ {K : Type u} [inst : Field K] (f : K[X]),
natDegree f = n →
Algebra.adjoin K ↑(Multiset.toFinset (roots (map (algebraMap K (SplittingFieldAux n f)) f))) = ⊤)
n
K : Type u
x✝ : Field K
f : K[X]
hfn : natDegree f = Nat.succ n
hndf : natDegree f ≠ 0
hfn0 : f ≠ 0
hmf0 :
map (algebraMap (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f)))
(X - ↑C (AdjoinRoot.root (factor f))) *
map (algebraMap (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f))) (removeFactor f) ≠
0
this :
Subalgebra.restrictScalars K
(Algebra.adjoin
{ x //
x ∈ AlgHom.range (IsScalarTower.toAlgHom K (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f))) }
↑(Multiset.toFinset
(roots
(map (algebraMap (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f))) (removeFactor f))))) =
Subalgebra.restrictScalars K
(Algebra.adjoin (AdjoinRoot (factor f))
↑(Multiset.toFinset
(roots (map (algebraMap (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f))) (removeFactor f)))))
⊢ Subalgebra.restrictScalars K
(Algebra.adjoin (AdjoinRoot (factor f))
↑(Multiset.toFinset
(roots
(map (algebraMap (AdjoinRoot (factor f)) (SplittingFieldAux n (removeFactor f))) (removeFactor f))))) =
⊤
[PROOFSTEP]
rw [ih _ (natDegree_removeFactor' hfn), Subalgebra.restrictScalars_top]
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
⊢ Function.Surjective ↑(ofMvPolynomial f)
[PROOFSTEP]
suffices AlgHom.range (ofMvPolynomial f) = ⊤ by rw [← Set.range_iff_surjective]; rwa [SetLike.ext'_iff] at this
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
this : AlgHom.range (ofMvPolynomial f) = ⊤
⊢ Function.Surjective ↑(ofMvPolynomial f)
[PROOFSTEP]
rw [← Set.range_iff_surjective]
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
this : AlgHom.range (ofMvPolynomial f) = ⊤
⊢ Set.range ↑(ofMvPolynomial f) = Set.univ
[PROOFSTEP]
rwa [SetLike.ext'_iff] at this
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
⊢ AlgHom.range (ofMvPolynomial f) = ⊤
[PROOFSTEP]
rw [ofMvPolynomial, ← Algebra.adjoin_range_eq_range_aeval K, eq_top_iff, ← adjoin_rootSet _ _ rfl]
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
⊢ Algebra.adjoin K (rootSet f (SplittingFieldAux (natDegree f) f)) ≤ Algebra.adjoin K (Set.range fun i => ↑i)
[PROOFSTEP]
apply Algebra.adjoin_le
[GOAL]
case H
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
⊢ rootSet f (SplittingFieldAux (natDegree f) f) ⊆ ↑(Algebra.adjoin K (Set.range fun i => ↑i))
[PROOFSTEP]
intro α hα
[GOAL]
case H
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
α : SplittingFieldAux (natDegree f) f
hα : α ∈ rootSet f (SplittingFieldAux (natDegree f) f)
⊢ α ∈ ↑(Algebra.adjoin K (Set.range fun i => ↑i))
[PROOFSTEP]
apply Algebra.subset_adjoin
[GOAL]
case H.a
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
α : SplittingFieldAux (natDegree f) f
hα : α ∈ rootSet f (SplittingFieldAux (natDegree f) f)
⊢ α ∈ Set.range fun i => ↑i
[PROOFSTEP]
exact ⟨⟨α, hα⟩, rfl⟩
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
e : SplittingField f ≃ₐ[K] SplittingFieldAux (natDegree f) f := algEquivSplittingFieldAux f
a : SplittingField f
w : a ≠ 0
⊢ a * a⁻¹ = 1
[PROOFSTEP]
apply_fun e
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
e : SplittingField f ≃ₐ[K] SplittingFieldAux (natDegree f) f := algEquivSplittingFieldAux f
a : SplittingField f
w : a ≠ 0
⊢ ↑e (a * a⁻¹) = ↑e 1
[PROOFSTEP]
have : e a ≠ 0 := fun w' => by
apply w
simp at w'
exact w'
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
e : SplittingField f ≃ₐ[K] SplittingFieldAux (natDegree f) f := algEquivSplittingFieldAux f
a : SplittingField f
w : a ≠ 0
w' : ↑e a = 0
⊢ False
[PROOFSTEP]
apply w
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
e : SplittingField f ≃ₐ[K] SplittingFieldAux (natDegree f) f := algEquivSplittingFieldAux f
a : SplittingField f
w : a ≠ 0
w' : ↑e a = 0
⊢ a = 0
[PROOFSTEP]
simp at w'
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
e : SplittingField f ≃ₐ[K] SplittingFieldAux (natDegree f) f := algEquivSplittingFieldAux f
a : SplittingField f
w : a ≠ 0
w' : a = 0
⊢ a = 0
[PROOFSTEP]
exact w'
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
e : SplittingField f ≃ₐ[K] SplittingFieldAux (natDegree f) f := algEquivSplittingFieldAux f
a : SplittingField f
w : a ≠ 0
this : ↑e a ≠ 0
⊢ ↑e (a * a⁻¹) = ↑e 1
[PROOFSTEP]
simp only [map_mul, AlgEquiv.apply_symm_apply, ne_eq, AddEquivClass.map_eq_zero_iff, map_one]
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
e : SplittingField f ≃ₐ[K] SplittingFieldAux (natDegree f) f := algEquivSplittingFieldAux f
a : SplittingField f
w : a ≠ 0
this : ↑e a ≠ 0
⊢ ↑(algEquivSplittingFieldAux f) a * (↑(algEquivSplittingFieldAux f) a)⁻¹ = 1
[PROOFSTEP]
rw [mul_inv_cancel]
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
e : SplittingField f ≃ₐ[K] SplittingFieldAux (natDegree f) f := algEquivSplittingFieldAux f
a : SplittingField f
w : a ≠ 0
this : ↑e a ≠ 0
⊢ ↑(algEquivSplittingFieldAux f) a ≠ 0
[PROOFSTEP]
assumption
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
e : SplittingField f ≃ₐ[K] SplittingFieldAux (natDegree f) f := algEquivSplittingFieldAux f
⊢ 0⁻¹ = 0
[PROOFSTEP]
simp
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
e : SplittingField f ≃ₐ[K] SplittingFieldAux (natDegree f) f := algEquivSplittingFieldAux f
a : ℤ
b : ℕ
h1 : b ≠ 0
h2 : Nat.coprime (Int.natAbs a) b
⊢ ↑(Rat.mk' a b) = ↑a * (↑b)⁻¹
[PROOFSTEP]
apply_fun e
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
e : SplittingField f ≃ₐ[K] SplittingFieldAux (natDegree f) f := algEquivSplittingFieldAux f
a : ℤ
b : ℕ
h1 : b ≠ 0
h2 : Nat.coprime (Int.natAbs a) b
⊢ ↑e ↑(Rat.mk' a b) = ↑e (↑a * (↑b)⁻¹)
[PROOFSTEP]
change e (algebraMap K _ _) = _
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
e : SplittingField f ≃ₐ[K] SplittingFieldAux (natDegree f) f := algEquivSplittingFieldAux f
a : ℤ
b : ℕ
h1 : b ≠ 0
h2 : Nat.coprime (Int.natAbs a) b
⊢ ↑e (↑(algebraMap K (SplittingField f)) ↑(Rat.mk' a b)) = ↑e (↑a * (↑b)⁻¹)
[PROOFSTEP]
simp only [map_ratCast, map_natCast, map_mul, map_intCast, AlgEquiv.commutes, AlgEquiv.apply_symm_apply]
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
e : SplittingField f ≃ₐ[K] SplittingFieldAux (natDegree f) f := algEquivSplittingFieldAux f
a : ℤ
b : ℕ
h1 : b ≠ 0
h2 : Nat.coprime (Int.natAbs a) b
⊢ ↑(Rat.mk' a b) = ↑a * (↑b)⁻¹
[PROOFSTEP]
apply Field.ratCast_mk
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
e : SplittingField f ≃ₐ[K] SplittingFieldAux (natDegree f) f := algEquivSplittingFieldAux f
a : ℚ
x : SplittingField f
p : MvPolynomial (↑(rootSet f (SplittingFieldAux (natDegree f) f))) K
⊢ (fun x x_1 => x • x_1) a p =
(fun x x_1 => x * x_1) (↑(algebraMap K (MvPolynomial (↑(rootSet f (SplittingFieldAux (natDegree f) f))) K)) ↑a) p
[PROOFSTEP]
ext
[GOAL]
case a
F : Type u
K : Type v
L : Type w
inst✝² : Field K
inst✝¹ : Field L
inst✝ : Field F
f : K[X]
e : SplittingField f ≃ₐ[K] SplittingFieldAux (natDegree f) f := algEquivSplittingFieldAux f
a : ℚ
x : SplittingField f
p : MvPolynomial (↑(rootSet f (SplittingFieldAux (natDegree f) f))) K
m✝ : ↑(rootSet f (SplittingFieldAux (natDegree f) f)) →₀ ℕ
⊢ MvPolynomial.coeff m✝ ((fun x x_1 => x • x_1) a p) =
MvPolynomial.coeff m✝
((fun x x_1 => x * x_1) (↑(algebraMap K (MvPolynomial (↑(rootSet f (SplittingFieldAux (natDegree f) f))) K)) ↑a)
p)
[PROOFSTEP]
simp [MvPolynomial.algebraMap_eq, Rat.smul_def]
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝⁴ : Field K
inst✝³ : Field L
inst✝² : Field F
inst✝¹ : Algebra K L
f : K[X]
inst✝ : IsSplittingField K L f
⊢ L ≃ₐ[K] SplittingField f
[PROOFSTEP]
refine'
AlgEquiv.ofBijective (lift L f <| splits (SplittingField f) f)
⟨RingHom.injective (lift L f <| splits (SplittingField f) f).toRingHom, _⟩
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝⁴ : Field K
inst✝³ : Field L
inst✝² : Field F
inst✝¹ : Algebra K L
f : K[X]
inst✝ : IsSplittingField K L f
⊢ Function.Surjective ↑(lift L f (_ : Splits (algebraMap K (SplittingField f)) f))
[PROOFSTEP]
haveI := finiteDimensional (SplittingField f) f
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝⁴ : Field K
inst✝³ : Field L
inst✝² : Field F
inst✝¹ : Algebra K L
f : K[X]
inst✝ : IsSplittingField K L f
this : FiniteDimensional K (SplittingField f)
⊢ Function.Surjective ↑(lift L f (_ : Splits (algebraMap K (SplittingField f)) f))
[PROOFSTEP]
haveI := finiteDimensional L f
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝⁴ : Field K
inst✝³ : Field L
inst✝² : Field F
inst✝¹ : Algebra K L
f : K[X]
inst✝ : IsSplittingField K L f
this✝ : FiniteDimensional K (SplittingField f)
this : FiniteDimensional K L
⊢ Function.Surjective ↑(lift L f (_ : Splits (algebraMap K (SplittingField f)) f))
[PROOFSTEP]
have : FiniteDimensional.finrank K L = FiniteDimensional.finrank K (SplittingField f) :=
le_antisymm
(LinearMap.finrank_le_finrank_of_injective
(show Function.Injective (lift L f <| splits (SplittingField f) f).toLinearMap from
RingHom.injective (lift L f <| splits (SplittingField f) f : L →+* f.SplittingField)))
(LinearMap.finrank_le_finrank_of_injective
(show Function.Injective (lift (SplittingField f) f <| splits L f).toLinearMap from
RingHom.injective (lift (SplittingField f) f <| splits L f : f.SplittingField →+* L)))
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝⁴ : Field K
inst✝³ : Field L
inst✝² : Field F
inst✝¹ : Algebra K L
f : K[X]
inst✝ : IsSplittingField K L f
this✝¹ : FiniteDimensional K (SplittingField f)
this✝ : FiniteDimensional K L
this : FiniteDimensional.finrank K L = FiniteDimensional.finrank K (SplittingField f)
⊢ Function.Surjective ↑(lift L f (_ : Splits (algebraMap K (SplittingField f)) f))
[PROOFSTEP]
change Function.Surjective (lift L f <| splits (SplittingField f) f).toLinearMap
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝⁴ : Field K
inst✝³ : Field L
inst✝² : Field F
inst✝¹ : Algebra K L
f : K[X]
inst✝ : IsSplittingField K L f
this✝¹ : FiniteDimensional K (SplittingField f)
this✝ : FiniteDimensional K L
this : FiniteDimensional.finrank K L = FiniteDimensional.finrank K (SplittingField f)
⊢ Function.Surjective ↑(AlgHom.toLinearMap (lift L f (_ : Splits (algebraMap K (SplittingField f)) f)))
[PROOFSTEP]
refine' (LinearMap.injective_iff_surjective_of_finrank_eq_finrank this).1 _
[GOAL]
F : Type u
K : Type v
L : Type w
inst✝⁴ : Field K
inst✝³ : Field L
inst✝² : Field F
inst✝¹ : Algebra K L
f : K[X]
inst✝ : IsSplittingField K L f
this✝¹ : FiniteDimensional K (SplittingField f)
this✝ : FiniteDimensional K L
this : FiniteDimensional.finrank K L = FiniteDimensional.finrank K (SplittingField f)
⊢ Function.Injective ↑(AlgHom.toLinearMap (lift L f (_ : Splits (algebraMap K (SplittingField f)) f)))
[PROOFSTEP]
exact RingHom.injective (lift L f <| splits (SplittingField f) f : L →+* f.SplittingField)
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Finite sets defined in terms of Data.Star
------------------------------------------------------------------------
module Data.Star.Fin where
open import Data.Star
open import Data.Star.Nat as ℕ using (ℕ)
open import Data.Star.Pointer
open import Data.Unit
-- Finite sets are undecorated pointers into natural numbers.
Fin : ℕ → Set
Fin = Any (λ _ → ⊤) (λ _ → ⊤)
-- "Constructors".
zero : ∀ {n} → Fin (ℕ.suc n)
zero = this tt
suc : ∀ {n} → Fin n → Fin (ℕ.suc n)
suc = that tt
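-- Example (an addition, not part of the library): with the
-- "constructors" above, the larger element of a two-element set would
-- be written as follows (kept as a comment; it assumes Data.Star.Nat
-- also exports zero, as it does suc):
--
--   one : Fin (ℕ.suc (ℕ.suc ℕ.zero))
--   one = suc zero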
|
module Statistics.Dispersion where
import Statistics.Center
variance :: (Fractional a) => [a] -> a
variance vals = (sum $ zipWith (*) deviations deviations)/n
where n = (fromIntegral $ length vals)
mu = arithmeticMean vals
deviations = map (\x -> x-mu) vals
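-- Usage sketch (an addition, not part of the original module). Note
-- that variance (above) and stdev (below) are population statistics,
-- dividing by n rather than n - 1:
--
--   variance [1, 2, 3, 4]  ==  1.25
--   stdev    [1, 2, 3, 4]  ==  sqrt 1.25   (roughly 1.118)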
stdev :: (Floating a) => [a] -> a
stdev vals = sqrt $ variance vals |
Formal statement is: lemma of_real_Re [simp]: "z \<in> \<real> \<Longrightarrow> of_real (Re z) = z" Informal statement is: If $z$ is a real number, then $\operatorname{Re}(z) = z$. |
Two of her four funnels were removed, and the forward two were cut at an angle to resemble those of a German destroyer. The bow was packed with 4.5 tons of high explosives, which were set in concrete. It was decided that the explosive charge would be timed to detonate after the raiders had left the harbour. To prevent the Germans towing her away, the crew would open the ship's seacocks before abandoning the ship. Should she become disabled or sunk before getting to the dock, four motor launches had been detailed to take off the crew and put the commandos ashore. The charge would be reset to explode after the last boat had left.
|
import EggTactic
namespace Egg
-- From: lean4/tests/lean/run/alg.lean
-- class Group (α : Type u) extends Mul α where
-- one : α
-- one_mul (a : α) : one * a = a
-- mul_one (a : α) : a * one = a
-- inv : α → α
-- mul_assoc (a b c : α) : a * b * c = a * (b * c)
-- mul_left_inv (a : α) : (inv a) * a = one
--
-- instance [Group α] : OfNat α (nat_lit 1) where
-- ofNat := Group.one
-- Does not work with typeclasses (problem with metavariables)
-- We'll just take the integers for now and not use any additional properties
-- Copied form stdlib
inductive G : Type where
| ofNat : Nat → G
| negSucc : Nat → G
def negOfNat : Nat → G
| 0 => .ofNat 0
| .succ m => .negSucc m
def neg (n : G) : G :=
match n with
| .ofNat n => negOfNat n
| .negSucc n => .ofNat $ Nat.succ n
def subNatNat (m n : Nat) : G :=
match (n - m : Nat) with
| 0 => G.ofNat (m - n) -- m ≥ n
| (.succ k) => .negSucc k
def add (m n : G) : G :=
match m, n with
| .ofNat m, .ofNat n => .ofNat (m + n)
| .ofNat m, .negSucc n => subNatNat m (Nat.succ n)
| .negSucc m, .ofNat n => subNatNat n (Nat.succ m)
| .negSucc m, .negSucc n => .negSucc (Nat.succ (m + n))
postfix:max "⁻¹" => neg
infix:80 "∘" => add
notation "e" => G.ofNat 0
theorem one_mul (a : G) : e ∘ a = a := by sorry
theorem mul_assoc (a b c : G) : (a ∘ b) ∘ c = a ∘ (b ∘ c) := by sorry
theorem mul_one (a : G) : a ∘ e = a := by sorry
theorem mul_left_inv (a : G) : a⁻¹ ∘ a = e := by sorry
theorem mul_right_inv (a : G) : a ∘ a⁻¹ = e := by sorry
theorem inv_mul_cancel_left (a b : G) : a⁻¹ ∘ (a ∘ b) = b := by
try simp [mul_assoc,mul_left_inv,mul_assoc,one_mul,mul_one, mul_right_inv]
rawEgg [mul_assoc,mul_left_inv,mul_assoc,one_mul,mul_one, mul_right_inv]
theorem mul_inv_cancel_left : a ∘ (a⁻¹ ∘ b) = b := by
try simp [mul_assoc,mul_left_inv,mul_assoc,one_mul,mul_one, mul_right_inv]
rawEgg [mul_assoc,mul_left_inv,mul_assoc,one_mul,mul_one, mul_right_inv]
theorem inv_mul : (a ∘ b)⁻¹ = b⁻¹ ∘ a⁻¹ := by
try simp [mul_assoc,mul_left_inv,mul_assoc,one_mul,mul_one, mul_right_inv]
rawEgg [mul_assoc,mul_left_inv,mul_assoc,one_mul,mul_one, mul_right_inv]
theorem one_inv : e⁻¹ = e := by
try simp [mul_assoc,mul_left_inv,mul_assoc,one_mul,mul_one, mul_right_inv]
rawEgg [mul_assoc,mul_left_inv,mul_assoc,one_mul,mul_one, mul_right_inv]
theorem inv_inv : a ⁻¹ ⁻¹ = a := by
try simp [mul_assoc,mul_left_inv,mul_assoc,one_mul,mul_one, mul_right_inv]
rawEgg [mul_assoc,mul_left_inv,mul_assoc,one_mul,mul_one, mul_right_inv]
|
Add LoadPath "../theories" as Specware.
Require Import Specware.Spec.
Fixpoint spec_model2 spec : Type :=
match spec with
| Spec_Axioms axioms => conjoin_axioms axioms
| Spec_ConsOp f T oppred rest =>
{ t : T & {pf: oppred t & spec_model2 (rest t pf) } }
end.
Record refinement : Type :=
{rspec : Spec;
rinterps : list (spec_model2 rspec -> { P:Prop | P } ) }.
Require Import Coq.Arith.Plus.
Require Import Coq.Lists.List.
Import ListNotations.
Lemma add_axiom name tp axioms : conjoin_axioms (axioms ++ [ax_pair name tp]) ->
conjoin_axioms axioms.
induction axioms; intro H.
apply I.
destruct axioms.
destruct H; assumption.
destruct H; split.
assumption.
apply IHaxioms; assumption.
Qed.
Arguments add_axiom name%string tp axioms _.
Definition prove_axioms_id axioms : conjoin_axioms axioms -> conjoin_axioms axioms := fun x => x.
Require Import Coq.Logic.FunctionalExtensionality.
Goal {P:Prop | P}.
eapply exist.
instantiate (P:= (@Monoid ?[T] ?[m_zero] ?[m_plus])).
apply (fun (H:conjoin_axioms [ax_pair "m_zero_left" _;
ax_pair "m_zero_right" _;
ax_pair "m_plus_assoc" _]) =>
match H with conj pf1 (conj pf2 pf3) =>
Build_Monoid ?T ?m_zero ?m_plus pf1 pf2 pf3 end).
instantiate (m_zero:=?[g_zero]).
instantiate (m_plus:=?[g_plus]).
instantiate (g_plus:=plus).
set (my_plus := plus).
pattern Nat.add in (value of my_plus).
rewrite (functional_extensionality
Nat.add tail_plus
(fun n =>
functional_extensionality _ _ (fun m => plus_tail_plus _ _)))
in (value of my_plus).
replace Nat.add with tail_plus in (value of my_plus).
(*
remember Nat.add as my_plus.
rewrite (functional_extensionality
Nat.add tail_plus
(fun n =>
functional_extensionality _ _ (fun m => plus_tail_plus _ _)))
in Heqmy_plus.
rewrite Heqmy_plus.
*)
(*
Definition blah : conjoin_axioms
[ax_pair "m_zero_left"
(forall (x: (?[T]:Set)), (?[m_plus]: (?T -> ?T -> ?T) : Set) (?[m_zero]:?T) x = x)].
*)
Definition monoid_group_refinement : refinement.
eapply Build_refinement. instantiate (rspec:=?[__Spec]).
apply cons; [ | apply nil ]. intro __Model.
eapply exist.
instantiate (P:= (@Monoid ?[T] ?[m_zero] ?[m_plus])).
apply (fun (H:conjoin_axioms [ax_pair "m_zero_left" _;
ax_pair "m_zero_right" _;
ax_pair "m_plus_assoc" _]) =>
match H with conj pf1 (conj pf2 pf3) =>
Build_Monoid ?T ?m_zero ?m_plus pf1 pf2 pf3 end).
instantiate (T:= ?[U]).
(* rename T into U; instantiate (T:= ?U@{__Spec:=?__Spec; __Model:=__Model}). *)
instantiate (U:= (?[U_L]:Set) -> ?[U_R]:Set).
(*
set (U_L := ?U_L); unfold U_L; move m_zero after U_L (* ; move m_plus after U_L *).
set (U_R := ?U_R); unfold U_R; move m_zero after U_R (* ; move m_plus after U_R *).
move U after U_R.
*)
(* rename m_zero into g_zero; *) instantiate (m_zero:=?[g_zero]).
(* rename m_plus into g_plus; *) instantiate (m_plus:=?[g_plus]).
(* clear U. *)
set (g_double:= fun x => ?g_plus x x).
eapply (add_axiom "g_inv_left");
instantiate (tp:=(forall x, ?g_plus ((?[g_inv]) x) x = ?g_zero)).
(*
apply (add_axiom "g_inv_left" (forall x, ?g_plus ((?[g_inv]: (?U_L -> ?U_R) -> (?U_L -> ?U_R)) x) x = ?g_zero)).
*)
apply (add_axiom "g_inv_right" (forall x, ?g_plus x (?g_inv x) = ?g_zero)).
unfold app.
Show Proof.
instantiate
(__Spec:=
Spec_ConsOp
"U_L" Set Pred_Trivial
(fun U_L _ =>
Spec_ConsOp
"U_R" Set Pred_Trivial
(fun U_R _ =>
Spec_ConsOp
"g_zero" (U_L -> U_R) Pred_Trivial
(fun g_zero _ =>
Spec_ConsOp
"g_plus" ((U_L -> U_R) -> (U_L -> U_R) -> (U_L -> U_R)) Pred_Trivial
(fun g_plus _ =>
Spec_ConsOp
"g_inv" ((U_L -> U_R) -> (U_L -> U_R)) Pred_Trivial
(fun g_inv _ =>
Spec_Axioms
[ax_pair "m_zero_left"
(forall x, g_plus g_zero x = x);
ax_pair "m_zero_right"
(forall x, g_plus x g_zero = x);
ax_pair "m_plus_assoc"
(forall x y z,
g_plus x (g_plus y z) =
g_plus (g_plus x y) z);
ax_pair "g_inv_left"
(forall x,
g_plus (g_inv x) x = g_zero);
ax_pair "g_inv_right"
(forall x,
g_plus x (g_inv x) = g_zero)])))))).
(*
instantiate
(__Spec:=
Spec_ConsOp
"U_L" Set Pred_Trivial
(fun U_L _ =>
Spec_ConsOp
"U_R" Set Pred_Trivial
(fun U_R _ =>
Spec_ConsOp
"g_zero" (U_L -> U_R) Pred_Trivial
(fun g_zero _ =>
Spec_ConsOp
"g_plus" ((U_L -> U_R) -> (U_L -> U_R) -> (U_L -> U_R)) Pred_Trivial
(fun g_plus _ =>
Spec_ConsOp
"g_inv" ((U_L -> U_R) -> (U_L -> U_R)) Pred_Trivial
(fun g_inv _ =>
Spec_Axioms
?[__axioms])))))).
*)
Show Proof.
Show Existentials.
(*
destruct __Model as [U_L__param __Model];
destruct __Model as [U_L__proof __Model];
destruct __Model as [U_R__param __Model];
destruct __Model as [U_R__proof __Model];
destruct __Model as [g_zero__param __Model];
destruct __Model as [g_zero__proof __Model];
destruct __Model as [g_plus__param __Model];
destruct __Model as [g_plus__proof __Model];
destruct __Model as [g_inv__param __Model];
destruct __Model as [g_inv__proof __Model].
*)
(* unfold __Model_var in U_L. instantiate (U_L:=U_L__param). *)
(*
apply __Model.
*)
(*
apply (projT2 (projT2 (projT2 (projT2 (projT2 (projT2 (projT2 (projT2 (projT2 (projT2 __Model)))))))))).
*)
apply (match __Model with
| (existT
_ _
(existT
_ _
(existT
_ _
(existT
_ _
(existT
_ _
(existT
_ _
(existT
_ _
(existT
_ _
(existT
_ _
(existT
_ _
pf)))))))))) => pf end).
instantiate (U_L :=
match __Model with
| existT _ x _ => x end). clear U_L.
instantiate (U_R :=
match __Model with
| (existT _ _ (existT _ _ (existT _ x _))) => x end). clear U_R.
instantiate
(g_zero:=match __Model return (let (U_L,_) := __Model in U_L) ->
(let (_,x1) := __Model in
let (_,x2) := x1 in
let (U_R,_) := x2 in U_R) with
| (existT _ _ (existT _ _ (existT _ _ (existT _ _ (existT _ x _))))) => x end).
unfold g_zero; clear g_zero.
instantiate
(g_plus:=match __Model return let T :=
((let (U_L,_) := __Model in U_L) ->
(let (_,x1) := __Model in
let (_,x2) := x1 in
let (U_R,_) := x2 in U_R)) in T -> T -> T with
| (existT _ _ (existT _ _ (existT _ _ (existT _ _ (existT _ _ (existT _ _ (existT _ x _))))))) => x end).
instantiate
(g_inv:=match __Model return let T :=
((let (U_L,_) := __Model in U_L) ->
(let (_,x1) := __Model in
let (_,x2) := x1 in
let (U_R,_) := x2 in U_R)) in T -> T with
| (existT _ _ (existT _ _ (existT _ _ (existT _ _ (existT _ _ (existT _ _ (existT _ _ (existT _ _ (existT _ x _))))))))) => x end).
destruct __Model as [U_L__param __Model];
destruct __Model as [U_L__proof __Model];
destruct __Model as [U_R__param __Model];
destruct __Model as [U_R__proof __Model];
destruct __Model as [g_zero__param __Model];
destruct __Model as [g_zero__proof __Model];
destruct __Model as [g_plus__param __Model];
destruct __Model as [g_plus__proof __Model];
destruct __Model as [g_inv__param __Model];
destruct __Model as [g_inv__proof __Model].
apply __Model.
Defined.
Print monoid_group_refinement.
Definition monoid_refinement : forall (M:spec_model2 ?M), @Monoid ?x1 ?x2 ?x3.
|
\hypertarget{modelling-reactive-behavior-notation}{%
\section{Modelling Reactive Behavior
Notation}\label{modelling-reactive-behavior-notation}}
\hypertarget{finite-state-machine}{%
\subsection{Finite State Machine}\label{finite-state-machine}}
\emph{A model of state-dependent behaviour}\\
In a finite state machine, events and actions point in the opposite
direction compared to the technical process: the events emitted by the
technical process are the inputs of the finite state machine (a minimal
sketch follows the figure below).
\begin{figure}[H]
\centering
\includegraphics[width=0.5\textwidth]{figures/fsm.png}
\caption{Finite state machine}
\end{figure}
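A minimal C sketch of this input/output direction (an assumed example,
not from the lecture; all names are illustrative): the event reported by
the technical process is the input of the step function, and the
returned action is sent back to the process.

\begin{verbatim}
typedef enum { EV_START, EV_STOP }            Event;  /* from the process */
typedef enum { ACT_NONE, ACT_RUN, ACT_HALT }  Action; /* to the process   */
typedef enum { ST_IDLE, ST_RUNNING }          State;

/* One step of the finite state machine: event in, action out. */
Action fsm_step(State *state, Event ev)
{
    switch (*state) {
    case ST_IDLE:
        if (ev == EV_START) { *state = ST_RUNNING; return ACT_RUN;  }
        break;
    case ST_RUNNING:
        if (ev == EV_STOP)  { *state = ST_IDLE;    return ACT_HALT; }
        break;
    }
    return ACT_NONE;
}
\end{verbatim}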
\hypertarget{uml-state-diagrams}{%
\subsubsection{UML State-Diagrams}\label{uml-state-diagrams}}
\hypertarget{time-events}{%
\paragraph{Time Events}\label{time-events}}
\begin{itemize}
\tightlist
\item
Time Events can be used to trigger Transitions
\item
Time Events occur a specified time after entering a certain state
\end{itemize}
Notation:
\begin{itemize}
\tightlist
\item
  Rhapsody: tm(timeUnits) (in this lecture: timeUnit = 1 ms)
\item
  Standard UML: after(500ms)
\end{itemize}
\begin{figure}[H]
\centering
\includegraphics[width=0.5\textwidth]{figures/fsmTimeEvent.png}
\caption{Time Event FSM}
\end{figure}
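The following is a minimal C sketch (not taken from the lecture; the
state names, the 1 ms tick source and the 500 ms delay are illustrative
assumptions) of how a time event can be realized with a tick counter
that is reset whenever the state is entered:

\begin{verbatim}
typedef enum { TS_WAITING, TS_ACTIVE } TimedState;

static TimedState   tstate       = TS_WAITING;
static unsigned int ticksInState = 0;          /* 1 tick = 1 ms */

void fsm_tick(void)          /* assumed to be called every 1 ms */
{
    ticksInState++;
    switch (tstate) {
    case TS_WAITING:
        if (ticksInState >= 500) {   /* tm(500) resp. after(500ms) */
            tstate       = TS_ACTIVE;
            ticksInState = 0;        /* restart timing in the new state */
        }
        break;
    case TS_ACTIVE:
        /* further behaviour omitted */
        break;
    }
}
\end{verbatim}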
\hypertarget{operation-calls-in-actions}{%
\paragraph{Operation Calls in
Actions}\label{operation-calls-in-actions}}
\begin{figure}[H]
\centering
\includegraphics[width=0.5\textwidth]{figures/fsmOperationCall.png}
\caption{Operation Calls}
\end{figure}
\clearpage
\hypertarget{entry-and-exit-actions}{%
\paragraph{Entry and Exit Actions}\label{entry-and-exit-actions}}
\begin{itemize}
\tightlist
\item
Entry Actions are executed every time a State is entered
\item
  Exit Actions are executed every time a State is left
\end{itemize}
\begin{figure}[H]
\centering
\includegraphics[width=0.5\textwidth]{figures/fsmEntryExitAction.png}
\caption{Entry and Exit Actions}
\end{figure}
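As a minimal sketch (assumed, not part of the lecture material) of how
entry and exit actions map to code, the helpers exitA and entryB below
are hypothetical placeholders for the actions attached to the states:

\begin{verbatim}
typedef enum { EA_STATE_A, EA_STATE_B } EaState;
typedef enum { EA_EV_GO }               EaEvent;

static void exitA(void)  { /* exit action of state A  */ }
static void entryB(void) { /* entry action of state B */ }

void ea_dispatch(EaState *state, EaEvent ev)
{
    if (*state == EA_STATE_A && ev == EA_EV_GO) {
        exitA();             /* 1. exit action of the source state  */
        /* 2. the transition action would run here                  */
        *state = EA_STATE_B; /* 3. change the state                 */
        entryB();            /* 4. entry action of the target state */
    }
}
\end{verbatim}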
\hypertarget{internal-transition}{%
\paragraph{Internal Transition}\label{internal-transition}}
\begin{figure}[H]
\centering
\includegraphics[width=0.5\textwidth]{figures/fsmInternalTransition.png}
\caption{Internal Transition}
\end{figure}
\hypertarget{completion-transitions}{%
\paragraph{Completion transitions}\label{completion-transitions}}
\begin{itemize}
\tightlist
\item
Also called null transitions
\item
Have no trigger
\item
  Are executed after a state is entered and all entry actions are
  executed (and Out-Pulses are sent, see below)
\item
Belong to run-to-completion step (see below) of the triggering
transition
\item
Can have guards (see below)
\item
Can have actions
\item
  With multiple completion transitions, guards are needed to make them
  deterministic
\end{itemize}
\hypertarget{guards}{%
\paragraph{Guards}\label{guards}}
A guard is a boolean condition that can be assigned to a transition;
the transition is only taken if the guard evaluates to true when the
trigger occurs (see the sketch after the figure below).
\begin{figure}[H]
\centering
\includegraphics[width=0.5\textwidth]{figures/fsmGuard.png}
\caption{FSM Guards}
\end{figure}
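A minimal sketch of a guarded transition (an assumed example; the tank
and level names are illustrative, not from the lecture): the event only
leads to a transition when the guard condition holds at the moment the
trigger occurs.

\begin{verbatim}
typedef enum { G_FILLING, G_FULL }  GState;
typedef enum { G_EV_LEVEL_CHANGED } GEvent;

void g_dispatch(GState *state, GEvent ev, int level, int levelMax)
{
    if (*state == G_FILLING && ev == G_EV_LEVEL_CHANGED) {
        if (level >= levelMax) {   /* guard [level >= max] */
            *state = G_FULL;
        }
        /* guard false: the event is consumed, no transition is taken */
    }
}
\end{verbatim}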
\clearpage
\hypertarget{nested-states}{%
\paragraph{Nested States}\label{nested-states}}
Finite state machines can be nested.
\begin{figure}[H]
\centering
\includegraphics[width=0.5\textwidth]{figures/fsmNested.png}
\caption{FSM nested}
\end{figure}
\hypertarget{history}{%
\paragraph{History}\label{history}}
History means that, when the FSM is re-entered, it resumes in the same
state it was in when it was last left (see the sketch after the figure
below).
\begin{figure}[H]
\centering
\includegraphics[width=0.5\textwidth]{figures/fsmHistory.png}
\caption{FSM History}
\end{figure}
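A minimal sketch of the history mechanism under assumptions (the
substate names are hypothetical): the composite state remembers its last
active substate when it is left and restores it when it is re-entered
via the history connector.

\begin{verbatim}
typedef enum { SUB_A, SUB_B } SubState;

static SubState historySub = SUB_A;   /* default entry of the composite */
static SubState currentSub = SUB_A;

/* Leaving the composite state: remember the active substate. */
void leaveComposite(void)
{
    historySub = currentSub;
}

/* Re-entering via the history connector: resume the remembered substate. */
void enterCompositeViaHistory(void)
{
    currentSub = historySub;
}
\end{verbatim}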
\clearpage
\hypertarget{junction-state}{%
\paragraph{Junction State}\label{junction-state}}
Chains several transition segments into a single transition, or splits
one incoming transition into multiple outgoing transitions.
\begin{figure}[H]
\centering
\includegraphics[width=0.5\textwidth]{figures/fsmJunctionState.png}
\caption{FSM Junction State}
\end{figure}
\subsection{CIRO (Communicating Interacting Reactive-Objects)}
\begin{itemize}
\tightlist
\item
UML Extension
\item
To model reactive systems
\item
Asynchronous communication with (event-, action-, communication-)
messages
\item
Synchronous interaction between reactive-objects (see below)
\item
Code-generation possible
\end{itemize}
\hypertarget{meta-model-not-important}{%
\subsubsection{Meta-model (not
important)}\label{meta-model-not-important}}
\begin{itemize}
\tightlist
\item
Meta-Model: a model of models
\item
Meta-Model defines:
\begin{itemize}
\tightlist
\item
the elements of a modelling language
\item
the set of potential models (syntactically correct models)
\item
the semantics (i.e.~the meaning) of the model elements
\end{itemize}
\end{itemize}
\clearpage
\hypertarget{reactive-system}{%
\subsubsection{Reactive-System}\label{reactive-system}}
A reactive system is the sum (logical aggregate) of all
reactive-machines of an Embedded-System.
\begin{itemize}
\tightlist
\item
  Input: Event-Messages
\item
  Output: Action-Messages
\end{itemize}
\begin{figure}[H]
\centering
\includegraphics[width=0.5\textwidth]{figures/reactiveSystemOverview.png}
\caption{Reactive System Overview}
\end{figure}
\begin{itemize}
\tightlist
\item
The \textbf{Reactive Machine} or \textbf{Reactive Object} is one
  component of a cluster, e.g.~the pump in the vessel example.
\item
The \textbf{Reactive Cluster} is one cluster in a reactive system,
e.g.~the cluster of different components in the vessel.
\item
  If a system had multiple clusters, all clusters together would form
  the \textbf{reactive system}.
\end{itemize}
\begin{figure}[H]
\centering
\includegraphics[width=0.5\textwidth]{figures/reactiveSystemCluster.png}
\caption{Reactive System Cluster}
\end{figure}
\hypertarget{reactive-cluster}{%
\subsubsection{Reactive-Cluster}\label{reactive-cluster}}
Reacts to event-messages and generates action-messages: a group of
cooperating objects which interact with each other. Within a cluster,
Reactive-Objects cooperate using three different synchronous
interaction mechanisms:
\begin{itemize}
\tightlist
\item
  Pulse-Cast
\item
  State-Inspection
\item
  Mode-Control
\end{itemize}
\hypertarget{reactive-object}{%
\subsubsection{Reactive-Object}\label{reactive-object}}
\begin{itemize}
\tightlist
\item
Component of a Reactive-Cluster
\item
Defines a finite state machine (FSM) or extend finite state machine
(see below)
\item
Interacts synchronously with other Reactive-Objects within the same
cluster
\item
  Communicates indirectly via cluster ports with the outside world
\end{itemize}
\hypertarget{interaction-mechanisms}{%
\subsubsection{Interaction mechanisms}\label{interaction-mechanisms}}
\begin{itemize}
\tightlist
\item
  Pulse-Cast (a minimal sketch follows after this list)
\begin{itemize}
\tightlist
\item
Multi-cast, one sender, many receivers
\item
In-Pulse
\begin{itemize}
\tightlist
\item
Input to state-machine
\item
Triggers state-transition
\end{itemize}
\item
Out-Pulse
\begin{itemize}
\tightlist
\item
Output of state-machine
\item
Generates in-pulse to receiver
\end{itemize}
\end{itemize}
\item
State-Inspection
\begin{itemize}
\tightlist
\item
Enables case differentiation on transitions
\end{itemize}
\item
Mode-Control
\begin{itemize}
\tightlist
\item
Determination of the active mode
\end{itemize}
\end{itemize}
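As indicated at the Pulse-Cast item above, an out-pulse of one
reactive-object is delivered synchronously as in-pulses to several
receivers. The following is a minimal sketch under assumptions (a
fixed-size receiver table and a hypothetical handler type); it is not
the CIRO implementation:

\begin{verbatim}
#define MAX_RECEIVERS 8

typedef void (*InPulseHandler)(int pulseId);

static InPulseHandler receivers[MAX_RECEIVERS];
static int            numReceivers = 0;

/* Register a reactive-object's in-pulse handler within the cluster. */
void register_receiver(InPulseHandler h)
{
    if (numReceivers < MAX_RECEIVERS)
        receivers[numReceivers++] = h;
}

/* Out-pulse: synchronously generates an in-pulse at every receiver. */
void out_pulse(int pulseId)
{
    for (int i = 0; i < numReceivers; i++)
        receivers[i](pulseId);
}
\end{verbatim}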
\clearpage |
[STATEMENT]
lemma lemCPos:
assumes "IOb m"
shows "c m > 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (0::'a) < c m
[PROOF STEP]
by (metis assms(1) lemCProps) |
" By the end of 1984 , From Time Immemorial had ... received some two hundred [ favorable ] notices ... in the United States . The only ' false ' notes in this <unk> chorus of praise were the Journal of Palestine Studies , which ran a highly critical review by Bill Farrell ; the small Chicago @-@ based newsweekly In These Times , which published a condensed version of this writer 's findings ; and Alexander Cockburn , who devoted a series of columns in The Nation exposing the hoax . ... The periodicals in which From Time Immemorial had already been favorably reviewed refused to run any critical correspondence ( e.g. The New Republic , The Atlantic Monthly , Commentary ) . Periodicals that had yet to review the book rejected a manuscript on the subject as of little or no consequence ( e.g. The Village Voice , Dissent , The New York Review of Books ) . Not a single national newspaper or columnist contacted found newsworthy that a best @-@ selling , effusively praised ' study ' of the Middle East conflict was a threadbare hoax . "
|
State Before: X : Type ?u.14903
α : Type u_3
α' : Type ?u.14909
β : Type u_2
γ : Type u_1
δ : Type ?u.14918
M : Type ?u.14921
E : Type ?u.14924
R : Type ?u.14927
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace α'
inst✝² : One β
inst✝¹ : One γ
inst✝ : One δ
g : β → γ
f : α → β
f₂ : α → γ
m : β → γ → δ
x : α
hg : ∀ {x : β}, g x = 1 ↔ x = 1
⊢ HasCompactMulSupport (g ∘ f) ↔ HasCompactMulSupport f
State After: no goals
Tactic: simp_rw [hasCompactMulSupport_def, mulSupport_comp_eq g (@hg) f] |
# Adaptivity settings for the Rejection Sampling with Memory (RSwM)
# algorithms used for adaptive time stepping with stochastic processes.
mutable struct RSWM{adaptivealg,T}
    discard_length::T
end

# Keyword constructor: `adaptivealg` selects the RSwM variant and
# `discard_length` is the increment length below which stored noise
# values are discarded.
Base.@pure function RSWM(;
                         discard_length=1e-15,
                         adaptivealg::Symbol=:RSwM3)
    RSWM{adaptivealg,typeof(discard_length)}(discard_length)
end

# Extract the adaptivity algorithm symbol from the type parameter.
adaptive_alg(rswm::RSWM{adaptivealg,T}) where {adaptivealg,T} = adaptivealg
|