# Summarise how much each parameter varies *between* chains: the spread across
# chains of the per-chain mean (:μ) and of the per-chain standard deviation (:σ).
using Statistics: mean, std

function interchain_stats(chains)
    d = Dict()
    for param in chains.name_map.parameters
        # assuming chains[param] yields an iterations × chains array: reduce over iterations first
        μ = std(mean(chains[param], dims=1))
        σ = std(std(chains[param], dims=1))
        push!(d, param => Dict(:μ => μ, :σ => σ))
    end
    return d
end |
# Invert the mapping: each character in a value collection becomes a
# lower-cased key pointing back to the original dictionary key.
function transform(input::AbstractDict)
    output = Dict{Char,Int}()
    for (k, v) in input
        for vi in v
            output[lowercase(vi)] = k
        end
    end
    return output
end
|
lemma content_smult [simp]: fixes c :: "'a :: {normalization_semidom_multiplicative, semiring_gcd}" shows "content (smult c p) = normalize c * content p" |
theory prop_10
imports Main
"$HIPSTER_HOME/IsaHipster"
begin
datatype Nat = Z | S "Nat"
fun minus :: "Nat => Nat => Nat" where
"minus (Z) y = Z"
| "minus (S z) (Z) = S z"
| "minus (S z) (S x2) = minus z x2"
(*hipster minus *)
theorem x0 :
"(minus m m) = Z"
by (tactic \<open>Subgoal.FOCUS_PARAMS (K (Tactic_Data.hard_tac @{context})) @{context} 1\<close>)
end
|
Overall, Not Quite Hollywood received positive reviews from critics. Review aggregator Rotten Tomatoes reports that 94% of critics have given the film a positive review, "Certified Fresh", based on 63 reviews, with an average score of 7.4 out of 10. Margaret Pomeranz and David Stratton of At the Movies gave the film four and three and a half out of five stars respectively; Pomeranz commended Hartley for "the depth of his research and for creating a wildly entertaining film experience", and claimed that "for those of us who remember the films, Not Quite Hollywood is a blast". Sandra Hall, writing for The Sydney Morning Herald, gave the film three and a half out of five stars, believing that "Hartley's own film is much livelier than most of those he is out to celebrate". Jake Wilson of The Age similarly gave the film three and a half stars, but called the film "basically a feature-length advertisement for its subject", saying that it "moves far too rapidly to permit sustained analysis". The Courier-Mail's Des <unk>, who gave the film four and a half out of five stars, disagreed, saying that "Brisk editing means the history is lively and fun", and claimed in homage to The Castle, "Copies of Hartley's film should go straight to pool rooms all over Australia when it becomes available on DVD." Luke <unk> of Empire Magazine Australasia gave Not Quite Hollywood five out of five stars, calling the film "fast, thrilling and often ribald", while Leigh Paatsch wrote for the Herald Sun that "there is not a single instant where boredom can possibly intrude", dubbing the film "an incredibly energetic and merrily messed-up celebration of Australian B-movies".
|
(************************************************************************)
(* v * The Coq Proof Assistant / The Coq Development Team *)
(* <O___,, * INRIA - CNRS - LIX - LRI - PPS - Copyright 1999-2012 *)
(* \VV/ **************************************************************)
(* // * This file is distributed under the terms of the *)
(* * GNU Lesser General Public License Version 2.1 *)
(************************************************************************)
(************************************************************************)
(* This file has been modified for the purposes of the HoTT library. *)
(************************************************************************)
Require Import Notations.
Ltac easy :=
let rec use_hyp H :=
match type of H with
| _ => try solve [inversion H]
end
with do_intro := let H := fresh in intro H; use_hyp H
with destruct_hyp H := case H; clear H; do_intro; do_intro in
let rec use_hyps :=
match goal with
| H : _ |- _ => solve [inversion H]
| _ => idtac
end in
let rec do_atom :=
solve [reflexivity | symmetry; trivial] ||
contradiction ||
(split; do_atom)
with do_ccl := trivial; repeat do_intro; do_atom in
(use_hyps; do_ccl) || fail "Cannot solve this goal".
Tactic Notation "now" tactic(t) := t; easy.
|
theory NumberTheory1
imports Complex
begin
thm floor_correct
lemma floor_smaller : fixes n::real shows " \<lfloor>n\<rfloor> \<le> n" using floor_correct
apply (rule conjE)
apply (assumption)
done
lemma floor_smaller2 : fixes n::real shows " \<lfloor>n\<rfloor> \<le> n"
proof -
from floor_correct have "of_int \<lfloor>n\<rfloor> \<le> n \<and> n < of_int (\<lfloor>n\<rfloor> + 1) " by assumption
hence "of_int \<lfloor>n\<rfloor> \<le> n" by (rule conjE)
thus " real_of_int \<lfloor>n\<rfloor> \<le> n" by (simp del : of_int_floor_le)
qed
lemma "even (n::int) \<longleftrightarrow> (\<exists> k . n = 2*k)" by (rule dvd_def)
lemma "\<forall> n \<in> \<real> . odd n \<or> even n"
proof (rule Set.ballI)
fix n::'a
assume a:"n \<in> \<real>"
show "odd n \<or> even n" by (rule excluded_middle)
qed
lemma "odd (n::int) \<longleftrightarrow> (\<exists>k. n = 2*k+1)"
using [[simp_trace_new mode=full]]
proof (rule iffI)
assume a:"odd n"
{
fix k::int
assume b:"n = 2*k + 1"
{
assume c:"even (2*k+1)"
from c and b have False by simp
}
hence "odd (2 * k + 1)" by simp
}
lemma "sqrt (real 2) \<notin> \<rat>"
proof
|
(* Title: HOL/Unix/Nested_Environment.thy
Author: Markus Wenzel, TU Muenchen
*)
section \<open>Nested environments\<close>
theory Nested_Environment
imports Main
begin
text \<open>
Consider a partial function @{term [source] "e :: 'a \<Rightarrow> 'b option"}; this may
be understood as an \<^emph>\<open>environment\<close> mapping indexes \<^typ>\<open>'a\<close> to optional
entry values \<^typ>\<open>'b\<close> (cf.\ the basic theory \<open>Map\<close> of Isabelle/HOL). This
basic idea is easily generalized to that of a \<^emph>\<open>nested environment\<close>, where
entries may be either basic values or again proper environments. Then each
entry is accessed by a \<^emph>\<open>path\<close>, i.e.\ a list of indexes leading to its
position within the structure.
\<close>
datatype (dead 'a, dead 'b, dead 'c) env =
Val 'a
| Env 'b "'c \<Rightarrow> ('a, 'b, 'c) env option"
text \<open>
\<^medskip>
In the type \<^typ>\<open>('a, 'b, 'c) env\<close> the parameter \<^typ>\<open>'a\<close> refers to
basic values (occurring in terminal positions), type \<^typ>\<open>'b\<close> to values
associated with proper (inner) environments, and type \<^typ>\<open>'c\<close> with the
index type for branching. Note that there is no restriction on any of these
types. In particular, arbitrary branching may yield rather large
(transfinite) tree structures.
\<close>
subsection \<open>The lookup operation\<close>
text \<open>
Lookup in nested environments works by following a given path of index
elements, leading to an optional result (a terminal value or nested
environment). A \<^emph>\<open>defined position\<close> within a nested environment is one where
\<^term>\<open>lookup\<close> at its path does not yield \<^term>\<open>None\<close>.
\<close>
primrec lookup :: "('a, 'b, 'c) env \<Rightarrow> 'c list \<Rightarrow> ('a, 'b, 'c) env option"
and lookup_option :: "('a, 'b, 'c) env option \<Rightarrow> 'c list \<Rightarrow> ('a, 'b, 'c) env option"
where
"lookup (Val a) xs = (if xs = [] then Some (Val a) else None)"
| "lookup (Env b es) xs =
(case xs of
[] \<Rightarrow> Some (Env b es)
| y # ys \<Rightarrow> lookup_option (es y) ys)"
| "lookup_option None xs = None"
| "lookup_option (Some e) xs = lookup e xs"
hide_const lookup_option
text \<open>
\<^medskip>
The characteristic cases of \<^term>\<open>lookup\<close> are expressed by the following
equalities.
\<close>
theorem lookup_nil: "lookup e [] = Some e"
by (cases e) simp_all
theorem lookup_val_cons: "lookup (Val a) (x # xs) = None"
by simp
theorem lookup_env_cons:
"lookup (Env b es) (x # xs) =
(case es x of
None \<Rightarrow> None
| Some e \<Rightarrow> lookup e xs)"
by (cases "es x") simp_all
lemmas lookup.simps [simp del] lookup_option.simps [simp del]
and lookup_simps [simp] = lookup_nil lookup_val_cons lookup_env_cons
theorem lookup_eq:
"lookup env xs =
(case xs of
[] \<Rightarrow> Some env
| x # xs \<Rightarrow>
(case env of
Val a \<Rightarrow> None
| Env b es \<Rightarrow>
(case es x of
None \<Rightarrow> None
| Some e \<Rightarrow> lookup e xs)))"
by (simp split: list.split env.split)
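text \<open>
  \<^medskip>
  For illustration, a concrete instance of these equalities: looking up the
  one-element path [''a''] in a one-entry environment first descends into the
  inner mapping and then returns the stored value.
\<close>
lemma "lookup (Env () (\<lambda>x. if x = ''a'' then Some (Val (1::nat)) else None))
    [''a''] = Some (Val 1)"
  by simp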
text \<open>
\<^medskip>
Displaced \<^term>\<open>lookup\<close> operations, relative to a certain base path prefix,
may be reduced as follows. There are two cases, depending on whether the
environment actually extends far enough to follow the base path.
\<close>
theorem lookup_append_none:
assumes "lookup env xs = None"
shows "lookup env (xs @ ys) = None"
using assms
proof (induct xs arbitrary: env)
case Nil
then have False by simp
then show ?case ..
next
case (Cons x xs)
show ?case
proof (cases env)
case Val
then show ?thesis by simp
next
case (Env b es)
show ?thesis
proof (cases "es x")
case None
with Env show ?thesis by simp
next
case (Some e)
note es = \<open>es x = Some e\<close>
show ?thesis
proof (cases "lookup e xs")
case None
then have "lookup e (xs @ ys) = None" by (rule Cons.hyps)
with Env Some show ?thesis by simp
next
case Some
with Env es have False using Cons.prems by simp
then show ?thesis ..
qed
qed
qed
qed
theorem lookup_append_some:
assumes "lookup env xs = Some e"
shows "lookup env (xs @ ys) = lookup e ys"
using assms
proof (induct xs arbitrary: env e)
case Nil
then have "env = e" by simp
then show "lookup env ([] @ ys) = lookup e ys" by simp
next
case (Cons x xs)
note asm = \<open>lookup env (x # xs) = Some e\<close>
show "lookup env ((x # xs) @ ys) = lookup e ys"
proof (cases env)
case (Val a)
with asm have False by simp
then show ?thesis ..
next
case (Env b es)
show ?thesis
proof (cases "es x")
case None
with asm Env have False by simp
then show ?thesis ..
next
case (Some e')
note es = \<open>es x = Some e'\<close>
show ?thesis
proof (cases "lookup e' xs")
case None
with asm Env es have False by simp
then show ?thesis ..
next
case Some
with asm Env es have "lookup e' xs = Some e"
by simp
then have "lookup e' (xs @ ys) = lookup e ys" by (rule Cons.hyps)
with Env es show ?thesis by simp
qed
qed
qed
qed
text \<open>
\<^medskip>
Successful \<^term>\<open>lookup\<close> deeper down an environment structure means we are
able to peek further up as well. Note that this is basically just the
contrapositive statement of @{thm [source] lookup_append_none} above.
\<close>
theorem lookup_some_append:
assumes "lookup env (xs @ ys) = Some e"
shows "\<exists>e. lookup env xs = Some e"
proof -
from assms have "lookup env (xs @ ys) \<noteq> None" by simp
then have "lookup env xs \<noteq> None"
by (rule contrapos_nn) (simp only: lookup_append_none)
then show ?thesis by (simp)
qed
text \<open>
The subsequent statement describes in more detail how a successful \<^term>\<open>lookup\<close> with a non-empty path results in a certain situation at any upper
position.
\<close>
theorem lookup_some_upper:
assumes "lookup env (xs @ y # ys) = Some e"
shows "\<exists>b' es' env'.
lookup env xs = Some (Env b' es') \<and>
es' y = Some env' \<and>
lookup env' ys = Some e"
using assms
proof (induct xs arbitrary: env e)
case Nil
from Nil.prems have "lookup env (y # ys) = Some e"
by simp
then obtain b' es' env' where
env: "env = Env b' es'" and
es': "es' y = Some env'" and
look': "lookup env' ys = Some e"
by (auto simp add: lookup_eq split: option.splits env.splits)
from env have "lookup env [] = Some (Env b' es')" by simp
with es' look' show ?case by blast
next
case (Cons x xs)
from Cons.prems
obtain b' es' env' where
env: "env = Env b' es'" and
es': "es' x = Some env'" and
look': "lookup env' (xs @ y # ys) = Some e"
by (auto simp add: lookup_eq split: option.splits env.splits)
from Cons.hyps [OF look'] obtain b'' es'' env'' where
upper': "lookup env' xs = Some (Env b'' es'')" and
es'': "es'' y = Some env''" and
look'': "lookup env'' ys = Some e"
by blast
from env es' upper' have "lookup env (x # xs) = Some (Env b'' es'')"
by simp
with es'' look'' show ?case by blast
qed
subsection \<open>The update operation\<close>
text \<open>
Update at a certain position in a nested environment may either delete an
existing entry, or overwrite an existing one. Note that update at undefined
positions is simply absorbed, i.e.\ the environment is left unchanged.
\<close>
primrec update :: "'c list \<Rightarrow> ('a, 'b, 'c) env option \<Rightarrow>
('a, 'b, 'c) env \<Rightarrow> ('a, 'b, 'c) env"
and update_option :: "'c list \<Rightarrow> ('a, 'b, 'c) env option \<Rightarrow>
('a, 'b, 'c) env option \<Rightarrow> ('a, 'b, 'c) env option"
where
"update xs opt (Val a) =
(if xs = [] then (case opt of None \<Rightarrow> Val a | Some e \<Rightarrow> e)
else Val a)"
| "update xs opt (Env b es) =
(case xs of
[] \<Rightarrow> (case opt of None \<Rightarrow> Env b es | Some e \<Rightarrow> e)
| y # ys \<Rightarrow> Env b (es (y := update_option ys opt (es y))))"
| "update_option xs opt None =
(if xs = [] then opt else None)"
| "update_option xs opt (Some e) =
(if xs = [] then opt else Some (update xs opt e))"
hide_const update_option
text \<open>
\<^medskip>
The characteristic cases of \<^term>\<open>update\<close> are expressed by the following
equalities.
\<close>
theorem update_nil_none: "update [] None env = env"
by (cases env) simp_all
theorem update_nil_some: "update [] (Some e) env = e"
by (cases env) simp_all
theorem update_cons_val: "update (x # xs) opt (Val a) = Val a"
by simp
theorem update_cons_nil_env:
"update [x] opt (Env b es) = Env b (es (x := opt))"
by (cases "es x") simp_all
theorem update_cons_cons_env:
"update (x # y # ys) opt (Env b es) =
Env b (es (x :=
(case es x of
None \<Rightarrow> None
| Some e \<Rightarrow> Some (update (y # ys) opt e))))"
by (cases "es x") simp_all
lemmas update.simps [simp del] update_option.simps [simp del]
and update_simps [simp] = update_nil_none update_nil_some
update_cons_val update_cons_nil_env update_cons_cons_env
lemma update_eq:
"update xs opt env =
(case xs of
[] \<Rightarrow>
(case opt of
None \<Rightarrow> env
| Some e \<Rightarrow> e)
| x # xs \<Rightarrow>
(case env of
Val a \<Rightarrow> Val a
| Env b es \<Rightarrow>
(case xs of
[] \<Rightarrow> Env b (es (x := opt))
| y # ys \<Rightarrow>
Env b (es (x :=
(case es x of
None \<Rightarrow> None
| Some e \<Rightarrow> Some (update (y # ys) opt e)))))))"
by (simp split: list.split env.split option.split)
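text \<open>
  \<^medskip>
  For illustration, a concrete instance: overwriting the entry at index ''a''
  of a one-entry environment and looking it up again yields the new value.
\<close>
lemma "lookup (update [''a''] (Some (Val (2::nat)))
      (Env () (\<lambda>x. if x = ''a'' then Some (Val (1::nat)) else None)))
    [''a''] = Some (Val 2)"
  by simp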
text \<open>
\<^medskip>
The most basic correspondence of \<^term>\<open>lookup\<close> and \<^term>\<open>update\<close> states
that after \<^term>\<open>update\<close> at a defined position, subsequent \<^term>\<open>lookup\<close>
operations would yield the new value.
\<close>
theorem lookup_update_some:
assumes "lookup env xs = Some e"
shows "lookup (update xs (Some env') env) xs = Some env'"
using assms
proof (induct xs arbitrary: env e)
case Nil
then have "env = e" by simp
then show ?case by simp
next
case (Cons x xs)
note hyp = Cons.hyps
and asm = \<open>lookup env (x # xs) = Some e\<close>
show ?case
proof (cases env)
case (Val a)
with asm have False by simp
then show ?thesis ..
next
case (Env b es)
show ?thesis
proof (cases "es x")
case None
with asm Env have False by simp
then show ?thesis ..
next
case (Some e')
note es = \<open>es x = Some e'\<close>
show ?thesis
proof (cases xs)
case Nil
with Env show ?thesis by simp
next
case (Cons x' xs')
from asm Env es have "lookup e' xs = Some e" by simp
then have "lookup (update xs (Some env') e') xs = Some env'" by (rule hyp)
with Env es Cons show ?thesis by simp
qed
qed
qed
qed
text \<open>
\<^medskip>
The properties of displaced \<^term>\<open>update\<close> operations are analogous to those
of \<^term>\<open>lookup\<close> above. There are two cases: below an undefined position
\<^term>\<open>update\<close> is absorbed altogether, and below a defined position \<^term>\<open>update\<close> affects subsequent \<^term>\<open>lookup\<close> operations in the obvious way.
\<close>
theorem update_append_none:
assumes "lookup env xs = None"
shows "update (xs @ y # ys) opt env = env"
using assms
proof (induct xs arbitrary: env)
case Nil
then have False by simp
then show ?case ..
next
case (Cons x xs)
note hyp = Cons.hyps
and asm = \<open>lookup env (x # xs) = None\<close>
show "update ((x # xs) @ y # ys) opt env = env"
proof (cases env)
case (Val a)
then show ?thesis by simp
next
case (Env b es)
show ?thesis
proof (cases "es x")
case None
note es = \<open>es x = None\<close>
show ?thesis
by (cases xs) (simp_all add: es Env fun_upd_idem_iff)
next
case (Some e)
note es = \<open>es x = Some e\<close>
show ?thesis
proof (cases xs)
case Nil
with asm Env Some have False by simp
then show ?thesis ..
next
case (Cons x' xs')
from asm Env es have "lookup e xs = None" by simp
then have "update (xs @ y # ys) opt e = e" by (rule hyp)
with Env es Cons show "update ((x # xs) @ y # ys) opt env = env"
by (simp add: fun_upd_idem_iff)
qed
qed
qed
qed
theorem update_append_some:
assumes "lookup env xs = Some e"
shows "lookup (update (xs @ y # ys) opt env) xs = Some (update (y # ys) opt e)"
using assms
proof (induct xs arbitrary: env e)
case Nil
then have "env = e" by simp
then show ?case by simp
next
case (Cons x xs)
note hyp = Cons.hyps
and asm = \<open>lookup env (x # xs) = Some e\<close>
show "lookup (update ((x # xs) @ y # ys) opt env) (x # xs) =
Some (update (y # ys) opt e)"
proof (cases env)
case (Val a)
with asm have False by simp
then show ?thesis ..
next
case (Env b es)
show ?thesis
proof (cases "es x")
case None
with asm Env have False by simp
then show ?thesis ..
next
case (Some e')
note es = \<open>es x = Some e'\<close>
show ?thesis
proof (cases xs)
case Nil
with asm Env es have "e = e'" by simp
with Env es Nil show ?thesis by simp
next
case (Cons x' xs')
from asm Env es have "lookup e' xs = Some e" by simp
then have "lookup (update (xs @ y # ys) opt e') xs =
Some (update (y # ys) opt e)" by (rule hyp)
with Env es Cons show ?thesis by simp
qed
qed
qed
qed
text \<open>
\<^medskip>
Apparently, \<^term>\<open>update\<close> does not affect the result of subsequent \<^term>\<open>lookup\<close> operations at independent positions, i.e.\ in case that the paths
for \<^term>\<open>update\<close> and \<^term>\<open>lookup\<close> fork at a certain point.
\<close>
theorem lookup_update_other:
assumes neq: "y \<noteq> (z::'c)"
shows "lookup (update (xs @ z # zs) opt env) (xs @ y # ys) =
lookup env (xs @ y # ys)"
proof (induct xs arbitrary: env)
case Nil
show ?case
proof (cases env)
case Val
then show ?thesis by simp
next
case Env
show ?thesis
proof (cases zs)
case Nil
with neq Env show ?thesis by simp
next
case Cons
with neq Env show ?thesis by simp
qed
qed
next
case (Cons x xs)
note hyp = Cons.hyps
show ?case
proof (cases env)
case Val
then show ?thesis by simp
next
case (Env y es)
show ?thesis
proof (cases xs)
case Nil
show ?thesis
proof (cases "es x")
case None
with Env Nil show ?thesis by simp
next
case Some
with neq hyp and Env Nil show ?thesis by simp
qed
next
case (Cons x' xs')
show ?thesis
proof (cases "es x")
case None
with Env Cons show ?thesis by simp
next
case Some
with neq hyp and Env Cons show ?thesis by simp
qed
qed
qed
qed
subsection \<open>Code generation\<close>
lemma equal_env_code [code]:
fixes x y :: "'a::equal"
and f g :: "'c::{equal, finite} \<Rightarrow> ('b::equal, 'a, 'c) env option"
shows
"HOL.equal (Env x f) (Env y g) \<longleftrightarrow>
HOL.equal x y \<and> (\<forall>z \<in> UNIV.
case f z of
None \<Rightarrow> (case g z of None \<Rightarrow> True | Some _ \<Rightarrow> False)
| Some a \<Rightarrow> (case g z of None \<Rightarrow> False | Some b \<Rightarrow> HOL.equal a b))" (is ?env)
and "HOL.equal (Val a) (Val b) \<longleftrightarrow> HOL.equal a b"
and "HOL.equal (Val a) (Env y g) \<longleftrightarrow> False"
and "HOL.equal (Env x f) (Val b) \<longleftrightarrow> False"
proof (unfold equal)
have "f = g \<longleftrightarrow>
(\<forall>z. case f z of
None \<Rightarrow> (case g z of None \<Rightarrow> True | Some _ \<Rightarrow> False)
| Some a \<Rightarrow> (case g z of None \<Rightarrow> False | Some b \<Rightarrow> a = b))" (is "?lhs = ?rhs")
proof
assume ?lhs
then show ?rhs by (auto split: option.splits)
next
assume ?rhs (is "\<forall>z. ?prop z")
show ?lhs
proof
fix z
from \<open>?rhs\<close> have "?prop z" ..
then show "f z = g z" by (auto split: option.splits)
qed
qed
then show "Env x f = Env y g \<longleftrightarrow>
x = y \<and> (\<forall>z \<in> UNIV.
case f z of
None \<Rightarrow> (case g z of None \<Rightarrow> True | Some _ \<Rightarrow> False)
| Some a \<Rightarrow> (case g z of None \<Rightarrow> False | Some b \<Rightarrow> a = b))" by simp
qed simp_all
lemma [code nbe]: "HOL.equal (x :: (_, _, _) env) x \<longleftrightarrow> True"
by (fact equal_refl)
end
|
lemma monom_0: "monom a 0 = pCons a 0" |
(*<*)
(*:maxLineLen=78:*)
theory RecursiveVDMExamples
imports VDMToolkit
begin
(********************************************************)
section \<open>Constructive type (\<^typ>\<open>\<nat>\<close>) recursion primitive and function\<close>
\<comment> \<open>Automatic with pattern matching only, if-then-else fails\<close>
primrec factN :: \<open>\<nat> \<Rightarrow> \<nat>\<close> where
\<open>factN n = (if n = 0 then 1 else n * (factN (n - 1)))\<close>
\<comment> \<open>Automatic but forces \<^typ>\<open>\<nat>\<close> constructors\<close>
primrec factN :: \<open>\<nat> \<Rightarrow> \<nat>\<close> where
\<open>factN 0 = 1\<close>
| \<open>factN (Suc n) = (n * (factN n))\<close>
\<comment> \<open>Missing pattern completeness is allowed, with a warning\<close>
primrec factNmissingConstructors :: \<open>\<nat> \<Rightarrow> \<nat>\<close> where
\<open>factNmissingConstructors 0 = 1\<close>
\<comment> \<open>Automatic termination with pattern matching\<close>
fun factN' :: \<open>\<nat> \<Rightarrow> \<nat>\<close> where
\<open>factN' n = (if n = 0 then 1 else n * (factN' (n - 1)))\<close>
\<comment> \<open>Missing patterns are completed as undefined\<close>
fun factNincmplete' :: \<open>\<nat> \<Rightarrow> \<nat>\<close> where
\<open>factNincmplete' (Suc n) = (factNincmplete' n)\<close>
(********************************************************)
section \<open>Algebraic type (\<^typ>\<open>\<int>\<close>) recursion primitive and function\<close>
\<comment> \<open>Primitive recursion doesn't work for non-constructive types\<close>
primrec factZ :: \<open>VDMNat \<Rightarrow> VDMNat\<close> where
\<open>factZ 0 = 1\<close>
\<comment> \<open>Function works but can't find termination proof automatically\<close>
fun factZ :: \<open>VDMNat \<Rightarrow> VDMNat\<close> where
\<open>factZ n = (if n = 0 then 1 else n * (factZ (n - 1)))\<close>
\<comment> \<open>User must provide termination argument\<close>
function (domintros) factZ :: \<open>VDMNat \<Rightarrow> VDMNat\<close> where
\<open>factZ n = (if n = 0 then 1 else n * (factZ (n - 1)))\<close>
\<comment> \<open>pattern consistency goal\<close>
apply simp
\<comment> \<open>pattern completeness goal\<close>
by simp
\<comment> \<open>Recursive termination goal\<close>
termination
oops
\<comment> \<open>Various theorems about recursion\<close>
find_theorems name:"factZ"
(********************************************************)
section \<open>Simple recursion catering for VDM specification\<close>
\<comment> \<open>Automatically generated: implicitly inferred input type invariant check\<close>
definition pre_factV :: \<open>VDMNat \<Rightarrow> \<bool>\<close> where
\<open>pre_factV n \<equiv> inv_VDMNat n\<close>
\<comment> \<open>VDM only operates if precondition is satisfied\<close>
function (domintros) factV :: \<open>VDMNat \<Rightarrow> VDMNat\<close> where
\<open>factV n = (if pre_factV n then (if n = 0 then 1 else n * (factV (n - 1))) else undefined)\<close>
by (pat_completeness, auto) \<^marker>\<open>tag sledgehammer\<close>
\<comment> \<open>Well-formedness relation relating the conditions of every recursive call to the original call\<close>
abbreviation factV_wf :: \<open>(VDMNat \<times> VDMNat) set\<close> where
\<open>factV_wf \<equiv> { (n - 1, n) | n . pre_factV n \<and> n \<noteq> 0 }\<close>
\<comment> \<open>Notice the psimps (partial function) simplification rules and guarding domain predicates\<close>
find_theorems name:"factV"
\<comment> \<open>For VDM nat and int, we have proved a general well-formedness relation theorem\<close>
termination
apply (relation \<open>(gen_VDMNat_term factV_wf)\<close>)
\<comment> \<open>This enables sledgehammer to find the well-formedness part of the proof\<close>
using l_gen_VDMNat_term_wf apply blast \<^marker>\<open>tag sledgehammer\<close>
\<comment> \<open>Remains to be shown that the local recursive relation is within the general relation space\<close>
(*Sledgehammering... No proof found *)
oops
termination
apply (relation \<open>(gen_VDMNat_term factV_wf)\<close>)
using l_gen_VDMNat_term_wf apply blast \<^marker>\<open>tag sledgehammer\<close>
\<comment> \<open>Even though sledgehammer struggles, the proof is in fact trivial\<close>
by (simp add: pre_factV_def int_ge_less_than_def)
\<comment> \<open>Notice the psimps (partial function) simplification rules are gone, and simps (total function) are in place instead\<close>
find_theorems name:factV
definition largest_wf_int_rel :: "\<int> \<Rightarrow> (\<int> \<times> \<int>) set" where
"largest_wf_int_rel d = {(z', z). d \<le> z' \<and> z' < z}"
\<comment> \<open>Flag can also generate this lemma (and proof sketch) to ensure the recursive relation is a fix-point\<close>
\<comment> \<open>This is useful when the recursive relation is not within the largest upper bound, to discover how to prove it well formed\<close>
lemma l_fact_term_valid: \<open>(gen_VDMNat_term factV_wf) = factV_wf\<close>
apply (simp )
apply (intro equalityI subsetI)
apply (simp_all add: pre_factV_def int_ge_less_than_def case_prod_beta)
by auto
(********************************************************)
section \<open>VDM recursion over sets\<close>
\<comment> \<open>
sumset: set of nat -> nat
sumset(s) == if s = {} then 0 else let e in set s in sumset(s - {e}) + e
pre (forall n in set s & n > 5)
--@IsaMeasure({(x - { let e in set x in e }, x) | x : set of nat & x <> {}})
--@Witness(sumset({ 1 }))
measure card s;
\<close>
\<comment> \<open>Automatically generated: implicitly inferred type invariant check + user defined pre\<close>
definition pre_sumset :: \<open>VDMNat VDMSet \<Rightarrow> \<bool>\<close> where
\<open>pre_sumset s \<equiv> inv_VDMSet' inv_VDMNat s \<and> (\<forall> n \<in> s . n > 5)\<close>
\<comment> \<open>Automatically generated def set: inferred from function AST + signature\<close>
\<comment> \<open>Notice the unfolding is staggered and deep into the AST term\<close>
lemmas pre_sumset_defs = pre_sumset_def inv_VDMSet'_defs inv_VDMNat_def
\<comment> \<open>Mostly verbatim translation from VDM; let-in-set becomes Isabelle's Hilbert Choice binder (\<some>)\<close>
function (domintros) sumset :: \<open>VDMNat VDMSet \<Rightarrow> VDMNat\<close> where
\<open>sumset s = (if pre_sumset s then
(if s = {} then 0 else
let e = (\<some> x . x \<in> s) in sumset (s - {e}) + e)
else undefined)\<close>
\<comment> \<open>Automatically generated proof for pattern compatibility and completeness\<close>
\<comment> \<open>Users will have to finish this before proceeding if proof suggestion fails!\<close>
by (pat_completeness, auto)
\<comment> \<open>Recursive definitions available, yet as partial functions (psimps + dom predicate)\<close>
find_theorems name:"sumset"
\<comment> \<open>Well founded recursive relation translated from user defined @IsaMeasure\<close>
\<comment> \<open>We automatically infer recursive relations for this specific (commonly used) kind of set recursion\<close>
\<comment> \<open>It is crucial for the termination proof that the precondition is included, which the translator does automatically\<close>
abbreviation sumset_wf_rel :: \<open>(VDMNat VDMSet \<times> VDMNat VDMSet) set\<close> where
\<open>sumset_wf_rel \<equiv> { (s - {(\<some> e . e \<in> s)}, s)| s . pre_sumset s \<and> s \<noteq> {}}\<close>
\<comment> \<open>Translator infers recursive relation well formedness lemma being about sets\<close>
lemma l_sumset_rel_wf: \<open>wf (gen_set_term sumset_wf_rel)\<close>
\<comment> \<open>Proof in this case is discovered by sledgehammer\<close>
using l_gen_set_term_wf by blast \<^marker>\<open>tag sledgehammer\<close>
\<comment> \<open>Termination proof setup is automatically generated\<close>
termination
apply (relation \<open>(gen_set_term sumset_wf_rel)\<close>)
using l_sumset_rel_wf apply blast \<^marker>\<open>tag sledgehammer\<close>
oops
\<comment> \<open>Verbatim copy of failed goal. Perhaps could be auto generated? (Problem it might be spurious)\<close>
lemma l_pre_sumset_sumset_wf_rel:
\<open>pre_sumset s \<Longrightarrow> s \<noteq> {} \<Longrightarrow> (s - {(\<some> x. x \<in> s)}, s) \<in> (gen_set_term sumset_wf_rel)\<close>
unfolding gen_set_term_def apply (simp add: pre_sumset_defs)\<^marker>\<open>tag manual\<close>
by (metis Diff_subset member_remove psubsetI remove_def some_in_eq)\<^marker>\<open>tag sledgehammer\<close>
\<comment> \<open>Lemma enables sledgehammer to find the termination proof\<close>
termination
apply (relation \<open>(gen_set_term sumset_wf_rel)\<close>)
using l_sumset_rel_wf apply blast \<^marker>\<open>tag sledgehammer\<close>
using l_pre_sumset_sumset_wf_rel by presburger \<^marker>\<open>tag sledgehammer\<close>
\<comment> \<open>Recursive definitions available as total functions (simps)\<close>
find_theorems name:"sumset"
\<comment> \<open>Recursion over maps is similar, if more involved; see paper source\<close>
\<comment> \<open>VDM measures are not expressive enough for non-functional measures?\<close>
(********************************************************)
section \<open>Complex recursion example with automation support\<close>
\<comment> \<open>ack: nat * nat -> nat
ack(m,n) == if m = 0 then n+1
else if n = 0 then ack(m-1, 1)
else ack(m-1, ack(m, (n-1)))
--@IsaMeasure( pair_less_VDMNat )
measure is not yet specified;
\<close>
definition pre_ack :: \<open>VDMNat \<Rightarrow> VDMNat \<Rightarrow> \<bool>\<close> where
\<open>pre_ack m n \<equiv> inv_VDMNat m \<and> inv_VDMNat n\<close>
lemmas pre_ack_defs = pre_ack_def
function (domintros) ack :: \<open>VDMNat \<Rightarrow> VDMNat \<Rightarrow> VDMNat\<close> where
\<open>ack m n = (if pre_ack m n then
if m = 0 then n+1
else if n = 0 then ack (m-1) 1
else ack (m-1) (ack m (n-1))
else undefined)\<close>
by (pat_completeness, auto) \<^marker>\<open>tag sledgehammer\<close>
\<comment> \<open>User defined well formed relation, yet as part of Isabelle's high levels of automation armoury \<close>
abbreviation ack_wf :: \<open>((VDMNat \<times> VDMNat) \<times> (VDMNat \<times> VDMNat)) VDMSet\<close>
where \<open>ack_wf \<equiv> pair_less_VDMNat\<close>
\<comment> \<open>Proof is manual, but mostly discovered by sledgehammer\<close>
termination
apply (relation ack_wf)\<^marker>\<open>tag manual\<close>
using wf_pair_less_VDMNat apply blast \<^marker>\<open>tag sledgehammer\<close>
apply (simp add: l_pair_less_VDMNat_I1 pre_ack_def) \<^marker>\<open>tag sledgehammer\<close>
apply (simp add: pre_ack_defs) \<^marker>\<open>tag sledgehammer\<close>
by (simp add: pair_less_VDMNat_def pre_ack_def) \<^marker>\<open>tag sledgehammer\<close>
(********************************************************)
section \<open>Complex recursion where Isabelle proof discovers missing VDM specification!\<close>
\<comment> \<open>perm: int * int * int -> int
perm(m,n,r) == if 0 < r then perm(m, r-1, n)
else if 0 < n then perm(r, n-1, m) else m
measure is not yet specified;\<close>
definition pre_perm :: \<open>VDMInt \<Rightarrow> VDMInt \<Rightarrow> VDMInt \<Rightarrow> \<bool>\<close> where
\<open>pre_perm m n r \<equiv> inv_VDMInt m \<and> inv_VDMInt n \<and> inv_VDMInt r\<close>
lemmas pre_perm_defs = pre_perm_def inv_VDMInt_def inv_True_def
function (domintros) perm :: \<open>VDMInt \<Rightarrow> VDMInt \<Rightarrow> VDMInt \<Rightarrow> VDMInt\<close> where
\<open>perm m n r = (if pre_perm m n r then
if 0 < r then perm m (r-1) n
else if 0 < n then perm r (n-1) m else m
else undefined)\<close>
by (pat_completeness, auto) \<^marker>\<open>tag sledgehammer\<close>
\<comment> \<open>Inferred recursive relation based on recursive call patterns and VDM AST\<close>
definition perm_wf_rel :: \<open>((VDMInt \<times> VDMInt \<times> VDMInt) \<times>
(VDMInt \<times> VDMInt \<times> VDMInt)) VDMSet\<close>
where \<open>perm_wf_rel \<equiv>
{ ((m, r-1, n), (m, n, r)) | m r n . pre_perm m n r \<and> 0 < r } \<union>
{ ((r, n-1, m), (m, n, r)) | m r n . pre_perm m n r \<and> \<not> 0 < r \<and> 0 < n }\<close>
\<comment> \<open>Automatically generated lemma left for the user to discharge\<close>
\<comment> \<open>This will force the user to think of a VDM measure to use, which can be
expressed in this case using the measure method\<close>
lemma l_perm_wf_rel: \<open>wf perm_wf_rel\<close>
sorry
termination
apply (relation \<open>perm_wf_rel\<close>)
apply (simp add: l_perm_wf_rel) \<^marker>\<open>tag sledgehammer\<close>
\<comment> \<open>Sledgehammer fails here, yet the proof is "easy" \<close>
by (simp_all add: perm_wf_rel_def) \<^marker>\<open>tag manual\<close>
(*----------------------------------------------------------------*)
subsection \<open>Distilling missing proof: take 1\<close>
\<comment> \<open>Suggests a VDM measure as max(m+n+r, 0)\<close>
lemma l_perm_wf_rel_VDM_measure:
\<open>perm_wf_rel \<subseteq> measure (\<lambda> (m, r, n) . nat (max 0 (m+r+n)))\<close>
apply (intro subsetI, case_tac x)
apply (simp add: pre_perm_defs perm_wf_rel_def case_prod_beta max_def)
apply (elim disjE conjE, simp_all)
nitpick
\<comment> \<open>Counter example shows the recursion would fail for certain inputs!\<close>
\<comment> \<open>It suggests a precondition is needed.\<close>
\<comment> \<open>@NB would quickcheck find it?\<close>
sorry
\<comment> \<open>If measure lemma is proved, sledgehammer can find the missing proof\<close>
lemma l_perm_wf_rel': \<open>wf perm_wf_rel\<close>
using l_perm_wf_rel_VDM_measure wf_subset by blast
(*----------------------------------------------------------------*)
subsection \<open>Distilling missing proof: take 2\<close>
\<comment> \<open>Reviewed VDM specification must include:
* pre ((0 < r or 0 < n) => m+n+r > 0)
* measure maxs({m+n+r, 0});
\<close>
definition pre_perm' :: \<open>VDMInt \<Rightarrow> VDMInt \<Rightarrow> VDMInt \<Rightarrow> \<bool>\<close> where
\<open>pre_perm' m n r \<equiv> pre_perm m n r \<and> ((0 < r \<or> 0 < n) \<longrightarrow> m+n+r > 0)\<close>
lemmas pre_perm'_defs = pre_perm'_def pre_perm_defs
definition perm_wf_rel' :: \<open>((VDMInt \<times> VDMInt \<times> VDMInt) \<times>
(VDMInt \<times> VDMInt \<times> VDMInt)) VDMSet\<close>
where \<open>perm_wf_rel' \<equiv>
{ ((m, r-1, n), (m, n, r)) | m r n . pre_perm' m n r \<and> 0 < r } \<union>
{ ((r, n-1, m), (m, n, r)) | m r n . pre_perm' m n r \<and> \<not> 0 < r \<and> 0 < n }\<close>
function (domintros) perm' :: \<open>VDMInt \<Rightarrow> VDMInt \<Rightarrow> VDMInt \<Rightarrow> VDMInt\<close> where
\<open>perm' m n r = (if pre_perm' m n r then
if 0 < r then perm' m (r-1) n
else if 0 < n then perm' r (n-1) m else m
else undefined)\<close>
by (pat_completeness, auto) \<^marker>\<open>tag sledgehammer\<close>
lemma l_perm_wf_rel_VDM_measure':
\<open>perm_wf_rel' \<subseteq> measure (\<lambda> (m, r, n) . nat (max 0 (m+r+n)))\<close>
apply (intro subsetI, case_tac x)
apply (simp add: pre_perm'_defs perm_wf_rel'_def case_prod_beta max_def)
apply (elim disjE conjE, simp_all)
done
\<comment> \<open>With the lemma proved, sledgehammer can find the missing proof on updated spec\<close>
lemma l_perm_wf_rel'': \<open>wf perm_wf_rel'\<close>
using l_perm_wf_rel_VDM_measure' wf_subset by blast
termination
apply (relation \<open>perm_wf_rel'\<close>)
apply (simp add: l_perm_wf_rel'') \<^marker>\<open>tag sledgehammer\<close>
\<comment> \<open>Sledgehammer fails here, yet the proof is "easy" \<close>
by (simp_all add: perm_wf_rel'_def) \<^marker>\<open>tag manual\<close>
end
(*>*) |
[STATEMENT]
lemma limitin_subtopology:
"limitin (subtopology X S) f l F
\<longleftrightarrow> l \<in> S \<and> eventually (\<lambda>a. f a \<in> S) F \<and> limitin X f l F" (is "?lhs = ?rhs")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. limitin (subtopology X S) f l F = (l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F)
[PROOF STEP]
proof (cases "l \<in> S \<inter> topspace X")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. l \<in> S \<inter> topspace X \<Longrightarrow> limitin (subtopology X S) f l F = (l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F)
2. l \<notin> S \<inter> topspace X \<Longrightarrow> limitin (subtopology X S) f l F = (l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F)
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
l \<in> S \<inter> topspace X
goal (2 subgoals):
1. l \<in> S \<inter> topspace X \<Longrightarrow> limitin (subtopology X S) f l F = (l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F)
2. l \<notin> S \<inter> topspace X \<Longrightarrow> limitin (subtopology X S) f l F = (l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. limitin (subtopology X S) f l F = (l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F)
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. limitin (subtopology X S) f l F \<Longrightarrow> l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F
2. l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F \<Longrightarrow> limitin (subtopology X S) f l F
[PROOF STEP]
assume L: ?lhs
[PROOF STATE]
proof (state)
this:
limitin (subtopology X S) f l F
goal (2 subgoals):
1. limitin (subtopology X S) f l F \<Longrightarrow> l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F
2. l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F \<Longrightarrow> limitin (subtopology X S) f l F
[PROOF STEP]
with True
[PROOF STATE]
proof (chain)
picking this:
l \<in> S \<inter> topspace X
limitin (subtopology X S) f l F
[PROOF STEP]
have "\<forall>\<^sub>F b in F. f b \<in> topspace X \<inter> S"
[PROOF STATE]
proof (prove)
using this:
l \<in> S \<inter> topspace X
limitin (subtopology X S) f l F
goal (1 subgoal):
1. \<forall>\<^sub>F b in F. f b \<in> topspace X \<inter> S
[PROOF STEP]
by (metis (no_types) limitin_def openin_topspace topspace_subtopology)
[PROOF STATE]
proof (state)
this:
\<forall>\<^sub>F b in F. f b \<in> topspace X \<inter> S
goal (2 subgoals):
1. limitin (subtopology X S) f l F \<Longrightarrow> l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F
2. l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F \<Longrightarrow> limitin (subtopology X S) f l F
[PROOF STEP]
with L
[PROOF STATE]
proof (chain)
picking this:
limitin (subtopology X S) f l F
\<forall>\<^sub>F b in F. f b \<in> topspace X \<inter> S
[PROOF STEP]
show ?rhs
[PROOF STATE]
proof (prove)
using this:
limitin (subtopology X S) f l F
\<forall>\<^sub>F b in F. f b \<in> topspace X \<inter> S
goal (1 subgoal):
1. l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F
[PROOF STEP]
apply (clarsimp simp add: limitin_def eventually_mono openin_subtopology_alt)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>U. \<lbrakk>\<forall>\<^sub>F b in F. f b \<in> topspace X \<and> f b \<in> S; l \<in> topspace X; l \<in> S; \<forall>U. U \<in> (\<inter>) S ` Collect (openin X) \<and> l \<in> U \<longrightarrow> (\<forall>\<^sub>F x in F. f x \<in> U); openin X U; l \<in> U\<rbrakk> \<Longrightarrow> \<forall>\<^sub>F x in F. f x \<in> U
[PROOF STEP]
apply (drule_tac x="S \<inter> U" in spec, force simp: elim: eventually_mono)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F
goal (1 subgoal):
1. l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F \<Longrightarrow> limitin (subtopology X S) f l F
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F \<Longrightarrow> limitin (subtopology X S) f l F
[PROOF STEP]
assume ?rhs
[PROOF STATE]
proof (state)
this:
l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F
goal (1 subgoal):
1. l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F \<Longrightarrow> limitin (subtopology X S) f l F
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F
[PROOF STEP]
show ?lhs
[PROOF STATE]
proof (prove)
using this:
l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F
goal (1 subgoal):
1. limitin (subtopology X S) f l F
[PROOF STEP]
using eventually_elim2
[PROOF STATE]
proof (prove)
using this:
l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F
\<lbrakk>eventually ?P ?F; eventually ?Q ?F; \<And>i. \<lbrakk>?P i; ?Q i\<rbrakk> \<Longrightarrow> ?R i\<rbrakk> \<Longrightarrow> eventually ?R ?F
goal (1 subgoal):
1. limitin (subtopology X S) f l F
[PROOF STEP]
by (fastforce simp add: limitin_def openin_subtopology_alt)
[PROOF STATE]
proof (state)
this:
limitin (subtopology X S) f l F
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
limitin (subtopology X S) f l F = (l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F)
goal (1 subgoal):
1. l \<notin> S \<inter> topspace X \<Longrightarrow> limitin (subtopology X S) f l F = (l \<in> S \<and> (\<forall>\<^sub>F a in F. f a \<in> S) \<and> limitin X f l F)
[PROOF STEP]
qed (auto simp: limitin_def) |
-- Copyright (C) 2020 by @ljt12138
import propositional.basics propositional.thms data.list
import propositional.used_formula
namespace prop_logic
lemma consistent_with_smaller_set {α : Type} (Γ Γ' : list (sentence α)) :
Γ ⊆ Γ' → consistent Γ' → consistent Γ :=
begin
intros h a contra,
unfold consistent at a, apply a,
apply provable_with_stronger_assumption,
assumption, assumption,
end
lemma consistent_with_append {α : Type} (Γ : list _) (φ : sentence α) :
consistent (φ :: Γ) → consistent Γ :=
begin
intros h,
have ht : Γ ⊆ φ :: Γ := by simp,
apply consistent_with_smaller_set, assumption, assumption
end
lemma complete_extension {α : Type} {Γ : list (sentence α)}
(l : list (sentence α)) (a : consistent Γ) :
∃ Γ', Γ ⊆ Γ' ∧ (∀ φ, φ ∈ l → φ ∈ Γ' ∨ (~φ) ∈ Γ') ∧ consistent Γ' :=
begin
induction l with φ l' ih,
case nil { simp, existsi Γ, split, simp, assumption },
case cons {
cases ih with Γ₀ h, simp at *,
cases (classical.em (Γ₀ ⊢ φ)),
case inl {
existsi (list.cons φ Γ₀), split,
{ apply list.subset.trans, exact h.left, by simp },
split, split,
{ simp },
{ simp, intros, have ht := h.right.left x H, tauto },
{
apply appending_provable, exact h.right.right,
assumption
}
},
case inr {
existsi (list.cons (φ↣⊥) Γ₀), split,
{ apply list.subset.trans, exact h.left, by simp },
split, split,
{ simp },
{ simp, intros, have ht := h.right.left x H, tauto },
{ apply appending_unprovable, assumption }
}
}
end
end prop_logic
|
# copied from autogenerated SnoopCompile files
function _precompile_()
nothing
end
|
struct DBFGS{T1, T2} <: QuasiNewton{T1}
approx::T1
theta::T2
end
DBFGS(approx) = DBFGS(;approx=approx)
DBFGS(;approx=Inverse(), theta=0.2) = DBFGS(approx, theta)
function update!(scheme::DBFGS{<:Direct, <:Any}, B, s, y)
# We could write this as
# B .+= (y*y')/dot(s, y) - (B*s)*(s'*B)/(s'*B*s)
# B .+= (y*y')/dot(s, y) - b*b'/dot(s, b)
# where b = B*s
# But instead, we split up the calculations. First calculate the denominator
# in the first term
σ = dot(s, y)
ρ = inv(σ) # scalar
# Then calculate the vector b
b = B*s # vector temporary
sb = dot(s, b)
if σ ≥ scheme.theta*sb
θ = 1.0
# Calculate one vector divided by dot(s, b)
ρbb = inv(sb)*b
# And calculate
B .+= (inv(σ)*y)*y' .- ρbb*b'
else
θ = 0.8*sb/(sb-σ)
r = y*θ + (1-θ)*b
# Calculate one vector divided by dot(s, b)
ρbb = inv(dot(s, b))*b
# And calculate
B .+= (inv(dot(s, r))*r)*r' .- ρbb*b'
end
end
function update(scheme::DBFGS{<:Direct, <:Any}, B, s, y)
# As above, but out of place
σ = dot(s, y)
b = B*s
sb = dot(s, b)
if σ ≥ scheme.theta*sb
θ = 1.0
# Calculate one vector divided by dot(s, b)
ρbb = inv(sb)*b
# And calculate
return B .+ (inv(σ)*y)*y' .- ρbb*b'
else
θ = 0.8*sb/(sb-σ)
r = y*θ + (1-θ)*b
# Calculate one vector divided by dot(s, b)
ρbb = inv(dot(s, b))*b
# And calculate
return B .+ (inv(dot(s, r))*r)*r' .- ρbb*b'
end
end
function update(scheme::DBFGS{<:Inverse, <:Any}, H, s, y)
σ = dot(s, y)
ρ = inv(σ)
# if isfinite(ρ)
C = (I - ρ*s*y')
H = C*H*C' + ρ*s*s'
# end
H
end
function update!(scheme::DBFGS{<:Inverse, <:Any}, H, s, y)
σ = dot(s, y)
ρ = inv(σ)
if isfinite(ρ)
Hy = H*y
H .= H .+ ((σ+y'*Hy).*ρ^2)*(s*s')
Hys = Hy*s'
Hys .= Hys .+ Hys'
H .= H .- Hys.*ρ
end
H
end
function update!(scheme::DBFGS{<:Inverse, <:Any}, A::UniformScaling, s, y)
update(scheme, A, s, y)
end
function update!(scheme::DBFGS{<:Direct, <:Any}, A::UniformScaling, s, y)
update(scheme, A, s, y)
end
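# Minimal usage sketch (hypothetical values; `Inverse` and the abstract
# `QuasiNewton` type are assumed to be defined elsewhere in the package):
#
#   using LinearAlgebra
#   scheme = DBFGS()                 # defaults: Inverse() approximation, theta = 0.2
#   H = Matrix{Float64}(I, 2, 2)     # current inverse-Hessian approximation
#   s = [0.1, -0.2]                  # step taken
#   y = [0.05, -0.15]                # change in gradient along the step
#   update!(scheme, H, s, y)         # damped BFGS update of H in place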
|
theory prop_18
imports Main
"$HIPSTER_HOME/IsaHipster"
begin
datatype 'a list = Nil2 | Cons2 "'a" "'a list"
fun append :: "'a list => 'a list => 'a list" where
"append (Nil2) y = y"
| "append (Cons2 z xs) y = Cons2 z (append xs y)"
fun rev :: "'a list => 'a list" where
"rev (Nil2) = Nil2"
| "rev (Cons2 y xs) = append (rev xs) (Cons2 y (Nil2))"
(*hipster append rev *)
lemma lemma_a [thy_expl]: "append x2 Nil2 = x2"
by (hipster_induct_schemes append.simps rev.simps)
lemma lemma_aa [thy_expl]: "append (append x1 y1) z1 = append x1 (append y1 z1)"
by (hipster_induct_schemes append.simps rev.simps)
lemma lemma_ab [thy_expl]: "append (rev x4) (rev y4) = rev (append y4 x4)"
by (hipster_induct_schemes append.simps rev.simps)
lemma lemma_ac [thy_expl]: "rev (rev x3) = x3"
by (hipster_induct_schemes append.simps rev.simps)
theorem x0 :
"(rev (append (rev x) y)) = (append (rev y) x)"
by (tactic \<open>Subgoal.FOCUS_PARAMS (K (Tactic_Data.hard_tac @{context})) @{context} 1\<close>)
end
|
(* Values of type sumor A B are either a value of type A or a proof of B *)
(*
Print sumor.
Inductive sumor (A : Type) (B : Prop) : Type :=
inleft : A -> A + {B} | inright : B -> A + {B}
For inleft, when applied to no more than 1 argument:
Arguments A, B are implicit and maximally inserted
For inleft, when applied to 2 arguments:
Argument A is implicit
For inright, when applied to no more than 1 argument:
Arguments A, B are implicit and maximally inserted
For inright, when applied to 2 arguments:
Argument B is implicit
For sumor: Argument scopes are [type_scope type_scope]
For inleft: Argument scopes are [type_scope type_scope _]
For inright: Argument scopes are [type_scope type_scope _]
*)
(* This is not a 'sumbool' type {A} + {B} but a 'sumor' type A + {B} *)
Definition pred_strong (n:nat) : {m:nat | S m = n} + {n = 0} :=
match n with
| 0 => inright (eq_refl 0)
| S n => inleft (exist _ n (eq_refl (S n)))
end.
(*
Compute pred_strong 5.
= inleft (exist (fun m : nat => S m = 5) 4 eq_refl)
: {m : nat | S m = 5} + {5 = 0}
*)
Definition maybe (n:nat) (def:nat) (x:{m:nat | S m = n} + {n = 0}) : nat :=
match x with
| inright _ => def
| inleft (exist _ m _) => m
end.
Arguments maybe {n} _ _.
(*
Compute (maybe 0 (pred_strong 10)).
*)
Example pred_strong_test1 : maybe 0 (pred_strong 10) = 9.
Proof. reflexivity. Qed.
Example pred_strong_test2 : maybe 9 (pred_strong 0) = 9.
Proof. reflexivity. Qed.
|
#-------------------------------------------------------------------
#* EMSO Model Library (EML) Copyright (C) 2004 - 2007 ALSOC.
#*
#* This LIBRARY is free software; you can distribute it and/or modify
#* it under the terms of the ALSOC FREE LICENSE as available at
#* http://www.enq.ufrgs.br/alsoc.
#*
#* EMSO Copyright (C) 2004 - 2007 ALSOC, original code
#* from http://www.rps.eng.br Copyright (C) 2002-2004.
#* All rights reserved.
#*
#* EMSO is distributed under the terms of the ALSOC LICENSE as
#* available at http://www.enq.ufrgs.br/alsoc.
#*
#*----------------------------------------------------------------------
#* Author: Marcos L. Alencastro, Estefane S. Horn (Revised Gerson B. Bicca)
#* $Id: expander.mso 687 2008-11-20 19:42:33Z bicca $
#*--------------------------------------------------------------------
type expander
expander()=begin
PP=outers.PP
NComp=outers.NComp
new(
DanaPlugin(Dict{Symbol,Any}(
:Brief=>"External Physical Properties",
:Type=>"PP"
)),
DanaInteger(Dict{Symbol,Any}(
:Brief=>"Number of chemical components",
:Lower=>1
)),
positive(Dict{Symbol,Any}(
:Brief=>"Constant of Gases",
:Unit=>"kJ/kmol/K",
:Default=>8.31451,
:Hidden=>true
)),
fill(molweight(Dict{Symbol,Any}(
:Brief=>"Molar Weight",
:Hidden=>true
)),(NComp)),
positive(Dict{Symbol,Any}(
:Brief=>"Mechanical efficiency",
:Lower=>1E-3
)),
positive(Dict{Symbol,Any}(
:Brief=>"Isentropic efficiency",
:Lower=>1E-3
)),
positive(Dict{Symbol,Any}(
:Brief=>"Pressure Ratio",
:Lower=>1E-6,
:Symbol=>"P_{ratio}"
)),
press_delta(Dict{Symbol,Any}(
:Brief=>"Pressure Drop",
:DisplayUnit=>"kPa",
:Symbol=>"\\Delta P"
)),
press_delta(Dict{Symbol,Any}(
:Brief=>"Pressure Decrease",
:DisplayUnit=>"kPa",
:Symbol=>"P_{incr}"
)),
power(Dict{Symbol,Any}(
:Brief=>"Power Losses",
:Lower=>0
)),
energy_mass(Dict{Symbol,Any}(
:Brief=>"Actual Head",
:Protected=>true
)),
energy_mass(Dict{Symbol,Any}(
:Brief=>"Isentropic Head",
:Protected=>true
)),
power(Dict{Symbol,Any}(
:Brief=>"Fluid Power",
:Protected=>true
)),
power(Dict{Symbol,Any}(
:Brief=>"Brake Power",
:Protected=>true
)),
positive(Dict{Symbol,Any}(
:Brief=>"Isentropic Coefficient",
:Lower=>0.2,
:Protected=>true
)),
temperature(Dict{Symbol,Any}(
:Brief=>"Isentropic Temperature",
:Protected=>true
)),
enth_mol(Dict{Symbol,Any}(
:Brief=>"Enthalpy at constant entropy",
:Hidden=>true
)),
molweight(Dict{Symbol,Any}(
:Brief=>"Mixture Molar Weight",
:Hidden=>true
)),
dens_mass(Dict{Symbol,Any}(
:Brief=>"Mass Density at inlet conditions",
:Lower=>1E-6,
:Protected=>true
)),
dens_mass(Dict{Symbol,Any}(
:Brief=>"Mass Density at outlet conditions",
:Lower=>1E-6,
:Protected=>true
)),
fraction(Dict{Symbol,Any}(
:Brief=>"Compressibility factor at inlet",
:Lower=>1E-3,
:Protected=>true
)),
fraction(Dict{Symbol,Any}(
:Brief=>"Compressibility factor at outlet",
:Lower=>1E-3,
:Protected=>true
)),
stream(Dict{Symbol,Any}(
:Brief=>"Inlet stream",
:PosX=>0.14,
:PosY=>0.0,
:Symbol=>"_{in}"
)),
streamPH(Dict{Symbol,Any}(
:Brief=>"Outlet stream",
:PosX=>0.83,
:PosY=>1,
:Symbol=>"_{out}"
)),
power(Dict{Symbol,Any}(
:Brief=>"Work Outlet",
:PosX=>1,
:PosY=>0.45,
:Protected=>true
)),
[
:(Outlet.F = Inlet.F),
:(Outlet.z = Inlet.z),
:(Mwm = sum(Mw*Inlet.z)),
:(Outlet.P = Inlet.P * Pratio),
:(Outlet.P = Inlet.P - Pdrop),
:(Outlet.P = Inlet.P - Pdecrease),
:(rho_in = PP.VapourDensity(Inlet.T, Inlet.P, Inlet.z)),
:(rho_out= PP.VapourDensity(Outlet.T, Outlet.P, Outlet.z)),
:(hise = PP.VapourEnthalpy(Tisentropic, Outlet.P, Outlet.z)),
:(Zfac_in = PP.VapourCompressibilityFactor(Inlet.T,Inlet.P,Inlet.z)),
:(Zfac_out = PP.VapourCompressibilityFactor(Outlet.T,Outlet.P,Outlet.z)),
:(Head*Mwm = (Outlet.h-Inlet.h)),
:(Outlet.T = Tisentropic),
:((Outlet.h-Inlet.h)= (hise-Inlet.h)*IsentropicEff),
:(PP.VapourEntropy(Tisentropic, Outlet.P, Outlet.z) = PP.VapourEntropy(Inlet.T, Inlet.P, Inlet.z)),
:(BrakePower = WorkOut),
:(BrakePower = FluidPower*MechanicalEff),
:(PowerLoss = BrakePower - FluidPower),
:(FluidPower = HeadIsentropic*Mwm*Inlet.F*IsentropicEff),
:(HeadIsentropic*Mwm*((IseCoeff-1.001)/IseCoeff) = (0.5*Zfac_in+0.5*Zfac_out)*Rgas*Inlet.T*((Pratio)^((IseCoeff-1.001)/IseCoeff) - 1)),
:(HeadIsentropic*Mwm = (hise -Inlet.h)),
],
[
"Overall Molar Balance","Component Molar Balance","Average Molecular Weight","Pressure Ratio","Pressure Drop","Pressure Decrease","Mass Density at inlet conditions","Mass Density at outlet conditions","Enthalpy at isentropic conditions","Compressibility factor at Inlet Conditions","Compressibility factor at Outlet Conditions","Actual Head","Discharge Temperature","Discharge Temperature","Isentropic Outlet Temperature","Brake Power","Brake Power","Power Loss","Fluid Power","Isentropic Coefficient","Isentropic Head",
],
[:PP,:NComp,:Rgas,:Mw,:MechanicalEff,:IsentropicEff,],
[:Pratio,:Pdrop,:Pdecrease,:PowerLoss,:Head,:HeadIsentropic,:FluidPower,:BrakePower,:IseCoeff,:Tisentropic,:hise,:Mwm,:rho_in,:rho_out,:Zfac_in,:Zfac_out,:Inlet,:Outlet,:WorkOut,]
)
end
PP::DanaPlugin
NComp::DanaInteger
Rgas::positive
Mw::Array{molweight}
MechanicalEff::positive
IsentropicEff::positive
Pratio::positive
Pdrop::press_delta
Pdecrease::press_delta
PowerLoss::power
Head::energy_mass
HeadIsentropic::energy_mass
FluidPower::power
BrakePower::power
IseCoeff::positive
Tisentropic::temperature
hise::enth_mol
Mwm::molweight
rho_in::dens_mass
rho_out::dens_mass
Zfac_in::fraction
Zfac_out::fraction
Inlet::stream
Outlet::streamPH
WorkOut::power
equations::Array{Expr,1}
equationNames::Array{String,1}
parameters::Array{Symbol,1}
variables::Array{Symbol,1}
attributes::Dict{Symbol,Any}
end
export expander
function set(in::expander)
Mw = PP.MolecularWeight()
Rgas = 8.31451*"kJ/kmol/K"
end
function setEquationFlow(in::expander)
addEquation(1)
addEquation(2)
addEquation(3)
addEquation(4)
addEquation(5)
addEquation(6)
addEquation(7)
addEquation(8)
addEquation(9)
addEquation(10)
addEquation(11)
addEquation(12)
if IsentropicEff >= 1
addEquation(13)
else
addEquation(14)
end
addEquation(15)
addEquation(16)
addEquation(17)
addEquation(18)
addEquation(19)
addEquation(20)
addEquation(21)
end
function atributes(in::expander,_::Dict{Symbol,Any})
fields::Dict{Symbol,Any}=Dict{Symbol,Any}()
fields[:Pallete]=true
fields[:Icon]="icon/expander"
fields[:Brief]="Model of an expander."
fields[:Info]="To be documented
== References ==
[1] GPSA, 1979, Engineering Data Book, Chapter 4, 5-9 - 5-10.
[2] Bloch, Heinz P., A Practical Guide to Compressor Technology, John Wiley & Sons, Incorporate, 2006."
drive!(fields,_)
return fields
end
expander(_::Dict{Symbol,Any})=begin
newModel=expander()
newModel.attributes=atributes(newModel,_)
newModel
end
|
Require Import
MathClasses.interfaces.abstract_algebra MathClasses.theory.jections.
Section contents.
Context `{StrongSetoid A}.
Global Instance: Setoid A.
Proof.
split.
intros x. rewrite <-tight_apart. now apply (irreflexivity (≶)).
intros x y. rewrite <-?tight_apart. now apply not_symmetry.
intros x y z. rewrite <-?tight_apart. intros E1 E2 E3.
destruct (cotransitive E3 y); contradiction.
Qed.
Global Instance apart_proper: Proper ((=) ==> (=) ==> iff) (≶).
Proof.
assert (∀ x₁ y x₂, x₁ ≶ y → x₁ = x₂ → x₂ ≶ y) as P1.
intros ? ? ? E Ex.
destruct (cotransitive E x₂); trivial.
apply tight_apart in Ex. destruct Ex.
now symmetry.
assert (∀ x₁ y₁ x₂ y₂, x₁ ≶ y₁ → x₁ = x₂ → y₁ = y₂ → x₂ ≶ y₂) as P2.
intros ? ? ? ? E Ex Ey.
apply P1 with x₁; trivial.
symmetry. apply P1 with y₁; trivial. now symmetry.
intros ? ? E1 ? ? E2.
split; intro; eapply P2; eauto; now symmetry.
Qed.
Instance apart_ne x y : PropHolds (x ≶ y) → PropHolds (x ≠ y).
Proof. firstorder. Qed.
Global Instance: ∀ x y, Stable (x = y).
Proof.
intros x y. unfold Stable, DN.
rewrite <-tight_apart. tauto.
Qed.
End contents.
(* Due to bug #2528 *)
#[global]
Hint Extern 3 (PropHolds (_ ≠ _)) => eapply @apart_ne : typeclass_instances.
Lemma projected_strong_setoid `{StrongSetoid B} `{Equiv A} `{Apart A} (f: A → B)
(eq_correct : ∀ x y, x = y ↔ f x = f y) (apart_correct : ∀ x y, x ≶ y ↔ f x ≶ f y) : StrongSetoid A.
Proof.
split.
intros x. red. rewrite apart_correct. apply (irreflexivity (≶)).
intros x y. rewrite !apart_correct. now symmetry.
intros x y E z. rewrite !apart_correct. apply cotransitive. now apply apart_correct.
intros x y. rewrite apart_correct, eq_correct. now apply tight_apart.
Qed.
#[global]
Instance sig_strong_setoid `{StrongSetoid A} (P: A → Prop): StrongSetoid (sig P).
Proof. now apply (projected_strong_setoid (@proj1_sig _ P)). Qed.
Section morphisms.
Context `{Equiv A} `{Apart A} `{Equiv B} `{Apart B} `{Equiv C} `{Apart C}.
Existing Instance strong_setoidmor_a.
Existing Instance strong_setoidmor_b.
Global Instance strong_morphism_proper `{!StrongSetoid_Morphism (f : A → B)} :
Setoid_Morphism f | 10.
Proof.
split; try apply _.
intros ? ?. rewrite <-!tight_apart. intros E1 E2.
destruct E1. now apply (strong_extensionality f).
Qed.
Global Instance strong_injective_injective `{!StrongInjective (f : A → B)} :
Injective f.
Proof.
pose proof (strong_injective_mor f).
split; try apply _.
intros ? ?. rewrite <-!tight_apart. intros E1 E2.
destruct E1. now apply (strong_injective f).
Qed.
(* If a morphism satisfies the binary strong extensionality property, it is
strongly extensional in both coordinates. *)
Global Instance strong_setoid_morphism_1 `{!StrongSetoid_BinaryMorphism (f : A → B → C)} :
∀ z, StrongSetoid_Morphism (f z).
Proof.
pose proof (strong_binary_setoidmor_a f).
pose proof (strong_binary_setoidmor_b f).
pose proof (strong_binary_setoidmor_c f).
intros z.
split; try apply _.
intros x y E.
destruct (strong_binary_extensionality f z x z y); trivial.
now destruct (irreflexivity (≶) z).
Qed.
Global Instance strong_setoid_morphism_unary_2 `{!StrongSetoid_BinaryMorphism (f : A → B → C)} :
∀ z, StrongSetoid_Morphism (λ x, f x z).
Proof.
pose proof (strong_binary_setoidmor_a f).
pose proof (strong_binary_setoidmor_b f).
pose proof (strong_binary_setoidmor_c f).
intros z.
split; try apply _.
intros x y E.
destruct (strong_binary_extensionality f x z y z); trivial.
now destruct (irreflexivity (≶) z).
Qed.
(* Conversely, if a morphism is strongly extensional in both coordinates, it
satisfies the binary strong extensionality property. We don't make this an
instance in order to avoid loops. *)
Lemma strong_binary_setoid_morphism_both_coordinates
`{!StrongSetoid A} `{!StrongSetoid B} `{!StrongSetoid C} {f : A → B → C}
`{∀ z, StrongSetoid_Morphism (f z)} `{∀ z, StrongSetoid_Morphism (λ x, f x z)} : StrongSetoid_BinaryMorphism f.
Proof.
split; try apply _.
intros x₁ y₁ x₂ y₂ E.
destruct (cotransitive E (f x₂ y₁)).
left. now apply (strong_extensionality (λ x, f x y₁)).
right. now apply (strong_extensionality (f x₂)).
Qed.
Global Instance binary_strong_morphism_proper `{!StrongSetoid_BinaryMorphism (f : A → B → C)} :
Proper ((=) ==> (=) ==> (=)) f.
Proof.
pose proof (strong_binary_setoidmor_a f).
pose proof (strong_binary_setoidmor_b f).
pose proof (strong_binary_setoidmor_c f).
intros x₁ y₁ E1 x₂ y₂ E2.
rewrite <-tight_apart in E1. rewrite <-tight_apart in E2.
apply tight_apart. intros E3.
edestruct (cotransitive E3 (f y₁ x₂)).
destruct E1. now apply (strong_extensionality (λ x, f x x₂)).
destruct E2. now apply (strong_extensionality (f y₁)).
Qed.
End morphisms.
Section more_morphisms.
Context `{StrongSetoid A} `{StrongSetoid B}.
Lemma strong_binary_setoid_morphism_commutative {f : A → A → B} `{!Commutative f}
`{∀ z, StrongSetoid_Morphism (f z)} : StrongSetoid_BinaryMorphism f.
Proof.
assert (∀ z, StrongSetoid_Morphism (λ x, f x z)).
split; try apply _. intros x y. rewrite !(commutativity _ z). now apply (strong_extensionality (f z)).
apply strong_binary_setoid_morphism_both_coordinates.
Qed.
End more_morphisms.
#[global]
Instance default_apart `{Equiv A} : Apart A | 20 := (≠).
Set Warnings "-unsupported-attributes". (* FIXME: remove when minimal Coq version is enough *)
#[global]
Typeclasses Opaque default_apart.
Set Warnings "+unsupported-attributes".
#[global]
Instance default_apart_trivial `{Equiv A} : TrivialApart A (Aap:=default_apart).
Proof. red. reflexivity. Qed.
(* In case we have a decidable setoid, we can construct a strong setoid. Again
we do not make this an instance as it will cause loops *)
Section dec_setoid.
Context `{Setoid A} `{Apart A} `{!TrivialApart A} `{∀ x y, Decision (x = y)}.
(* Not Global in order to avoid loops *)
Instance ne_apart x y : PropHolds (x ≠ y) → PropHolds (x ≶ y).
Proof. rewrite trivial_apart. easy. Qed.
Instance dec_strong_setoid: StrongSetoid A.
Proof.
split; try apply _.
firstorder.
intros x y. rewrite !trivial_apart. firstorder.
intros x y E1 z. rewrite !trivial_apart.
destruct (decide (x = z)) as [E2|E2]; [|tauto].
right. intros E3. rewrite trivial_apart in E1. apply E1. now rewrite E2.
intros x y. rewrite trivial_apart. split.
intros E. now apply stable.
firstorder.
Qed.
End dec_setoid.
(* And a similar result for morphisms *)
Section dec_setoid_morphisms.
Context `{StrongSetoid A} `{!TrivialApart A} `{StrongSetoid B}.
Instance dec_strong_morphism (f : A → B) `{!Setoid_Morphism f} :
StrongSetoid_Morphism f.
Proof.
split; try apply _.
intros x y E. apply trivial_apart, (setoids.morphism_ne f). now apply apart_ne.
Qed.
Context `{!TrivialApart B}.
Instance dec_strong_injective (f : A → B) `{!Injective f} :
StrongInjective f.
Proof.
pose proof (injective_mor f).
split; try apply _.
intros x y. rewrite !trivial_apart. now apply (injective_ne f).
Qed.
Context `{StrongSetoid C}.
Instance dec_strong_binary_morphism (f : A → B → C) `{!Proper ((=) ==> (=) ==> (=)) f} :
StrongSetoid_BinaryMorphism f.
Proof.
split; try apply _.
intros x₁ y₁ x₂ y₂ E1.
case (cotransitive E1 (f x₂ y₁)); rewrite !trivial_apart; intros E2.
left. intros E3. destruct (apart_ne _ _ E2). now rewrite E3.
right. intros E3. destruct (apart_ne _ _ E2). now rewrite E3.
Qed.
End dec_setoid_morphisms.
|
{-# OPTIONS --allow-unsolved-metas #-}
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
module FOT.FOTC.Data.List.PostulatesVersusDataTypes where
-- See Agda mailing list.
-- Subject: Agda's unification: postulates versus data types
module M₁ where
data D : Set where
_∷_ : D → D → D
data List : D → Set where
cons : ∀ x xs → List xs → List (x ∷ xs)
tail : ∀ {x xs} → List (x ∷ xs) → List xs
tail {x} {xs} (cons .x .xs xsL) = xsL
module M₂ where
postulate
D : Set
_∷_ : D → D → D
data List : D → Set where
cons : ∀ x xs → List xs → List (x ∷ xs)
tail : ∀ {x xs} → List (x ∷ xs) → List xs
tail l = {!!} -- C-c C-c fails
|
lemma bigtheta_real_nat_transfer: "(f :: real \<Rightarrow> real) \<in> \<Theta>(g) \<Longrightarrow> (\<lambda>x::nat. f (real x)) \<in> \<Theta>(\<lambda>x. g (real x))" |
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Mario Carneiro, Yury Kudryashov
! This file was ported from Lean 3 source module topology.algebra.order.liminf_limsup
! leanprover-community/mathlib commit 98e83c3d541c77cdb7da20d79611a780ff8e7d90
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Algebra.BigOperators.Intervals
import Mathlib.Algebra.BigOperators.Order
import Mathlib.Algebra.IndicatorFunction
import Mathlib.Order.LiminfLimsup
import Mathlib.Order.Filter.Archimedean
import Mathlib.Topology.Order.Basic
/-!
# Lemmas about liminf and limsup in an order topology.
-/
open Filter
open Topology Classical
universe u v
variable {α : Type u} {β : Type v}
section LiminfLimsup
section OrderClosedTopology
variable [SemilatticeSup α] [TopologicalSpace α] [OrderTopology α]
theorem isBounded_le_nhds (a : α) : (𝓝 a).IsBounded (· ≤ ·) :=
(isTop_or_exists_gt a).elim (fun h ↦ ⟨a, eventually_of_forall h⟩) fun ⟨b, hb⟩ ↦
⟨b, ge_mem_nhds hb⟩
#align is_bounded_le_nhds isBounded_le_nhds
theorem Filter.Tendsto.isBoundedUnder_le {f : Filter β} {u : β → α} {a : α}
(h : Tendsto u f (𝓝 a)) : f.IsBoundedUnder (· ≤ ·) u :=
(isBounded_le_nhds a).mono h
#align filter.tendsto.is_bounded_under_le Filter.Tendsto.isBoundedUnder_le
theorem Filter.Tendsto.bddAbove_range_of_cofinite {u : β → α} {a : α}
(h : Tendsto u cofinite (𝓝 a)) : BddAbove (Set.range u) :=
h.isBoundedUnder_le.bddAbove_range_of_cofinite
#align filter.tendsto.bdd_above_range_of_cofinite Filter.Tendsto.bddAbove_range_of_cofinite
theorem Filter.Tendsto.bddAbove_range {u : ℕ → α} {a : α} (h : Tendsto u atTop (𝓝 a)) :
BddAbove (Set.range u) :=
h.isBoundedUnder_le.bddAbove_range
#align filter.tendsto.bdd_above_range Filter.Tendsto.bddAbove_range
theorem isCobounded_ge_nhds (a : α) : (𝓝 a).IsCobounded (· ≥ ·) :=
(isBounded_le_nhds a).isCobounded_flip
#align is_cobounded_ge_nhds isCobounded_ge_nhds
theorem Filter.Tendsto.isCoboundedUnder_ge {f : Filter β} {u : β → α} {a : α} [NeBot f]
(h : Tendsto u f (𝓝 a)) : f.IsCoboundedUnder (· ≥ ·) u :=
h.isBoundedUnder_le.isCobounded_flip
#align filter.tendsto.is_cobounded_under_ge Filter.Tendsto.isCoboundedUnder_ge
theorem isBounded_le_atBot (α : Type _) [hα : Nonempty α] [Preorder α] :
(atBot : Filter α).IsBounded (· ≤ ·) :=
isBounded_iff.2 ⟨Set.Iic hα.some, mem_atBot _, hα.some, fun _ hx ↦ hx⟩
#align is_bounded_le_at_bot isBounded_le_atBot
theorem Filter.Tendsto.isBoundedUnder_le_atBot {α : Type _} [Nonempty α] [Preorder α] {f : Filter β}
{u : β → α} (h : Tendsto u f atBot) : f.IsBoundedUnder (· ≤ ·) u :=
(isBounded_le_atBot α).mono h
#align filter.tendsto.is_bounded_under_le_at_bot Filter.Tendsto.isBoundedUnder_le_atBot
theorem bddAbove_range_of_tendsto_atTop_atBot {α : Type _} [Nonempty α] [SemilatticeSup α]
{u : ℕ → α} (hx : Tendsto u atTop atBot) : BddAbove (Set.range u) :=
(Filter.Tendsto.isBoundedUnder_le_atBot hx).bddAbove_range
#align bdd_above_range_of_tendsto_at_top_at_bot bddAbove_range_of_tendsto_atTop_atBot
end OrderClosedTopology
section OrderClosedTopology
variable [SemilatticeInf α] [TopologicalSpace α] [OrderTopology α]
theorem isBounded_ge_nhds (a : α) : (𝓝 a).IsBounded (· ≥ ·) :=
@isBounded_le_nhds αᵒᵈ _ _ _ a
#align is_bounded_ge_nhds isBounded_ge_nhds
theorem Filter.Tendsto.isBoundedUnder_ge {f : Filter β} {u : β → α} {a : α}
(h : Tendsto u f (𝓝 a)) : f.IsBoundedUnder (· ≥ ·) u :=
(isBounded_ge_nhds a).mono h
#align filter.tendsto.is_bounded_under_ge Filter.Tendsto.isBoundedUnder_ge
theorem Filter.Tendsto.bddBelow_range_of_cofinite {u : β → α} {a : α}
(h : Tendsto u cofinite (𝓝 a)) : BddBelow (Set.range u) :=
h.isBoundedUnder_ge.bddBelow_range_of_cofinite
#align filter.tendsto.bdd_below_range_of_cofinite Filter.Tendsto.bddBelow_range_of_cofinite
theorem Filter.Tendsto.bddBelow_range {u : ℕ → α} {a : α} (h : Tendsto u atTop (𝓝 a)) :
BddBelow (Set.range u) :=
h.isBoundedUnder_ge.bddBelow_range
#align filter.tendsto.bdd_below_range Filter.Tendsto.bddBelow_range
theorem isCobounded_le_nhds (a : α) : (𝓝 a).IsCobounded (· ≤ ·) :=
(isBounded_ge_nhds a).isCobounded_flip
#align is_cobounded_le_nhds isCobounded_le_nhds
theorem Filter.Tendsto.isCoboundedUnder_le {f : Filter β} {u : β → α} {a : α} [NeBot f]
(h : Tendsto u f (𝓝 a)) : f.IsCoboundedUnder (· ≤ ·) u :=
h.isBoundedUnder_ge.isCobounded_flip
#align filter.tendsto.is_cobounded_under_le Filter.Tendsto.isCoboundedUnder_le
theorem isBounded_ge_atTop (α : Type _) [Nonempty α] [Preorder α] :
(atTop : Filter α).IsBounded (· ≥ ·) :=
isBounded_le_atBot αᵒᵈ
#align is_bounded_ge_at_top isBounded_ge_atTop
theorem Filter.Tendsto.isBoundedUnder_ge_atTop {α : Type _} [Nonempty α] [Preorder α] {f : Filter β}
{u : β → α} (h : Tendsto u f atTop) : f.IsBoundedUnder (· ≥ ·) u :=
(isBounded_ge_atTop α).mono h
#align filter.tendsto.is_bounded_under_ge_at_top Filter.Tendsto.isBoundedUnder_ge_atTop
theorem bddBelow_range_of_tendsto_atTop_atTop {α : Type _} [Nonempty α] [SemilatticeInf α]
{u : ℕ → α} (hx : Tendsto u atTop atTop) : BddBelow (Set.range u) :=
(Filter.Tendsto.isBoundedUnder_ge_atTop hx).bddBelow_range
#align bdd_below_range_of_tendsto_at_top_at_top bddBelow_range_of_tendsto_atTop_atTop
end OrderClosedTopology
section ConditionallyCompleteLinearOrder
variable [ConditionallyCompleteLinearOrder α]
theorem lt_mem_sets_of_limsupₛ_lt {f : Filter α} {b} (h : f.IsBounded (· ≤ ·)) (l : f.limsupₛ < b) :
∀ᶠ a in f, a < b :=
let ⟨_, (h : ∀ᶠ a in f, a ≤ _), hcb⟩ := exists_lt_of_cinfₛ_lt h l
mem_of_superset h fun _ hac ↦ lt_of_le_of_lt hac hcb
set_option linter.uppercaseLean3 false in
#align lt_mem_sets_of_Limsup_lt lt_mem_sets_of_limsupₛ_lt
theorem gt_mem_sets_of_liminfₛ_gt :
∀ {f : Filter α} {b}, f.IsBounded (· ≥ ·) → b < f.liminfₛ → ∀ᶠ a in f, b < a :=
@lt_mem_sets_of_limsupₛ_lt αᵒᵈ _
set_option linter.uppercaseLean3 false in
#align gt_mem_sets_of_Liminf_gt gt_mem_sets_of_liminfₛ_gt
variable [TopologicalSpace α] [OrderTopology α]
/-- If the liminf and the limsup of a filter coincide, then this filter converges to
their common value, at least if the filter is eventually bounded above and below. -/
theorem le_nhds_of_limsupₛ_eq_liminfₛ {f : Filter α} {a : α} (hl : f.IsBounded (· ≤ ·))
(hg : f.IsBounded (· ≥ ·)) (hs : f.limsupₛ = a) (hi : f.liminfₛ = a) : f ≤ 𝓝 a :=
tendsto_order.2 ⟨fun _ hb ↦ gt_mem_sets_of_liminfₛ_gt hg <| hi.symm ▸ hb,
fun _ hb ↦ lt_mem_sets_of_limsupₛ_lt hl <| hs.symm ▸ hb⟩
set_option linter.uppercaseLean3 false in
#align le_nhds_of_Limsup_eq_Liminf le_nhds_of_limsupₛ_eq_liminfₛ
theorem limsupₛ_nhds (a : α) : limsupₛ (𝓝 a) = a :=
cinfₛ_eq_of_forall_ge_of_forall_gt_exists_lt (isBounded_le_nhds a)
(fun a' (h : { n : α | n ≤ a' } ∈ 𝓝 a) ↦ show a ≤ a' from @mem_of_mem_nhds α _ a _ h)
fun b (hba : a < b) ↦
show ∃ c, { n : α | n ≤ c } ∈ 𝓝 a ∧ c < b from
match dense_or_discrete a b with
| Or.inl ⟨c, hac, hcb⟩ => ⟨c, ge_mem_nhds hac, hcb⟩
| Or.inr ⟨_, h⟩ => ⟨a, (𝓝 a).sets_of_superset (gt_mem_nhds hba) h, hba⟩
set_option linter.uppercaseLean3 false in
#align Limsup_nhds limsupₛ_nhds
theorem liminfₛ_nhds : ∀ a : α, liminfₛ (𝓝 a) = a :=
@limsupₛ_nhds αᵒᵈ _ _ _
set_option linter.uppercaseLean3 false in
#align Liminf_nhds liminfₛ_nhds
/-- If a filter is converging, its liminf coincides with its limit. -/
theorem liminfₛ_eq_of_le_nhds {f : Filter α} {a : α} [NeBot f] (h : f ≤ 𝓝 a) : f.liminfₛ = a :=
have hb_ge : IsBounded (· ≥ ·) f := (isBounded_ge_nhds a).mono h
have hb_le : IsBounded (· ≤ ·) f := (isBounded_le_nhds a).mono h
le_antisymm
(calc
f.liminfₛ ≤ f.limsupₛ := liminfₛ_le_limsupₛ hb_le hb_ge
_ ≤ (𝓝 a).limsupₛ := limsupₛ_le_limsupₛ_of_le h hb_ge.isCobounded_flip (isBounded_le_nhds a)
_ = a := limsupₛ_nhds a)
(calc
a = (𝓝 a).liminfₛ := (liminfₛ_nhds a).symm
_ ≤ f.liminfₛ := liminfₛ_le_liminfₛ_of_le h (isBounded_ge_nhds a) hb_le.isCobounded_flip)
set_option linter.uppercaseLean3 false in
#align Liminf_eq_of_le_nhds liminfₛ_eq_of_le_nhds
/-- If a filter is converging, its limsup coincides with its limit. -/
theorem limsupₛ_eq_of_le_nhds : ∀ {f : Filter α} {a : α} [NeBot f], f ≤ 𝓝 a → f.limsupₛ = a :=
@liminfₛ_eq_of_le_nhds αᵒᵈ _ _ _
set_option linter.uppercaseLean3 false in
#align Limsup_eq_of_le_nhds limsupₛ_eq_of_le_nhds
/-- If a function has a limit, then its limsup coincides with its limit. -/
theorem Filter.Tendsto.limsup_eq {f : Filter β} {u : β → α} {a : α} [NeBot f]
(h : Tendsto u f (𝓝 a)) : limsup u f = a :=
limsupₛ_eq_of_le_nhds h
#align filter.tendsto.limsup_eq Filter.Tendsto.limsup_eq
/-- If a function has a limit, then its liminf coincides with its limit. -/
theorem Filter.Tendsto.liminf_eq {f : Filter β} {u : β → α} {a : α} [NeBot f]
(h : Tendsto u f (𝓝 a)) : liminf u f = a :=
liminfₛ_eq_of_le_nhds h
#align filter.tendsto.liminf_eq Filter.Tendsto.liminf_eq
/-- If the liminf and the limsup of a function coincide, then the limit of the function
exists and has the same value. -/
theorem tendsto_of_liminf_eq_limsup {f : Filter β} {u : β → α} {a : α} (hinf : liminf u f = a)
(hsup : limsup u f = a) (h : f.IsBoundedUnder (· ≤ ·) u := by isBoundedDefault)
(h' : f.IsBoundedUnder (· ≥ ·) u := by isBoundedDefault) : Tendsto u f (𝓝 a) :=
le_nhds_of_limsupₛ_eq_liminfₛ h h' hsup hinf
#align tendsto_of_liminf_eq_limsup tendsto_of_liminf_eq_limsup
/-- If a number `a` is less than or equal to the `liminf` of a function `f` at some filter
and is greater than or equal to the `limsup` of `f`, then `f` tends to `a` along this filter. -/
theorem tendsto_of_le_liminf_of_limsup_le {f : Filter β} {u : β → α} {a : α} (hinf : a ≤ liminf u f)
(hsup : limsup u f ≤ a) (h : f.IsBoundedUnder (· ≤ ·) u := by isBoundedDefault)
(h' : f.IsBoundedUnder (· ≥ ·) u := by isBoundedDefault) : Tendsto u f (𝓝 a) :=
if hf : f = ⊥ then hf.symm ▸ tendsto_bot
else
haveI : NeBot f := ⟨hf⟩
tendsto_of_liminf_eq_limsup (le_antisymm (le_trans (liminf_le_limsup h h') hsup) hinf)
(le_antisymm hsup (le_trans hinf (liminf_le_limsup h h'))) h h'
#align tendsto_of_le_liminf_of_limsup_le tendsto_of_le_liminf_of_limsup_le
/-- Assume that, for any `a < b`, a sequence can not be infinitely many times below `a` and
above `b`. If it is also ultimately bounded above and below, then it has to converge. This even
works if `a` and `b` are restricted to a dense subset.
-/
theorem tendsto_of_no_upcrossings [DenselyOrdered α] {f : Filter β} {u : β → α} {s : Set α}
(hs : Dense s) (H : ∀ a ∈ s, ∀ b ∈ s, a < b → ¬((∃ᶠ n in f, u n < a) ∧ ∃ᶠ n in f, b < u n))
(h : f.IsBoundedUnder (· ≤ ·) u := by isBoundedDefault)
(h' : f.IsBoundedUnder (· ≥ ·) u := by isBoundedDefault) :
∃ c : α, Tendsto u f (𝓝 c) := by
by_cases hbot : f = ⊥;
· rw [hbot]
exact ⟨infₛ ∅, tendsto_bot⟩
haveI : NeBot f := ⟨hbot⟩
refine' ⟨limsup u f, _⟩
apply tendsto_of_le_liminf_of_limsup_le _ le_rfl h h'
by_contra' hlt
obtain ⟨a, ⟨⟨la, au⟩, as⟩⟩ : ∃ a, (f.liminf u < a ∧ a < f.limsup u) ∧ a ∈ s :=
dense_iff_inter_open.1 hs (Set.Ioo (f.liminf u) (f.limsup u)) isOpen_Ioo
(Set.nonempty_Ioo.2 hlt)
obtain ⟨b, ⟨⟨ab, bu⟩, bs⟩⟩ : ∃ b, (a < b ∧ b < f.limsup u) ∧ b ∈ s :=
dense_iff_inter_open.1 hs (Set.Ioo a (f.limsup u)) isOpen_Ioo (Set.nonempty_Ioo.2 au)
have A : ∃ᶠ n in f, u n < a := frequently_lt_of_liminf_lt (IsBounded.isCobounded_ge h) la
have B : ∃ᶠ n in f, b < u n := frequently_lt_of_lt_limsup (IsBounded.isCobounded_le h') bu
exact H a as b bs ab ⟨A, B⟩
#align tendsto_of_no_upcrossings tendsto_of_no_upcrossings
end ConditionallyCompleteLinearOrder
end LiminfLimsup
section Monotone
variable {ι R S : Type _} {F : Filter ι} [NeBot F] [CompleteLinearOrder R] [TopologicalSpace R]
[OrderTopology R] [CompleteLinearOrder S] [TopologicalSpace S] [OrderTopology S]
/-- An antitone function between complete linear ordered spaces sends a `Filter.limsupₛ`
to the `Filter.liminf` of the image if it is continuous at the `limsupₛ`. -/
theorem Antitone.map_limsupₛ_of_continuousAt {F : Filter R} [NeBot F] {f : R → S}
(f_decr : Antitone f) (f_cont : ContinuousAt f F.limsupₛ) : f F.limsupₛ = F.liminf f := by
apply le_antisymm
· have A : { a : R | ∀ᶠ n : R in F, n ≤ a }.Nonempty := ⟨⊤, by simp⟩
rw [limsupₛ, f_decr.map_infₛ_of_continuousAt' f_cont A]
apply le_of_forall_lt
intro c hc
simp only [liminf, liminfₛ, lt_supₛ_iff, eventually_map, Set.mem_setOf_eq, exists_prop,
Set.mem_image, exists_exists_and_eq_and] at hc⊢
rcases hc with ⟨d, hd, h'd⟩
refine' ⟨f d, _, h'd⟩
filter_upwards [hd]with x hx using f_decr hx
· rcases eq_or_lt_of_le (bot_le : ⊥ ≤ F.limsupₛ) with (h | limsupₛ_ne_bot)
· rw [← h]
apply liminf_le_of_frequently_le _
apply frequently_of_forall
intro x
exact f_decr bot_le
by_cases h' : ∃ c, c < F.limsupₛ ∧ Set.Ioo c F.limsupₛ = ∅
· rcases h' with ⟨c, c_lt, hc⟩
have B : ∃ᶠ n in F, F.limsupₛ ≤ n := by
apply (frequently_lt_of_lt_limsupₛ (by isBoundedDefault) c_lt).mono
intro x hx
by_contra'
have : (Set.Ioo c F.limsupₛ).Nonempty := ⟨x, ⟨hx, this⟩⟩
simp only [hc, Set.not_nonempty_empty] at this
apply liminf_le_of_frequently_le _
exact B.mono fun x hx ↦ f_decr hx
by_contra' H
obtain ⟨l, l_lt, h'l⟩ : ∃ l < F.limsupₛ, Set.Ioc l F.limsupₛ ⊆ { x : R | f x < F.liminf f }
exact exists_Ioc_subset_of_mem_nhds ((tendsto_order.1 f_cont.tendsto).2 _ H) ⟨⊥, limsupₛ_ne_bot⟩
obtain ⟨m, l_m, m_lt⟩ : (Set.Ioo l F.limsupₛ).Nonempty := by
contrapose! h'
refine' ⟨l, l_lt, by rwa [Set.not_nonempty_iff_eq_empty] at h'⟩
have B : F.liminf f ≤ f m := by
apply liminf_le_of_frequently_le _
apply (frequently_lt_of_lt_limsupₛ (by isBoundedDefault) m_lt).mono
intro x hx
exact f_decr hx.le
have I : f m < F.liminf f := h'l ⟨l_m, m_lt.le⟩
exact lt_irrefl _ (B.trans_lt I)
set_option linter.uppercaseLean3 false in
#align antitone.map_Limsup_of_continuous_at Antitone.map_limsupₛ_of_continuousAt
/-- A continuous antitone function between complete linear ordered spaces sends a `Filter.limsup`
to the `Filter.liminf` of the images. -/
theorem Antitone.map_limsup_of_continuousAt {f : R → S} (f_decr : Antitone f) (a : ι → R)
(f_cont : ContinuousAt f (F.limsup a)) : f (F.limsup a) = F.liminf (f ∘ a) :=
f_decr.map_limsupₛ_of_continuousAt f_cont
#align antitone.map_limsup_of_continuous_at Antitone.map_limsup_of_continuousAt
/-- An antitone function between complete linear ordered spaces sends a `Filter.liminfₛ`
to the `Filter.limsup` of the image if it is continuous at the `liminfₛ`. -/
theorem Antitone.map_liminfₛ_of_continuousAt {F : Filter R} [NeBot F] {f : R → S}
(f_decr : Antitone f) (f_cont : ContinuousAt f F.liminfₛ) : f F.liminfₛ = F.limsup f :=
@Antitone.map_limsupₛ_of_continuousAt (OrderDual R) (OrderDual S) _ _ _ _ _ _ _ _ f f_decr.dual
f_cont
set_option linter.uppercaseLean3 false in
#align antitone.map_Liminf_of_continuous_at Antitone.map_liminfₛ_of_continuousAt
/-- A continuous antitone function between complete linear ordered spaces sends a `Filter.liminf`
to the `Filter.limsup` of the images. -/
theorem Antitone.map_liminf_of_continuousAt {f : R → S} (f_decr : Antitone f) (a : ι → R)
(f_cont : ContinuousAt f (F.liminf a)) : f (F.liminf a) = F.limsup (f ∘ a) :=
f_decr.map_liminfₛ_of_continuousAt f_cont
#align antitone.map_liminf_of_continuous_at Antitone.map_liminf_of_continuousAt
/-- A monotone function between complete linear ordered spaces sends a `Filter.limsupₛ`
to the `Filter.limsup` of the image if it is continuous at the `limsupₛ`. -/
theorem Monotone.map_limsupₛ_of_continuousAt {F : Filter R} [NeBot F] {f : R → S}
(f_incr : Monotone f) (f_cont : ContinuousAt f F.limsupₛ) : f F.limsupₛ = F.limsup f :=
@Antitone.map_limsupₛ_of_continuousAt R (OrderDual S) _ _ _ _ _ _ _ _ f f_incr f_cont
set_option linter.uppercaseLean3 false in
#align monotone.map_Limsup_of_continuous_at Monotone.map_limsupₛ_of_continuousAt
/-- A continuous monotone function between complete linear ordered spaces sends a `Filter.limsup`
to the `Filter.limsup` of the images. -/
theorem Monotone.map_limsup_of_continuousAt {f : R → S} (f_incr : Monotone f) (a : ι → R)
(f_cont : ContinuousAt f (F.limsup a)) : f (F.limsup a) = F.limsup (f ∘ a) :=
f_incr.map_limsupₛ_of_continuousAt f_cont
#align monotone.map_limsup_of_continuous_at Monotone.map_limsup_of_continuousAt
/-- A monotone function between complete linear ordered spaces sends a `Filter.liminfₛ`
to the `Filter.liminf` of the image if it is continuous at the `liminfₛ`. -/
theorem Monotone.map_liminfₛ_of_continuousAt {F : Filter R} [NeBot F] {f : R → S}
(f_incr : Monotone f) (f_cont : ContinuousAt f F.liminfₛ) : f F.liminfₛ = F.liminf f :=
@Antitone.map_liminfₛ_of_continuousAt R (OrderDual S) _ _ _ _ _ _ _ _ f f_incr f_cont
set_option linter.uppercaseLean3 false in
#align monotone.map_Liminf_of_continuous_at Monotone.map_liminfₛ_of_continuousAt
/-- A continuous monotone function between complete linear ordered spaces sends a `Filter.liminf`
to the `Filter.liminf` of the images. -/
theorem Monotone.map_liminf_of_continuousAt {f : R → S} (f_incr : Monotone f) (a : ι → R)
(f_cont : ContinuousAt f (F.liminf a)) : f (F.liminf a) = F.liminf (f ∘ a) :=
f_incr.map_liminfₛ_of_continuousAt f_cont
#align monotone.map_liminf_of_continuous_at Monotone.map_liminf_of_continuousAt
end Monotone
section InfiAndSupr
open Topology
open Filter Set
variable {ι : Type _} {R : Type _} [CompleteLinearOrder R] [TopologicalSpace R] [OrderTopology R]
theorem infᵢ_eq_of_forall_le_of_tendsto {x : R} {as : ι → R} (x_le : ∀ i, x ≤ as i) {F : Filter ι}
[Filter.NeBot F] (as_lim : Filter.Tendsto as F (𝓝 x)) : (⨅ i, as i) = x := by
refine' infᵢ_eq_of_forall_ge_of_forall_gt_exists_lt (fun i ↦ x_le i) _
apply fun w x_lt_w ↦ ‹Filter.NeBot F›.nonempty_of_mem (eventually_lt_of_tendsto_lt x_lt_w as_lim)
#align infi_eq_of_forall_le_of_tendsto infᵢ_eq_of_forall_le_of_tendsto
theorem supᵢ_eq_of_forall_le_of_tendsto {x : R} {as : ι → R} (le_x : ∀ i, as i ≤ x) {F : Filter ι}
[Filter.NeBot F] (as_lim : Filter.Tendsto as F (𝓝 x)) : (⨆ i, as i) = x :=
@infᵢ_eq_of_forall_le_of_tendsto ι (OrderDual R) _ _ _ x as le_x F _ as_lim
#align supr_eq_of_forall_le_of_tendsto supᵢ_eq_of_forall_le_of_tendsto
theorem unionᵢ_Ici_eq_Ioi_of_lt_of_tendsto {ι : Type _} (x : R) {as : ι → R} (x_lt : ∀ i, x < as i)
{F : Filter ι} [Filter.NeBot F] (as_lim : Filter.Tendsto as F (𝓝 x)) :
(⋃ i : ι, Ici (as i)) = Ioi x := by
have obs : x ∉ range as := by
intro maybe_x_is
rcases mem_range.mp maybe_x_is with ⟨i, hi⟩
simpa only [hi, lt_self_iff_false] using x_lt i
-- Porting note: `rw at *` was too destructive. Let's only rewrite `obs` and the goal.
have := infᵢ_eq_of_forall_le_of_tendsto (fun i ↦ (x_lt i).le) as_lim
rw [← this] at obs
rw [← this]
exact unionᵢ_Ici_eq_Ioi_infᵢ obs
#align Union_Ici_eq_Ioi_of_lt_of_tendsto unionᵢ_Ici_eq_Ioi_of_lt_of_tendsto
theorem unionᵢ_Iic_eq_Iio_of_lt_of_tendsto {ι : Type _} (x : R) {as : ι → R} (lt_x : ∀ i, as i < x)
{F : Filter ι} [Filter.NeBot F] (as_lim : Filter.Tendsto as F (𝓝 x)) :
(⋃ i : ι, Iic (as i)) = Iio x :=
@unionᵢ_Ici_eq_Ioi_of_lt_of_tendsto (OrderDual R) _ _ _ ι x as lt_x F _ as_lim
#align Union_Iic_eq_Iio_of_lt_of_tendsto unionᵢ_Iic_eq_Iio_of_lt_of_tendsto
end InfiAndSupr
section Indicator
open BigOperators
theorem limsup_eq_tendsto_sum_indicator_nat_atTop (s : ℕ → Set α) :
limsup s atTop = { ω | Tendsto
(fun n ↦ ∑ k in Finset.range n, (s (k + 1)).indicator (1 : α → ℕ) ω) atTop atTop } := by
ext ω
simp only [limsup_eq_infᵢ_supᵢ_of_nat, ge_iff_le, Set.supᵢ_eq_unionᵢ, Set.infᵢ_eq_interᵢ,
Set.mem_interᵢ, Set.mem_unionᵢ, exists_prop]
constructor
· intro hω
refine' tendsto_atTop_atTop_of_monotone' (fun n m hnm ↦ Finset.sum_mono_set_of_nonneg
(fun i ↦ Set.indicator_nonneg (fun _ _ ↦ zero_le_one) _) (Finset.range_mono hnm)) _
rintro ⟨i, h⟩
simp only [mem_upperBounds, Set.mem_range, forall_exists_index, forall_apply_eq_imp_iff'] at h
induction' i with k hk
· obtain ⟨j, hj₁, hj₂⟩ := hω 1
refine' not_lt.2 (h <| j + 1)
(lt_of_le_of_lt (Finset.sum_const_zero.symm : 0 = ∑ k in Finset.range (j + 1), 0).le _)
refine' Finset.sum_lt_sum (fun m _ ↦ Set.indicator_nonneg (fun _ _ ↦ zero_le_one) _)
⟨j - 1, Finset.mem_range.2 (lt_of_le_of_lt (Nat.sub_le _ _) j.lt_succ_self), _⟩
rw [Nat.sub_add_cancel hj₁, Set.indicator_of_mem hj₂]
exact zero_lt_one
· rw [imp_false] at hk
push_neg at hk
obtain ⟨i, hi⟩ := hk
obtain ⟨j, hj₁, hj₂⟩ := hω (i + 1)
replace hi : (∑ k in Finset.range i, (s (k + 1)).indicator 1 ω) = k + 1 :=
le_antisymm (h i) hi
refine' not_lt.2 (h <| j + 1) _
rw [← Finset.sum_range_add_sum_Ico _ (i.le_succ.trans (hj₁.trans j.le_succ)), hi]
refine' lt_add_of_pos_right _ _
rw [(Finset.sum_const_zero.symm : 0 = ∑ k in Finset.Ico i (j + 1), 0)]
refine' Finset.sum_lt_sum (fun m _ ↦ Set.indicator_nonneg (fun _ _ ↦ zero_le_one) _)
⟨j - 1, Finset.mem_Ico.2 ⟨(Nat.le_sub_iff_add_le (le_trans ((le_add_iff_nonneg_left _).2
zero_le') hj₁)).2 hj₁, lt_of_le_of_lt (Nat.sub_le _ _) j.lt_succ_self⟩, _⟩
rw [Nat.sub_add_cancel (le_trans ((le_add_iff_nonneg_left _).2 zero_le') hj₁),
Set.indicator_of_mem hj₂]
exact zero_lt_one
· rintro hω i
rw [Set.mem_setOf_eq, tendsto_atTop_atTop] at hω
by_contra hcon
push_neg at hcon
obtain ⟨j, h⟩ := hω (i + 1)
have : (∑ k in Finset.range j, (s (k + 1)).indicator 1 ω) ≤ i := by
have hle : ∀ j ≤ i, (∑ k in Finset.range j, (s (k + 1)).indicator 1 ω) ≤ i := by
refine' fun j hij ↦
(Finset.sum_le_card_nsmul _ _ _ _ : _ ≤ (Finset.range j).card • 1).trans _
· exact fun m _ ↦ Set.indicator_apply_le' (fun _ ↦ le_rfl) fun _ ↦ zero_le_one
· simpa only [Finset.card_range, smul_eq_mul, mul_one]
by_cases hij : j < i
· exact hle _ hij.le
· rw [← Finset.sum_range_add_sum_Ico _ (not_lt.1 hij)]
suffices (∑ k in Finset.Ico i j, (s (k + 1)).indicator 1 ω) = 0 by
rw [this, add_zero]
exact hle _ le_rfl
refine' Finset.sum_eq_zero fun m hm ↦ _
exact Set.indicator_of_not_mem (hcon _ <| (Finset.mem_Ico.1 hm).1.trans m.le_succ) _
exact not_le.2 (lt_of_lt_of_le i.lt_succ_self <| h _ le_rfl) this
#align limsup_eq_tendsto_sum_indicator_nat_at_top limsup_eq_tendsto_sum_indicator_nat_atTop
theorem limsup_eq_tendsto_sum_indicator_atTop (R : Type _) [StrictOrderedSemiring R] [Archimedean R]
(s : ℕ → Set α) : limsup s atTop = { ω | Tendsto
(fun n ↦ ∑ k in Finset.range n, (s (k + 1)).indicator (1 : α → R) ω) atTop atTop } := by
rw [limsup_eq_tendsto_sum_indicator_nat_atTop s]
ext ω
simp only [Set.mem_setOf_eq]
rw [(_ : (fun n ↦ ∑ k in Finset.range n, (s (k + 1)).indicator (1 : α → R) ω) = fun n ↦
↑(∑ k in Finset.range n, (s (k + 1)).indicator (1 : α → ℕ) ω))]
· exact tendsto_nat_cast_atTop_iff.symm
· ext n
simp only [Set.indicator, Pi.one_apply, Finset.sum_boole, Nat.cast_id]
#align limsup_eq_tendsto_sum_indicator_at_top limsup_eq_tendsto_sum_indicator_atTop
end Indicator
|
% GB (09/28/2007)
\chapter{Distributed-Memory Parallel Traversals}
\label{chap:distributedMemoryTraversals}
ROSE provides an experimental distributed-memory AST traversal mechanism meant for very large scale program
analysis. It allows you to distribute expensive program analyses among a distributed-memory system consisting of many
processors; this can be a cluster or a network of workstations. Different processes in the distributed system will get
different parts of the AST to analyze: Each process is assigned a number of defining function declarations in the AST,
and a method implemented by the user is invoked on each of these. The parts of the AST outside of function definitions
are shared among all processes, but there is no guarantee that all function definitions are visible to all processes.
The distributed memory analysis framework uses the MPI message passing library for communicating attributes among
processes. You will need an implementation of MPI to be able to build and run programs using distributed memory
traversals; consult your documentation on how to run MPI programs. (This is often done using a program named {\tt
mpirun}, {\tt mpiexec}, or similar.)
Distributed memory analyses are performed in three phases:
\begin{enumerate}
\item A top-down traversal (the `pre-traversal') specified by the user runs on the shared AST outside of function
definitions. The inherited attributes this traversal computes for defining function declaration nodes in the AST are
saved by the framework for use in the next phase.
\item For every defining function declaration, the user-provided {\tt analyzeSubtree()} method is invoked; these calls
run concurrently on all processors, each on a different subset of the function declarations. The method takes as arguments the AST node for the
function declaration and the inherited attribute computed for this node by the pre-traversal. Within {\tt
analyzeSubtree()} any analysis features provided by ROSE can be used. This method returns the value that will be used as
the synthesized attribute for this function declaration in the bottom-up traversal (the `post-traversal').
However, unlike normal bottom-up traversals, the synthesized attribute is not simply copied in memory as the AST is
distributed. The user must therefore provide the methods {\tt serializeAttribute()} and {\tt deserializeAttribute()}.
These compute a serialized representation of a synthesized attribute, and convert such a representation back to the
user's synthesized attribute type, respectively. A serialized attribute is a pair of an integer specifying the size of
the attribute in bytes and a pointer to a region of memory of that size that will be copied byte by byte across the
distributed system's communication network. Attributes from different parts of the AST may have different sizes. As
serialization of attributes will often involve dynamic memory allocation, the user can also implement the {\tt
deleteSerializedAttribute()} method to free such dynamically allocated memory after the serialized data has been copied to the
communication subsystem's internal buffer.
Within the {\tt analyzeSubtree()} method the methods {\tt numberOfProcesses()} and {\tt myID()} can be called. These
return the total number of concurrent processes, and an integer uniquely identifying the currently running process,
respectively. The ID ranges from 0 to one less than the number of processes, but has no semantics other than that it is
different for each process.
\item Finally, a bottom-up traversal is run on the shared AST outside of function definitions. The values returned by
the distributed analyzers in the previous phase are used as synthesized attributes for function definition nodes in this
traversal.
\end{enumerate}
After the bottom-up traversal has finished, the {\tt getFinalResults()} method can be invoked to obtain the final
synthesized attribute. The {\tt isRootProcess()} method returns true on exactly one designated process and can be used
to perform output, program transformations, or other tasks that are not meant to be run on each processor.
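To make the three phases concrete, the following sketch outlines the general shape of a user-defined analyzer. It is
only an illustration: the base class name {\tt DistributedMemoryTraversal}, its template parameters, and the exact
method signatures are assumptions inferred from the description above (only the method names are fixed there), so the
framework headers distributed with ROSE should be consulted for the authoritative interface.
\begin{verbatim}
// Hedged sketch only: the base class name, template parameters, and signatures
// are assumptions for illustration; only the method names come from the text.
#include <string>
#include <utility>
#include <cstring>

class FunctionNameAnalyzer
  : public DistributedMemoryTraversal<int /* inherited */, std::string /* synthesized */>
{
protected:
    // Phase 2: invoked concurrently; each process sees only its share of the
    // defining function declarations. The inherited attribute (here: an int
    // computed by the shared pre-traversal of phase 1) is passed in.
    std::string analyzeSubtree(SgFunctionDeclaration *decl, int inheritedValue)
    {
        return decl->get_name().str();
    }

    // Convert a synthesized attribute into a (size in bytes, pointer) pair so
    // that it can be shipped over MPI; attributes may differ in size.
    std::pair<int, void *> serializeAttribute(std::string attr) const
    {
        int size = attr.size() + 1;
        char *buffer = new char[size];
        std::strcpy(buffer, attr.c_str());
        return std::make_pair(size, buffer);
    }

    // Rebuild the user-level attribute on the receiving side.
    std::string deserializeAttribute(std::pair<int, void *> serialized) const
    {
        return std::string(static_cast<const char *>(serialized.second));
    }

    // Release the buffer allocated in serializeAttribute() once the framework
    // has copied the bytes into its internal communication buffer.
    void deleteSerializedAttribute(std::pair<int, void *> serialized) const
    {
        delete [] static_cast<char *>(serialized.second);
    }
};
\end{verbatim}
Inside {\tt analyzeSubtree()} the values of {\tt myID()} and {\tt numberOfProcesses()} may be folded into the
synthesized attribute, and after the post-traversal the designated root process (for which {\tt isRootProcess()}
returns true) can report the value obtained from {\tt getFinalResults()}.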
Figure~\ref{Tutorial:exampleDistributedMemoryTraversals} gives a complete example of how to use the distributed
memory analysis framework. It implements a trivial analysis that determines for each function declaration at what depth
in the AST it can be found and what its name is. Figure~\ref{Tutorial:exampleOutput_DistributedMemoryTraversals} shows
the output produced by this program when running using four processors on some input files.
% The distributedMemoryFunctionNames.C file is a copy of projects/DistributedMemoryAnalysis/functionNames.C
\begin{figure}[!h]
{\indent
{\mySmallFontSize
% Do this when processing latex to generate non-html (not using latex2html)
\begin{latexonly}
\lstinputlisting{\TutorialExampleDirectory/distributedMemoryFunctionNames.C}
\end{latexonly}
% Do this when processing latex to build html (using latex2html)
\begin{htmlonly}
\verbatiminput{\TutorialExampleDirectory/distributedMemoryFunctionNames.C}
\end{htmlonly}
% end of scope in font size
}
% End of scope in indentation
}
\caption{Example source demonstrating the use of the distributed-memory parallel analysis framework.}
\label{Tutorial:exampleDistributedMemoryTraversals}
\end{figure}
\begin{figure}[!h]
{\indent
{\mySmallFontSize
% GB (09/28/2007): This is just copied here; we can't generate it as not everyone has a copy of MPI. Might not be very
% elegant, but it's 4:42 p.m. on my last day :-)
\begin{verbatim}
----- found the following functions: ------
process 0: at depth 3: function il
process 0: at depth 5: function head
process 0: at depth 5: function eq
process 1: at depth 3: function headhead
process 1: at depth 3: function List
process 1: at depth 3: function find
process 1: at depth 3: function head
process 2: at depth 3: function operator!=
process 2: at depth 3: function find
process 2: at depth 3: function head
process 2: at depth 3: function fib
process 3: at depth 3: function xform
process 3: at depth 3: function func
process 3: at depth 3: function f
process 3: at depth 3: function g
process 3: at depth 3: function deref
-------------------------------------------
\end{verbatim}
% end of scope in font size
}
% End of scope in indentation
}
\caption{Example output of a distributed-memory analysis running on four processors.}
\label{Tutorial:exampleOutput_DistributedMemoryTraversals}
\end{figure}
|
[STATEMENT]
lemma (in GS_invar) is_on_stack_impl_correct:
shows "is_on_stack_impl v \<longleftrightarrow> v\<in>\<Union>(set p_\<alpha>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_on_stack_impl v = (v \<in> \<Union> (set p_\<alpha>))
[PROOF STEP]
unfolding is_on_stack_impl_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (case I v of None \<Rightarrow> False | Some (STACK x) \<Rightarrow> True | Some DONE \<Rightarrow> False) = (v \<in> \<Union> (set p_\<alpha>))
[PROOF STEP]
using I_consistent[of v]
[PROOF STATE]
proof (prove)
using this:
(I v = Some (STACK ?j)) = (?j < length S \<and> v = S ! ?j)
goal (1 subgoal):
1. (case I v of None \<Rightarrow> False | Some (STACK x) \<Rightarrow> True | Some DONE \<Rightarrow> False) = (v \<in> \<Union> (set p_\<alpha>))
[PROOF STEP]
apply (force
simp: set_p_\<alpha>_is_set_S in_set_conv_nth
split: option.split node_state.split)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
-- import tactic
-- lemma eq_zero_of_five_mul_seven_mul_eq_zero {G : Type*} [add_comm_group G] (a : G)
-- (h_five : 5 • a = 0) (h_seven : 7 • a = 0) : a = 0 :=
-- begin
-- end
|
----------------------------------------------------------------------------
-- The FOTC lists of natural numbers type
----------------------------------------------------------------------------
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
module FOTC.Data.Nat.List.Type where
open import FOTC.Base
open import FOTC.Base.List
open import FOTC.Data.Nat.Type
------------------------------------------------------------------------------
-- The FOTC lists of natural numbers type (inductive predicate for
-- total lists of natural numbers).
data ListN : D → Set where
lnnil : ListN []
lncons : ∀ {n ns} → N n → ListN ns → ListN (n ∷ ns)
{-# ATP axioms lnnil lncons #-}
-- Induction principle.
ListN-ind : (A : D → Set) →
A [] →
(∀ {n ns} → N n → A ns → A (n ∷ ns)) →
∀ {ns} → ListN ns → A ns
ListN-ind A A[] h lnnil = A[]
ListN-ind A A[] h (lncons Nn Lns) = h Nn (ListN-ind A A[] h Lns)
|
(** * 6.887 Formal Reasoning About Programs, Spring 2016 - Pset 3 *)
Require Import Frap Pset3Sig.
Set Implicit Arguments.
(* Authors: Adam Chlipala ([email protected]), Peng Wang ([email protected]) *)
Lemma assumptionsHold_sound : forall state prs (st : state) pm,
(forall x b, pm $? x = Some b
-> match prs $? x with
| Some pr => pr st <-> b = true
| None => False
end)
-> forall asms, assumptionsHold pm asms = true
-> assumptionsAccurate prs asms st.
Proof.
induct asms; simplify; propositional.
cases (assertionHolds pm a); simplify; try equality.
unfold assertionHolds, assertionAccurate in *.
cases a; simplify.
cases (pm $? AssumedPredicate); try equality.
cases b.
cases AssumedToBe; try equality.
apply H in Heq0.
cases (prs $? AssumedPredicate); propositional.
cases AssumedToBe; try equality.
apply H in Heq0.
cases (prs $? AssumedPredicate); propositional.
apply H.
assumption.
cases (assertionHolds pm a); simplify; equality.
Qed.
Lemma applyRule_sound : forall state (doIt : state -> state -> Prop)
prs x b st st' r pm pm',
(forall x b, pm $? x = Some b
-> match prs $? x with
| Some pr => pr st <-> b = true
| None => False
end)
-> (forall x b, pm' $? x = Some b
-> match prs $? x with
| Some pr => pr st' <-> b = true
| None => False
end)
-> applyRule pm pm' r $? x = Some b
-> ruleAccurate prs doIt r
-> doIt st st'
-> match prs $? x with
| Some pr => pr st' <-> b = true
| None => False
end.
Proof.
unfold applyRule; simplify.
cases r.
cases (assumptionsHold pm Assumptions).
cases (AssumedToBe Conclusion).
cases (x ==v AssumedPredicate Conclusion); simplify.
invert H1.
unfold ruleAccurate in H2; simplify.
apply H2 in H3.
unfold assertionAccurate in H3.
rewrite Heq0 in H3.
cases (prs $? AssumedPredicate Conclusion); propositional.
eapply assumptionsHold_sound.
eassumption.
assumption.
apply H0; assumption.
cases (x ==v AssumedPredicate Conclusion); simplify.
invert H1.
unfold ruleAccurate in H2; simplify.
apply H2 in H3.
unfold assertionAccurate in H3.
rewrite Heq0 in H3.
cases (prs $? AssumedPredicate Conclusion); propositional.
eapply assumptionsHold_sound.
eassumption.
assumption.
apply H0; assumption.
apply H0; assumption.
Qed.
Lemma applyRules_sound' : forall state (doIt : state -> state -> Prop)
(prs : fmap var (state -> Prop)) x b st st' pm,
(forall x b, pm $? x = Some b
-> match prs $? x with
| Some pr => pr st <-> b = true
| None => False
end)
-> doIt st st'
-> forall rs pm', (forall r, In r rs -> ruleAccurate prs doIt r)
-> applyRules pm pm' rs $? x = Some b
-> (forall x b, pm' $? x = Some b
-> match prs $? x with
| None => False
| Some pr => pr st' <-> b = true
end)
-> match prs $? x with
| None => False
| Some pr => pr st' <-> b = true
end.
Proof.
induct rs; simplify.
apply H3.
assumption.
eapply IHrs; simplify.
apply H1.
propositional.
eassumption.
eapply applyRule_sound.
apply H.
eassumption.
eassumption.
apply H1; propositional.
assumption.
Qed.
Theorem applyRules_sound : forall state (doIt : state -> state -> Prop)
(prs : fmap var (state -> Prop)) x b st st' rs pm,
(forall r, In r rs -> ruleAccurate prs doIt r)
-> (forall x b, pm $? x = Some b
-> match prs $? x with
| Some pr => pr st <-> b = true
| None => False
end)
-> applyRules pm $0 rs $? x = Some b
-> doIt st st'
-> match prs $? x with
| None => False
| Some pr => pr st' <-> b = true
end.
Proof.
simplify.
eapply applyRules_sound'.
eassumption.
eassumption.
eassumption.
eassumption.
simplify.
equality.
Qed.
Theorem predicate_abstraction_simulates : forall pc state action
(pc0 : pc) (st0 : state)
(actionOf : pc -> action -> pc -> Prop)
(doAction : action -> state -> state -> Prop)
(pa : predicate_abstraction state action),
predicate_abstraction_sound doAction pa
-> simulates (paR pa)
(actionSys pc0 st0 actionOf doAction)
(predicate_abstract pc0 actionOf pa).
Proof.
constructor; simplify.
propositional; subst.
exists (pc0, $0); propositional.
constructor; simplify.
equality.
invert H1.
invert H0.
exists (pc2, match pa.(Rules) $? act with
| None => $0
| Some rs => applyRules pm $0 rs
end); propositional.
constructor; simplify.
unfold predicate_abstraction_sound in H.
cases (Rules pa $? act); simplify; try equality.
eapply applyRules_sound.
simplify.
eapply H.
eassumption.
eassumption.
eassumption.
assumption.
assumption.
constructor; assumption.
Qed.
(* Optional part: using predicate abstraction for another example *)
Import Program2 ZArith.
Definition tr := {|AssumedPredicate := "np = npo";
AssumedToBe := true|}.
Definition fa := {|AssumedPredicate := "np = npo";
AssumedToBe := false|}.
Definition establish := [{|Assumptions := []; Conclusion := tr |}].
Definition preserve := [{|Assumptions := [tr]; Conclusion := tr |};
{|Assumptions := [fa]; Conclusion := fa |}].
Definition falsify := [{|Assumptions := [tr]; Conclusion := fa |}].
Definition locked := {|AssumedPredicate := "locked";
AssumedToBe := true|}.
Definition unlocked := {|AssumedPredicate := "locked";
AssumedToBe := false|}.
Definition take_lock := [{|Assumptions := []; Conclusion := locked |}].
Definition release_lock := [{|Assumptions := []; Conclusion := unlocked |}].
Definition preserve_lock := [{|Assumptions := [locked]; Conclusion := locked |};
{|Assumptions := [unlocked]; Conclusion := unlocked |}].
Definition while_false := [{|Assumptions := [fa]; Conclusion := locked |};
{|Assumptions := [tr; locked]; Conclusion := locked |}].
Definition sys_pa : predicate_abstraction state action := {|
Predicates := $0 $+ ("np = npo", fun st => st.(NP) = st.(NPO))
$+ ("locked", fun st => st.(HasLock) = true);
Rules := $0 $+ (LockOK, preserve ++ take_lock)
$+ (AssignA, establish ++ preserve_lock)
$+ (IfFalse, preserve ++ preserve_lock)
$+ (IfTrue, preserve ++ preserve_lock)
$+ (UnlockOK, preserve ++ release_lock)
$+ (IncA, falsify ++ preserve_lock)
$+ (WhileFalse, while_false)
$+ (WhileTrue, preserve ++ preserve_lock)
|}.
Opaque Zplus.
Theorem sys_ok : forall np npo,
invariantFor (sys np npo) (fun st => fst st = Unlock2 -> (snd st).(HasLock) = true).
Proof.
simplify.
eapply invariant_weaken.
eapply invariant_simulates.
eapply predicate_abstraction_simulates with (pa := sys_pa).
unfold predicate_abstraction_sound; simplify.
cases act; simplify; invert H; unfold ruleAccurate, assertionAccurate in *; simplify; propositional; subst;
simplify; unfold assertionAccurate in *; simplify; propositional;
invert H1; simplify; propositional; equality || (try linear_arithmetic).
model_check_infer.
simplify.
invert H.
simplify.
propositional; subst; invert H1; simplify; try equality.
specialize (H3 "locked" true); simplify.
cases (HasLock st); propositional; equality.
Qed.
|
(* Steve Awodey's book on category theory *)
(******************************************************************************)
(* Chapter 1.3: Categories *)
(******************************************************************************)
(* @suharahiromichi *)
(*
(1) Baseline
http://www.megacz.com/berkeley/coq-categories/
Adapted from that code; modified to use Instance ... Proper.
*)
(*
(2) Definition of Proper functions
A Gentle Introduction to Type Classes and Relations in Coq
*)
(*
(3) Use Setoid, and build instances for Sets and (P,<=).
http://www.iij-ii.co.jp/lab/techdoc/category/category1.html
*)
Require Import ssreflect ssrfun ssrbool eqtype ssrnat seq.
Require Import finset fintype.
Set Implicit Arguments.
Unset Strict Implicit.
Unset Printing Implicit Defensive.
Require Import Notations.
Require Import Morphisms.
Require Import Coq.Setoids.Setoid.
(*
Reserved Notation "x ~> y" (at level 51, left associativity).
*)
Reserved Notation "x \\o y" (at level 51, left associativity).
Reserved Notation "x === y" (at level 71, left associativity).
Generalizable Variables a b c d e.
Class Setoid :=
{
carrier : Type;
eqv : carrier -> carrier -> Prop;
eqv_equivalence : Equivalence eqv
}.
Coercion carrier : Setoid >-> Sortclass.
Notation "x === z" := (eqv x z).
Class Category (Obj : Type) (Hom : Obj -> Obj -> Setoid) :=
{
hom := Hom where "a ~> b" := (hom a b);
ob := Obj;
id : forall {a : Obj}, (a ~> a);
comp : forall {a b c : Obj},
(a ~> b) -> (b ~> c) -> (a ~> c)
where "f \\o g" := (comp f g);
comp_respects : forall {a b c : Obj},
Proper (@eqv (a ~> b) ==> @eqv (b ~>c) ==> @eqv (a ~> c)) comp;
left_identity : forall `(f : a ~> b), id \\o f === f;
right_identity : forall `(f : a ~> b), f \\o id === f;
associativity : forall `(f : a ~> b) `(g : b ~> c) `(h : c ~> d),
f \\o g \\o h === f \\o (g \\o h)
}.
Coercion ob : Category >-> Sortclass.
Notation "a ~> b" := (hom a b).
Notation "f === g" := (eqv f g).
Notation "f \\o g" := (comp f g).
(* Notation "a ~~{ C }~~> b" := (@hom _ _ C a b). *)
Generalizable Variables Obj Hom Prod.
(* eqv satisfies Reflexive, Symmetric, and Transitive. *)
Instance category_eqv_Equiv `(C : Category Obj) (a b : Obj) :
Equivalence (@eqv (a ~> b)).
Proof.
by apply eqv_equivalence.
Qed.
(* comp is a proper function with respect to eqv. *)
Instance category_comp_Proper `(C : Category Obj) (a b c : Obj) :
Proper (@eqv (a ~> b) ==> @eqv (b ~>c) ==> @eqv (a ~> c)) comp.
Proof.
by apply comp_respects.
Qed.
(* Prove some lemmas about rearranging compositions (the "juggle" lemmas). *)
Lemma juggle1 : forall `{C : Category}
`(f : a ~> b) `(g : b ~> c) `(h : c ~> d) `(k : d ~> e),
f \\o g \\o h \\o k === f \\o (g \\o h) \\o k.
Proof.
intros.
Check associativity f g h.
rewrite <- associativity.
reflexivity.
Defined.
Lemma juggle2 : forall `{C : Category}
`(f : a ~> b) `(g : b ~> c) `(h : c ~> d) `(k : d ~> e),
f \\o (g \\o (h \\o k)) === f \\o (g \\o h) \\o k.
Proof.
intros.
do ! rewrite <- associativity.
reflexivity.
Defined.
Lemma juggle3 : forall `{C : Category}
`(f : a ~> b) `(g : b ~> c) `(h : c ~> d) `(k : d ~> e),
f \\o g \\o (h \\o k) === f \\o (g \\o h) \\o k.
Proof.
intros.
do ! rewrite <- associativity.
reflexivity.
Defined.
Reserved Notation "x &&& y" (at level 50, left associativity).
(* Direct products *)
Class Product `{CP : Category Obj}
`(proj1 : forall {a b : Obj}, (Prod a b) ~> a)
`(proj2 : forall {a b : Obj}, (Prod a b) ~> b) :=
{
(* Mediating morphism *)
mediating : forall {a b x : Obj},
(x ~> a) -> (x ~> b) -> (x ~> (Prod a b))
where "f &&& g" := (mediating f g);
med_commute1 : forall (a b x : Obj) (f : x ~> a) (g : x ~> b),
(f &&& g) \\o proj1 === f;
med_commute2 : forall (a b x : Obj) (f : x ~> a) (g : x ~> b),
(f &&& g) \\o proj2 === g;
med_unique : forall (a b x : Obj) (f : x ~> a) (g : x ~> b) (h : x ~> (Prod a b)),
h \\o proj1 === f ->
h \\o proj2 === g ->
h === (f &&& g)
}.
(* **** *)
(* Sets *)
(* **** *)
Instance EquivExt : forall (A B : Set), Equivalence (@eqfun A B) := (* notu *)
{
Equivalence_Reflexive := @frefl A B;
Equivalence_Symmetric := @fsym A B;
Equivalence_Transitive := @ftrans A B
}.
Instance EqMor : forall (A B : Set), Setoid :=
{
carrier := A -> B;
eqv := @eqfun B A
}.
Check @Category Set : (Set → Set → Setoid) → Type.
Check EqMor : Set -> Set -> Setoid.
Check @Category Set EqMor : Type.
Program Instance Sets : @Category Set EqMor.
Obligation 3.
Proof.
rewrite /Sets_obligation_2.
move=> homab homab' Hhomab hombc hombc' Hhombc.
move=> x //=.
rewrite Hhomab.
rewrite Hhombc.
by [].
Qed.
(*
Instance Prod : Product prod :=
{
proj1 A B := @fst A B;
proj2 A B := @snd A B ;
mediating A B X := fun f g x => (f x, g x)
}.
Proof.
- by rewrite //=.
- by rewrite //=.
- rewrite /commute /= /eqfun.
move=> A B X f g h H H0 x.
rewrite -H -H0.
by apply surjective_pairing.
Qed.
*)
(* **** *)
(* P,<= *)
(* **** *)
Open Scope coq_nat_scope.
Search "_ <= _".
Check 0 <= 0 : Prop.
Definition eq_le m n (p q : m <= n) := True.
Instance EquivGeq : forall m n, Equivalence (@eq_le m n). (* notu *)
Proof.
by [].
Qed.
Instance EqLe : forall m n, Setoid :=
{
carrier := m <= n;
eqv := @eq_le m n
}.
Check @Category nat : (nat → nat → Setoid) → Type.
Check EqLe : nat → nat → Setoid.
Check @Category nat EqLe.
Program Instance P_LE : @Category nat EqLe.
Obligation 2.
Proof.
by apply (@Le.le_trans a b c).
Defined.
Obligation 3.
Proof.
rewrite /P_LE_obligation_1.
rewrite /P_LE_obligation_2.
move=> homab homab' Hhomab hombc hombc' Hhombc.
by rewrite /eq_le.
Defined.
Obligation 4.
Proof.
rewrite /P_LE_obligation_1.
rewrite /P_LE_obligation_2.
by rewrite /eq_le.
Defined.
Obligation 5.
Proof.
rewrite /P_LE_obligation_1.
rewrite /P_LE_obligation_2.
by rewrite /eq_le.
Defined.
(* END *)
|
[STATEMENT]
lemma (in -) project_set_Union: "project_set h (\<Union>A) = (\<Union>X \<in> A. project_set h X)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. project_set h (\<Union> A) = \<Union> (project_set h ` A)
[PROOF STEP]
by blast |
State Before: R✝ : Type u₁
inst✝⁴ : Semiring R✝
S : Type u₂
inst✝³ : CommSemiring S
A : Type u₃
inst✝² : Semiring A
inst✝¹ : Algebra S A
r✝ : R✝ → R✝ → Prop
R : Type u₁
inst✝ : Ring R
r : R → R → Prop
a : R
⊢ -{ toQuot := Quot.mk (Rel r) a } = { toQuot := Quot.mk (Rel r) (-a) } State After: R✝ : Type u₁
inst✝⁴ : Semiring R✝
S : Type u₂
inst✝³ : CommSemiring S
A : Type u₃
inst✝² : Semiring A
inst✝¹ : Algebra S A
r✝ : R✝ → R✝ → Prop
R : Type u₁
inst✝ : Ring R
r : R → R → Prop
a : R
⊢ RingQuot.neg r { toQuot := Quot.mk (Rel r) a } = { toQuot := Quot.mk (Rel r) (-a) } Tactic: show neg r _ = _ State Before: R✝ : Type u₁
inst✝⁴ : Semiring R✝
S : Type u₂
inst✝³ : CommSemiring S
A : Type u₃
inst✝² : Semiring A
inst✝¹ : Algebra S A
r✝ : R✝ → R✝ → Prop
R : Type u₁
inst✝ : Ring R
r : R → R → Prop
a : R
⊢ RingQuot.neg r { toQuot := Quot.mk (Rel r) a } = { toQuot := Quot.mk (Rel r) (-a) } State After: R✝ : Type u₁
inst✝⁴ : Semiring R✝
S : Type u₂
inst✝³ : CommSemiring S
A : Type u₃
inst✝² : Semiring A
inst✝¹ : Algebra S A
r✝ : R✝ → R✝ → Prop
R : Type u₁
inst✝ : Ring R
r : R → R → Prop
a : R
⊢ (match { toQuot := Quot.mk (Rel r) a } with
| { toQuot := a } => { toQuot := Quot.map (fun a => -a) (_ : ∀ ⦃a b : R⦄, Rel r a b → Rel r (-a) (-b)) a }) =
{ toQuot := Quot.mk (Rel r) (-a) } Tactic: rw [neg_def] State Before: R✝ : Type u₁
inst✝⁴ : Semiring R✝
S : Type u₂
inst✝³ : CommSemiring S
A : Type u₃
inst✝² : Semiring A
inst✝¹ : Algebra S A
r✝ : R✝ → R✝ → Prop
R : Type u₁
inst✝ : Ring R
r : R → R → Prop
a : R
⊢ (match { toQuot := Quot.mk (Rel r) a } with
| { toQuot := a } => { toQuot := Quot.map (fun a => -a) (_ : ∀ ⦃a b : R⦄, Rel r a b → Rel r (-a) (-b)) a }) =
{ toQuot := Quot.mk (Rel r) (-a) } State After: no goals Tactic: rfl |
(* *********************************************************************)
(* *)
(* Coqlex verified lexer generator *)
(* *)
(* Copyright 2021 Siemens Mobility SAS and Institut National de *)
(* Recherche en Informatique et en Automatique. *)
(* All rights reserved. This file is distributed under *)
(* the terms of the INRIA Non-Commercial License Agreement (see the *)
(* LICENSE file). *)
(* *)
(* *********************************************************************)
Require Import SimpleLexerGenerator.
Require Import MatchLenSpeedUp.
Require Import Coq.Lists.List.
Require Import Coq.Strings.String.
Require Import UsefullProofs.
Require Import micromega.Lia.
Import ListNotations.
Fixpoint electCandidateSpeedUp {Action : Set} RegexpXAction_list str := match RegexpXAction_list with
| [] => None
| (re, ac)::t => (match match_len_speed_up_v2 re str with
| None => electCandidateSpeedUp t str
| Some n => (match electCandidateSpeedUp(Action:=Action) t str with
| Some c => Some (if Nat.ltb n (resultNat c) then c else (mkElectionResult Action re ac n) )
| None => Some (mkElectionResult Action re ac n)
end)
end)
end.
Lemma electCandidateSpeedUp_correct {Action : Set} : forall RegexpXAction_list str, electCandidate (Action:=Action) RegexpXAction_list str = electCandidateSpeedUp RegexpXAction_list str.
Proof.
induction RegexpXAction_list; simpl; auto.
induction a.
intros.
rewrite IHRegexpXAction_list.
rewrite match_len_speed_up_v2_correct; auto.
Qed.
Definition electionSpeedUp {Action : Set} regexp_to_action str := electCandidateSpeedUp (Action := Action) regexp_to_action str.
Lemma electionSpeedUp_correct {Action : Set} : forall regexp_to_action str, election regexp_to_action str = electionSpeedUp (Action:=Action) regexp_to_action str.
Proof.
unfold electionSpeedUp.
unfold election.
intros.
rewrite electCandidateSpeedUp_correct; auto.
Qed.
Lemma electionSpeedUp_inf_strLen {Action : Set} : forall str regexp_to_action e, electionSpeedUp (Action := Action) regexp_to_action str = Some e -> resultNat e <= String.length str.
Proof.
intros.
rewrite <- electionSpeedUp_correct in H.
eapply election_inf_strLen; eauto.
Qed.
Definition parseElectorSpeedup {Action : Set}:= mkElector Action electionSpeedUp electionSpeedUp_inf_strLen.
Definition make_lexer {Token Hist : Set} regexp_x_action (eof_action : Action (Token := Token) (Hist := Hist)) :=
mkRecLexer
Token
Hist
(lexergenerator parseElectorSpeedup regexp_x_action eof_action)
(lexergenerator_start_end_position parseElectorSpeedup regexp_x_action eof_action)
(lexergenerator_tok_position parseElectorSpeedup regexp_x_action eof_action)
(lexergenerator_start_tok_position parseElectorSpeedup regexp_x_action eof_action)
(lexergenerator_start_cur_position_abs parseElectorSpeedup regexp_x_action eof_action)
(lexergenerator_correct_consum parseElectorSpeedup regexp_x_action eof_action)
(lexergenerator_no_implementation_error parseElectorSpeedup regexp_x_action eof_action)
(lexergenerator_start_interruption_position parseElectorSpeedup regexp_x_action eof_action)
(lexergenerator_start_interruption_position2 parseElectorSpeedup regexp_x_action eof_action)
.
|
{-# language FlexibleContexts #-}
{-# language GADTs #-}
{-# language ScopedTypeVariables #-}
{-# language TypeApplications #-}
module Verify where
import Feldspar
import Feldspar.Software
import Feldspar.Software.Verify
import Feldspar.Array.Vector
import Feldspar.Array.Buffered
import Data.Bits (Bits)
import Data.Complex (Complex)
import Prelude hiding ((==), (/=), (>), (<=), length, div, reverse, sum)
--------------------------------------------------------------------------------
inc :: Software ()
inc = do
len :: SExp Length <- fget stdin
ix :: SExp Index <- fget stdin
arr :: Arr (SExp Word32) <- newArr len
-- assert (ix <= length arr) "ix out of bounds"
val <- getArr arr ix
setArr arr ix (val + 1)
copy :: Software ()
copy = do
arr1 :: Arr (SExp Word32) <- newArr 10
arr2 :: Arr (SExp Word32) <- unsafeFreezeArr arr1 >>= unsafeThawArr
--
setArr arr1 0 0
val <- getArr arr1 0
setArr arr2 0 val
inplace :: Software ()
inplace = do
st :: Store Software (SExp Word32) <- newInPlaceStore 10
arr :: Manifest Software (SExp Word32) <- store st $ (1...10)
brr <- store st $ reverse arr
val <- shareM $ sum brr
return ()
where
reverse :: Manifest Software (SExp Word32) -> Push Software (SExp Word32)
reverse = pairwise (\ix -> (ix, 10-ix-1))
--------------------------------------------------------------------------------
-- A super-simple verification example.
count :: Software ()
count = do
printf "Enter a number: "
n :: SExp Word32 <- fget stdin
let total = iter n 0 (\i n -> hint (n == i) $ i + 1)
total <- initRef total >>= unsafeFreezeRef
assert (total == n) "Count is wrong"
printf "The count is %d\n" total
--------------------------------------------------------------------------------
rev :: Software ()
rev = do
n <- fget stdin
loc :: IArr (SExp Word32) <- newArr n >>= unsafeFreezeArr
cpy :: Arr (SExp Word32) <- newArr n
assert (n > 0) "neg"
for 0 1 (n-1) $ \i -> do
setArr cpy i (loc ! (n-i-1))
rev_inplace :: Software ()
rev_inplace = do
n <- fget stdin
loc :: Arr (SExp Word32) <- newArr n
vec <- unsafeFreezeArr loc >>= unsafeThawArr
for 0 1 ((n `div` 2 :: SExp Word32)) $ \i -> do
x <- getArr vec i
y <- getArr vec (n-i-1)
setArr loc i y
setArr loc (n-i-1) x
rev_amazing :: Software ()
rev_amazing = do
n <- fget stdin
iarr :: IArr (SExp Word32) <- newArr n >>= unsafeFreezeArr
arr <- unsafeThawArr iarr
result <- manifestArr arr (pairwise @Software (\i -> (i, n-i-1)) (reverse iarr))
return ()
------------------------------------------------------------
{-
test_scProd1 = do
n <- fget stdin
printf "result: %.3f\n" $
(scProd (fmap i2n (0 ... n-1)) (fmap i2n (2 ... n+1)) :: Data Double)
test_scProd2 = do
n <- fget stdin
v1 <- manifestFresh $ fmap i2n (0 ... n-1)
v2 <- manifestFresh $ fmap i2n (2 ... n+1)
printf "result: %.3f\n" (scProd v1 v2 :: Data Double)
map_inplace :: Run ()
map_inplace = do
n <- fget stdin
loc <- newArr n
vec <- manifest loc (0 ... n-1)
manifestStore loc $ map (+1) vec
vec <- unsafeFreezeArr loc
printf "result: %d\n" $ sum vec
map2_inplace :: Run ()
map2_inplace = do
n <- fget stdin
assert (n < maxBound) "oops"
loc :: Arr (Data Word32) <- newArr (n+1)
vec <- unsafeFreezeArr loc
for (0, 1, Excl (n :: Data Word32)) $ \i -> do
setArr loc i (arrIx vec i+1)
tail_inplace :: Run ()
tail_inplace = do
n <- fget stdin
loc :: Arr (Data Word32) <- newArr n
vec <- unsafeFreezeArr loc
let when cond x = iff cond x (return ())
when (n > 0) $
for (0, 1, Excl (n-1)) $ \i -> do
setArr loc i (arrIx vec (i+1)+1)
filter_inplace :: Run ()
filter_inplace = do
n <- fget stdin
loc :: Arr (Data Word32) <- newArr n
vec <- unsafeFreezeArr loc
ref <- initRef 0
let when cond x = iff cond x (return ())
for (0, 1, Excl n) $ \i -> do
let x = arrIx vec i
when (x > 5) $ do
j <- unsafeFreezeRef ref
hint (j <= i)
setArr loc j x
setRef ref (j+1)
rev_inplace :: Software ()
rev_inplace = do
n <- fget stdin
loc :: Arr (Data Word32) <- newArr n
vec <- unsafeFreezeArr loc >>= unsafeThawArr
for (0, 1, Excl (n `div` 2 :: Data Word32)) $ \i -> do
x <- getArr vec i
y <- getArr vec (n-i-1)
setArr loc i y
setArr loc (n-i-1) x
-}
--------------------------------------------------------------------------------
|
/-
Copyright (c) 2020 Kevin Kappelmann. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kevin Kappelmann
! This file was ported from Lean 3 source module algebra.continued_fractions.computation.terminates_iff_rat
! leanprover-community/mathlib commit a7e36e48519ab281320c4d192da6a7b348ce40ad
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Algebra.ContinuedFractions.Computation.Approximations
import Mathbin.Algebra.ContinuedFractions.Computation.CorrectnessTerminating
import Mathbin.Data.Rat.Floor
/-!
# Termination of Continued Fraction Computations (`gcf.of`)
## Summary
We show that the continued fraction for a value `v`, as defined in
`algebra.continued_fractions.computation.basic`, terminates if and only if `v` corresponds to a
rational number, that is `v = ↑q` for some `q : ℚ`.
## Main Theorems
- `generalized_continued_fraction.coe_of_rat_eq` shows that
`generalized_continued_fraction.of q`, coerced to `generalized_continued_fraction K`, equals
`generalized_continued_fraction.of v` for `v : K` given that `v = ↑q` and `q : ℚ`.
- `generalized_continued_fraction.terminates_iff_rat` shows that
`generalized_continued_fraction.of v` terminates if and only if `v = ↑q` for some `q : ℚ`.
## Tags
rational, continued fraction, termination
-/
namespace GeneralizedContinuedFraction
/- ./././Mathport/Syntax/Translate/Command.lean:224:11: unsupported: unusual advanced open style -/
open GeneralizedContinuedFraction (of)
variable {K : Type _} [LinearOrderedField K] [FloorRing K]
/-
We will have to constantly coerce along our structures in the following proofs using their provided
map functions.
-/
attribute [local simp] pair.map int_fract_pair.mapFr
section RatOfTerminates
/-!
### Terminating Continued Fractions Are Rational
We want to show that the computation of a continued fraction `generalized_continued_fraction.of v`
terminates if and only if `v ∈ ℚ`. In this section, we show the implication from left to right.
We first show that every finite convergent corresponds to a rational number `q` and then use the
finite correctness proof (`of_correctness_of_terminates`) of `generalized_continued_fraction.of` to
show that `v = ↑q`.
-/
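/- For intuition (a sketch, not part of the original file): over `K = ℝ` and `v = √2` the
algorithm yields `h = 1` and partial denominators `2, 2, 2, …`, so the convergents are
`1, 3/2, 7/5, 17/12, …` — each one the coercion of a rational number, even though the
fraction itself never terminates because `√2` is irrational. -/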
variable (v : K) (n : ℕ)
theorem exists_gcf_pair_rat_eq_of_nth_conts_aux :
∃ conts : Pair ℚ, (of v).continuantsAux n = (conts.map coe : Pair K) :=
Nat.strong_induction_on n
(by
clear n
let g := of v
intro n IH
rcases n with (_ | _ | n)
-- n = 0
· suffices ∃ gp : pair ℚ, pair.mk (1 : K) 0 = gp.map coe by simpa [continuants_aux]
use pair.mk 1 0
simp
-- n = 1
· suffices ∃ conts : pair ℚ, pair.mk g.h 1 = conts.map coe by simpa [continuants_aux]
use pair.mk ⌊v⌋ 1
simp
-- 2 ≤ n
· cases' IH (n + 1) <| lt_add_one (n + 1) with pred_conts pred_conts_eq
-- invoke the IH
cases' s_ppred_nth_eq : g.s.nth n with gp_n
-- option.none
· use pred_conts
have : g.continuants_aux (n + 2) = g.continuants_aux (n + 1) :=
continuants_aux_stable_of_terminated (n + 1).le_succ s_ppred_nth_eq
simp only [this, pred_conts_eq]
-- option.some
· -- invoke the IH a second time
cases' IH n <| lt_of_le_of_lt n.le_succ <| lt_add_one <| n + 1 with
ppred_conts ppred_conts_eq
obtain ⟨a_eq_one, z, b_eq_z⟩ : gp_n.a = 1 ∧ ∃ z : ℤ, gp_n.b = (z : K)
exact of_part_num_eq_one_and_exists_int_part_denom_eq s_ppred_nth_eq
-- finally, unfold the recurrence to obtain the required rational value.
simp only [a_eq_one, b_eq_z,
continuants_aux_recurrence s_ppred_nth_eq ppred_conts_eq pred_conts_eq]
use next_continuants 1 (z : ℚ) ppred_conts pred_conts
cases ppred_conts
cases pred_conts
simp [next_continuants, next_numerator, next_denominator])
#align generalized_continued_fraction.exists_gcf_pair_rat_eq_of_nth_conts_aux GeneralizedContinuedFraction.exists_gcf_pair_rat_eq_of_nth_conts_aux
theorem exists_gcf_pair_rat_eq_nth_conts :
∃ conts : Pair ℚ, (of v).continuants n = (conts.map coe : Pair K) :=
by
rw [nth_cont_eq_succ_nth_cont_aux]
exact exists_gcf_pair_rat_eq_of_nth_conts_aux v <| n + 1
#align generalized_continued_fraction.exists_gcf_pair_rat_eq_nth_conts GeneralizedContinuedFraction.exists_gcf_pair_rat_eq_nth_conts
theorem exists_rat_eq_nth_numerator : ∃ q : ℚ, (of v).numerators n = (q : K) :=
by
rcases exists_gcf_pair_rat_eq_nth_conts v n with ⟨⟨a, _⟩, nth_cont_eq⟩
use a
simp [num_eq_conts_a, nth_cont_eq]
#align generalized_continued_fraction.exists_rat_eq_nth_numerator GeneralizedContinuedFraction.exists_rat_eq_nth_numerator
theorem exists_rat_eq_nth_denominator : ∃ q : ℚ, (of v).denominators n = (q : K) :=
by
rcases exists_gcf_pair_rat_eq_nth_conts v n with ⟨⟨_, b⟩, nth_cont_eq⟩
use b
simp [denom_eq_conts_b, nth_cont_eq]
#align generalized_continued_fraction.exists_rat_eq_nth_denominator GeneralizedContinuedFraction.exists_rat_eq_nth_denominator
/-- Every finite convergent corresponds to a rational number. -/
theorem exists_rat_eq_nth_convergent : ∃ q : ℚ, (of v).convergents n = (q : K) :=
by
rcases exists_rat_eq_nth_numerator v n with ⟨Aₙ, nth_num_eq⟩
rcases exists_rat_eq_nth_denominator v n with ⟨Bₙ, nth_denom_eq⟩
use Aₙ / Bₙ
simp [nth_num_eq, nth_denom_eq, convergent_eq_num_div_denom]
#align generalized_continued_fraction.exists_rat_eq_nth_convergent GeneralizedContinuedFraction.exists_rat_eq_nth_convergent
variable {v}
/-- Every terminating continued fraction corresponds to a rational number. -/
theorem exists_rat_eq_of_terminates (terminates : (of v).Terminates) : ∃ q : ℚ, v = ↑q :=
by
obtain ⟨n, v_eq_conv⟩ : ∃ n, v = (of v).convergents n
exact of_correctness_of_terminates terminates
obtain ⟨q, conv_eq_q⟩ : ∃ q : ℚ, (of v).convergents n = (↑q : K)
exact exists_rat_eq_nth_convergent v n
have : v = (↑q : K) := Eq.trans v_eq_conv conv_eq_q
use q, this
#align generalized_continued_fraction.exists_rat_eq_of_terminates GeneralizedContinuedFraction.exists_rat_eq_of_terminates
end RatOfTerminates
section RatTranslation
/-!
### Technical Translation Lemmas
Before we can show that the continued fraction of a rational number terminates, we have to prove
some technical translation lemmas. More precisely, in this section, we show that, given a rational
number `q : ℚ` and a value `v : K` with `v = ↑q`, the continued fractions of `q` and `v` coincide.
In particular, we show that
```lean
(↑(generalized_continued_fraction.of q : generalized_continued_fraction ℚ)
: generalized_continued_fraction K)
= generalized_continued_fraction.of v
```
in `generalized_continued_fraction.coe_of_rat`.
To do this, we proceed bottom-up, showing the correspondence between the basic functions involved in
the computation first and then lifting the results step by step.
-/
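/- A concrete instance of the correspondence (sketch): for `q = 3/2` and `v = (3/2 : K)` we have
`int_fract_pair.of q = ⟨1, 1/2⟩` and `int_fract_pair.of v = ⟨1, (1/2 : K)⟩`, and `map_fr coe`
sends the former to the latter; this is exactly `int_fract_pair.coe_of_rat_eq` below. -/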
-- The lifting works for arbitrary linear ordered fields with a floor function.
variable {v : K} {q : ℚ} (v_eq_q : v = (↑q : K)) (n : ℕ)
include v_eq_q
/-! First, we show the correspondence for the very basic functions in
`generalized_continued_fraction.int_fract_pair`. -/
namespace IntFractPair
theorem coe_of_rat_eq : ((IntFractPair.of q).mapFr coe : IntFractPair K) = IntFractPair.of v := by
simp [int_fract_pair.of, v_eq_q]
#align generalized_continued_fraction.int_fract_pair.coe_of_rat_eq GeneralizedContinuedFraction.IntFractPair.coe_of_rat_eq
theorem coe_stream_nth_rat_eq :
((IntFractPair.stream q n).map (mapFr coe) : Option <| IntFractPair K) =
IntFractPair.stream v n :=
by
induction' n with n IH
case zero => simp [int_fract_pair.stream, coe_of_rat_eq v_eq_q]
case succ =>
rw [v_eq_q] at IH
cases' stream_q_nth_eq : int_fract_pair.stream q n with ifp_n
case none => simp [int_fract_pair.stream, IH.symm, v_eq_q, stream_q_nth_eq]
case some =>
cases' ifp_n with b fr
cases' Decidable.em (fr = 0) with fr_zero fr_ne_zero
· simp [int_fract_pair.stream, IH.symm, v_eq_q, stream_q_nth_eq, fr_zero]
· replace IH : some (int_fract_pair.mk b ↑fr) = int_fract_pair.stream (↑q) n
· rwa [stream_q_nth_eq] at IH
have : (fr : K)⁻¹ = ((fr⁻¹ : ℚ) : K) := by norm_cast
have coe_of_fr := coe_of_rat_eq this
simpa [int_fract_pair.stream, IH.symm, v_eq_q, stream_q_nth_eq, fr_ne_zero]
#align generalized_continued_fraction.int_fract_pair.coe_stream_nth_rat_eq GeneralizedContinuedFraction.IntFractPair.coe_stream_nth_rat_eq
theorem coe_stream'_rat_eq :
((IntFractPair.stream q).map (Option.map (mapFr coe)) : Stream' <| Option <| IntFractPair K) =
IntFractPair.stream v :=
by
funext n
exact int_fract_pair.coe_stream_nth_rat_eq v_eq_q n
#align generalized_continued_fraction.int_fract_pair.coe_stream_rat_eq GeneralizedContinuedFraction.IntFractPair.coe_stream'_rat_eq
end IntFractPair
/-! Now we lift the coercion results to the continued fraction computation. -/
theorem coe_of_h_rat_eq : (↑((of q).h : ℚ) : K) = (of v).h :=
by
unfold of int_fract_pair.seq1
rw [← int_fract_pair.coe_of_rat_eq v_eq_q]
simp
#align generalized_continued_fraction.coe_of_h_rat_eq GeneralizedContinuedFraction.coe_of_h_rat_eq
theorem coe_of_s_nth_rat_eq :
(((of q).s.get? n).map (Pair.map coe) : Option <| Pair K) = (of v).s.get? n :=
by
simp only [of, int_fract_pair.seq1, seq.map_nth, seq.nth_tail]
simp only [seq.nth]
rw [← int_fract_pair.coe_stream_rat_eq v_eq_q]
rcases succ_nth_stream_eq : int_fract_pair.stream q (n + 1) with (_ | ⟨_, _⟩) <;>
simp [Stream'.map, Stream'.nth, succ_nth_stream_eq]
#align generalized_continued_fraction.coe_of_s_nth_rat_eq GeneralizedContinuedFraction.coe_of_s_nth_rat_eq
theorem coe_of_s_rat_eq : ((of q).s.map (Pair.map coe) : Seq <| Pair K) = (of v).s :=
by
ext n
rw [← coe_of_s_nth_rat_eq v_eq_q]
rfl
#align generalized_continued_fraction.coe_of_s_rat_eq GeneralizedContinuedFraction.coe_of_s_rat_eq
/-- Given `(v : K)`, `(q : ℚ)`, and `v = ↑q`, we have that `gcf.of q = gcf.of v` (up to coercion). -/
theorem coe_of_rat_eq :
(⟨(of q).h, (of q).s.map (Pair.map coe)⟩ : GeneralizedContinuedFraction K) = of v :=
by
cases' gcf_v_eq : of v with h s
subst v
obtain rfl : ↑⌊↑q⌋ = h := by injection gcf_v_eq
simp [coe_of_h_rat_eq rfl, coe_of_s_rat_eq rfl, gcf_v_eq]
#align generalized_continued_fraction.coe_of_rat_eq GeneralizedContinuedFraction.coe_of_rat_eq
theorem of_terminates_iff_of_rat_terminates {v : K} {q : ℚ} (v_eq_q : v = (q : K)) :
(of v).Terminates ↔ (of q).Terminates := by
constructor <;> intro h <;> cases' h with n h <;> use n <;>
simp only [seq.terminated_at, (coe_of_s_nth_rat_eq v_eq_q n).symm] at h⊢ <;>
cases (of q).s.get? n <;>
trivial
#align generalized_continued_fraction.of_terminates_iff_of_rat_terminates GeneralizedContinuedFraction.of_terminates_iff_of_rat_terminates
end RatTranslation
section TerminatesOfRat
/-!
### Continued Fractions of Rationals Terminate
Finally, we show that the continued fraction of a rational number terminates.
The crucial insight is that, given any `q : ℚ` with `0 < q < 1`, the numerator of `int.fract q⁻¹` is
smaller than the numerator of `q`. As the continued fraction computation recursively operates on
the fractional part of a value `v` and `0 ≤ int.fract v < 1`, we infer that the numerator of the
fractional part in the computation decreases by at least one in each step. As `0 ≤ int.fract v`,
this process must stop after a finite number of steps, and hence the computation terminates.
-/
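/- A worked example of the descent (sketch, not part of the original file): for `q = 10/7` the
stream is `⟨1, 3/7⟩, ⟨2, 1/3⟩, ⟨3, 0⟩, none, …`; the numerators of the fractional parts are
`3 > 1 > 0`, strictly decreasing until the fractional part hits `0` and the stream stops. -/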
namespace IntFractPair
variable {q : ℚ} {n : ℕ}
/-- Shows that for any `q : ℚ` with `0 < q < 1`, the numerator of the fractional part of
`int_fract_pair.of q⁻¹` is smaller than the numerator of `q`.
-/
theorem of_inv_fr_num_lt_num_of_pos (q_pos : 0 < q) : (IntFractPair.of q⁻¹).fr.num < q.num :=
Rat.fract_inv_num_lt_num_of_pos q_pos
#align generalized_continued_fraction.int_fract_pair.of_inv_fr_num_lt_num_of_pos GeneralizedContinuedFraction.IntFractPair.of_inv_fr_num_lt_num_of_pos
/-- Shows that the sequence of numerators of the fractional parts of the stream is strictly
antitone. -/
theorem stream_succ_nth_fr_num_lt_nth_fr_num_rat {ifp_n ifp_succ_n : IntFractPair ℚ}
(stream_nth_eq : IntFractPair.stream q n = some ifp_n)
(stream_succ_nth_eq : IntFractPair.stream q (n + 1) = some ifp_succ_n) :
ifp_succ_n.fr.num < ifp_n.fr.num :=
by
obtain ⟨ifp_n', stream_nth_eq', ifp_n_fract_ne_zero, int_fract_pair.of_eq_ifp_succ_n⟩ :
∃ ifp_n',
int_fract_pair.stream q n = some ifp_n' ∧
ifp_n'.fr ≠ 0 ∧ int_fract_pair.of ifp_n'.fr⁻¹ = ifp_succ_n
exact succ_nth_stream_eq_some_iff.elim_left stream_succ_nth_eq
have : ifp_n = ifp_n' := by injection Eq.trans stream_nth_eq.symm stream_nth_eq'
cases this
rw [← int_fract_pair.of_eq_ifp_succ_n]
cases' nth_stream_fr_nonneg_lt_one stream_nth_eq with zero_le_ifp_n_fract ifp_n_fract_lt_one
have : 0 < ifp_n.fr := lt_of_le_of_ne zero_le_ifp_n_fract <| ifp_n_fract_ne_zero.symm
exact of_inv_fr_num_lt_num_of_pos this
#align generalized_continued_fraction.int_fract_pair.stream_succ_nth_fr_num_lt_nth_fr_num_rat GeneralizedContinuedFraction.IntFractPair.stream_succ_nth_fr_num_lt_nth_fr_num_rat
theorem stream_nth_fr_num_le_fr_num_sub_n_rat :
∀ {ifp_n : IntFractPair ℚ},
IntFractPair.stream q n = some ifp_n → ifp_n.fr.num ≤ (IntFractPair.of q).fr.num - n :=
by
induction' n with n IH
case zero =>
intro ifp_zero stream_zero_eq
have : int_fract_pair.of q = ifp_zero := by injection stream_zero_eq
simp [le_refl, this.symm]
case succ =>
intro ifp_succ_n stream_succ_nth_eq
suffices ifp_succ_n.fr.num + 1 ≤ (int_fract_pair.of q).fr.num - n
by
rw [Int.ofNat_succ, sub_add_eq_sub_sub]
solve_by_elim [le_sub_right_of_add_le]
rcases succ_nth_stream_eq_some_iff.elim_left stream_succ_nth_eq with ⟨ifp_n, stream_nth_eq, -⟩
have : ifp_succ_n.fr.num < ifp_n.fr.num :=
stream_succ_nth_fr_num_lt_nth_fr_num_rat stream_nth_eq stream_succ_nth_eq
have : ifp_succ_n.fr.num + 1 ≤ ifp_n.fr.num := Int.add_one_le_of_lt this
exact le_trans this (IH stream_nth_eq)
#align generalized_continued_fraction.int_fract_pair.stream_nth_fr_num_le_fr_num_sub_n_rat GeneralizedContinuedFraction.IntFractPair.stream_nth_fr_num_le_fr_num_sub_n_rat
theorem exists_nth_stream_eq_none_of_rat (q : ℚ) : ∃ n : ℕ, IntFractPair.stream q n = none :=
by
let fract_q_num := (Int.fract q).num; let n := fract_q_num.nat_abs + 1
cases' stream_nth_eq : int_fract_pair.stream q n with ifp
· use n
exact stream_nth_eq
· -- arrive at a contradiction since the numerator decreased num + 1 times but every fractional
-- value is nonnegative.
have ifp_fr_num_le_q_fr_num_sub_n : ifp.fr.num ≤ fract_q_num - n :=
stream_nth_fr_num_le_fr_num_sub_n_rat stream_nth_eq
have : fract_q_num - n = -1 :=
by
have : 0 ≤ fract_q_num := rat.num_nonneg_iff_zero_le.elim_right (Int.fract_nonneg q)
simp [Int.natAbs_of_nonneg this, sub_add_eq_sub_sub_swap, sub_right_comm]
have : ifp.fr.num ≤ -1 := by rwa [this] at ifp_fr_num_le_q_fr_num_sub_n
have : 0 ≤ ifp.fr := (nth_stream_fr_nonneg_lt_one stream_nth_eq).left
have : 0 ≤ ifp.fr.num := rat.num_nonneg_iff_zero_le.elim_right this
linarith
#align generalized_continued_fraction.int_fract_pair.exists_nth_stream_eq_none_of_rat GeneralizedContinuedFraction.IntFractPair.exists_nth_stream_eq_none_of_rat
end IntFractPair
/-- The continued fraction of a rational number terminates. -/
theorem terminates_of_rat (q : ℚ) : (of q).Terminates :=
Exists.elim (IntFractPair.exists_nth_stream_eq_none_of_rat q) fun n stream_nth_eq_none =>
Exists.intro n
(have : IntFractPair.stream q (n + 1) = none := IntFractPair.stream_isSeq q stream_nth_eq_none
of_terminatedAt_n_iff_succ_nth_intFractPair_stream_eq_none.right this)
#align generalized_continued_fraction.terminates_of_rat GeneralizedContinuedFraction.terminates_of_rat
end TerminatesOfRat
/-- The continued fraction `generalized_continued_fraction.of v` terminates if and only if `v ∈ ℚ`.
-/
theorem terminates_iff_rat (v : K) : (of v).Terminates ↔ ∃ q : ℚ, v = (q : K) :=
Iff.intro
(fun terminates_v : (of v).Terminates =>
show ∃ q : ℚ, v = (q : K) from exists_rat_eq_of_terminates terminates_v)
fun exists_q_eq_v : ∃ q : ℚ, v = (↑q : K) =>
Exists.elim exists_q_eq_v fun q => fun v_eq_q : v = ↑q =>
have : (of q).Terminates := terminates_of_rat q
(of_terminates_iff_of_rat_terminates v_eq_q).right this
#align generalized_continued_fraction.terminates_iff_rat GeneralizedContinuedFraction.terminates_iff_rat
end GeneralizedContinuedFraction
|
theory Syntax
imports Main Push_State
begin
chapter \<open>While Language Syntax\<close>
(* a basic step is an action, a verification condition, and a behaviour set. *)
(* note that iterated product types are right-associated. *)
type_synonym ('a,'b) basic = "('a \<times> 'b set \<times> 'b rel)"
type_synonym ('a,'b) eff = "('b set \<times> 'b rel)"
type_synonym ('a,'b) seq = "('a,'b) basic list"
type_synonym ('a,'b) wmm = "('a,'b) basic \<Rightarrow> ('a,'b) basic \<Rightarrow> ('a,'b) basic \<Rightarrow> bool"
abbreviation tag :: "('a,'b) basic \<Rightarrow> 'a"
where "tag \<equiv> fst"
abbreviation vc :: "('a,'b) basic \<Rightarrow> 'b set"
where "vc \<alpha> \<equiv> fst (snd \<alpha>)"
abbreviation beh :: "('a,'b) basic \<Rightarrow> 'b rel"
where "beh \<alpha> \<equiv> snd (snd \<alpha>)"
abbreviation pushbasic where
"pushbasic s s' \<alpha> \<equiv> (tag \<alpha>, pushpred s (vc \<alpha>), pushrel s s' (beh \<alpha>))"
abbreviation popbasic where
"popbasic s s' \<alpha> \<equiv> (tag \<alpha>, poppred' s (vc \<alpha>), poprel' s s' (beh \<alpha>))"
text \<open>
A While language with non-deterministic choice, iteration and parallel composition.
Choice is intended to select from an arbitrary set of commands, however, this cannot
be expressed as an Isabelle datatype. To mimic a set, choice takes a function from
'labels' to arbitrary commands, where it may select any command in the function's range.
The state encoding is reused to express the notion of a label (but maybe this is a bad idea).
\<close>
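(* A sketch, not part of the original theory: a binary choice between commands c\<^sub>1 and c\<^sub>2
   can be recovered by choosing the function \<lambda>l. if l = l\<^sub>0 then c\<^sub>1 else c\<^sub>2 for some fixed
   label l\<^sub>0, so that Choice over this function selects either c\<^sub>1 or c\<^sub>2. *)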
datatype ('a,'b) com =
Nil
| Basic "('a,'b) basic"
| Seq "('a,'b) com" "('a,'b) wmm" "('a,'b) com" ("_ ;\<^sub>_ _ " [90,0,90] 80)
| Choice "'b \<Rightarrow> ('a,'b) com" (binder "\<Sqinter>" 10)
| Loop "('a,'b) com" "('a,'b) wmm" ("_*\<^sub>_" [90,90] 90)
| Parallel "('a,'b) com" "('a,'b) com" (infixr "||" 150)
| Thread "('a,'b) com"
| Capture 'b "('a,'b) com"
abbreviation univ_stack ("\<forall>\<^sub>c _" 100)
where "univ_stack c \<equiv> \<Sqinter>s. Capture s c"
subsection \<open>Local Command\<close>
text \<open>
Identify if a command consists of only thread local constructs.
\<close>
inductive local :: "('a,'b) com \<Rightarrow> bool"
where
"local Nil" |
"local (Basic \<alpha>)" |
"local c\<^sub>1 \<Longrightarrow> local c\<^sub>2 \<Longrightarrow> local (c\<^sub>1 ;\<^sub>r c\<^sub>2)" |
"\<forall>s. local (f s) \<Longrightarrow> local (Choice f)" |
"local c \<Longrightarrow> local (c*\<^sub>w)" |
"local c \<Longrightarrow> local (Capture k c)"
lemma local_simps [simp]:
"local Nil = True"
"local (Basic \<alpha>) = True"
"local (c\<^sub>1 ;\<^sub>r c\<^sub>2) = (local c\<^sub>1 \<and> local c\<^sub>2)"
"local (Choice f) = (\<forall>s. local (f s))"
"local (c*\<^sub>w) = local c"
"local (Capture k c) = local c"
"local (c\<^sub>1 || c\<^sub>2) = False"
"local (Thread c) = False"
by (auto intro: local.intros elim: local.cases)
end |
[STATEMENT]
theorem polref_trans:
assumes A: "p \<sqsubseteq>\<^bsub>f\<^esub>\<^sub>,\<^bsub>g\<^esub> p'"
and B: "p' \<sqsubseteq>\<^bsub>f'\<^esub>\<^sub>,\<^bsub>g'\<^esub> p''"
shows "p \<sqsubseteq>\<^bsub>f o f'\<^esub>\<^sub>,\<^bsub>g o g'\<^esub> p''"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. p \<sqsubseteq>\<^bsub>f \<circ> f'\<^esub>\<^sub>,\<^bsub>g \<circ> g'\<^esub> p''
[PROOF STEP]
apply(insert A B)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>p \<sqsubseteq>\<^bsub>f\<^esub>\<^sub>,\<^bsub>g\<^esub> p'; p' \<sqsubseteq>\<^bsub>f'\<^esub>\<^sub>,\<^bsub>g'\<^esub> p''\<rbrakk> \<Longrightarrow> p \<sqsubseteq>\<^bsub>f \<circ> f'\<^esub>\<^sub>,\<^bsub>g \<circ> g'\<^esub> p''
[PROOF STEP]
unfolding policy_refinement_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>a. case p a of \<bottom> \<Rightarrow> True | \<lfloor>allow y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f x = a}. \<exists>b'. p' a' = \<lfloor>allow b'\<rfloor> \<and> g b' = y | \<lfloor>deny y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f x = a}. \<exists>b'. p' a' = \<lfloor>deny b'\<rfloor> \<and> g b' = y; \<forall>a. case p' a of \<bottom> \<Rightarrow> True | \<lfloor>allow y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>allow b'\<rfloor> \<and> g' b' = y | \<lfloor>deny y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>deny b'\<rfloor> \<and> g' b' = y\<rbrakk> \<Longrightarrow> \<forall>a. case p a of \<bottom> \<Rightarrow> True | \<lfloor>allow y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. (f \<circ> f') x = a}. \<exists>b'. p'' a' = \<lfloor>allow b'\<rfloor> \<and> (g \<circ> g') b' = y | \<lfloor>deny y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. (f \<circ> f') x = a}. \<exists>b'. p'' a' = \<lfloor>deny b'\<rfloor> \<and> (g \<circ> g') b' = y
[PROOF STEP]
apply(auto split: option.split decision.split simp: o_def)[1]
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x1 a'. \<lbrakk>\<forall>a. case p a of \<bottom> \<Rightarrow> True | \<lfloor>allow y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f x = a}. \<exists>b'. p' a' = \<lfloor>allow b'\<rfloor> \<and> g b' = y | \<lfloor>deny y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f x = a}. \<exists>b'. p' a' = \<lfloor>deny b'\<rfloor> \<and> g b' = y; \<forall>a. case p' a of \<bottom> \<Rightarrow> True | \<lfloor>allow y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>allow b'\<rfloor> \<and> g' b' = y | \<lfloor>deny y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>deny b'\<rfloor> \<and> g' b' = y; p (f (f' a')) = \<lfloor>allow x1\<rfloor>\<rbrakk> \<Longrightarrow> \<exists>b'. p'' a' = \<lfloor>allow b'\<rfloor> \<and> g (g' b') = x1
2. \<And>x2a a'. \<lbrakk>\<forall>a. case p a of \<bottom> \<Rightarrow> True | \<lfloor>allow y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f x = a}. \<exists>b'. p' a' = \<lfloor>allow b'\<rfloor> \<and> g b' = y | \<lfloor>deny y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f x = a}. \<exists>b'. p' a' = \<lfloor>deny b'\<rfloor> \<and> g b' = y; \<forall>a. case p' a of \<bottom> \<Rightarrow> True | \<lfloor>allow y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>allow b'\<rfloor> \<and> g' b' = y | \<lfloor>deny y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>deny b'\<rfloor> \<and> g' b' = y; p (f (f' a')) = \<lfloor>deny x2a\<rfloor>\<rbrakk> \<Longrightarrow> \<exists>b'. p'' a' = \<lfloor>deny b'\<rfloor> \<and> g (g' b') = x2a
[PROOF STEP]
subgoal for a a'
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>a. case p a of \<bottom> \<Rightarrow> True | \<lfloor>allow y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f x = a}. \<exists>b'. p' a' = \<lfloor>allow b'\<rfloor> \<and> g b' = y | \<lfloor>deny y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f x = a}. \<exists>b'. p' a' = \<lfloor>deny b'\<rfloor> \<and> g b' = y; \<forall>a. case p' a of \<bottom> \<Rightarrow> True | \<lfloor>allow y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>allow b'\<rfloor> \<and> g' b' = y | \<lfloor>deny y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>deny b'\<rfloor> \<and> g' b' = y; p (f (f' a')) = \<lfloor>allow a\<rfloor>\<rbrakk> \<Longrightarrow> \<exists>b'. p'' a' = \<lfloor>allow b'\<rfloor> \<and> g (g' b') = a
[PROOF STEP]
apply(erule_tac x="f (f' a')" in allE, simp)[1]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>a. case p' a of \<bottom> \<Rightarrow> True | \<lfloor>allow y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>allow b'\<rfloor> \<and> g' b' = y | \<lfloor>deny y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>deny b'\<rfloor> \<and> g' b' = y; p (f (f' a')) = \<lfloor>allow a\<rfloor>; \<forall>a'a. f a'a = f (f' a') \<longrightarrow> (\<exists>b'. p' a'a = \<lfloor>allow b'\<rfloor> \<and> g b' = a)\<rbrakk> \<Longrightarrow> \<exists>b'. p'' a' = \<lfloor>allow b'\<rfloor> \<and> g (g' b') = a
[PROOF STEP]
apply(erule_tac x="f' a'" in allE, auto)[1]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>b'. \<lbrakk>\<forall>a. case p' a of \<bottom> \<Rightarrow> True | \<lfloor>allow y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>allow b'\<rfloor> \<and> g' b' = y | \<lfloor>deny y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>deny b'\<rfloor> \<and> g' b' = y; p (f (f' a')) = \<lfloor>allow (g b')\<rfloor>; p' (f' a') = \<lfloor>allow b'\<rfloor>; a = g b'\<rbrakk> \<Longrightarrow> \<exists>b'a. p'' a' = \<lfloor>allow b'a\<rfloor> \<and> g (g' b'a) = g b'
[PROOF STEP]
apply(erule_tac x=" (f' a')" in allE, auto)[1]
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x2a a'. \<lbrakk>\<forall>a. case p a of \<bottom> \<Rightarrow> True | \<lfloor>allow y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f x = a}. \<exists>b'. p' a' = \<lfloor>allow b'\<rfloor> \<and> g b' = y | \<lfloor>deny y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f x = a}. \<exists>b'. p' a' = \<lfloor>deny b'\<rfloor> \<and> g b' = y; \<forall>a. case p' a of \<bottom> \<Rightarrow> True | \<lfloor>allow y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>allow b'\<rfloor> \<and> g' b' = y | \<lfloor>deny y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>deny b'\<rfloor> \<and> g' b' = y; p (f (f' a')) = \<lfloor>deny x2a\<rfloor>\<rbrakk> \<Longrightarrow> \<exists>b'. p'' a' = \<lfloor>deny b'\<rfloor> \<and> g (g' b') = x2a
[PROOF STEP]
subgoal for a a'
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>a. case p a of \<bottom> \<Rightarrow> True | \<lfloor>allow y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f x = a}. \<exists>b'. p' a' = \<lfloor>allow b'\<rfloor> \<and> g b' = y | \<lfloor>deny y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f x = a}. \<exists>b'. p' a' = \<lfloor>deny b'\<rfloor> \<and> g b' = y; \<forall>a. case p' a of \<bottom> \<Rightarrow> True | \<lfloor>allow y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>allow b'\<rfloor> \<and> g' b' = y | \<lfloor>deny y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>deny b'\<rfloor> \<and> g' b' = y; p (f (f' a')) = \<lfloor>deny a\<rfloor>\<rbrakk> \<Longrightarrow> \<exists>b'. p'' a' = \<lfloor>deny b'\<rfloor> \<and> g (g' b') = a
[PROOF STEP]
apply(erule_tac x="f (f' a')" in allE, simp)[1]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>a. case p' a of \<bottom> \<Rightarrow> True | \<lfloor>allow y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>allow b'\<rfloor> \<and> g' b' = y | \<lfloor>deny y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>deny b'\<rfloor> \<and> g' b' = y; p (f (f' a')) = \<lfloor>deny a\<rfloor>; \<forall>a'a. f a'a = f (f' a') \<longrightarrow> (\<exists>b'. p' a'a = \<lfloor>deny b'\<rfloor> \<and> g b' = a)\<rbrakk> \<Longrightarrow> \<exists>b'. p'' a' = \<lfloor>deny b'\<rfloor> \<and> g (g' b') = a
[PROOF STEP]
apply(erule_tac x="f' a'" in allE, auto)[1]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>b'. \<lbrakk>\<forall>a. case p' a of \<bottom> \<Rightarrow> True | \<lfloor>allow y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>allow b'\<rfloor> \<and> g' b' = y | \<lfloor>deny y\<rfloor> \<Rightarrow> \<forall>a'\<in>{x. f' x = a}. \<exists>b'. p'' a' = \<lfloor>deny b'\<rfloor> \<and> g' b' = y; p (f (f' a')) = \<lfloor>deny (g b')\<rfloor>; p' (f' a') = \<lfloor>deny b'\<rfloor>; a = g b'\<rbrakk> \<Longrightarrow> \<exists>b'a. p'' a' = \<lfloor>deny b'a\<rfloor> \<and> g (g' b'a) = g b'
[PROOF STEP]
apply(erule_tac x=" (f' a')" in allE, auto)[1]
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
Formal statement is: lemma bigE_nonneg_real: assumes "f \<in> Lr F (g)" "eventually (\<lambda>x. f x \<ge> 0) F" obtains c where "c > 0" "eventually (\<lambda>x. R (f x) (c * \<bar>g x\<bar>)) F" Informal statement is: If the real-valued function $f$ lies in the Landau symbol $L$ of $g$ over the filter $F$ and $f(x) \geq 0$ holds eventually on $F$, then there is a constant $c > 0$ such that the relation $R(f(x), c \cdot |g(x)|)$ holds eventually on $F$.
/-
Copyright (c) 2018 Kenny Lau. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kenny Lau
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.data.nat.sqrt
import Mathlib.PostPort
namespace Mathlib
namespace int
/-- `sqrt n` is the square root of an integer `n`. If `n` is not a
perfect square, and is positive, it returns the largest `k:ℤ` such
that `k*k ≤ n`. If it is negative, it returns 0. For example,
`sqrt 2 = 1` and `sqrt 1 = 1` and `sqrt (-1) = 0` -/
def sqrt (n : ℤ) : ℤ :=
↑(nat.sqrt (to_nat n))
theorem sqrt_eq (n : ℤ) : sqrt (n * n) = ↑(nat_abs n) := sorry
theorem exists_mul_self (x : ℤ) : (∃ (n : ℤ), n * n = x) ↔ sqrt x * sqrt x = x := sorry
theorem sqrt_nonneg (n : ℤ) : 0 ≤ sqrt n :=
coe_nat_nonneg (nat.sqrt (to_nat n))
|
module Haskell.Prim.Applicative where
open import Haskell.Prim
open import Haskell.Prim.Either
open import Haskell.Prim.Foldable
open import Haskell.Prim.Functor
open import Haskell.Prim.List
open import Haskell.Prim.Maybe
open import Haskell.Prim.Monoid
open import Haskell.Prim.Tuple
--------------------------------------------------
-- Applicative
record Applicative (f : Set → Set) : Set₁ where
infixl 4 _<*>_
field
pure : a → f a
_<*>_ : f (a → b) → f a → f b
overlap ⦃ super ⦄ : Functor f
_<*_ : f a → f b → f a
x <* y = const <$> x <*> y
_*>_ : f a → f b → f b
x *> y = const id <$> x <*> y
open Applicative ⦃ ... ⦄ public
instance
iApplicativeList : Applicative List
iApplicativeList .pure x = x ∷ []
iApplicativeList ._<*>_ fs xs = concatMap (λ f → map f xs) fs
iApplicativeMaybe : Applicative Maybe
iApplicativeMaybe .pure = Just
iApplicativeMaybe ._<*>_ (Just f) (Just x) = Just (f x)
iApplicativeMaybe ._<*>_ _ _ = Nothing
iApplicativeEither : Applicative (Either a)
iApplicativeEither .pure = Right
iApplicativeEither ._<*>_ (Right f) (Right x) = Right (f x)
iApplicativeEither ._<*>_ (Left e) _ = Left e
iApplicativeEither ._<*>_ _ (Left e) = Left e
iApplicativeFun : Applicative (λ b → a → b)
iApplicativeFun .pure = const
iApplicativeFun ._<*>_ f g x = f x (g x)
iApplicativeTuple₂ : ⦃ Monoid a ⦄ → Applicative (a ×_)
iApplicativeTuple₂ .pure x = mempty , x
iApplicativeTuple₂ ._<*>_ (a , f) (b , x) = a <> b , f x
iApplicativeTuple₃ : ⦃ Monoid a ⦄ → ⦃ Monoid b ⦄ → Applicative (a × b ×_)
iApplicativeTuple₃ .pure x = mempty , mempty , x
iApplicativeTuple₃ ._<*>_ (a , b , f) (a₁ , b₁ , x) = a <> a₁ , b <> b₁ , f x
iApplicativeTuple₄ : ⦃ Monoid a ⦄ → ⦃ Monoid b ⦄ → ⦃ Monoid c ⦄ →
Applicative (λ d → Tuple (a ∷ b ∷ c ∷ d ∷ []))
iApplicativeTuple₄ .pure x = mempty ∷ mempty ∷ mempty ∷ x ∷ []
iApplicativeTuple₄ ._<*>_ (a ∷ b ∷ c ∷ f ∷ []) (a₁ ∷ b₁ ∷ c₁ ∷ x ∷ []) =
a <> a₁ ∷ b <> b₁ ∷ c <> c₁ ∷ f x ∷ []
|
/*! \file
\brief The usage of trie search.
Copyright (C) 2019-2021 kaoru https://www.tetengo.org/
*/
#include "usage_tetengo.trie.search_cpp.hpp"
// [search]
#include <algorithm>
#include <cassert>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <string_view>
#include <vector>
#include <boost/stl_interfaces/iterator_interface.hpp>
#include <tetengo/trie/default_serializer.hpp>
#include <tetengo/trie/trie.hpp>
#include <tetengo/trie/trie_iterator.hpp>
namespace usage_tetengo::trie
{
void search()
{
// Prepares a trie building observer set.
// The observer set records the inserted keys and the end.
std::vector<std::string> building_observer_reports{};
const tetengo::trie::trie<std::string, int>::building_observer_set_type building_observer_set{
[&building_observer_reports](const std::string_view& key) {
building_observer_reports.push_back(std::string{ key });
},
[&building_observer_reports]() { building_observer_reports.push_back("DONE"); }
};
// Builds a trie with initial elements.
const tetengo::trie::trie<std::string, int> trie_{ {
{ "tasakibashi", -5 },
{ "nihongiguchi", -3 },
{ "kumamotoekimae", 0 },
{ "gionbashi", 5 },
{ "gofukumachi", 10 },
{ "kawaramachi", 14 },
{ "keitokukoumae", 18 },
{ "karashimachou", 22 },
},
tetengo::trie::default_serializer<std::string>{ true },
building_observer_set };
assert(
(building_observer_reports == std::vector<std::string>{
"gionbashi",
"gofukumachi",
"karashimachou",
"kawaramachi",
"keitokukoumae",
"kumamotoekimae",
"nihongiguchi",
"tasakibashi",
"DONE",
}));
// Searches the trie.
// If a perfect-matching key is found, its value is returned.
[[maybe_unused]] const int* const p_found_for_gionbashi = trie_.find("gionbashi");
assert(p_found_for_gionbashi);
assert(*p_found_for_gionbashi == 5);
// If not found, nullptr is returned.
[[maybe_unused]] const int* const p_found_for_hanabatachou = trie_.find("hanabatachou");
assert(!p_found_for_hanabatachou);
// Creates a subtrie consisting of the elements with the common key prefix.
const auto p_subtrie = trie_.subtrie("ka");
// Enumerates the values in the subtrie.
std::vector<int> subtrie_values{};
std::copy(std::begin(*p_subtrie), std::end(*p_subtrie), std::back_inserter(subtrie_values));
assert(
(subtrie_values == std::vector<int>{
22, // karashimachou
14, // kawaramachi
}));
}
}
// [search]
|
# =========== temp
println(Base.Docs.doc(scatter))
println(Main.Docs.doc(Makie.convert_arguments, Tuple{Any, AbstractMatrix}))
println(Base.Docs.doc(Makie.convert_arguments, Tuple{Any, AbstractVector, AbstractVector, AbstractMatrix}))
println(Base.Docs.doc(Makie.convert_arguments, Tuple{Any, AbstractVector, AbstractVector, Function}))
const atomics = (
heatmap,
image,
lines,
linesegments,
mesh,
meshscatter,
scatter,
surface,
text,
Makie.volume
)
# =============================================
# Trying out heatmap plot from all_samples.jl
path = joinpath(@__DIR__, "..", "docs", "test")
scene = Scene(resolution = (500, 500))
scene = heatmap!(scene, rand(32, 32))
save("heatmap.png", scene)
path = joinpath(@__DIR__, "..", "docs", "test")
# save("heatmap.png", scene)
img = Makie.scene2image(scene)
save(path, img)
# =============================================
# Print code for when database search returns multiple results
# First find those damn entries!
entries = example_database(lines)
entries = findall(x -> x.title == database_key, database)
entries = findall(x -> x.tags == "scatter", database)
entries = findall(x -> occursin("scatter", collect(x.tags)), database)
tgs = database[2].tags
occursin("scatter", tgs)
findall(database) do entry
# find tags
tags_found = any(tag -> string(tag) in entry.tags, "scatter")
end
len_entries = length(entries)
println("len_entries = ", len_entries)
for i = 1:len_entries
# println(x.title)
# println(x.source)
println("i = ", i)
sprint() do io
print_code(
io, database, entries[1],
scope_start = "",
scope_end = "",
indent = "",
resolution = (entry)-> "resolution = (500, 500)",
outputfile = (entry, ending)-> Pkg.dir("Makie", "docs", "media", string(entry.unique_name, ending))
)
end
end
sprint(print_code(stdout, database, entries[1],
scope_start = "",
scope_end = "",
indent = "",
resolution = (entry)-> "resolution = (500, 500)",
outputfile = (entry, ending)-> Pkg.dir("Makie", "docs", "media", string(entry.unique_name, ending))
))
# autogenerate example library including plots test
# TODO: not working right now since a lot of plots are broken
# TODO: need to figure out if there is a way to "close" a Makie plot window
pathroot = joinpath(@__DIR__, "..", "docs", "src", "library-test")
imgpath = joinpath(pathroot, "plots")
mdpath = joinpath(pathroot, "library-test.md")
open(mdpath, "w") do io
println(io, "# Examples library test -- autogenerated")
isempty(database) && error("database is empty!")
counter = 1
for idx in 1:length(database)
println(io, "## index = $idx")
# println(io, "## `$(Makie.to_string(func))`")
try
println(io, "Example $counter, \"$(database[idx].title)\"")
_print_source(io, idx; style = "julia")
println(io, "`plot goes here`\n")
# TODO: add code to generate + embed plots
counter += 1
catch
println("ERROR: Didn't work with $tag at index $idx\n")
end
println(io, "\n")
end
end
|
/*
* Copyright (C) 2012-2015 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifdef _WIN32
// Ensure that Winsock2.h is included before Windows.h, which can get
// pulled in by anybody (e.g., Boost).
#include <Winsock2.h>
#endif
#include "gazebo/sensors/DepthCameraSensor.hh"
#include "rotors_gazebo_plugins/common.h"
#include "rotors_gazebo_plugins/external/gazebo_optical_flow_plugin.h"
#include <highgui.h>
#include <math.h>
#include <string>
#include <iostream>
#include <boost/algorithm/string.hpp>
using namespace cv;
using namespace std;
using namespace gazebo;
GZ_REGISTER_SENSOR_PLUGIN(OpticalFlowPlugin)
/////////////////////////////////////////////////
OpticalFlowPlugin::OpticalFlowPlugin()
: SensorPlugin(), width(0), height(0), depth(0), timer_()
{
}
/////////////////////////////////////////////////
OpticalFlowPlugin::~OpticalFlowPlugin()
{
this->parentSensor.reset();
this->camera.reset();
}
/////////////////////////////////////////////////
void OpticalFlowPlugin::Load(sensors::SensorPtr _sensor, sdf::ElementPtr _sdf)
{
if(kPrintOnPluginLoad) {
gzdbg << __FUNCTION__ << "() called." << std::endl;
}
if (!_sensor)
gzerr << "Invalid sensor pointer.\n";
this->parentSensor =
std::dynamic_pointer_cast<sensors::CameraSensor>(_sensor);
if (!this->parentSensor)
{
gzerr << "OpticalFlowPlugin requires a CameraSensor.\n";
if (std::dynamic_pointer_cast<sensors::DepthCameraSensor>(_sensor))
gzmsg << "It is a depth camera sensor\n";
}
this->camera = this->parentSensor->GetCamera();
if (!this->parentSensor)
{
gzerr << "OpticalFlowPlugin not attached to a camera sensor\n";
return;
}
this->width = this->camera->GetImageWidth();
this->height = this->camera->GetImageHeight();
this->depth = this->camera->GetImageDepth();
this->format = this->camera->GetImageFormat();
if (this->width != 64 || this->height != 64) {
gzerr << "[gazebo_optical_flow_plugin] Incorrect image size, must by 64 x 64.\n";
}
if (_sdf->HasElement("robotNamespace"))
namespace_ = _sdf->GetElement("robotNamespace")->Get<std::string>();
else
gzwarn << "[gazebo_optical_flow_plugin] Please specify a robotNamespace.\n";
node_handle_ = transport::NodePtr(new transport::Node());
node_handle_->Init(namespace_);
const string scopedName = _sensor->GetParentName();
string topicName = "~/" + scopedName + "/opticalFlow";
boost::replace_all(topicName, "::", "/");
opticalFlow_pub_ = node_handle_->Advertise<opticalFlow_msgs::msgs::opticalFlow>(topicName, 10);
hfov = float(this->camera->GetHFOV().Radian());
first_frame_time = this->camera->GetLastRenderWallTime().Double();
old_frame_time = first_frame_time;
focal_length = (this->width/2)/tan(hfov/2);
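// Pinhole-model sanity check (sketch): with the required 64 x 64 image and a horizontal FOV of
// 90 deg (pi/2 rad), focal_length = 32 / tan(pi/4) = 32 pixels; narrower FOVs give larger values.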
this->newFrameConnection = this->camera->ConnectNewImageFrame(
boost::bind(&OpticalFlowPlugin::OnNewFrame, this, _1, this->width, this->height, this->depth, this->format));
this->parentSensor->SetActive(true);
//init flow
const int output_rate = 20; // -1 means use rate of camera
_optical_flow = new OpticalFlowOpenCV(focal_length, focal_length, output_rate);
// _optical_flow = new OpticalFlowPX4(focal_length, focal_length, output_rate, this->width);
}
/////////////////////////////////////////////////
void OpticalFlowPlugin::OnNewFrame(const unsigned char * _image,
unsigned int _width,
unsigned int _height,
unsigned int _depth,
const std::string &_format)
{
rate = this->camera->GetAvgFPS();
_image = this->camera->GetImageData(0);
frame_time = this->camera->GetLastRenderWallTime().Double();
frame_time_us = (frame_time - first_frame_time) * 1e6; //since start
timer_.stop();
float flow_x_ang = 0;
float flow_y_ang = 0;
//calculate angular flow
int quality = _optical_flow->calcFlow((uint8_t *)_image, frame_time_us, dt_us, flow_x_ang, flow_y_ang);
if (quality >= 0) { // calcFlow(...) returns -1 if data should not be published yet -> output_rate
//prepare optical flow message
opticalFlow_message.set_time_usec(0);//will be filled in simulator_mavlink.cpp
opticalFlow_message.set_sensor_id(2.0);
opticalFlow_message.set_integration_time_us(dt_us);
opticalFlow_message.set_integrated_x(flow_x_ang);
opticalFlow_message.set_integrated_y(flow_y_ang);
opticalFlow_message.set_integrated_xgyro(0.0); //get real values in gazebo_mavlink_interface.cpp
opticalFlow_message.set_integrated_ygyro(0.0); //get real values in gazebo_mavlink_interface.cpp
opticalFlow_message.set_integrated_zgyro(0.0); //get real values in gazebo_mavlink_interface.cpp
opticalFlow_message.set_temperature(20.0);
opticalFlow_message.set_quality(quality);
opticalFlow_message.set_time_delta_distance_us(0.0);
opticalFlow_message.set_distance(0.0); //get real values in gazebo_mavlink_interface.cpp
//send message
opticalFlow_pub_->Publish(opticalFlow_message);
timer_.start();
}
}
/* vim: set et fenc=utf-8 ff=unix sts=0 sw=2 ts=2 : */
|
"""
Generate examples with known number of leading zeros, for test
purposes only (no production code).
"""
module CLZ
import Random: MersenneTwister
function generate(T = UInt64, n = 9, seed = 0)
nums = rand(MersenneTwister(seed), T, n)
bitlen = 8 * sizeof(T)
ONE = one(T) << (bitlen - 1)
@assert bitstring(ONE)[1] == '1'
@assert all(bitstring(ONE)[i] == '0' for i=2:bitlen)
nums .|= ONE # all start by ONE
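# Sketch of the construction: every value now has its top bit set, so shifting it right by k
# yields exactly k leading zeros (for k below the word size). The quadratic shift amounts below,
# 2i^2 - 5i + 3 = 0, 1, 6, 15, 28, 45, ..., therefore produce a spread of known counts.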
for i in 1:n
nums[i] >>= 2i^2 - 5i + 3
end
nums[end-2] = zero(T)
nums[end-1] = +one(T)
nums[end-0] = -one(T)
nums
end
nums = generate(UInt64, 9)
expect = leading_zeros.(nums)
nums32 = generate(UInt32, 8)
# show(reinterpret.(Int32, (generate(UInt32, 8, 0)))
end
|
test : Not (Nat = String)
test Refl impossible
test2 : Not (Char = String)
test2 Refl impossible
test3 : Not (Type = String)
test3 Refl impossible
test4 : Not (Type = Nat)
test4 Refl impossible
test5 : Not (List Nat = Type)
test5 Refl impossible
test6 : Not (Bits64 = Type)
test6 Refl impossible
test7 : Not ('a' = 'b')
test7 Refl impossible
-- The following ones are actually possible
test8 : Not (a = Type)
test8 Refl impossible
test9 : Not (a = 'a')
test9 Refl impossible
test10 : Not (a = Nat)
test10 Refl impossible
test11 : Not (3 = a)
test11 Refl impossible
|
/-
Copyright (c) 2021 Kevin Buzzard. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kevin Buzzard, Antoine Labelle
! This file was ported from Lean 3 source module algebra.module.projective
! leanprover-community/mathlib commit 405ea5cee7a7070ff8fb8dcb4cfb003532e34bce
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Algebra.Module.Basic
import Mathbin.LinearAlgebra.Finsupp
import Mathbin.LinearAlgebra.FreeModule.Basic
/-!
# Projective modules
This file contains a definition of a projective module, the proof that
our definition is equivalent to a lifting property, and the
proof that all free modules are projective.
## Main definitions
Let `R` be a ring (or a semiring) and let `M` be an `R`-module.
* `is_projective R M` : the proposition saying that `M` is a projective `R`-module.
## Main theorems
* `is_projective.lifting_property` : a map from a projective module can be lifted along
a surjection.
* `is_projective.of_lifting_property` : If for all R-module surjections `A →ₗ B`, all
maps `M →ₗ B` lift to `M →ₗ A`, then `M` is projective.
* `is_projective.of_free` : Free modules are projective
## Implementation notes
The actual definition of projective we use is that the natural R-module map
from the free R-module on the type M down to M splits. This is more convenient
than certain other definitions which involve quantifying over universes,
and also universe-polymorphic (the ring and module can be in different universes).
We require that the module sits in at least as high a universe as the ring:
without this, free modules don't even exist,
and it's unclear if projective modules are even a useful notion.
## References
https://en.wikipedia.org/wiki/Projective_module
## TODO
- Direct sum of two projective modules is projective.
- Arbitrary sum of projective modules is projective.
All of these should be relatively straightforward.
## Tags
projective module
-/
universe u v
open LinearMap Finsupp
/- The actual implementation we choose: `P` is projective if the natural surjection
from the free `R`-module on `P` to `P` splits. -/
/-- An R-module is projective if it is a direct summand of a free module, or equivalently
if maps from the module lift along surjections. There are several other equivalent
definitions. -/
class Module.Projective (R : Type _) [Semiring R] (P : Type _) [AddCommMonoid P] [Module R P] :
Prop where
out : ∃ s : P →ₗ[R] P →₀ R, Function.LeftInverse (Finsupp.total P P R id) s
#align module.projective Module.Projective
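/- A sanity check against the definition (sketch, not part of the original file): for `P = R`,
the linear map `r ↦ finsupp.single (1 : R) r` is a section of `finsupp.total R R R id`, since
`finsupp.total R R R id (finsupp.single 1 r) = r • (1 : R) = r`; hence `R` is projective over
itself. The general free case is `projective_of_basis` below. -/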
namespace Module
section Semiring
variable {R : Type _} [Semiring R] {P : Type _} [AddCommMonoid P] [Module R P] {M : Type _}
[AddCommMonoid M] [Module R M] {N : Type _} [AddCommMonoid N] [Module R N]
theorem projective_def :
Projective R P ↔ ∃ s : P →ₗ[R] P →₀ R, Function.LeftInverse (Finsupp.total P P R id) s :=
⟨fun h => h.1, fun h => ⟨h⟩⟩
#align module.projective_def Module.projective_def
theorem projective_def' : Projective R P ↔ ∃ s : P →ₗ[R] P →₀ R, Finsupp.total P P R id ∘ₗ s = id :=
by simp_rw [projective_def, FunLike.ext_iff, Function.LeftInverse, coe_comp, id_coe, id.def]
#align module.projective_def' Module.projective_def'
/-- A projective R-module has the property that maps from it lift along surjections. -/
theorem projective_lifting_property [h : Projective R P] (f : M →ₗ[R] N) (g : P →ₗ[R] N)
(hf : Function.Surjective f) : ∃ h : P →ₗ[R] M, f.comp h = g :=
by
/-
Here's the first step of the proof.
Recall that `X →₀ R` is Lean's way of talking about the free `R`-module
on a type `X`. The universal property `finsupp.total` says that to a map
`X → N` from a type to an `R`-module, we get an associated R-module map
`(X →₀ R) →ₗ N`. Apply this to a (noncomputable) map `P → M` coming from the map
`P →ₗ N` and a random splitting of the surjection `M →ₗ N`, and we get
a map `φ : (P →₀ R) →ₗ M`.
-/
let φ : (P →₀ R) →ₗ[R] M := Finsupp.total _ _ _ fun p => Function.surjInv hf (g p)
-- By projectivity we have a map `P →ₗ (P →₀ R)`;
cases' h.out with s hs
-- Compose to get `P →ₗ M`. This works.
use φ.comp s
ext p
conv_rhs => rw [← hs p]
simp [φ, Finsupp.total_apply, Function.surjInv_eq hf]
#align module.projective_lifting_property Module.projective_lifting_property
variable {Q : Type _} [AddCommMonoid Q] [Module R Q]
instance [hP : Projective R P] [hQ : Projective R Q] : Projective R (P × Q) :=
by
rw [Module.projective_def']
cases' hP.out with sP hsP
cases' hQ.out with sQ hsQ
use coprod (lmap_domain R R (inl R P Q)) (lmap_domain R R (inr R P Q)) ∘ₗ sP.prod_map sQ
ext <;>
simp only [coe_inl, coe_inr, coe_comp, Function.comp_apply, prod_map_apply, map_zero,
coprod_apply, lmap_domain_apply, map_domain_zero, add_zero, zero_add, id_comp,
total_map_domain]
· rw [← fst_apply _, apply_total R]
exact hsP x
· rw [← snd_apply _, apply_total R]
exact Finsupp.total_zero_apply _ (sP x)
· rw [← fst_apply _, apply_total R]
exact Finsupp.total_zero_apply _ (sQ x)
· rw [← snd_apply _, apply_total R]
exact hsQ x
variable {ι : Type _} (A : ι → Type _) [∀ i : ι, AddCommMonoid (A i)] [∀ i : ι, Module R (A i)]
instance [h : ∀ i : ι, Projective R (A i)] : Projective R (Π₀ i, A i) := by
classical
rw [Module.projective_def']
simp_rw [projective_def] at h
choose s hs using h
letI : ∀ i : ι, AddCommMonoid (A i →₀ R) := fun i => by infer_instance
letI : ∀ i : ι, Module R (A i →₀ R) := fun i => by infer_instance
letI : AddCommMonoid (Π₀ i : ι, A i →₀ R) := @Dfinsupp.addCommMonoid ι (fun i => A i →₀ R) _
letI : Module R (Π₀ i : ι, A i →₀ R) := @Dfinsupp.module ι R (fun i => A i →₀ R) _ _ _
let f i := lmap_domain R R (Dfinsupp.single i : A i → Π₀ i, A i)
use Dfinsupp.coprodMap f ∘ₗ Dfinsupp.mapRange.linearMap s
ext (i x j)
simp only [Dfinsupp.coprodMap, DirectSum.lof, total_map_domain, coe_comp, coe_lsum, id_coe,
LinearEquiv.coe_toLinearMap, finsuppLequivDfinsupp_symm_apply, Function.comp_apply,
Dfinsupp.lsingle_apply, Dfinsupp.mapRange.linearMap_apply, Dfinsupp.mapRange_single,
lmap_domain_apply, Dfinsupp.toFinsupp_single, Finsupp.sum_single_index, id.def,
Function.comp.left_id, Dfinsupp.single_apply]
rw [← Dfinsupp.lapply_apply j, apply_total R]
obtain rfl | hij := eq_or_ne i j
· convert(hs i) x
· ext
simp
· simp
· convert Finsupp.total_zero_apply _ ((s i) x)
· ext
simp [hij]
· simp [hij]
end Semiring
section Ring
variable {R : Type _} [Ring R] {P : Type _} [AddCommGroup P] [Module R P]
/-- Free modules are projective. -/
theorem projectiveOfBasis {ι : Type _} (b : Basis ι R P) : Projective R P :=
by
-- need P →ₗ (P →₀ R) for definition of projective.
-- get it from `ι → (P →₀ R)` coming from `b`.
use b.constr ℕ fun i => Finsupp.single (b i) (1 : R)
intro m
simp only [b.constr_apply, mul_one, id.def, Finsupp.smul_single', Finsupp.total_single,
LinearMap.map_finsupp_sum]
exact b.total_repr m
#align module.projective_of_basis Module.projectiveOfBasis
instance (priority := 100) projectiveOfFree [Module.Free R P] : Module.Projective R P :=
projectiveOfBasis <| Module.Free.chooseBasis R P
#align module.projective_of_free Module.projectiveOfFree
end Ring
--This is in a different section because special universe restrictions are required.
section OfLiftingProperty
/-- A module which satisfies the universal property is projective. Note that the universe variables
in `huniv` are somewhat restricted. -/
theorem projectiveOfLiftingProperty' {R : Type u} [Semiring R] {P : Type max u v} [AddCommMonoid P]
[Module R P]
-- If for all surjections of `R`-modules `M →ₗ N`, all maps `P →ₗ N` lift to `P →ₗ M`,
(huniv :
∀ {M : Type max v u} {N : Type max u v} [AddCommMonoid M] [AddCommMonoid N],
∀ [Module R M] [Module R N],
∀ (f : M →ₗ[R] N) (g : P →ₗ[R] N),
Function.Surjective f → ∃ h : P →ₗ[R] M, f.comp h = g) :-- then `P` is projective.
Projective
R P :=
by
-- let `s` be the universal map `(P →₀ R) →ₗ P` coming from the identity map `P →ₗ P`.
obtain ⟨s, hs⟩ : ∃ s : P →ₗ[R] P →₀ R, (Finsupp.total P P R id).comp s = LinearMap.id :=
huniv (Finsupp.total P P R (id : P → P)) (LinearMap.id : P →ₗ[R] P) _
-- This `s` works.
· use s
rwa [LinearMap.ext_iff] at hs
· intro p
use Finsupp.single p 1
simp
#align module.projective_of_lifting_property' Module.projectiveOfLiftingProperty'
/-- A variant of `of_lifting_property'` when we're working over a `[ring R]`,
which only requires quantifying over modules with an `add_comm_group` instance. -/
theorem projectiveOfLiftingProperty {R : Type u} [Ring R] {P : Type max u v} [AddCommGroup P]
[Module R P]
-- If for all surjections of `R`-modules `M →ₗ N`, all maps `P →ₗ N` lift to `P →ₗ M`,
(huniv :
∀ {M : Type max v u} {N : Type max u v} [AddCommGroup M] [AddCommGroup N],
∀ [Module R M] [Module R N],
∀ (f : M →ₗ[R] N) (g : P →ₗ[R] N),
Function.Surjective f → ∃ h : P →ₗ[R] M, f.comp h = g) :-- then `P` is projective.
Projective
R P :=
by
-- We could try and prove this *using* `of_lifting_property`,
-- but this quickly leads to typeclass hell,
-- so we just prove it over again.
-- let `s` be the universal map `(P →₀ R) →ₗ P` coming from the identity map `P →ₗ P`.
obtain ⟨s, hs⟩ : ∃ s : P →ₗ[R] P →₀ R, (Finsupp.total P P R id).comp s = LinearMap.id :=
huniv (Finsupp.total P P R (id : P → P)) (LinearMap.id : P →ₗ[R] P) _
-- This `s` works.
· use s
rwa [LinearMap.ext_iff] at hs
· intro p
use Finsupp.single p 1
simp
#align module.projective_of_lifting_property Module.projectiveOfLiftingProperty
end OfLiftingProperty
end Module
|
"""
to_matrix(operator)
Converts the KLocal operator to a dense matrix.
"""
function to_matrix(op::AbsLinearOperator)
hilb = basis(op)
@assert indexable(hilb)
N = spacedimension(hilb)
mat = zeros(ComplexF64, N, N)
for (i, σ) = enumerate(states(hilb))
fun = (mel, cngs, σ) -> begin
σp = apply(σ, cngs)
j = index(hilb, σp)
mat[i, j] += mel
end
map_connections(fun, op, σ)
end
return mat
end
Base.Matrix(op::AbsLinearOperator) = to_matrix(op)
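# Usage sketch (the operator constructor is package-specific and `op` below is hypothetical):
#   op = ...                    # any AbsLinearOperator over an indexable basis
#   m  = to_matrix(op)          # dense Matrix{ComplexF64} of size (N, N), N = spacedimension(basis(op))
#   m == Matrix(op)             # the Base.Matrix overload above simply forwards to to_matrix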
"""
to_sparse(operator)
Converts the KLocal operator to a sparse matrix.
"""
function to_sparse(op::AbsLinearOperator)
hilb = basis(op)
@assert indexable(hilb)
i_vals = Vector{Int}()
j_vals = Vector{Int}()
mel_vals = Vector{ComplexF64}()
for (i, σ) = enumerate(states(hilb))
conns = row_valdiff(op, σ)
for (mel, cngs) = conns
σp = apply(σ, cngs)
j = index(hilb, σp)
push!(i_vals, i)
push!(j_vals, j)
push!(mel_vals, mel)
end
end
N = spacedimension(hilb)
return sparse(i_vals, j_vals, mel_vals, N, N)
end
SparseArrays.sparse(op::AbsLinearOperator) = to_sparse(op)
function to_map(op::AbsLinearOperator)
hilb = basis(op)
N = spacedimension(hilb)
σ = state(hilb)
function op_v_prod!(y::AbstractVector, x::AbstractVector)
y .= zero(eltype(y))
for (i, x_val) = enumerate(x)
x_val == 0 && continue
state!(σ, hilb, i)
conns = row_valdiff(op, σ)
for (mel, cngs) = conns
mel == 0 && continue
σp = apply(σ, cngs)
j = index(hilb, σp)
println("$i, $j => $mel -- $σ - $σp")
y[j] += x_val*mel
end
end
return y
end
return LinearMap{ComplexF64}(op_v_prod!, N, N; ismutating=true)
end
QuantumOpticsBase.DenseOperator(op::AbsLinearOperator) =
DenseOperator(convert(CompositeBasis, basis(op)), to_matrix(op))
QuantumOpticsBase.SparseOperator(op::AbsLinearOperator) =
SparseOperator(convert(CompositeBasis, basis(op)), to_sparse(op))
QuantumOpticsBase.DenseSuperOperator(op::KLocalLiouvillian) =
DenseSuperOperator(nq_to_qo_basis(basis(op)), to_matrix(op))
QuantumOpticsBase.SparseSuperOperator(op::KLocalLiouvillian) =
SparseSuperOperator(nq_to_qo_basis(basis(op)), to_sparse(op))
|
section\<open>The Axiom of Separation in $M[G]$\<close>
theory Separation_Axiom
imports Forcing_Theorems Separation_Rename
begin
context G_generic1
begin
lemma map_val :
assumes "env\<in>list(M[G])"
shows "\<exists>nenv\<in>list(M). env = map(val(G),nenv)"
using assms
proof(induct env)
case Nil
have "map(val(G),Nil) = Nil" by simp
then show ?case by force
next
case (Cons a l)
then obtain a' l' where
"l' \<in> list(M)" "l=map(val(G),l')" "a = val(G,a')"
"Cons(a,l) = map(val(G),Cons(a',l'))" "Cons(a',l') \<in> list(M)"
using GenExtD
by force
then show ?case by force
qed
lemma Collect_sats_in_MG :
assumes
"A\<in>M[G]"
"\<phi> \<in> formula" "env\<in>list(M[G])" "arity(\<phi>) \<le> 1 +\<^sub>\<omega> length(env)"
shows
"{x \<in> A . (M[G], [x] @ env \<Turnstile> \<phi>)} \<in> M[G]"
proof -
from \<open>A\<in>M[G]\<close>
obtain \<pi> where "\<pi> \<in> M" "val(G, \<pi>) = A"
using GenExt_def by auto
then
have "domain(\<pi>)\<in>M" "domain(\<pi>) \<times> \<bbbP> \<in> M"
using cartprod_closed[of _ \<bbbP>,simplified]
by (simp_all flip:setclass_iff)
let ?\<chi>="\<cdot>\<cdot> 0 \<in> (1 +\<^sub>\<omega> length(env)) \<cdot> \<and> \<phi> \<cdot>"
let ?new_form="sep_ren(length(env),forces(?\<chi>))"
let ?\<psi>="(\<cdot>\<exists>(\<cdot>\<exists>\<cdot>\<cdot>\<langle>0,1\<rangle> is 2 \<cdot> \<and> ?new_form \<cdot> \<cdot>)\<cdot>)"
note phi = \<open>\<phi>\<in>formula\<close> \<open>arity(\<phi>) \<le> 1 +\<^sub>\<omega> length(env)\<close>
then
have "?\<chi>\<in>formula" "forces(?\<chi>) \<in> formula" "arity(\<phi>) \<le> 2+\<^sub>\<omega> length(env)"
using definability le_trans[OF \<open>arity(\<phi>)\<le>_\<close>] add_le_mono[of 1 2,OF _ le_refl]
by simp_all
with \<open>env\<in>_\<close> phi
have "arity(?\<chi>) \<le> 2+\<^sub>\<omega>length(env)"
using ord_simp_union leI FOL_arities by simp
with \<open>env\<in>list(_)\<close> phi
have "arity(forces(?\<chi>)) \<le> 6 +\<^sub>\<omega> length(env)"
using arity_forces_le by simp
then
have "arity(forces(?\<chi>)) \<le> 7 +\<^sub>\<omega> length(env)"
using ord_simp_union arity_forces leI by simp
with \<open>arity(forces(?\<chi>)) \<le>7 +\<^sub>\<omega> _\<close> \<open>env \<in> _\<close> \<open>\<phi> \<in> formula\<close>
have "arity(?new_form) \<le> 7 +\<^sub>\<omega> length(env)" "?new_form \<in> formula" "?\<psi>\<in>formula"
using arity_rensep[OF definability[of "?\<chi>"]]
by auto
then
have "arity(?\<psi>) \<le> 5 +\<^sub>\<omega> length(env)"
using ord_simp_union arity_forces pred_mono[OF _ pred_mono[OF _ \<open>arity(?new_form) \<le> _\<close>]]
by (auto simp:arity)
from \<open>env \<in> _\<close>
obtain nenv where "nenv\<in>list(M)" "env = map(val(G),nenv)" "length(nenv) = length(env)"
using map_val by auto
from phi \<open>nenv\<in>_\<close> \<open>env\<in>_\<close> \<open>\<pi>\<in>M\<close> \<open>\<phi>\<in>_\<close> \<open>length(nenv) = length(env)\<close>
have "arity(?\<chi>) \<le> length([\<theta>] @ nenv @ [\<pi>])" for \<theta>
using union_abs2[OF \<open>arity(\<phi>) \<le> 2+\<^sub>\<omega> _\<close>] ord_simp_union FOL_arities
by simp
note in_M = \<open>\<pi>\<in>M\<close> \<open>domain(\<pi>) \<times> \<bbbP> \<in> M\<close>
have Equivalence: "
(M, [u,\<bbbP>,leq,\<one>,\<pi>] @ nenv \<Turnstile> ?\<psi>) \<longleftrightarrow>
(\<exists>\<theta>\<in>M. \<exists>p\<in>\<bbbP>. u =\<langle>\<theta>,p\<rangle> \<and>
(\<forall>F. M_generic(F) \<and> p \<in> F \<longrightarrow> M[F], map(val(F), [\<theta>] @ nenv @[\<pi>]) \<Turnstile> ?\<chi>))"
if "u \<in> domain(\<pi>) \<times> \<bbbP>"
for u
proof -
from \<open>u \<in> domain(\<pi>) \<times> \<bbbP>\<close> \<open>domain(\<pi>) \<times> \<bbbP> \<in> M\<close>
have "u\<in>M" by (simp add:transitivity)
have "(M, [\<theta>,p,u,\<bbbP>,leq,\<one>,\<pi>]@nenv \<Turnstile> ?new_form) \<longleftrightarrow>
(\<forall>F. M_generic(F) \<and> p \<in> F \<longrightarrow> (M[F], map(val(F), [\<theta>] @ nenv@[\<pi>]) \<Turnstile> ?\<chi>))"
if "\<theta>\<in>M" "p\<in>\<bbbP>"
for \<theta> p
proof -
from \<open>p\<in>\<bbbP>\<close>
have "p\<in>M" by (simp add: transitivity)
let ?env="[p,\<bbbP>,leq,\<one>,\<theta>] @ nenv @ [\<pi>,u]"
let ?new_env=" [\<theta>,p,u,\<bbbP>,leq,\<one>,\<pi>] @ nenv"
note types = in_M \<open>\<theta> \<in> M\<close> \<open>p\<in>M\<close> \<open>u \<in> domain(\<pi>) \<times> \<bbbP>\<close> \<open>u \<in> M\<close> \<open>nenv\<in>_\<close>
then
have tyenv:"?env \<in> list(M)" "?new_env \<in> list(M)"
by simp_all
from types
have eq_env:"[p, \<bbbP>, leq, \<one>] @ ([\<theta>] @ nenv @ [\<pi>,u]) =
([p, \<bbbP>, leq, \<one>] @ ([\<theta>] @ nenv @ [\<pi>])) @ [u]"
using app_assoc by simp
then
have "(M, [\<theta>,p,u,\<bbbP>,leq,\<one>,\<pi>] @ nenv \<Turnstile> ?new_form) \<longleftrightarrow> (M, ?new_env \<Turnstile> ?new_form)"
by simp
from tyenv \<open>length(nenv) = length(env)\<close> \<open>arity(forces(?\<chi>)) \<le> 7 +\<^sub>\<omega> length(env)\<close> \<open>forces(?\<chi>) \<in> formula\<close>
have "... \<longleftrightarrow> p \<tturnstile> ?\<chi> ([\<theta>] @ nenv @ [\<pi>,u])"
using sepren_action[of "forces(?\<chi>)" "nenv",OF _ _ \<open>nenv\<in>list(M)\<close>]
by simp
also from types phi \<open>env\<in>_\<close> \<open>length(nenv) = length(env)\<close> \<open>arity(forces(?\<chi>)) \<le> 6 +\<^sub>\<omega> length(env)\<close>
have "... \<longleftrightarrow> p \<tturnstile> ?\<chi> ([\<theta>] @ nenv @ [\<pi>])"
by (subst eq_env,rule_tac arity_sats_iff,auto)
also from types phi \<open>p\<in>\<bbbP>\<close> \<open>arity(forces(?\<chi>)) \<le> 6 +\<^sub>\<omega> length(env)\<close> \<open>arity(?\<chi>) \<le> length([\<theta>] @ nenv @ [\<pi>])\<close>
have " ... \<longleftrightarrow> (\<forall>F . M_generic(F) \<and> p \<in> F \<longrightarrow>
M[F], map(val(F), [\<theta>] @ nenv @ [\<pi>]) \<Turnstile> ?\<chi>)"
using definition_of_forcing[where \<phi>="\<cdot>\<cdot> 0 \<in> (1 +\<^sub>\<omega> length(env)) \<cdot> \<and> \<phi> \<cdot>"]
by auto
finally
show ?thesis
by simp
qed
with in_M \<open>?new_form \<in> formula\<close> \<open>?\<psi>\<in>formula\<close> \<open>nenv \<in> _\<close> \<open>u \<in> domain(\<pi>)\<times>\<bbbP>\<close>
show ?thesis
by (auto simp add: transitivity)
qed
moreover from \<open>env = _\<close> \<open>\<pi>\<in>M\<close> \<open>nenv\<in>list(M)\<close>
have map_nenv:"map(val(G), nenv @ [\<pi>]) = env @ [val(G,\<pi>)]"
using map_app_distrib append1_eq_iff by auto
ultimately
have aux:"(\<exists>\<theta>\<in>M. \<exists>p\<in>\<bbbP>. u =\<langle>\<theta>,p\<rangle> \<and> (p\<in>G \<longrightarrow> M[G], [val(G,\<theta>)] @ env @ [val(G,\<pi>)] \<Turnstile> ?\<chi>))"
(is "(\<exists>\<theta>\<in>M. \<exists>p\<in>\<bbbP>. _ ( _ \<longrightarrow> M[G] , ?vals(\<theta>) \<Turnstile> _))")
if "u \<in> domain(\<pi>) \<times> \<bbbP>" "M, [u,\<bbbP>,leq,\<one>,\<pi>] @ nenv \<Turnstile> ?\<psi>" for u
using Equivalence[THEN iffD1, OF that] generic by force
moreover
have "[val(G, \<theta>)] @ env @ [val(G, \<pi>)] \<in> list(M[G])" if "\<theta>\<in>M" for \<theta>
using \<open>\<pi>\<in>M\<close> \<open>env \<in> list(M[G])\<close> GenExtI that by force
ultimately
have "(\<exists>\<theta>\<in>M. \<exists>p\<in>\<bbbP>. u=\<langle>\<theta>,p\<rangle> \<and> (p\<in>G \<longrightarrow> val(G,\<theta>)\<in>nth(1 +\<^sub>\<omega> length(env),[val(G, \<theta>)] @ env @ [val(G, \<pi>)])
\<and> (M[G], ?vals(\<theta>) \<Turnstile> \<phi>)))"
if "u \<in> domain(\<pi>) \<times> \<bbbP>" "M, [u,\<bbbP>,leq,\<one>,\<pi>] @ nenv \<Turnstile> ?\<psi>" for u
using aux[OF that] by simp
moreover from \<open>env \<in> _\<close> \<open>\<pi>\<in>M\<close>
have nth:"nth(1 +\<^sub>\<omega> length(env),[val(G, \<theta>)] @ env @ [val(G, \<pi>)]) = val(G,\<pi>)"
if "\<theta>\<in>M" for \<theta>
using nth_concat[of "val(G,\<theta>)" "val(G,\<pi>)" "M[G]"] that GenExtI by simp
ultimately
have "(\<exists>\<theta>\<in>M. \<exists>p\<in>\<bbbP>. u=\<langle>\<theta>,p\<rangle> \<and> (p\<in>G \<longrightarrow> val(G,\<theta>)\<in>val(G,\<pi>) \<and> (M[G],?vals(\<theta>) \<Turnstile> \<phi>)))"
if "u \<in> domain(\<pi>) \<times> \<bbbP>" "M, [u,\<bbbP>,leq,\<one>,\<pi>] @ nenv \<Turnstile> ?\<psi>" for u
using that \<open>\<pi>\<in>M\<close> \<open>env \<in> _\<close> by simp
with \<open>domain(\<pi>)\<times>\<bbbP>\<in>M\<close>
have "\<forall>u\<in>domain(\<pi>)\<times>\<bbbP> . (M, [u,\<bbbP>,leq,\<one>,\<pi>] @ nenv \<Turnstile> ?\<psi>) \<longrightarrow> (\<exists>\<theta>\<in>M. \<exists>p\<in>\<bbbP>. u =\<langle>\<theta>,p\<rangle> \<and>
(p \<in> G \<longrightarrow> val(G, \<theta>)\<in>val(G, \<pi>) \<and> (M[G],?vals(\<theta>) \<Turnstile> \<phi>)))"
by (simp add:transitivity)
then
have "{u\<in>domain(\<pi>)\<times>\<bbbP> . (M,[u,\<bbbP>,leq,\<one>,\<pi>] @ nenv \<Turnstile> ?\<psi>) } \<subseteq>
{u\<in>domain(\<pi>)\<times>\<bbbP> . \<exists>\<theta>\<in>M. \<exists>p\<in>\<bbbP>. u =\<langle>\<theta>,p\<rangle> \<and>
(p \<in> G \<longrightarrow> val(G, \<theta>)\<in>val(G, \<pi>) \<and> (M[G], ?vals(\<theta>) \<Turnstile> \<phi>))}"
(is "?n\<subseteq>?m")
by auto
then
have first_incl: "val(G,?n) \<subseteq> val(G,?m)"
using val_mono by simp
note \<open>val(G,\<pi>) = A\<close> (* from the assumptions *)
with \<open>?\<psi>\<in>formula\<close> \<open>arity(?\<psi>) \<le> _\<close> in_M \<open>nenv \<in> _\<close> \<open>env \<in> _\<close> \<open>length(nenv) = _\<close>
have "?n\<in>M"
using separation_ax leI separation_iff by auto
from generic
have "filter(G)" "G\<subseteq>\<bbbP>"
by auto
from \<open>val(G,\<pi>) = A\<close>
have "val(G,?m) =
{z . t\<in>domain(\<pi>) , (\<exists>q\<in>\<bbbP> .
(\<exists>\<theta>\<in>M. \<exists>p\<in>\<bbbP>. \<langle>t,q\<rangle> = \<langle>\<theta>, p\<rangle> \<and>
(p \<in> G \<longrightarrow> val(G, \<theta>) \<in> A \<and> (M[G], [val(G, \<theta>)] @ env @ [A] \<Turnstile> \<phi>)) \<and> q \<in> G)) \<and>
z=val(G,t)}"
using val_of_name by auto
also
have "... = {z . t\<in>domain(\<pi>) , (\<exists>q\<in>\<bbbP>.
val(G, t) \<in> A \<and> (M[G], [val(G, t)] @ env @ [A] \<Turnstile> \<phi>) \<and> q \<in> G) \<and> z=val(G,t)}"
using \<open>domain(\<pi>)\<in>M\<close> by (auto simp add:transitivity)
also
have "... = {x\<in>A . \<exists>q\<in>\<bbbP>. x \<in> A \<and> (M[G], [x] @ env @ [A] \<Turnstile> \<phi>) \<and> q \<in> G}"
proof(intro equalityI, auto)
(* Now we show the other inclusion:
{x .. x \<in> A , \<exists>q\<in>\<bbbP>. x \<in> A \<and> (M[G], [x, w, c] \<Turnstile> \<phi>) \<and> q \<in> G}
\<subseteq>
{val(G,t)..t\<in>domain(\<pi>),\<exists>q\<in>\<bbbP>.val(G,t)\<in> A\<and>(M[G], [val(G,t),w] \<Turnstile> \<phi>)\<and>q\<in>G}
*)
{
fix x q
assume "M[G], Cons(x, env @ [A]) \<Turnstile> \<phi>" "x\<in>A" "q \<in> \<bbbP>" "q \<in> G"
from this \<open>val(G,\<pi>) = A\<close>
show "x \<in> {y . x \<in> domain(\<pi>), val(G, x) \<in> A \<and> (M[G], Cons(val(G, x), env @ [A]) \<Turnstile> \<phi>) \<and> (\<exists>q\<in>\<bbbP>. q \<in> G) \<and> y = val(G, x)}"
using elem_of_val by force
}
qed
also
have " ... = {x \<in> A. (M[G], [x] @ env @ [A] \<Turnstile> \<phi>)}"
using \<open>G\<subseteq>\<bbbP>\<close> G_nonempty by force
finally
have val_m: "val(G,?m) = {x \<in> A. (M[G], [x] @ env @ [A] \<Turnstile> \<phi>)}" by simp
have "val(G,?m) \<subseteq> val(G,?n)"
proof
fix x
assume "x \<in> val(G,?m)"
with val_m
have "x \<in> {x \<in> A. (M[G], [x] @ env @ [A] \<Turnstile> \<phi>)}" by simp
with \<open>val(G,\<pi>) = A\<close>
have "x \<in> val(G,\<pi>)" by simp
then
obtain \<theta> q where "\<langle>\<theta>,q\<rangle>\<in>\<pi>" "q\<in>G" "val(G,\<theta>)=x" "\<theta>\<in>M"
using elem_of_val_pair domain_trans[OF trans_M \<open>\<pi>\<in>_\<close>]
by force
with \<open>\<pi>\<in>M\<close> \<open>nenv \<in> _\<close> \<open>env = _\<close>
have "[val(G,\<theta>), val(G,\<pi>)] @ env \<in> list(M[G])" "[\<theta>] @ nenv @ [\<pi>]\<in>list(M)"
using GenExt_def by auto
with \<open>val(G,\<theta>)=x\<close> \<open>val(G,\<pi>) = A\<close> \<open>x \<in> val(G,\<pi>)\<close> nth \<open>\<theta>\<in>M\<close> \<open>x\<in> {x \<in> A . _}\<close>
have "M[G], [val(G,\<theta>)] @ env @ [val(G,\<pi>)] \<Turnstile> \<cdot>\<cdot> 0 \<in> (1 +\<^sub>\<omega> length(env)) \<cdot> \<and> \<phi> \<cdot>"
by auto
\<comment> \<open>Recall \<^term>\<open>?\<chi> = And(Member(0,1 +\<^sub>\<omega> length(env)),\<phi>)\<close>\<close>
with \<open>[_] @ nenv @ [_] \<in> _ \<close> map_nenv \<open>arity(?\<chi>) \<le> length(_)\<close> \<open>length(nenv) = _\<close>
obtain r where "r\<in>G" "r \<tturnstile> ?\<chi> ([\<theta>] @ nenv @ [\<pi>])"
using truth_lemma[OF \<open>?\<chi>\<in>_\<close>,of "[\<theta>] @ nenv @ [\<pi>]"]
by auto
with \<open>filter(G)\<close> and \<open>q\<in>G\<close>
obtain p where "p\<in>G" "p\<preceq>q" "p\<preceq>r"
unfolding filter_def compat_in_def by force
with \<open>r\<in>G\<close> \<open>q\<in>G\<close> \<open>G\<subseteq>\<bbbP>\<close>
have "p\<in>\<bbbP>" "r\<in>\<bbbP>" "q\<in>\<bbbP>" "p\<in>M"
using transitivity[OF _ P_in_M] subsetD
by simp_all
with \<open>\<phi>\<in>formula\<close> \<open>\<theta>\<in>M\<close> \<open>\<pi>\<in>M\<close> \<open>p\<preceq>r\<close> \<open>nenv \<in> _\<close> \<open>arity(?\<chi>) \<le> length(_)\<close> \<open>r \<tturnstile> ?\<chi> _\<close> \<open>env\<in>_\<close>
have "p \<tturnstile> ?\<chi> ([\<theta>] @ nenv @ [\<pi>])"
using strengthening_lemma
by simp
with \<open>p\<in>\<bbbP>\<close> \<open>\<phi>\<in>formula\<close> \<open>\<theta>\<in>M\<close> \<open>\<pi>\<in>M\<close> \<open>nenv \<in> _\<close> \<open>arity(?\<chi>) \<le> length(_)\<close>
have "\<forall>F. M_generic(F) \<and> p \<in> F \<longrightarrow>
M[F], map(val(F), [\<theta>] @ nenv @ [\<pi>]) \<Turnstile> ?\<chi>"
using definition_of_forcing[where \<phi>="\<cdot>\<cdot> 0 \<in> (1 +\<^sub>\<omega> length(env)) \<cdot> \<and> \<phi> \<cdot>"]
by simp
with \<open>p\<in>\<bbbP>\<close> \<open>\<theta>\<in>M\<close>
have Eq6: "\<exists>\<theta>'\<in>M. \<exists>p'\<in>\<bbbP>. \<langle>\<theta>,p\<rangle> = \<langle>\<theta>',p'\<rangle> \<and> (\<forall>F. M_generic(F) \<and> p' \<in> F \<longrightarrow>
M[F], map(val(F), [\<theta>'] @ nenv @ [\<pi>]) \<Turnstile> ?\<chi>)" by auto
from \<open>\<pi>\<in>M\<close> \<open>\<langle>\<theta>,q\<rangle>\<in>\<pi>\<close> \<open>\<theta>\<in>M\<close> \<open>p\<in>\<bbbP>\<close> \<open>p\<in>M\<close>
have "\<langle>\<theta>,q\<rangle> \<in> M" "\<langle>\<theta>,p\<rangle>\<in>M" "\<langle>\<theta>,p\<rangle>\<in>domain(\<pi>)\<times>\<bbbP>"
using pair_in_M_iff transitivity
by auto
with \<open>\<theta>\<in>M\<close> Eq6 \<open>p\<in>\<bbbP>\<close>
have "M, [\<langle>\<theta>,p\<rangle>,\<bbbP>,leq,\<one>,\<pi>] @ nenv \<Turnstile> ?\<psi>"
using Equivalence by auto
with \<open>\<langle>\<theta>,p\<rangle>\<in>domain(\<pi>)\<times>\<bbbP>\<close>
have "\<langle>\<theta>,p\<rangle>\<in>?n" by simp
with \<open>p\<in>G\<close> \<open>p\<in>\<bbbP>\<close>
have "val(G,\<theta>)\<in>val(G,?n)"
using val_of_elem[of \<theta> p] by simp
with \<open>val(G,\<theta>)=x\<close>
show "x\<in>val(G,?n)" by simp
qed (* proof of "val(G,?m) \<subseteq> val(G,?n)" *)
with val_m first_incl
have "val(G,?n) = {x \<in> A. (M[G], [x] @ env @ [A] \<Turnstile> \<phi>)}" by auto
also from \<open>A\<in>_\<close> phi \<open>env \<in> _\<close>
have " ... = {x \<in> A. (M[G], [x] @ env \<Turnstile> \<phi>)}"
using arity_sats_iff[where env="[_]@env"] transitivity_MG
by auto
finally
show "{x \<in> A. (M[G], [x] @ env \<Turnstile> \<phi>)}\<in> M[G]"
using \<open>?n\<in>M\<close> GenExt_def by force
qed
theorem separation_in_MG:
assumes
"\<phi>\<in>formula" and "arity(\<phi>) \<le> 1 +\<^sub>\<omega> length(env)" and "env\<in>list(M[G])"
shows
"separation(##M[G],\<lambda>x. (M[G], [x] @ env \<Turnstile> \<phi>))"
proof -
{
fix A
assume "A\<in>M[G]"
moreover from \<open>env \<in> _\<close>
obtain nenv where "nenv\<in>list(M)""env = map(val(G),nenv)" "length(env) = length(nenv)"
using GenExt_def map_val[of env] by auto
moreover note \<open>\<phi> \<in> _\<close> \<open>arity(\<phi>) \<le> _\<close> \<open>env \<in> _\<close>
ultimately
have "{x \<in> A . (M[G], [x] @ env \<Turnstile> \<phi>)} \<in> M[G]"
using Collect_sats_in_MG by auto
}
then
show ?thesis
using separation_iff rev_bexI unfolding is_Collect_def by force
qed
end \<comment> \<open>\<^locale>\<open>G_generic1\<close>\<close>
end |
-- Getting projection-like functions right was a little tricky.
-- Here are the cases that didn't work and weren't caught by
-- existing test cases.
module ProjectionLikeFunctions where
record Wrap (A : Set) : Set where
constructor [_]
field unwrap : A
postulate
Nat : Set
n : Nat
Thm : Nat → Set
prf : ∀ n → Thm n
module M x (p : Thm x) (w : Wrap Nat) where
module W = Wrap w
module M′ = M n (prf n) ([ n ])
test₁ : Thm M′.W.unwrap
test₁ = prf n
eq! : ∀ x (S : Thm x) → Wrap Nat → Nat
eq! s S [n] = W.unwrap
module Local where
module W = Wrap [n]
test₂ : Thm (eq! n (prf n) [ n ])
test₂ = prf n
|
# elimat(m) returns the m(m+1)/2 × m^2 elimination matrix Lm
# For any square m×m matrix F, the elimination matrix satisfies vech(F) = Lm * vec(F)
# Luca Brugnolini 2014
# Based on Yun Jung Kim
using LinearAlgebra
function elimat(m)
    A = Matrix{Float64}(I, m^2, m^2)   # identity acting on vec(F)
    L = A[1:m, :]                      # rows selecting the first column of F
    for n = 2:m
        S = A[m*(n-1)+1:n*m, :]        # rows selecting column n of F
        S = S[n:end, :]                # keep only the lower-triangular entries
        L = [L; S]
    end
    return L
end
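# Hedged worked example (added for illustration): for any square matrix F the
# elimination matrix satisfies vech(F) == elimat(m) * vec(F), where vech stacks
# the lower-triangular part (including the diagonal) column by column. The
# helper `vech` below is local to this example, not part of the original file.
vech(F) = [F[i, j] for j in 1:size(F, 2) for i in j:size(F, 1)]
F = [1.0 2.0 3.0;
     4.0 5.0 6.0;
     7.0 8.0 9.0]
# vec(F) = [1,4,7,2,5,8,3,6,9]; elimat(3) keeps entries 1:3, 5:6 and 9,
# giving vech(F) = [1,4,7,5,8,9].
@assert elimat(3) * vec(F) == vech(F)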
|
{-# OPTIONS --without-K #-}
module RepresPerm where
open import Enumeration using (Enum)
open import Equiv
using (_≃_; id≃; sym≃; trans≃; mkqinv; module qinv; _⋆_; path⊎)
open import Relation.Binary.PropositionalEquality using (_≡_; refl; cong)
open import Data.Nat using (ℕ; suc)
open import Data.Fin using (Fin; zero)
open import Data.Product using (_,_)
open import FinEquiv
open import Data.Unit using (⊤)
open import Data.Sum using (_⊎_)
open import LeftCancellation
-- A Representable Permutation consists of
-- 1. an Enumeration of A
-- 2. an Enumeration of B
-- 3. an isomorphism between A and B
record RPerm (A : Set) (n : ℕ) (B : Set) (m : ℕ) : Set where
constructor rp
field
#A : Enum A n
#B : Enum B m
iso : A ≃ B
open RPerm
-- first theorem about these: same size!
thm1 : ∀ {n m} {A B} → (X : RPerm A n B m) → n ≡ m
thm1 {0} {0 } (rp _ _ _ ) = refl
thm1 {0} {suc m} (rp (fA , isoA) (fB , mkqinv g α β) (f , iso)) with fA (I.g (g zero))
where module I = qinv iso
... | ()
thm1 {suc n} {0} (rp (fA , isoA) B≃Fm (f , iso)) with B≃Fm ⋆ f (IA.g zero)
where module IA = qinv isoA
... | ()
thm1 {suc n} {suc m} {A} {B} (rp A≃Fsn B≃Fsm A≃B) =
cong suc (thm1 {n} {m} {Fin n} {Fin m} (rp id≃ id≃ Fn≃Fm))
where
Fsn≃Fsm : Fin (suc n) ≃ Fin (suc m)
Fsn≃Fsm = trans≃ (trans≃ (sym≃ A≃Fsn) A≃B) B≃Fsm
1+n≃1+m : (Fin 1 ⊎ Fin n) ≃ (Fin 1 ⊎ Fin m)
1+n≃1+m = trans≃ (trans≃ (Plus.fwd-iso {suc 0} {n}) Fsn≃Fsm)
(sym≃ (Plus.fwd-iso {suc 0} {m}))
⊤⊎n≃⊤⊎m : (⊤ ⊎ Fin n) ≃ (⊤ ⊎ Fin m)
⊤⊎n≃⊤⊎m = trans≃ (trans≃ (path⊎ (sym≃ Fin1≃⊤) id≃) 1+n≃1+m)
(path⊎ Fin1≃⊤ id≃)
Fn≃Fm = left-cancel-⊤ ⊤⊎n≃⊤⊎m
|
#include <boost/smart_ptr/make_local_shared_array.hpp>
|
lemma finite_imp_bounded [intro]: "finite S \<Longrightarrow> bounded S" |
lemma polynomial_function_diff [intro]: "\<lbrakk>polynomial_function f; polynomial_function g\<rbrakk> \<Longrightarrow> polynomial_function (\<lambda>x. f x - g x)" |
What are the best Computer Animation Schools in Provo, UT?
Provo has a total population of 105,166 and a student population of 35,114. Of these students, 34,130 are enrolled in schools that offer computer animation programs.
Provo has one computer animation school: Brigham Young University. In 2010, approximately 19 students graduated from the Computer Animation program at Brigham Young University.
In 2010, 19 students graduated with a computer animation degree from one of Provo's computer animation schools. If you decide to join their ranks, you can expect to pay an average of $4,290 per year in tuition costs.
You should also anticipate spending about $1,000 for computer animation related books and supplies every year. And if you live on campus, you will face an additional expense of $6,840 per year, on average, for room and board. If you live at home, you can cut this cost down to approximately $8,990.
If you decide to work as a computer animator in Provo, your job prospects are good. The government projects that the number of computer animators in Provo will increase by 20% by the year 2018. This anticipated change is faster than the projected nationwide trend for computer animators.
The average salary you can expect to earn as a computer animator in Provo is $48,930 per year. This is lower than the state-wide average salary for computer animators. |
[STATEMENT]
lemma SF_alt: "\<turnstile> SF(A)_v = (\<diamond>\<box>\<not>Enabled(<A>_v) \<or> \<box>\<diamond><A>_v)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<turnstile> SF(A)_v = (\<diamond>\<box>\<not> Enabled (<A>_v) \<or> \<box>\<diamond><A>_v)
[PROOF STEP]
apply (unfold SF_def dmd_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<turnstile> (\<box>\<not> \<box>\<not> Enabled (<A>_v) \<longrightarrow> \<box>\<not> \<box>\<not> <A>_v) = (\<not> \<box>\<not> \<box>\<not> Enabled (<A>_v) \<or> \<box>\<not> \<box>\<not> <A>_v)
[PROOF STEP]
apply fastforce
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
How can I contact Tamela Mann?
How to contact Tamela Mann's record label, booking agent, and press contacts, including email address, phone number, and fax number. Social media sites are also included.
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
library(grid)
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Make the panel
# ncol: Number of columns of plots
# nrow: Number of rows needed, calculated from # of cols
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
print(plots[[1]])
} else {
# Set up the page
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Make each plot, in the correct location
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
} |
(* Title: Nominal2_Eqvt
Author: Brian Huffman,
Author: Christian Urban
Test cases for perm_simp
*)
theory Eqvt
imports Nominal2_Base
begin
declare [[trace_eqvt = false]]
(* declare [[trace_eqvt = true]] *)
lemma
fixes B::"'a::pt"
shows "p \<bullet> (B = C)"
apply(perm_simp)
oops
lemma
fixes B::"bool"
shows "p \<bullet> (B = C)"
apply(perm_simp)
oops
lemma
fixes B::"bool"
shows "p \<bullet> (A \<longrightarrow> B = C)"
apply (perm_simp)
oops
lemma
shows "p \<bullet> (\<lambda>(x::'a::pt). A \<longrightarrow> (B::'a \<Rightarrow> bool) x = C) = foo"
apply(perm_simp)
oops
lemma
shows "p \<bullet> (\<lambda>B::bool. A \<longrightarrow> (B = C)) = foo"
apply (perm_simp)
oops
lemma
shows "p \<bullet> (\<lambda>x y. \<exists>z. x = z \<and> x = y \<longrightarrow> z \<noteq> x) = foo"
apply (perm_simp)
oops
lemma
shows "p \<bullet> (\<lambda>f x. f (g (f x))) = foo"
apply (perm_simp)
oops
lemma
fixes p q::"perm"
and x::"'a::pt"
shows "p \<bullet> (q \<bullet> x) = foo"
apply(perm_simp)
oops
lemma
fixes p q r::"perm"
and x::"'a::pt"
shows "p \<bullet> (q \<bullet> r \<bullet> x) = foo"
apply(perm_simp)
oops
lemma
fixes p r::"perm"
shows "p \<bullet> (\<lambda>q::perm. q \<bullet> (r \<bullet> x)) = foo"
apply (perm_simp)
oops
lemma
fixes C D::"bool"
shows "B (p \<bullet> (C = D))"
apply(perm_simp)
oops
declare [[trace_eqvt = false]]
text {* there is no raw eqvt-rule for The *}
lemma "p \<bullet> (THE x. P x) = foo"
apply(perm_strict_simp exclude: The)
apply(perm_simp exclude: The)
oops
lemma
fixes P :: "(('b \<Rightarrow> bool) \<Rightarrow> ('b::pt)) \<Rightarrow> ('a::pt)"
shows "p \<bullet> (P The) = foo"
apply(perm_simp exclude: The)
oops
lemma
fixes P :: "('a::pt) \<Rightarrow> ('b::pt) \<Rightarrow> bool"
shows "p \<bullet> (\<lambda>(a, b). P a b) = (\<lambda>(a, b). (p \<bullet> P) a b)"
apply(perm_simp)
oops
thm eqvts
thm eqvts_raw
ML {* Nominal_ThmDecls.is_eqvt @{context} @{term "supp"} *}
end
|
{-# OPTIONS --without-K #-}
open import HoTT
module homotopy.OneSkeleton {i} {A : Type i} {j} {B : Type j} where
private
module _ (map : A → B) where
data #OneSkeleton-aux : Type i where
#point : A → #OneSkeleton-aux
data #OneSkeleton : Type i where
#one-skeleton : #OneSkeleton-aux → (Unit → Unit) → #OneSkeleton
OneSkeleton : (A → B) → Type i
OneSkeleton = #OneSkeleton
module _ {map : A → B} where
point : A → OneSkeleton map
point a = #one-skeleton (#point a) _
postulate -- HIT
link : ∀ a₁ a₂ → map a₁ == map a₂ → point a₁ == point a₂
module OneSkeletonElim
{l} {P : OneSkeleton map → Type l}
(point* : ∀ a → P (point a))
(link* : ∀ a₁ a₂ p → point* a₁ == point* a₂ [ P ↓ link a₁ a₂ p ]) where
f : Π (OneSkeleton map) P
f = f-aux phantom where
f-aux : Phantom link* → Π (OneSkeleton map) P
f-aux phantom (#one-skeleton (#point a) _) = point* a
postulate
link-β : ∀ a₁ a₂ p → apd f (link a₁ a₂ p) == link* a₁ a₂ p
open OneSkeletonElim public using () renaming (f to OneSkeleton-elim)
module OneSkeletonRec
{l} {P : Type l}
(point* : ∀ a → P)
(link* : ∀ a₁ a₂ p → point* a₁ == point* a₂) where
private
module M = OneSkeletonElim point*
(λ a₁ a₂ p → ↓-cst-in (link* a₁ a₂ p))
f : OneSkeleton map → P
f = M.f
link-β : ∀ a₁ a₂ p → ap f (link a₁ a₂ p) == link* a₁ a₂ p
link-β a₁ a₂ p = apd=cst-in {f = f} (M.link-β a₁ a₂ p)
open OneSkeletonRec public using () renaming (f to OneSkeleton-rec)
OneSkeleton-lift : OneSkeleton map → B
OneSkeleton-lift = OneSkeleton-rec map (λ _ _ p → p)
{-
module _ {i} {A B : Set i} where
skeleton₁ : (A → B) → Set i
skeleton₁ f = Graveyard.skeleton₁ {i} {A} {B} {f}
-}
|
[STATEMENT]
lemma object_type_object_clean [simp]:
"object_type (object_clean x cmp) = object_type x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. object_type (object_clean x cmp) = object_type x
[PROOF STEP]
by (clarsimp simp: object_clean_def) |
require(ggplot2) # for plotting
require(ggpubr) # for violin plots
require(gridExtra) # for combining plots
require(RColorBrewer) # to define color scales
require(ggrepel) # for plot labeling
# Plot residuals of aragonite dataset with respect to regressions ------------------------------------------------------------
# Build custom color scales for full dataset plot
colorscale <- unlist(brewer.pal(length(unique(dat$Analysis)), "Set1"))
names(colorscale) <- unique(dat$Analysis[order(dat$Temp)])
uniquesamples <- data.frame(samplename = unique(violin_data$sample),
Analysis = sapply(strsplit(unique(violin_data$sample), split = "_"), "[", 2)
) # Link sample to Analysis
fillscale <- colorscale[match(uniquesamples$Analysis, names(colorscale))]
names(fillscale) <- uniquesamples$samplename
# Plot residuals
York_residual_plot <- ggplot(data = dat, aes(10^6 / (Temp + 273.15) ^ 2 , D47res_York)) +
geom_ribbon(data = D47m_York_result_res,
aes(x = x,
y = fit,
ymin = lwr,
ymax = upr),
fill = "grey",
alpha = 0.5) +
geom_line(data = D47m_York_result_res,
aes(x = x, y = fit),
color = "grey",
linetype = "dashed",
cex = 1,
alpha = 1) +
geom_line(data = D47m_York_result_res,
aes(x = x, y = Anderson),
color = "black",
linetype = "dashed",
cex = 1,
alpha = 1) +
geom_line(data = D47m_York_result_res[1:1001, ],
aes(x = x, y = Meinicke),
color = "black",
linetype = "dotted",
cex = 1,
alpha = 1) +
geom_pointrange(data = D47stats,
aes(x = 10^6 / (Temp + 273.15) ^ 2,
y = D47res_York,
ymin = D47res_York - CL95,
ymax = D47res_York + CL95,
color = Analysis,
shape = type),
cex = 1,
alpha = 1) +
geom_errorbarh(data = D47stats,
aes(xmin = 10^6 / ((Temp + 2 * Temp_SD) + 273.15) ^ 2,
xmax = 10^6 / ((Temp - 2 * Temp_SD) + 273.15) ^ 2,
y = D47res_York,
color = Analysis),
cex = 1,
alpha = 1) +
# geom_violin(data = dat[which(!dat$D47_outlier), ],
# aes(10^6 / (270.15) ^ 2,
# y = D47res_York),
# fill = "black",
# kernel = "rectangular",
# scale = "width",
# position = "identity",
# width = .2,
# cex = 0,
# alpha = 0.2,
# color = NA,
# trim = TRUE) +
# geom_pointrange(data = dat[which(!dat$D47_outlier), ],
# aes(x = 10^6 / (270.15) ^ 2,
# y = mean(D47res_York),
# ymin = mean(D47res_York) - sd(D47res_York),
# ymax = mean(D47res_York) + sd(D47res_York)),
# color = "black",
# cex = 1) +
# Plot layout
ylim(-0.1, 0.1) +
scale_x_continuous(10 ^ 6 / T ^ 2 ~ "(K)",
breaks = seq(0, 14, 1),
minor_breaks = seq(0, 14, 0.25),
sec.axis = sec_axis(~ sqrt(1e6 / .) - 273.15,
"Temperature (°C)",
breaks = temp_breaks,
labels = temp_labs),
limits = c(0, 14)) +
labs(x = 10 ^ 6 / T ^ 2 ~ "(K)",
y = Delta * Delta[47] ~ "(\u2030"~"I-CDES)",
colour = "Legend") +
scale_colour_manual(values = colorscale) +
scale_shape_manual(values = c(15:20)) +
theme_bw() +
theme(legend.position = "none")
# Add separate summary violin/pointrange
York_residual_plot_margin <- ggplot(data = dat[which(!dat$D47_outlier), ]) +
geom_violin(aes(1,
y = D47res_York),
fill = "black",
kernel = "rectangular",
scale = "width",
position = "identity",
cex = 0,
alpha = 0.2,
color = NA,
trim = TRUE
) +
geom_pointrange(data = dat[which(!dat$D47_outlier), ],
aes(1,
y = mean(D47res_York),
ymin = mean(D47res_York) - sd(D47res_York),
ymax = mean(D47res_York) + sd(D47res_York)),
color = "black",
cex = 1) +
scale_y_continuous("",
breaks = seq(-0.1, 0.1, 0.1),
labels = rep("", 3),
limits = c(-0.1, 0.1)) +
scale_x_continuous("",
breaks = seq(0.5, 1.5, 0.5),
labels = rep("", 3),
sec.axis = sec_axis(~ .,
"",
breaks = seq(0.5, 1.5, 0.5),
labels = rep("", 3)),
limits = c(0.5, 1.5)) +
labs(x = "") +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.ticks = element_blank(),
plot.margin = unit(c(5.5, 0, 5.5, -20), "pt")
)
lowT_York_residual_plot <- ggplot(data = dat, aes(10^6 / (Temp + 273.15) ^ 2 , D47res_lowT_York)) +
geom_ribbon(data = D47m_lowT_York_result_res,
aes(x = x,
y = fit,
ymin = lwr,
ymax = upr),
fill = "grey",
alpha = 0.5) +
geom_line(data = D47m_lowT_York_result_res,
aes(x = x, y = fit),
color = "grey",
linetype = "dashed",
cex = 1,
alpha = 1) +
geom_line(data = D47m_lowT_York_result_res,
aes(x = x, y = Anderson),
color = "black",
linetype = "dashed",
cex = 1,
alpha = 1) +
geom_line(data = D47m_lowT_York_result_res[1:1001, ],
aes(x = x, y = Meinicke),
color = "black",
linetype = "dotted",
cex = 1,
alpha = 1) +
geom_pointrange(data = D47stats,
aes(x = 10^6 / (Temp + 273.15) ^ 2,
y = D47res_lowT_York,
ymin = D47res_lowT_York - CL95,
ymax = D47res_lowT_York + CL95,
color = Analysis,
shape = type),
cex = 1,
alpha = 1) +
geom_errorbarh(data = D47stats,
aes(xmin = 10^6 / ((Temp + 2 * Temp_SD) + 273.15) ^ 2,
xmax = 10^6 / ((Temp - 2 * Temp_SD) + 273.15) ^ 2,
y = D47res_lowT_York,
color = Analysis),
cex = 1,
alpha = 1) +
# Add summary violin/pointrange
# geom_violin(data = dat[which(!dat$D47_outlier & (dat$Analysis != "Muller17")), ],
# aes(10^6 / (270.15) ^ 2,
# y = D47res_lowT_York),
# fill = "black",
# kernel = "rectangular",
# scale = "width",
# position = "identity",
# width = .2,
# cex = 0,
# alpha = 0.2,
# color = NA,
# trim = TRUE) +
# geom_pointrange(data = dat[which(!dat$D47_outlier & (dat$Analysis != "Muller17")), ],
# aes(x = 10^6 / (270.15) ^ 2,
# y = mean(D47res_lowT_York),
# ymin = mean(D47res_lowT_York) - sd(D47res_lowT_York),
# ymax = mean(D47res_lowT_York) + sd(D47res_lowT_York)),
# color = "black",
# cex = 1) +
ylim(-0.1, 0.1) +
scale_x_continuous(10 ^ 6 / T ^ 2 ~ "(K)",
breaks = seq(0, 14, 1),
minor_breaks = seq(0, 14, 0.25),
sec.axis = sec_axis(~ sqrt(1e6 / .) - 273.15,
"Temperature (°C)",
breaks = temp_breaks,
labels = temp_labs_lowT),
limits = c(7, 14)) +
labs(x = 10 ^ 6 / T ^ 2 ~ "(K)",
y = Delta * Delta[47] ~ "(\u2030"~"I-CDES)",
colour = "Legend") +
scale_colour_manual(values = colorscale) +
scale_shape_manual(values = c(15:20)) +
theme_bw()
# Add separate summary violin/pointrange
lowT_York_residual_plot_margin <- ggplot(data = dat[which(!dat$D47_outlier), ]) +
geom_violin(aes(1,
y = D47res_lowT_York),
fill = "black",
kernel = "rectangular",
scale = "width",
position = "identity",
cex = 0,
alpha = 0.2,
color = NA,
trim = TRUE
) +
geom_pointrange(data = dat[which(!dat$D47_outlier), ],
aes(1,
y = mean(D47res_lowT_York),
ymin = mean(D47res_lowT_York) - sd(D47res_lowT_York),
ymax = mean(D47res_lowT_York) + sd(D47res_lowT_York)),
color = "black",
cex = 1) +
scale_y_continuous("",
breaks = seq(-0.1, 0.1, 0.1),
labels = rep("", 3),
limits = c(-0.1, 0.1)) +
scale_x_continuous("",
breaks = seq(0.5, 1.5, 0.5),
labels = rep("", 3),
sec.axis = sec_axis(~ .,
"",
breaks = seq(0.5, 1.5, 0.5),
labels = rep("", 3)),
limits = c(0.5, 1.5)) +
labs(x = "") +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.ticks = element_blank(),
plot.margin = unit(c(5.5, 0, 5.5, -20), "pt")
)
poly_residual_plot <- ggplot(data = dat, aes(10^6 / (Temp + 273.15) ^ 2 , D47res_poly)) +
geom_ribbon(data = D47m_poly_MC_result_res,
aes(x = x,
y = fit,
ymin = lwr,
ymax = upr),
fill = "grey",
alpha = 0.5) +
geom_line(data = D47m_poly_MC_result_res,
aes(x = x, y = fit),
color = "grey",
linetype = "dashed",
cex = 1,
alpha = 1) +
geom_line(data = D47m_poly_MC_result_res,
aes(x = x, y = Anderson),
color = "black",
linetype = "dashed",
cex = 1,
alpha = 1) +
geom_line(data = D47m_poly_MC_result_res[1:1001, ],
aes(x = x, y = Meinicke),
color = "black",
linetype = "dotted",
cex = 1,
alpha = 1) +
geom_pointrange(data = D47stats,
aes(x = 10^6 / (Temp + 273.15) ^ 2,
y = D47res_poly,
ymin = D47res_poly - CL95,
ymax = D47res_poly + CL95,
color = Analysis,
shape = type),
cex = 1,
alpha = 1) +
geom_errorbarh(data = D47stats,
aes(xmin = 10^6 / ((Temp + 2 * Temp_SD) + 273.15) ^ 2,
xmax = 10^6 / ((Temp - 2 * Temp_SD) + 273.15) ^ 2,
y = D47res_poly,
color = Analysis),
cex = 1,
alpha = 1) +
# Add summary violin/pointrange
# geom_violin(data = dat[which(!dat$D47_outlier), ],
# aes(10^6 / (270.15) ^ 2,
# y = D47res_poly),
# fill = "black",
# kernel = "rectangular",
# scale = "width",
# position = "identity",
# width = .2,
# cex = 0,
# alpha = 0.2,
# color = NA,
# trim = TRUE) +
# geom_pointrange(data = dat[which(!dat$D47_outlier), ],
# aes(x = 10^6 / (270.15) ^ 2,
# y = mean(D47res_poly),
# ymin = mean(D47res_poly) - sd(D47res_poly),
# ymax = mean(D47res_poly) + sd(D47res_poly)),
# color = "black",
# cex = 1) +
ylim(-0.1, 0.1) +
scale_x_continuous(10 ^ 6 / T ^ 2 ~ "(K)",
breaks = seq(0, 14, 1),
minor_breaks = seq(0, 14, 0.25),
sec.axis = sec_axis(~ sqrt(1e6 / .) - 273.15,
"Temperature (°C)",
breaks = temp_breaks,
labels = temp_labs),
limits = c(0, 14)) +
labs(x = 10 ^ 6 / T ^ 2 ~ "(K)",
y = Delta * Delta[47] ~ "(\u2030"~"I-CDES)",
colour = "Legend") +
scale_colour_manual(values = colorscale) +
scale_shape_manual(values = c(15:20)) +
theme_bw()
# Add separate summary violin/pointrange
poly_residual_plot_margin <- ggplot(data = dat[which(!dat$D47_outlier), ]) +
geom_violin(aes(1,
y = D47res_poly),
fill = "black",
kernel = "rectangular",
scale = "width",
position = "identity",
cex = 0,
alpha = 0.2,
color = NA,
trim = TRUE
) +
geom_pointrange(data = dat[which(!dat$D47_outlier), ],
aes(1,
y = mean(D47res_poly),
ymin = mean(D47res_poly) - sd(D47res_poly),
ymax = mean(D47res_poly) + sd(D47res_poly)),
color = "black",
cex = 1) +
scale_y_continuous("",
breaks = seq(-0.1, 0.1, 0.1),
labels = rep("", 3),
limits = c(-0.1, 0.1)) +
scale_x_continuous("",
breaks = seq(0.5, 1.5, 0.5),
labels = rep("", 3),
sec.axis = sec_axis(~ .,
"",
breaks = seq(0.5, 1.5, 0.5),
labels = rep("", 3)),
limits = c(0.5, 1.5)) +
labs(x = "") +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.ticks = element_blank(),
plot.margin = unit(c(5.5, 0, 5.5, -20), "pt")
)
Combined_residual_plot <- grid.arrange(York_residual_plot + theme(legend.position = "none"),
York_residual_plot_margin,
poly_residual_plot + theme(legend.position = "none"),
poly_residual_plot_margin,
lowT_York_residual_plot + theme(legend.position = "none"),
lowT_York_residual_plot_margin,
layout_matrix = rbind(
c(rep(1, 15), 2),
c(rep(3, 15), 4),
c(rep(5, 15), 6)
))
|
*> \brief \b IEEECK
*
* =========== DOCUMENTATION ===========
*
* Online html documentation available at
* http://www.netlib.org/lapack/explore-html/
*
*> \htmlonly
*> Download IEEECK + dependencies
*> <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/ieeeck.f">
*> [TGZ]</a>
*> <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/ieeeck.f">
*> [ZIP]</a>
*> <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/ieeeck.f">
*> [TXT]</a>
*> \endhtmlonly
*
* Definition:
* ===========
*
* INTEGER FUNCTION IEEECK( ISPEC, ZERO, ONE )
*
* .. Scalar Arguments ..
* INTEGER ISPEC
* REAL ONE, ZERO
* ..
*
*
*> \par Purpose:
* =============
*>
*> \verbatim
*>
*> IEEECK is called from the ILAENV to verify that Infinity and
*> possibly NaN arithmetic is safe (i.e. will not trap).
*> \endverbatim
*
* Arguments:
* ==========
*
*> \param[in] ISPEC
*> \verbatim
*> ISPEC is INTEGER
*>          Specifies whether to test just for infinity arithmetic
*> or whether to test for infinity and NaN arithmetic.
*> = 0: Verify infinity arithmetic only.
*> = 1: Verify infinity and NaN arithmetic.
*> \endverbatim
*>
*> \param[in] ZERO
*> \verbatim
*> ZERO is REAL
*> Must contain the value 0.0
*> This is passed to prevent the compiler from optimizing
*> away this code.
*> \endverbatim
*>
*> \param[in] ONE
*> \verbatim
*> ONE is REAL
*> Must contain the value 1.0
*> This is passed to prevent the compiler from optimizing
*> away this code.
*>
*> RETURN VALUE: INTEGER
*> = 0: Arithmetic failed to produce the correct answers
*> = 1: Arithmetic produced the correct answers
*> \endverbatim
*
* Authors:
* ========
*
*> \author Univ. of Tennessee
*> \author Univ. of California Berkeley
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
*> \date November 2011
*
*> \ingroup auxOTHERauxiliary
*
* =====================================================================
INTEGER FUNCTION IEEECK( ISPEC, ZERO, ONE )
*
* -- LAPACK auxiliary routine (version 3.4.0) --
* -- LAPACK is a software package provided by Univ. of Tennessee, --
* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
* November 2011
*
* .. Scalar Arguments ..
INTEGER ISPEC
REAL ONE, ZERO
* ..
*
* =====================================================================
*
* .. Local Scalars ..
REAL NAN1, NAN2, NAN3, NAN4, NAN5, NAN6, NEGINF,
$ NEGZRO, NEWZRO, POSINF
* ..
* .. Executable Statements ..
IEEECK = 1
*
POSINF = ONE / ZERO
IF( POSINF.LE.ONE ) THEN
IEEECK = 0
RETURN
END IF
*
NEGINF = -ONE / ZERO
IF( NEGINF.GE.ZERO ) THEN
IEEECK = 0
RETURN
END IF
*
NEGZRO = ONE / ( NEGINF+ONE )
IF( NEGZRO.NE.ZERO ) THEN
IEEECK = 0
RETURN
END IF
*
NEGINF = ONE / NEGZRO
IF( NEGINF.GE.ZERO ) THEN
IEEECK = 0
RETURN
END IF
*
NEWZRO = NEGZRO + ZERO
IF( NEWZRO.NE.ZERO ) THEN
IEEECK = 0
RETURN
END IF
*
POSINF = ONE / NEWZRO
IF( POSINF.LE.ONE ) THEN
IEEECK = 0
RETURN
END IF
*
NEGINF = NEGINF*POSINF
IF( NEGINF.GE.ZERO ) THEN
IEEECK = 0
RETURN
END IF
*
POSINF = POSINF*POSINF
IF( POSINF.LE.ONE ) THEN
IEEECK = 0
RETURN
END IF
*
*
*
*
* Return if we were only asked to check infinity arithmetic
*
IF( ISPEC.EQ.0 )
$ RETURN
*
NAN1 = POSINF + NEGINF
*
NAN2 = POSINF / NEGINF
*
NAN3 = POSINF / POSINF
*
NAN4 = POSINF*ZERO
*
NAN5 = NEGINF*NEGZRO
*
NAN6 = NAN5*ZERO
*
IF( NAN1.EQ.NAN1 ) THEN
IEEECK = 0
RETURN
END IF
*
IF( NAN2.EQ.NAN2 ) THEN
IEEECK = 0
RETURN
END IF
*
IF( NAN3.EQ.NAN3 ) THEN
IEEECK = 0
RETURN
END IF
*
IF( NAN4.EQ.NAN4 ) THEN
IEEECK = 0
RETURN
END IF
*
IF( NAN5.EQ.NAN5 ) THEN
IEEECK = 0
RETURN
END IF
*
IF( NAN6.EQ.NAN6 ) THEN
IEEECK = 0
RETURN
END IF
*
RETURN
END
|
! Can we get the type of an implicit declaration from the data statement?
! This is an error in gfortran.
! DATA LINES/625/, FREQ/50.0/, NAME/'PAL'/
! This works fine in gfortran (and ROSE).
DATA LINES/625/, FREQ/50.0/
end
|
So basically with the money I've spent on Clairol Hydrience I could have gone just to a salon in the first place and had somebody with a brain smack me upside the head, tell me what a dumb idea it would be to darken my hair, and spruce up the fabulous color I already had.
If this doesn't work tomorrow I'm going to cry like a baby. Yes, I am such a GIRL. And no, I will not post any pictures!
Oh Tracey! If I was closer, I'd volunteer to watch your girls so you could go to the salon because I KNOW how much a woman's hair means!!
It will all be okay--it may just take some time to get it back to normal. But no more fussing with those pretty blond locks once it's fixed!
Tracey, I think it best that you get thee to a salon. It's not possible to "dye" your hair a lighter color once you've applied a darker color. If you originally had darker hair, you'd have to bleach it first in order to go blonde. You have to take out the darker color - aka bleach it. Hydrience is a permanent color - meaning it can't be washed out. Since you're not in the habit of coloring your hair, I'm sure that getting this fixed will not do too much damage. Have the processing done first and then get the ends cut off if they look too dry. |
export TanhLayer
@neuron type TanhNeuron
end
@neuron forward(neuron::TanhNeuron) do
neuron.value = tanh(neuron.inputs[1])
end
# Derivative of tanh in terms of its output y = tanh(x): d/dx tanh(x) = 1 - y^2
∇tanh(y) = 1.0f0 - y * y
@neuron backward(neuron::TanhNeuron) do
neuron.∇inputs[1] = ∇tanh(neuron.value) * neuron.∇
end
function TanhLayer(name::Symbol, net::Net, input_ensemble::AbstractEnsemble; copy=false)
neurons = Array(TanhNeuron, size(input_ensemble)...)
for i in 1:length(neurons)
neurons[i] = TanhNeuron()
end
if copy
ens = Ensemble(net, name, neurons)
mapping = one_to_one(ndims(input_ensemble))
add_connections(net, input_ensemble, ens, mapping)
ens
else
ActivationEnsemble(net, name, neurons, input_ensemble)
end
end
function tanh(net::Net, ens::AbstractEnsemble; copy=false)
TanhLayer(gensym("ensemble"), net, ens; copy=copy)
end
export SigmoidLayer
sigmoid(x) = 1 / (1 + exp(-x))
# Derivative of the sigmoid in terms of its output y = σ(x): d/dx σ(x) = y * (1 - y)
∇sigmoid(y) = y * (1.0f0 - y)
@neuron type SigmoidNeuron
end
@neuron forward(neuron::SigmoidNeuron) do
neuron.value = sigmoid(neuron.inputs[1])
end
@neuron backward(neuron::SigmoidNeuron) do
neuron.∇inputs[1] = ∇sigmoid(neuron.value) * neuron.∇
end
function SigmoidLayer(name::Symbol, net::Net, input_ensemble::AbstractEnsemble)
neurons = Array(SigmoidNeuron, size(input_ensemble)...)
for i in 1:length(neurons)
neurons[i] = SigmoidNeuron()
end
ActivationEnsemble(net, name, neurons, input_ensemble)
end
function σ(net, ens::AbstractEnsemble)
SigmoidLayer(gensym("ensemble"), net, ens)
end
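# Hedged sanity check (added for illustration; not part of the original layer
# definitions): the backward passes above express each activation's derivative
# in terms of its *output* value, using d/dx tanh(x) = 1 - tanh(x)^2 and
# d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)). A central finite difference
# on the scalar forward functions confirms both identities.
let x = 0.3, h = 1e-4
    fd_tanh = (Base.tanh(x + h) - Base.tanh(x - h)) / (2h)
    fd_sig  = (sigmoid(x + h) - sigmoid(x - h)) / (2h)
    @assert isapprox(fd_tanh, ∇tanh(Base.tanh(x)); atol = 1e-6)
    @assert isapprox(fd_sig,  ∇sigmoid(sigmoid(x)); atol = 1e-6)
end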
|
Just as good as I hoped they would be.
The workmanship is very good. We are pleased with our choice of fabric/colour, and the overall effect (we had a new carpet as well) looks great.
Plumbs give an excellent service.
My loose covers fitted perfectly & they have been admired by all my guests.
Service was fast, efficient and I had a feeling of good personal care which you do not get with commercial furniture suppliers. I did not have any problems with my order, but if I had, I'm sure they would have been dealt with in the same efficient manner. The Plumbs Rep was very helpful in helping me choose the fabric which is perfect and has transformed the room.
The upholsterer Mr Hinton of Carlton Upholstery did a fantastic job on my settee and armchair; he was always precisely on time and took real pride in his work - cannot praise him too highly, as the large sofa was VERY difficult to manoeuvre out of and back into the house! And all within a short space of time.
Very helpful staff who did an excellent job. Very pleased with my new covers.
The service was excellent being able to select fabric and discuss your requirements in your own home with a very courteous and knowledgeable young man was perfect.
Plumbs provides a superb service! Their rep./installer was outstanding.
We're delighted; it makes such a difference to our room.
[GOAL]
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
⊢ Multiset.map (fun x => Int.natAbs (valMinAbs (a * ↑x))) (Ico 1 (succ (p / 2))).val =
Multiset.map (fun a => a) (Ico 1 (succ (p / 2))).val
[PROOFSTEP]
have he : ∀ {x}, x ∈ Ico 1 (p / 2).succ → x ≠ 0 ∧ x ≤ p / 2 := by
simp (config := { contextual := true }) [Nat.lt_succ_iff, Nat.succ_le_iff, pos_iff_ne_zero]
[GOAL]
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
⊢ ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
[PROOFSTEP]
simp (config := { contextual := true }) [Nat.lt_succ_iff, Nat.succ_le_iff, pos_iff_ne_zero]
[GOAL]
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
he : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
⊢ Multiset.map (fun x => Int.natAbs (valMinAbs (a * ↑x))) (Ico 1 (succ (p / 2))).val =
Multiset.map (fun a => a) (Ico 1 (succ (p / 2))).val
[PROOFSTEP]
have hep : ∀ {x}, x ∈ Ico 1 (p / 2).succ → x < p := fun hx =>
lt_of_le_of_lt (he hx).2 (Nat.div_lt_self hp.1.pos (by decide))
[GOAL]
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
he : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
x✝ : ℕ
hx : x✝ ∈ Ico 1 (succ (p / 2))
⊢ 1 < 2
[PROOFSTEP]
decide
[GOAL]
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
he : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
hep : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x < p
⊢ Multiset.map (fun x => Int.natAbs (valMinAbs (a * ↑x))) (Ico 1 (succ (p / 2))).val =
Multiset.map (fun a => a) (Ico 1 (succ (p / 2))).val
[PROOFSTEP]
have hpe : ∀ {x}, x ∈ Ico 1 (p / 2).succ → ¬p ∣ x := fun hx hpx =>
not_lt_of_ge (le_of_dvd (Nat.pos_of_ne_zero (he hx).1) hpx) (hep hx)
[GOAL]
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
he : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
hep : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x < p
hpe : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → ¬p ∣ x
⊢ Multiset.map (fun x => Int.natAbs (valMinAbs (a * ↑x))) (Ico 1 (succ (p / 2))).val =
Multiset.map (fun a => a) (Ico 1 (succ (p / 2))).val
[PROOFSTEP]
have hmem : ∀ (x : ℕ) (hx : x ∈ Ico 1 (p / 2).succ), (a * x : ZMod p).valMinAbs.natAbs ∈ Ico 1 (p / 2).succ :=
by
intro x hx
simp [hap, CharP.cast_eq_zero_iff (ZMod p) p, hpe hx, lt_succ_iff, succ_le_iff, pos_iff_ne_zero,
natAbs_valMinAbs_le _]
[GOAL]
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
he : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
hep : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x < p
hpe : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → ¬p ∣ x
⊢ ∀ (x : ℕ), x ∈ Ico 1 (succ (p / 2)) → Int.natAbs (valMinAbs (a * ↑x)) ∈ Ico 1 (succ (p / 2))
[PROOFSTEP]
intro x hx
[GOAL]
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
he : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
hep : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x < p
hpe : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → ¬p ∣ x
x : ℕ
hx : x ∈ Ico 1 (succ (p / 2))
⊢ Int.natAbs (valMinAbs (a * ↑x)) ∈ Ico 1 (succ (p / 2))
[PROOFSTEP]
simp [hap, CharP.cast_eq_zero_iff (ZMod p) p, hpe hx, lt_succ_iff, succ_le_iff, pos_iff_ne_zero, natAbs_valMinAbs_le _]
[GOAL]
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
he : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
hep : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x < p
hpe : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → ¬p ∣ x
hmem : ∀ (x : ℕ), x ∈ Ico 1 (succ (p / 2)) → Int.natAbs (valMinAbs (a * ↑x)) ∈ Ico 1 (succ (p / 2))
⊢ Multiset.map (fun x => Int.natAbs (valMinAbs (a * ↑x))) (Ico 1 (succ (p / 2))).val =
Multiset.map (fun a => a) (Ico 1 (succ (p / 2))).val
[PROOFSTEP]
have hsurj : ∀ (b : ℕ) (hb : b ∈ Ico 1 (p / 2).succ), ∃ x ∈ Ico 1 (p / 2).succ, b = (a * x : ZMod p).valMinAbs.natAbs :=
by
intro b hb
refine' ⟨(b / a : ZMod p).valMinAbs.natAbs, mem_Ico.mpr ⟨_, _⟩, _⟩
· apply Nat.pos_of_ne_zero
simp only [div_eq_mul_inv, hap, CharP.cast_eq_zero_iff (ZMod p) p, hpe hb, not_false_iff, valMinAbs_eq_zero,
inv_eq_zero, Int.natAbs_eq_zero, Ne.def, _root_.mul_eq_zero, or_self_iff]
· apply lt_succ_of_le; apply natAbs_valMinAbs_le
· rw [nat_cast_natAbs_valMinAbs]
split_ifs
·
erw [mul_div_cancel' _ hap, valMinAbs_def_pos, val_cast_of_lt (hep hb), if_pos (le_of_lt_succ (mem_Ico.1 hb).2),
Int.natAbs_ofNat]
·
erw [mul_neg, mul_div_cancel' _ hap, natAbs_valMinAbs_neg, valMinAbs_def_pos, val_cast_of_lt (hep hb),
if_pos (le_of_lt_succ (mem_Ico.1 hb).2), Int.natAbs_ofNat]
[GOAL]
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
he : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
hep : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x < p
hpe : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → ¬p ∣ x
hmem : ∀ (x : ℕ), x ∈ Ico 1 (succ (p / 2)) → Int.natAbs (valMinAbs (a * ↑x)) ∈ Ico 1 (succ (p / 2))
⊢ ∀ (b : ℕ), b ∈ Ico 1 (succ (p / 2)) → ∃ x, x ∈ Ico 1 (succ (p / 2)) ∧ b = Int.natAbs (valMinAbs (a * ↑x))
[PROOFSTEP]
intro b hb
[GOAL]
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
he : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
hep : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x < p
hpe : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → ¬p ∣ x
hmem : ∀ (x : ℕ), x ∈ Ico 1 (succ (p / 2)) → Int.natAbs (valMinAbs (a * ↑x)) ∈ Ico 1 (succ (p / 2))
b : ℕ
hb : b ∈ Ico 1 (succ (p / 2))
⊢ ∃ x, x ∈ Ico 1 (succ (p / 2)) ∧ b = Int.natAbs (valMinAbs (a * ↑x))
[PROOFSTEP]
refine' ⟨(b / a : ZMod p).valMinAbs.natAbs, mem_Ico.mpr ⟨_, _⟩, _⟩
[GOAL]
case refine'_1
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
he : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
hep : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x < p
hpe : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → ¬p ∣ x
hmem : ∀ (x : ℕ), x ∈ Ico 1 (succ (p / 2)) → Int.natAbs (valMinAbs (a * ↑x)) ∈ Ico 1 (succ (p / 2))
b : ℕ
hb : b ∈ Ico 1 (succ (p / 2))
⊢ 1 ≤ Int.natAbs (valMinAbs (↑b / a))
[PROOFSTEP]
apply Nat.pos_of_ne_zero
[GOAL]
case refine'_1.a
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
he : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
hep : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x < p
hpe : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → ¬p ∣ x
hmem : ∀ (x : ℕ), x ∈ Ico 1 (succ (p / 2)) → Int.natAbs (valMinAbs (a * ↑x)) ∈ Ico 1 (succ (p / 2))
b : ℕ
hb : b ∈ Ico 1 (succ (p / 2))
⊢ Int.natAbs (valMinAbs (↑b / a)) ≠ 0
[PROOFSTEP]
simp only [div_eq_mul_inv, hap, CharP.cast_eq_zero_iff (ZMod p) p, hpe hb, not_false_iff, valMinAbs_eq_zero,
inv_eq_zero, Int.natAbs_eq_zero, Ne.def, _root_.mul_eq_zero, or_self_iff]
[GOAL]
case refine'_2
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
he : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
hep : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x < p
hpe : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → ¬p ∣ x
hmem : ∀ (x : ℕ), x ∈ Ico 1 (succ (p / 2)) → Int.natAbs (valMinAbs (a * ↑x)) ∈ Ico 1 (succ (p / 2))
b : ℕ
hb : b ∈ Ico 1 (succ (p / 2))
⊢ Int.natAbs (valMinAbs (↑b / a)) < succ (p / 2)
[PROOFSTEP]
apply lt_succ_of_le
[GOAL]
case refine'_2.a
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
he : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
hep : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x < p
hpe : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → ¬p ∣ x
hmem : ∀ (x : ℕ), x ∈ Ico 1 (succ (p / 2)) → Int.natAbs (valMinAbs (a * ↑x)) ∈ Ico 1 (succ (p / 2))
b : ℕ
hb : b ∈ Ico 1 (succ (p / 2))
⊢ Int.natAbs (valMinAbs (↑b / a)) ≤ p / 2
[PROOFSTEP]
apply natAbs_valMinAbs_le
[GOAL]
case refine'_3
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
he : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
hep : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x < p
hpe : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → ¬p ∣ x
hmem : ∀ (x : ℕ), x ∈ Ico 1 (succ (p / 2)) → Int.natAbs (valMinAbs (a * ↑x)) ∈ Ico 1 (succ (p / 2))
b : ℕ
hb : b ∈ Ico 1 (succ (p / 2))
⊢ b = Int.natAbs (valMinAbs (a * ↑(Int.natAbs (valMinAbs (↑b / a)))))
[PROOFSTEP]
rw [nat_cast_natAbs_valMinAbs]
[GOAL]
case refine'_3
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
he : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
hep : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x < p
hpe : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → ¬p ∣ x
hmem : ∀ (x : ℕ), x ∈ Ico 1 (succ (p / 2)) → Int.natAbs (valMinAbs (a * ↑x)) ∈ Ico 1 (succ (p / 2))
b : ℕ
hb : b ∈ Ico 1 (succ (p / 2))
⊢ b = Int.natAbs (valMinAbs (a * if val (↑b / a) ≤ p / 2 then ↑b / a else -(↑b / a)))
[PROOFSTEP]
split_ifs
[GOAL]
case pos
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
he : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
hep : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x < p
hpe : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → ¬p ∣ x
hmem : ∀ (x : ℕ), x ∈ Ico 1 (succ (p / 2)) → Int.natAbs (valMinAbs (a * ↑x)) ∈ Ico 1 (succ (p / 2))
b : ℕ
hb : b ∈ Ico 1 (succ (p / 2))
h✝ : val (↑b / a) ≤ p / 2
⊢ b = Int.natAbs (valMinAbs (a * (↑b / a)))
[PROOFSTEP]
erw [mul_div_cancel' _ hap, valMinAbs_def_pos, val_cast_of_lt (hep hb), if_pos (le_of_lt_succ (mem_Ico.1 hb).2),
Int.natAbs_ofNat]
[GOAL]
case neg
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
he : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
hep : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x < p
hpe : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → ¬p ∣ x
hmem : ∀ (x : ℕ), x ∈ Ico 1 (succ (p / 2)) → Int.natAbs (valMinAbs (a * ↑x)) ∈ Ico 1 (succ (p / 2))
b : ℕ
hb : b ∈ Ico 1 (succ (p / 2))
h✝ : ¬val (↑b / a) ≤ p / 2
⊢ b = Int.natAbs (valMinAbs (a * -(↑b / a)))
[PROOFSTEP]
erw [mul_neg, mul_div_cancel' _ hap, natAbs_valMinAbs_neg, valMinAbs_def_pos, val_cast_of_lt (hep hb),
if_pos (le_of_lt_succ (mem_Ico.1 hb).2), Int.natAbs_ofNat]
[GOAL]
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
he : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
hep : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x < p
hpe : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → ¬p ∣ x
hmem : ∀ (x : ℕ), x ∈ Ico 1 (succ (p / 2)) → Int.natAbs (valMinAbs (a * ↑x)) ∈ Ico 1 (succ (p / 2))
hsurj : ∀ (b : ℕ), b ∈ Ico 1 (succ (p / 2)) → ∃ x, x ∈ Ico 1 (succ (p / 2)) ∧ b = Int.natAbs (valMinAbs (a * ↑x))
⊢ Multiset.map (fun x => Int.natAbs (valMinAbs (a * ↑x))) (Ico 1 (succ (p / 2))).val =
Multiset.map (fun a => a) (Ico 1 (succ (p / 2))).val
[PROOFSTEP]
simp only [← exists_prop] at hsurj
[GOAL]
p : ℕ
hp : Fact (Nat.Prime p)
a : ZMod p
hap : a ≠ 0
he : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x ≠ 0 ∧ x ≤ p / 2
hep : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → x < p
hpe : ∀ {x : ℕ}, x ∈ Ico 1 (succ (p / 2)) → ¬p ∣ x
hmem : ∀ (x : ℕ), x ∈ Ico 1 (succ (p / 2)) → Int.natAbs (valMinAbs (a * ↑x)) ∈ Ico 1 (succ (p / 2))
hsurj : ∀ (b : ℕ), b ∈ Ico 1 (succ (p / 2)) → ∃ x _h, b = Int.natAbs (valMinAbs (a * ↑x))
⊢ Multiset.map (fun x => Int.natAbs (valMinAbs (a * ↑x))) (Ico 1 (succ (p / 2))).val =
Multiset.map (fun a => a) (Ico 1 (succ (p / 2))).val
[PROOFSTEP]
exact
Multiset.map_eq_map_of_bij_of_nodup _ _ (Finset.nodup _) (Finset.nodup _)
(fun x _ => (a * x : ZMod p).valMinAbs.natAbs) hmem (fun _ _ => rfl)
(inj_on_of_surj_on_of_card_le _ hmem hsurj le_rfl) hsurj
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
⊢ ↑(a ^ (p / 2)) * ↑(p / 2)! = ↑(∏ x in Ico 1 (succ (p / 2)), a * ↑x)
[PROOFSTEP]
rw [prod_mul_distrib, ← prod_natCast, prod_Ico_id_eq_factorial, prod_const, card_Ico, succ_sub_one]
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
⊢ ↑(a ^ (p / 2)) * ↑(p / 2)! = ↑(a ^ (p / 2) * ↑(p / 2)!)
[PROOFSTEP]
simp
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
⊢ ↑(∏ x in Ico 1 (succ (p / 2)), a * ↑x) = ∏ x in Ico 1 (succ (p / 2)), ↑(val (↑a * ↑x))
[PROOFSTEP]
simp
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
x✝¹ : ℕ
x✝ : x✝¹ ∈ Ico 1 (succ (p / 2))
⊢ ↑(val (↑a * ↑x✝¹)) = (if val (↑a * ↑x✝¹) ≤ p / 2 then 1 else -1) * ↑(Int.natAbs (valMinAbs (↑a * ↑x✝¹)))
[PROOFSTEP]
simp only [nat_cast_natAbs_valMinAbs]
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
x✝¹ : ℕ
x✝ : x✝¹ ∈ Ico 1 (succ (p / 2))
⊢ ↑(val (↑a * ↑x✝¹)) =
(if val (↑a * ↑x✝¹) ≤ p / 2 then 1 else -1) * if val (↑a * ↑x✝¹) ≤ p / 2 then ↑a * ↑x✝¹ else -(↑a * ↑x✝¹)
[PROOFSTEP]
split_ifs
[GOAL]
case pos
p : ℕ
inst✝ : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
x✝¹ : ℕ
x✝ : x✝¹ ∈ Ico 1 (succ (p / 2))
h✝ : val (↑a * ↑x✝¹) ≤ p / 2
⊢ ↑(val (↑a * ↑x✝¹)) = 1 * (↑a * ↑x✝¹)
[PROOFSTEP]
simp
[GOAL]
case neg
p : ℕ
inst✝ : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
x✝¹ : ℕ
x✝ : x✝¹ ∈ Ico 1 (succ (p / 2))
h✝ : ¬val (↑a * ↑x✝¹) ≤ p / 2
⊢ ↑(val (↑a * ↑x✝¹)) = -1 * -(↑a * ↑x✝¹)
[PROOFSTEP]
simp
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
⊢ ∏ x in Ico 1 (succ (p / 2)), (if val (↑a * ↑x) ≤ p / 2 then 1 else -1) * ↑(Int.natAbs (valMinAbs (↑a * ↑x))) =
(-1) ^ Finset.card (filter (fun x => ¬val (↑a * ↑x) ≤ p / 2) (Ico 1 (succ (p / 2)))) *
∏ x in Ico 1 (succ (p / 2)), ↑(Int.natAbs (valMinAbs (↑a * ↑x)))
[PROOFSTEP]
have :
(∏ x in Ico 1 (p / 2).succ, if (a * x : ZMod p).val ≤ p / 2 then (1 : ZMod p) else -1) =
∏ x in (Ico 1 (p / 2).succ).filter fun x : ℕ => ¬(a * x : ZMod p).val ≤ p / 2, -1 :=
prod_bij_ne_one (fun x _ _ => x) (fun x => by split_ifs <;> simp_all (config := { contextual := true }))
(fun _ _ _ _ _ _ => id) (fun b h _ => ⟨b, by simp_all [-not_le]⟩) (by intros; split_ifs at * <;> simp_all)
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
x : ℕ
⊢ ∀ (h₁ : x ∈ Ico 1 (succ (p / 2))) (h₂ : (if val (↑a * ↑x) ≤ p / 2 then 1 else -1) ≠ 1),
(fun x x_1 x_2 => x) x h₁ h₂ ∈ filter (fun x => ¬val (↑a * ↑x) ≤ p / 2) (Ico 1 (succ (p / 2)))
[PROOFSTEP]
split_ifs
[GOAL]
case pos
p : ℕ
inst✝ : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
x : ℕ
h✝ : val (↑a * ↑x) ≤ p / 2
⊢ ∀ (h₁ : x ∈ Ico 1 (succ (p / 2))) (h₂ : (if val (↑a * ↑x) ≤ p / 2 then 1 else -1) ≠ 1),
(fun x x_1 x_2 => x) x h₁ h₂ ∈ filter (fun x => ¬val (↑a * ↑x) ≤ p / 2) (Ico 1 (succ (p / 2)))
[PROOFSTEP]
simp_all (config := { contextual := true })
[GOAL]
case neg
p : ℕ
inst✝ : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
x : ℕ
h✝ : ¬val (↑a * ↑x) ≤ p / 2
⊢ ∀ (h₁ : x ∈ Ico 1 (succ (p / 2))) (h₂ : (if val (↑a * ↑x) ≤ p / 2 then 1 else -1) ≠ 1),
(fun x x_1 x_2 => x) x h₁ h₂ ∈ filter (fun x => ¬val (↑a * ↑x) ≤ p / 2) (Ico 1 (succ (p / 2)))
[PROOFSTEP]
simp_all (config := { contextual := true })
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
b : ℕ
h : b ∈ filter (fun x => ¬val (↑a * ↑x) ≤ p / 2) (Ico 1 (succ (p / 2)))
x✝ : -1 ≠ 1
⊢ ∃ h₁ h₂, b = (fun x x_1 x_2 => x) b h₁ h₂
[PROOFSTEP]
simp_all [-not_le]
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
⊢ ∀ (a_1 : ℕ),
a_1 ∈ Ico 1 (succ (p / 2)) →
(if val (↑a * ↑a_1) ≤ p / 2 then 1 else -1) ≠ 1 → (if val (↑a * ↑a_1) ≤ p / 2 then 1 else -1) = -1
[PROOFSTEP]
intros
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
a✝ : ℕ
h₁✝ : a✝ ∈ Ico 1 (succ (p / 2))
h₂✝ : (if val (↑a * ↑a✝) ≤ p / 2 then 1 else -1) ≠ 1
⊢ (if val (↑a * ↑a✝) ≤ p / 2 then 1 else -1) = -1
[PROOFSTEP]
split_ifs at *
[GOAL]
case pos
p : ℕ
inst✝ : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
a✝ : ℕ
h₁✝ : a✝ ∈ Ico 1 (succ (p / 2))
h✝ : val (↑a * ↑a✝) ≤ p / 2
h₂✝ : 1 ≠ 1
⊢ 1 = -1
[PROOFSTEP]
simp_all
[GOAL]
case neg
p : ℕ
inst✝ : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
a✝ : ℕ
h₁✝ : a✝ ∈ Ico 1 (succ (p / 2))
h✝ : ¬val (↑a * ↑a✝) ≤ p / 2
h₂✝ : -1 ≠ 1
⊢ -1 = -1
[PROOFSTEP]
simp_all
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
this :
(∏ x in Ico 1 (succ (p / 2)), if val (↑a * ↑x) ≤ p / 2 then 1 else -1) =
∏ x in filter (fun x => ¬val (↑a * ↑x) ≤ p / 2) (Ico 1 (succ (p / 2))), -1
⊢ ∏ x in Ico 1 (succ (p / 2)), (if val (↑a * ↑x) ≤ p / 2 then 1 else -1) * ↑(Int.natAbs (valMinAbs (↑a * ↑x))) =
(-1) ^ Finset.card (filter (fun x => ¬val (↑a * ↑x) ≤ p / 2) (Ico 1 (succ (p / 2)))) *
∏ x in Ico 1 (succ (p / 2)), ↑(Int.natAbs (valMinAbs (↑a * ↑x)))
[PROOFSTEP]
rw [prod_mul_distrib, this]
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
this :
(∏ x in Ico 1 (succ (p / 2)), if val (↑a * ↑x) ≤ p / 2 then 1 else -1) =
∏ x in filter (fun x => ¬val (↑a * ↑x) ≤ p / 2) (Ico 1 (succ (p / 2))), -1
⊢ (∏ x in filter (fun x => ¬val (↑a * ↑x) ≤ p / 2) (Ico 1 (succ (p / 2))), -1) *
∏ x in Ico 1 (succ (p / 2)), ↑(Int.natAbs (valMinAbs (↑a * ↑x))) =
(-1) ^ Finset.card (filter (fun x => ¬val (↑a * ↑x) ≤ p / 2) (Ico 1 (succ (p / 2)))) *
∏ x in Ico 1 (succ (p / 2)), ↑(Int.natAbs (valMinAbs (↑a * ↑x)))
[PROOFSTEP]
simp
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
⊢ (-1) ^ Finset.card (filter (fun x => ¬val (↑a * ↑x) ≤ p / 2) (Ico 1 (succ (p / 2)))) *
∏ x in Ico 1 (succ (p / 2)), ↑(Int.natAbs (valMinAbs (↑a * ↑x))) =
(-1) ^ Finset.card (filter (fun x => ¬val (↑a * ↑x) ≤ p / 2) (Ico 1 (succ (p / 2)))) * ↑(p / 2)!
[PROOFSTEP]
rw [← prod_natCast, Finset.prod_eq_multiset_prod, Ico_map_valMinAbs_natAbs_eq_Ico_map_id p a hap, ←
Finset.prod_eq_multiset_prod, prod_Ico_id_eq_factorial]
[GOAL]
p : ℕ
hp : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
⊢ ↑(p / 2)! ≠ 0
[PROOFSTEP]
rw [Ne.def, CharP.cast_eq_zero_iff (ZMod p) p, hp.1.dvd_factorial, not_le]
[GOAL]
p : ℕ
hp : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
⊢ p / 2 < p
[PROOFSTEP]
exact Nat.div_lt_self hp.1.pos (by decide)
[GOAL]
p : ℕ
hp : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
⊢ 1 < 2
[PROOFSTEP]
decide
[GOAL]
p : ℕ
hp : Fact (Nat.Prime p)
a : ℤ
hap : ↑a ≠ 0
⊢ ↑a ^ (p / 2) * ↑(p / 2)! =
↑((-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2))))) * ↑(p / 2)!
[PROOFSTEP]
simpa using gauss_lemma_aux₁ p hap
[GOAL]
p : ℕ
h : Fact (Nat.Prime p)
a : ℤ
hp : p ≠ 2
ha0 : ↑a ≠ 0
⊢ legendreSym p a = (-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2))))
[PROOFSTEP]
replace hp : Odd p := h.out.odd_of_ne_two hp
[GOAL]
p : ℕ
h : Fact (Nat.Prime p)
a : ℤ
ha0 : ↑a ≠ 0
hp : Odd p
⊢ legendreSym p a = (-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2))))
[PROOFSTEP]
have :
(legendreSym p a : ZMod p) =
(((-1) ^ ((Ico 1 (p / 2).succ).filter fun x : ℕ => p / 2 < (a * x : ZMod p).val).card : ℤ) : ZMod p) :=
by rw [legendreSym.eq_pow, gauss_lemma_aux p ha0]
[GOAL]
p : ℕ
h : Fact (Nat.Prime p)
a : ℤ
ha0 : ↑a ≠ 0
hp : Odd p
⊢ ↑(legendreSym p a) = ↑((-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2)))))
[PROOFSTEP]
rw [legendreSym.eq_pow, gauss_lemma_aux p ha0]
[GOAL]
p : ℕ
h : Fact (Nat.Prime p)
a : ℤ
ha0 : ↑a ≠ 0
hp : Odd p
this : ↑(legendreSym p a) = ↑((-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2)))))
⊢ legendreSym p a = (-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2))))
[PROOFSTEP]
cases legendreSym.eq_one_or_neg_one p ha0
[GOAL]
case inl
p : ℕ
h : Fact (Nat.Prime p)
a : ℤ
ha0 : ↑a ≠ 0
hp : Odd p
this : ↑(legendreSym p a) = ↑((-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2)))))
h✝ : legendreSym p a = 1
⊢ legendreSym p a = (-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2))))
[PROOFSTEP]
cases neg_one_pow_eq_or ℤ ((Ico 1 (p / 2).succ).filter fun x : ℕ => p / 2 < (a * x : ZMod p).val).card
[GOAL]
case inr
p : ℕ
h : Fact (Nat.Prime p)
a : ℤ
ha0 : ↑a ≠ 0
hp : Odd p
this : ↑(legendreSym p a) = ↑((-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2)))))
h✝ : legendreSym p a = -1
⊢ legendreSym p a = (-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2))))
[PROOFSTEP]
cases neg_one_pow_eq_or ℤ ((Ico 1 (p / 2).succ).filter fun x : ℕ => p / 2 < (a * x : ZMod p).val).card
[GOAL]
case inl.inl
p : ℕ
h : Fact (Nat.Prime p)
a : ℤ
ha0 : ↑a ≠ 0
hp : Odd p
this : ↑(legendreSym p a) = ↑((-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2)))))
h✝¹ : legendreSym p a = 1
h✝ : (-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2)))) = 1
⊢ legendreSym p a = (-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2))))
[PROOFSTEP]
simp_all [ne_neg_self hp one_ne_zero, (ne_neg_self hp one_ne_zero).symm]
[GOAL]
case inl.inr
p : ℕ
h : Fact (Nat.Prime p)
a : ℤ
ha0 : ↑a ≠ 0
hp : Odd p
this : ↑(legendreSym p a) = ↑((-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2)))))
h✝¹ : legendreSym p a = 1
h✝ : (-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2)))) = -1
⊢ legendreSym p a = (-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2))))
[PROOFSTEP]
simp_all [ne_neg_self hp one_ne_zero, (ne_neg_self hp one_ne_zero).symm]
[GOAL]
case inr.inl
p : ℕ
h : Fact (Nat.Prime p)
a : ℤ
ha0 : ↑a ≠ 0
hp : Odd p
this : ↑(legendreSym p a) = ↑((-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2)))))
h✝¹ : legendreSym p a = -1
h✝ : (-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2)))) = 1
⊢ legendreSym p a = (-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2))))
[PROOFSTEP]
simp_all [ne_neg_self hp one_ne_zero, (ne_neg_self hp one_ne_zero).symm]
[GOAL]
case inr.inr
p : ℕ
h : Fact (Nat.Prime p)
a : ℤ
ha0 : ↑a ≠ 0
hp : Odd p
this : ↑(legendreSym p a) = ↑((-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2)))))
h✝¹ : legendreSym p a = -1
h✝ : (-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2)))) = -1
⊢ legendreSym p a = (-1) ^ Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2))))
[PROOFSTEP]
simp_all [ne_neg_self hp one_ne_zero, (ne_neg_self hp one_ne_zero).symm]
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
hp2✝ : Fact (p % 2 = 1)
a : ℕ
hap : ↑a ≠ 0
hp2 : ↑p = ↑1
⊢ ↑(∑ x in Ico 1 (succ (p / 2)), a * x) = ↑(∑ x in Ico 1 (succ (p / 2)), (a * x % p + p * (a * x / p)))
[PROOFSTEP]
simp only [mod_add_div]
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
hp2✝ : Fact (p % 2 = 1)
a : ℕ
hap : ↑a ≠ 0
hp2 : ↑p = ↑1
⊢ ↑(∑ x in Ico 1 (succ (p / 2)), (a * x % p + p * (a * x / p))) =
↑(∑ x in Ico 1 (succ (p / 2)), val ↑(a * x)) + ↑(∑ x in Ico 1 (succ (p / 2)), a * x / p)
[PROOFSTEP]
simp only [val_nat_cast]
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
hp2✝ : Fact (p % 2 = 1)
a : ℕ
hap : ↑a ≠ 0
hp2 : ↑p = ↑1
⊢ ↑(∑ x in Ico 1 (succ (p / 2)), (a * x % p + p * (a * x / p))) =
↑(∑ x in Ico 1 (succ (p / 2)), a * x % p) + ↑(∑ x in Ico 1 (succ (p / 2)), a * x / p)
[PROOFSTEP]
simp [sum_add_distrib, mul_sum.symm, Nat.cast_add, Nat.cast_mul, Nat.cast_sum, hp2]
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
hp2✝ : Fact (p % 2 = 1)
a : ℕ
hap : ↑a ≠ 0
hp2 : ↑p = ↑1
⊢ ↑(∑ x in Ico 1 (succ (p / 2)), val ↑(a * x)) =
∑ x in Ico 1 (succ (p / 2)), ↑(valMinAbs (↑a * ↑x) + ↑(if val (↑a * ↑x) ≤ p / 2 then 0 else p))
[PROOFSTEP]
simp only [(val_eq_ite_valMinAbs _).symm]
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
hp2✝ : Fact (p % 2 = 1)
a : ℕ
hap : ↑a ≠ 0
hp2 : ↑p = ↑1
⊢ ↑(∑ x in Ico 1 (succ (p / 2)), val ↑(a * x)) = ∑ x in Ico 1 (succ (p / 2)), ↑↑(val (↑a * ↑x))
[PROOFSTEP]
simp [Nat.cast_sum]
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
hp2✝ : Fact (p % 2 = 1)
a : ℕ
hap : ↑a ≠ 0
hp2 : ↑p = ↑1
⊢ ∑ x in Ico 1 (succ (p / 2)), ↑(valMinAbs (↑a * ↑x) + ↑(if val (↑a * ↑x) ≤ p / 2 then 0 else p)) =
↑(Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2))))) +
↑(∑ x in Ico 1 (succ (p / 2)), Int.natAbs (valMinAbs (↑a * ↑x)))
[PROOFSTEP]
simp [add_comm, sum_add_distrib, Finset.sum_ite, hp2, Nat.cast_sum]
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
hp2✝ : Fact (p % 2 = 1)
a : ℕ
hap : ↑a ≠ 0
hp2 : ↑p = ↑1
⊢ ↑(Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2))))) +
↑(∑ x in Ico 1 (succ (p / 2)), Int.natAbs (valMinAbs (↑a * ↑x))) =
↑(Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2))))) + ↑(∑ x in Ico 1 (succ (p / 2)), x)
[PROOFSTEP]
rw [Finset.sum_eq_multiset_sum, Ico_map_valMinAbs_natAbs_eq_Ico_map_id p a hap, ← Finset.sum_eq_multiset_sum]
[GOAL]
p : ℕ
inst✝¹ : Fact (Nat.Prime p)
inst✝ : Fact (p % 2 = 1)
a : ℕ
ha2✝ : a % 2 = 1
hap : ↑a ≠ 0
ha2 : ↑a = ↑1
⊢ ↑(Finset.card (filter (fun x => p / 2 < val (↑a * ↑x)) (Ico 1 (succ (p / 2))))) -
↑(∑ x in Ico 1 (succ (p / 2)), x * a / p) =
0
[PROOFSTEP]
simpa [add_left_comm, sub_eq_add_neg, Finset.mul_sum.symm, mul_comm, ha2, Nat.cast_sum, add_neg_eq_iff_eq_add.symm,
neg_eq_self_mod_two, add_assoc] using Eq.symm (eisenstein_lemma_aux₁ p hap)
[GOAL]
a b c : ℕ
hb0 : 0 < b
hc : a / b ≤ c
⊢ a / b = Finset.card (Ico 1 (succ (a / b)))
[PROOFSTEP]
simp
[GOAL]
a b c : ℕ
hb0 : 0 < b
hc : a / b ≤ c
x : ℕ
⊢ x ∈ Ico 1 (succ (a / b)) ↔ x ∈ filter (fun x => x * b ≤ a) (Ico 1 (succ c))
[PROOFSTEP]
have : x * b ≤ a → x ≤ c := fun h => le_trans (by rwa [le_div_iff_mul_le hb0]) hc
[GOAL]
a b c : ℕ
hb0 : 0 < b
hc : a / b ≤ c
x : ℕ
h : x * b ≤ a
⊢ x ≤ a / b
[PROOFSTEP]
rwa [le_div_iff_mul_le hb0]
[GOAL]
a b c : ℕ
hb0 : 0 < b
hc : a / b ≤ c
x : ℕ
this : x * b ≤ a → x ≤ c
⊢ x ∈ Ico 1 (succ (a / b)) ↔ x ∈ filter (fun x => x * b ≤ a) (Ico 1 (succ c))
[PROOFSTEP]
simp [lt_succ_iff, le_div_iff_mul_le hb0]
[GOAL]
a b c : ℕ
hb0 : 0 < b
hc : a / b ≤ c
x : ℕ
this : x * b ≤ a → x ≤ c
⊢ x * b ≤ a → 1 ≤ x → x ≤ c
[PROOFSTEP]
tauto
[GOAL]
p q : ℕ
hp0 : p = 0
⊢ ∑ a in Ico 1 (succ (p / 2)), a * q / p =
Finset.card (filter (fun x => x.snd * p ≤ x.fst * q) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
[PROOFSTEP]
simp [hp0, Finset.ext_iff]
[GOAL]
p q : ℕ
hp0 : ¬p = 0
⊢ ∑ a in Ico 1 (succ (p / 2)), Finset.card (filter (fun x => x * p ≤ a * q) (Ico 1 (succ (q / 2)))) =
Finset.card (filter (fun x => x.snd * p ≤ x.fst * q) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
[PROOFSTEP]
rw [← card_sigma]
[GOAL]
p q : ℕ
hp0 : ¬p = 0
⊢ Finset.card (Finset.sigma (Ico 1 (succ (p / 2))) fun a => filter (fun x => x * p ≤ a * q) (Ico 1 (succ (q / 2)))) =
Finset.card (filter (fun x => x.snd * p ≤ x.fst * q) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
[PROOFSTEP]
exact
card_congr (fun a _ => ⟨a.1, a.2⟩)
(by
simp (config := { contextual := true }) only [mem_filter, mem_sigma, and_self_iff, forall_true_iff, mem_product])
(fun ⟨_, _⟩ ⟨_, _⟩ => by
simp (config := { contextual := true }) only [Prod.mk.inj_iff, eq_self_iff_true, and_self_iff, heq_iff_eq,
forall_true_iff])
fun ⟨b₁, b₂⟩ h =>
⟨⟨b₁, b₂⟩, by
revert h
simp (config := { contextual := true }) only [mem_filter, eq_self_iff_true, exists_prop_of_true, mem_sigma,
and_self_iff, forall_true_iff, mem_product]⟩
[GOAL]
p q : ℕ
hp0 : ¬p = 0
⊢ ∀ (a : (_ : ℕ) × ℕ)
(ha : a ∈ Finset.sigma (Ico 1 (succ (p / 2))) fun a => filter (fun x => x * p ≤ a * q) (Ico 1 (succ (q / 2)))),
(fun a x => (a.fst, a.snd)) a ha ∈
filter (fun x => x.snd * p ≤ x.fst * q) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2)))
[PROOFSTEP]
simp (config := { contextual := true }) only [mem_filter, mem_sigma, and_self_iff, forall_true_iff, mem_product]
[GOAL]
p q : ℕ
hp0 : ¬p = 0
x✝¹ x✝ : (_ : ℕ) × ℕ
fst✝¹ snd✝¹ fst✝ snd✝ : ℕ
⊢ ∀
(ha :
{ fst := fst✝¹, snd := snd✝¹ } ∈
Finset.sigma (Ico 1 (succ (p / 2))) fun a => filter (fun x => x * p ≤ a * q) (Ico 1 (succ (q / 2))))
(hb :
{ fst := fst✝, snd := snd✝ } ∈
Finset.sigma (Ico 1 (succ (p / 2))) fun a => filter (fun x => x * p ≤ a * q) (Ico 1 (succ (q / 2)))),
(fun a x => (a.fst, a.snd)) { fst := fst✝¹, snd := snd✝¹ } ha =
(fun a x => (a.fst, a.snd)) { fst := fst✝, snd := snd✝ } hb →
{ fst := fst✝¹, snd := snd✝¹ } = { fst := fst✝, snd := snd✝ }
[PROOFSTEP]
simp (config := { contextual := true }) only [Prod.mk.inj_iff, eq_self_iff_true, and_self_iff, heq_iff_eq,
forall_true_iff]
[GOAL]
p q : ℕ
hp0 : ¬p = 0
x✝ : ℕ × ℕ
b₁ b₂ : ℕ
h : (b₁, b₂) ∈ filter (fun x => x.snd * p ≤ x.fst * q) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2)))
⊢ ∃ ha, (fun a x => (a.fst, a.snd)) { fst := b₁, snd := b₂ } ha = (b₁, b₂)
[PROOFSTEP]
revert h
[GOAL]
p q : ℕ
hp0 : ¬p = 0
x✝ : ℕ × ℕ
b₁ b₂ : ℕ
⊢ (b₁, b₂) ∈ filter (fun x => x.snd * p ≤ x.fst * q) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))) →
∃ ha, (fun a x => (a.fst, a.snd)) { fst := b₁, snd := b₂ } ha = (b₁, b₂)
[PROOFSTEP]
simp (config := { contextual := true }) only [mem_filter, eq_self_iff_true, exists_prop_of_true, mem_sigma,
and_self_iff, forall_true_iff, mem_product]
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
⊢ ∑ a in Ico 1 (succ (p / 2)), a * q / p + ∑ a in Ico 1 (succ (q / 2)), a * p / q = p / 2 * (q / 2)
[PROOFSTEP]
have hswap :
((Ico 1 (q / 2).succ ×ˢ Ico 1 (p / 2).succ).filter fun x : ℕ × ℕ => x.2 * q ≤ x.1 * p).card =
((Ico 1 (p / 2).succ ×ˢ Ico 1 (q / 2).succ).filter fun x : ℕ × ℕ => x.1 * q ≤ x.2 * p).card :=
card_congr (fun x _ => Prod.swap x)
(fun ⟨_, _⟩ => by
simp (config := { contextual := true }) only [mem_filter, and_self_iff, Prod.swap_prod_mk, forall_true_iff,
mem_product])
(fun ⟨_, _⟩ ⟨_, _⟩ => by
simp (config := { contextual := true }) only [Prod.mk.inj_iff, eq_self_iff_true, and_self_iff, Prod.swap_prod_mk,
forall_true_iff])
fun ⟨x₁, x₂⟩ h =>
⟨⟨x₂, x₁⟩, by
revert h
simp (config := { contextual := true }) only [mem_filter, eq_self_iff_true, and_self_iff, exists_prop_of_true,
Prod.swap_prod_mk, forall_true_iff, mem_product]⟩
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
x✝ : ℕ × ℕ
fst✝ snd✝ : ℕ
⊢ ∀ (ha : (fst✝, snd✝) ∈ filter (fun x => x.snd * q ≤ x.fst * p) (Ico 1 (succ (q / 2)) ×ˢ Ico 1 (succ (p / 2)))),
(fun x x_1 => Prod.swap x) (fst✝, snd✝) ha ∈
filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2)))
[PROOFSTEP]
simp (config := { contextual := true }) only [mem_filter, and_self_iff, Prod.swap_prod_mk, forall_true_iff, mem_product]
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
x✝¹ x✝ : ℕ × ℕ
fst✝¹ snd✝¹ fst✝ snd✝ : ℕ
⊢ ∀ (ha : (fst✝¹, snd✝¹) ∈ filter (fun x => x.snd * q ≤ x.fst * p) (Ico 1 (succ (q / 2)) ×ˢ Ico 1 (succ (p / 2))))
(hb : (fst✝, snd✝) ∈ filter (fun x => x.snd * q ≤ x.fst * p) (Ico 1 (succ (q / 2)) ×ˢ Ico 1 (succ (p / 2)))),
(fun x x_1 => Prod.swap x) (fst✝¹, snd✝¹) ha = (fun x x_1 => Prod.swap x) (fst✝, snd✝) hb →
(fst✝¹, snd✝¹) = (fst✝, snd✝)
[PROOFSTEP]
simp (config := { contextual := true }) only [Prod.mk.inj_iff, eq_self_iff_true, and_self_iff, Prod.swap_prod_mk,
forall_true_iff]
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
x✝ : ℕ × ℕ
x₁ x₂ : ℕ
h : (x₁, x₂) ∈ filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2)))
⊢ ∃ ha, (fun x x_1 => Prod.swap x) (x₂, x₁) ha = (x₁, x₂)
[PROOFSTEP]
revert h
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
x✝ : ℕ × ℕ
x₁ x₂ : ℕ
⊢ (x₁, x₂) ∈ filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))) →
∃ ha, (fun x x_1 => Prod.swap x) (x₂, x₁) ha = (x₁, x₂)
[PROOFSTEP]
simp (config := { contextual := true }) only [mem_filter, eq_self_iff_true, and_self_iff, exists_prop_of_true,
Prod.swap_prod_mk, forall_true_iff, mem_product]
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
hswap :
Finset.card (filter (fun x => x.snd * q ≤ x.fst * p) (Ico 1 (succ (q / 2)) ×ˢ Ico 1 (succ (p / 2)))) =
Finset.card (filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
⊢ ∑ a in Ico 1 (succ (p / 2)), a * q / p + ∑ a in Ico 1 (succ (q / 2)), a * p / q = p / 2 * (q / 2)
[PROOFSTEP]
have hdisj :
Disjoint ((Ico 1 (p / 2).succ ×ˢ Ico 1 (q / 2).succ).filter fun x : ℕ × ℕ => x.2 * p ≤ x.1 * q)
((Ico 1 (p / 2).succ ×ˢ Ico 1 (q / 2).succ).filter fun x : ℕ × ℕ => x.1 * q ≤ x.2 * p) :=
by
apply disjoint_filter.2 fun x hx hpq hqp => ?_
have hxp : x.1 < p :=
lt_of_le_of_lt (show x.1 ≤ p / 2 by simp_all only [lt_succ_iff, mem_Ico, mem_product])
(Nat.div_lt_self hp.1.pos (by decide))
have : (x.1 : ZMod p) = 0 := by simpa [hq0] using congr_arg ((↑) : ℕ → ZMod p) (le_antisymm hpq hqp)
apply_fun ZMod.val at this
rw [val_cast_of_lt hxp, val_zero] at this
simp only [this, nonpos_iff_eq_zero, mem_Ico, one_ne_zero, false_and_iff, mem_product] at hx
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
hswap :
Finset.card (filter (fun x => x.snd * q ≤ x.fst * p) (Ico 1 (succ (q / 2)) ×ˢ Ico 1 (succ (p / 2)))) =
Finset.card (filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
⊢ Disjoint (filter (fun x => x.snd * p ≤ x.fst * q) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
(filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
[PROOFSTEP]
apply disjoint_filter.2 fun x hx hpq hqp => ?_
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
hswap :
Finset.card (filter (fun x => x.snd * q ≤ x.fst * p) (Ico 1 (succ (q / 2)) ×ˢ Ico 1 (succ (p / 2)))) =
Finset.card (filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
x : ℕ × ℕ
hx : x ∈ Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))
hpq : x.snd * p ≤ x.fst * q
hqp : x.fst * q ≤ x.snd * p
⊢ False
[PROOFSTEP]
have hxp : x.1 < p :=
lt_of_le_of_lt (show x.1 ≤ p / 2 by simp_all only [lt_succ_iff, mem_Ico, mem_product])
(Nat.div_lt_self hp.1.pos (by decide))
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
hswap :
Finset.card (filter (fun x => x.snd * q ≤ x.fst * p) (Ico 1 (succ (q / 2)) ×ˢ Ico 1 (succ (p / 2)))) =
Finset.card (filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
x : ℕ × ℕ
hx : x ∈ Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))
hpq : x.snd * p ≤ x.fst * q
hqp : x.fst * q ≤ x.snd * p
⊢ x.fst ≤ p / 2
[PROOFSTEP]
simp_all only [lt_succ_iff, mem_Ico, mem_product]
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
hswap :
Finset.card (filter (fun x => x.snd * q ≤ x.fst * p) (Ico 1 (succ (q / 2)) ×ˢ Ico 1 (succ (p / 2)))) =
Finset.card (filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
x : ℕ × ℕ
hx : x ∈ Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))
hpq : x.snd * p ≤ x.fst * q
hqp : x.fst * q ≤ x.snd * p
⊢ 1 < 2
[PROOFSTEP]
decide
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
hswap :
Finset.card (filter (fun x => x.snd * q ≤ x.fst * p) (Ico 1 (succ (q / 2)) ×ˢ Ico 1 (succ (p / 2)))) =
Finset.card (filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
x : ℕ × ℕ
hx : x ∈ Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))
hpq : x.snd * p ≤ x.fst * q
hqp : x.fst * q ≤ x.snd * p
hxp : x.fst < p
⊢ False
[PROOFSTEP]
have : (x.1 : ZMod p) = 0 := by simpa [hq0] using congr_arg ((↑) : ℕ → ZMod p) (le_antisymm hpq hqp)
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
hswap :
Finset.card (filter (fun x => x.snd * q ≤ x.fst * p) (Ico 1 (succ (q / 2)) ×ˢ Ico 1 (succ (p / 2)))) =
Finset.card (filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
x : ℕ × ℕ
hx : x ∈ Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))
hpq : x.snd * p ≤ x.fst * q
hqp : x.fst * q ≤ x.snd * p
hxp : x.fst < p
⊢ ↑x.fst = 0
[PROOFSTEP]
simpa [hq0] using congr_arg ((↑) : ℕ → ZMod p) (le_antisymm hpq hqp)
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
hswap :
Finset.card (filter (fun x => x.snd * q ≤ x.fst * p) (Ico 1 (succ (q / 2)) ×ˢ Ico 1 (succ (p / 2)))) =
Finset.card (filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
x : ℕ × ℕ
hx : x ∈ Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))
hpq : x.snd * p ≤ x.fst * q
hqp : x.fst * q ≤ x.snd * p
hxp : x.fst < p
this : ↑x.fst = 0
⊢ False
[PROOFSTEP]
apply_fun ZMod.val at this
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
hswap :
Finset.card (filter (fun x => x.snd * q ≤ x.fst * p) (Ico 1 (succ (q / 2)) ×ˢ Ico 1 (succ (p / 2)))) =
Finset.card (filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
x : ℕ × ℕ
hx : x ∈ Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))
hpq : x.snd * p ≤ x.fst * q
hqp : x.fst * q ≤ x.snd * p
hxp : x.fst < p
this : val ↑x.fst = val 0
⊢ False
[PROOFSTEP]
rw [val_cast_of_lt hxp, val_zero] at this
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
hswap :
Finset.card (filter (fun x => x.snd * q ≤ x.fst * p) (Ico 1 (succ (q / 2)) ×ˢ Ico 1 (succ (p / 2)))) =
Finset.card (filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
x : ℕ × ℕ
hx : x ∈ Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))
hpq : x.snd * p ≤ x.fst * q
hqp : x.fst * q ≤ x.snd * p
hxp : x.fst < p
this : x.fst = 0
⊢ False
[PROOFSTEP]
simp only [this, nonpos_iff_eq_zero, mem_Ico, one_ne_zero, false_and_iff, mem_product] at hx
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
hswap :
Finset.card (filter (fun x => x.snd * q ≤ x.fst * p) (Ico 1 (succ (q / 2)) ×ˢ Ico 1 (succ (p / 2)))) =
Finset.card (filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
hdisj :
Disjoint (filter (fun x => x.snd * p ≤ x.fst * q) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
(filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
⊢ ∑ a in Ico 1 (succ (p / 2)), a * q / p + ∑ a in Ico 1 (succ (q / 2)), a * p / q = p / 2 * (q / 2)
[PROOFSTEP]
have hunion :
(((Ico 1 (p / 2).succ ×ˢ Ico 1 (q / 2).succ).filter fun x : ℕ × ℕ => x.2 * p ≤ x.1 * q) ∪
(Ico 1 (p / 2).succ ×ˢ Ico 1 (q / 2).succ).filter fun x : ℕ × ℕ => x.1 * q ≤ x.2 * p) =
Ico 1 (p / 2).succ ×ˢ Ico 1 (q / 2).succ :=
Finset.ext fun x => by
have := le_total (x.2 * p) (x.1 * q)
simp only [mem_union, mem_filter, mem_Ico, mem_product]
tauto
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
hswap :
Finset.card (filter (fun x => x.snd * q ≤ x.fst * p) (Ico 1 (succ (q / 2)) ×ˢ Ico 1 (succ (p / 2)))) =
Finset.card (filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
hdisj :
Disjoint (filter (fun x => x.snd * p ≤ x.fst * q) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
(filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
x : ℕ × ℕ
⊢ x ∈
filter (fun x => x.snd * p ≤ x.fst * q) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))) ∪
filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))) ↔
x ∈ Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))
[PROOFSTEP]
have := le_total (x.2 * p) (x.1 * q)
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
hswap :
Finset.card (filter (fun x => x.snd * q ≤ x.fst * p) (Ico 1 (succ (q / 2)) ×ˢ Ico 1 (succ (p / 2)))) =
Finset.card (filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
hdisj :
Disjoint (filter (fun x => x.snd * p ≤ x.fst * q) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
(filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
x : ℕ × ℕ
this : x.snd * p ≤ x.fst * q ∨ x.fst * q ≤ x.snd * p
⊢ x ∈
filter (fun x => x.snd * p ≤ x.fst * q) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))) ∪
filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))) ↔
x ∈ Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))
[PROOFSTEP]
simp only [mem_union, mem_filter, mem_Ico, mem_product]
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
hswap :
Finset.card (filter (fun x => x.snd * q ≤ x.fst * p) (Ico 1 (succ (q / 2)) ×ˢ Ico 1 (succ (p / 2)))) =
Finset.card (filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
hdisj :
Disjoint (filter (fun x => x.snd * p ≤ x.fst * q) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
(filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
x : ℕ × ℕ
this : x.snd * p ≤ x.fst * q ∨ x.fst * q ≤ x.snd * p
⊢ ((1 ≤ x.fst ∧ x.fst < succ (p / 2)) ∧ 1 ≤ x.snd ∧ x.snd < succ (q / 2)) ∧ x.snd * p ≤ x.fst * q ∨
((1 ≤ x.fst ∧ x.fst < succ (p / 2)) ∧ 1 ≤ x.snd ∧ x.snd < succ (q / 2)) ∧ x.fst * q ≤ x.snd * p ↔
(1 ≤ x.fst ∧ x.fst < succ (p / 2)) ∧ 1 ≤ x.snd ∧ x.snd < succ (q / 2)
[PROOFSTEP]
tauto
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
hswap :
Finset.card (filter (fun x => x.snd * q ≤ x.fst * p) (Ico 1 (succ (q / 2)) ×ˢ Ico 1 (succ (p / 2)))) =
Finset.card (filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
hdisj :
Disjoint (filter (fun x => x.snd * p ≤ x.fst * q) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
(filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
hunion :
filter (fun x => x.snd * p ≤ x.fst * q) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))) ∪
filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))) =
Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))
⊢ ∑ a in Ico 1 (succ (p / 2)), a * q / p + ∑ a in Ico 1 (succ (q / 2)), a * p / q = p / 2 * (q / 2)
[PROOFSTEP]
rw [sum_Ico_eq_card_lt, sum_Ico_eq_card_lt, hswap, ← card_disjoint_union hdisj, hunion, card_product]
[GOAL]
p q : ℕ
hp : Fact (Nat.Prime p)
hq0 : ↑q ≠ 0
hswap :
Finset.card (filter (fun x => x.snd * q ≤ x.fst * p) (Ico 1 (succ (q / 2)) ×ˢ Ico 1 (succ (p / 2)))) =
Finset.card (filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
hdisj :
Disjoint (filter (fun x => x.snd * p ≤ x.fst * q) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
(filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))))
hunion :
filter (fun x => x.snd * p ≤ x.fst * q) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))) ∪
filter (fun x => x.fst * q ≤ x.snd * p) (Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))) =
Ico 1 (succ (p / 2)) ×ˢ Ico 1 (succ (q / 2))
⊢ Finset.card (Ico 1 (succ (p / 2))) * Finset.card (Ico 1 (succ (q / 2))) = p / 2 * (q / 2)
[PROOFSTEP]
simp only [card_Ico, tsub_zero, succ_sub_succ_eq_sub]
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
hp : p ≠ 2
a : ℕ
ha1 : a % 2 = 1
ha0 : ↑a ≠ 0
⊢ legendreSym p ↑a = (-1) ^ ∑ x in Ico 1 (succ (p / 2)), x * a / p
[PROOFSTEP]
haveI hp' : Fact (p % 2 = 1) := ⟨Nat.Prime.mod_two_eq_one_iff_ne_two.mpr hp⟩
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
hp : p ≠ 2
a : ℕ
ha1 : a % 2 = 1
ha0 : ↑a ≠ 0
hp' : Fact (p % 2 = 1)
⊢ legendreSym p ↑a = (-1) ^ ∑ x in Ico 1 (succ (p / 2)), x * a / p
[PROOFSTEP]
have ha0' : ((a : ℤ) : ZMod p) ≠ 0 := by norm_cast
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
hp : p ≠ 2
a : ℕ
ha1 : a % 2 = 1
ha0 : ↑a ≠ 0
hp' : Fact (p % 2 = 1)
⊢ ↑↑a ≠ 0
[PROOFSTEP]
norm_cast
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
hp : p ≠ 2
a : ℕ
ha1 : a % 2 = 1
ha0 : ↑a ≠ 0
hp' : Fact (p % 2 = 1)
ha0' : ↑↑a ≠ 0
⊢ legendreSym p ↑a = (-1) ^ ∑ x in Ico 1 (succ (p / 2)), x * a / p
[PROOFSTEP]
rw [neg_one_pow_eq_pow_mod_two, gauss_lemma hp ha0', neg_one_pow_eq_pow_mod_two,
(by norm_cast : ((a : ℤ) : ZMod p) = (a : ZMod p)), show _ = _ from eisenstein_lemma_aux p ha1 ha0]
[GOAL]
p : ℕ
inst✝ : Fact (Nat.Prime p)
hp : p ≠ 2
a : ℕ
ha1 : a % 2 = 1
ha0 : ↑a ≠ 0
hp' : Fact (p % 2 = 1)
ha0' : ↑↑a ≠ 0
⊢ ↑↑a = ↑a
[PROOFSTEP]
norm_cast
|
import .size .induction .single
----------------------------------------------------------------
open_locale classical
noncomputable theory
universes u u' v v' w
-- Lemmas applying for a general nonempty fintype
-- TODO - lemmas about folding, so sum etc is also subsumed
section general_fintype
variables {α : Type*}{α' : Type*}{β : Type*}{β': Type*}
[fintype α] [nonempty α] [linear_order β] [linear_order β']
--instance fin_α : fintype α := by {letI := classical.choice _inst_1, apply_instance, }
/- theorem set.finite.exists_maximal_wrt {α : Type*} {β : Type*} [partial_order β]
(f : α → β) (s : set α) (h : s.finite) :
s.nonempty → (∃ (a : α) (H : a ∈ s), ∀ (a' : α), a' ∈ s → f a ≤ f a' → f a = f a') -/
lemma exists_max (f : α → β) :
∃ x, ∀ y, f y ≤ f x :=
fintype.exists_max f
/-letI := classical.choice _inst_1,
unfreezingI {obtain ⟨a₀⟩ := _inst_2},
obtain ⟨a, -, ha⟩ :=
set.finite.exists_maximal_wrt f set.univ (set.finite.of_fintype _) ⟨a₀, set.mem_univ a₀⟩,
refine ⟨a, λ x, _⟩,
rcases le_total (f x) (f a) with (h | h), exact h,
exact le_of_eq (ha x (set.mem_univ _) h).symm, -/
--end
lemma exists_min (f : α → β) :
∃ x, ∀ y, f x ≤ f y :=
let f' : _ → (order_dual β) := f in exists_max f'
/-- maximum value of f -/
def max_val (f : α → β) : β :=
f (classical.some (exists_max f))
/-- minimum value of f -/
def min_val (f : α → β) : β :=
f (classical.some (exists_min f))
lemma max_spec (f : α → β) :
∃ x, f x = max_val f ∧ ∀ y, f y ≤ f x :=
⟨classical.some (exists_max f), ⟨rfl, classical.some_spec (exists_max f) ⟩⟩
lemma min_spec (f : α → β) :
∃ x, f x = min_val f ∧ ∀ y, f x ≤ f y :=
⟨classical.some (exists_min f), ⟨rfl, classical.some_spec (exists_min f) ⟩⟩
lemma max_is_ub (f : α → β) (x : α) :
f x ≤ max_val f :=
by {cases max_spec f with y hy, rw ←hy.1, apply hy.2}
lemma min_is_lb (f : α → β) (x : α) :
min_val f ≤ f x :=
by {cases min_spec f with y hy, rw ←hy.1, apply hy.2}
lemma max_of_le_is_le (f f' : α → β) (hff' : ∀ a, f a ≤ f' a) :
max_val f ≤ max_val f' :=
begin
cases max_spec f with a ha,
cases max_spec f' with a' ha',
rw [←ha.1, ←ha'.1],
from le_trans (hff' _) (ha'.2 _),
end
lemma min_of_le_is_le (f f' : α → β) (hff' : ∀ a, f a ≤ f' a) :
min_val f ≤ min_val f' :=
begin
cases min_spec f with a ha,
cases min_spec f' with a' ha',
rw [←ha.1, ←ha'.1],
from le_trans (ha.2 _) (hff' a'),
end
/-- taking a max over one type is equivalent to taking one over another,
given a surjection between them -/
lemma max_reindex (φ : α → α') (hφ : function.surjective φ) (f : α' → β) :
max_val (f ∘ φ) = @max_val _ _ (fintype.of_surjective φ hφ) (nonempty.map φ _inst_2) _ f :=
begin
rcases @max_spec _ _ (fintype.of_surjective φ hφ ) (nonempty.map φ _inst_2) _ f
with ⟨x', ⟨hx'1, hx'2⟩⟩,
rcases max_spec (f ∘ φ) with ⟨x, ⟨hx1, hx2⟩ ⟩,
rw [←hx1, ←hx'1],
apply le_antisymm (hx'2 _),
cases hφ x' with z hz,
rw ←hz, apply hx2,
end
/-- taking a min over one type is equivalent to taking one over another,
given a surjection between them -/
lemma min_reindex (φ : α → α') (hφ : function.surjective φ) (f : α' → β) :
min_val (f ∘ φ) = @min_val _ _ (fintype.of_surjective φ hφ ) (nonempty.map φ _inst_2) _ f :=
begin
rcases @min_spec _ _ (fintype.of_surjective φ hφ ) (nonempty.map φ _inst_2) _ f
with ⟨x', ⟨hx'1, hx'2⟩⟩,
rcases min_spec (f ∘ φ) with ⟨x, ⟨hx1, hx2⟩ ⟩,
rw [←hx1, ←hx'1],
refine le_antisymm _ (hx'2 _),
cases hφ x' with z hz,
rw ←hz, apply hx2,
end
/-- max commutes with composing by a monotone function -/
lemma max_compose_mono (f : α → β) (g : β → β') (hg : monotone g) :
g (max_val f) = max_val (g ∘ f) :=
begin
rcases max_spec f with ⟨X, hX₁, hX₂⟩,
rcases max_spec (g ∘ f) with ⟨X',hX'₁, hX'₂ ⟩,
erw [←hX'₁ , ←hX₁],
from le_antisymm (hX'₂ _) (hg (hX₂ _)),
end
/-- min commutes with composing by a monotone function-/
lemma min_compose_mono (f : α → β) (g : β → β') (hg : monotone g) :
g (min_val f) = min_val (g ∘ f) :=
begin
rcases min_spec f with ⟨X, hX₁, hX₂⟩,
rcases min_spec (g ∘ f) with ⟨X',hX'₁, hX'₂⟩,
rw [←hX'₁, ←hX₁],
from le_antisymm (hg (hX₂ _)) (hX'₂ _),
end
/-- the max is at most a given upper bound for f -/
lemma max_le_ub {f : α → β} {b : β} (h_ub : ∀ x : α, f x ≤ b) :
max_val f ≤ b :=
by {cases max_spec f with X hX, rw [←hX.1], apply h_ub}
/-- a given lower bound for f is at most the max of f-/
lemma lb_le_max {f : α → β} (b : β) (h_lb : ∀ x : α, b ≤ f x) :
b ≤ max_val f :=
by {cases max_spec f with X hX, rw [←hX.1], apply h_lb, }
/-- the min of f is at most a given upper bound for f -/
lemma min_le_ub {f : α → β} {b : β} (h_ub : ∀ x : α, f x ≤ b) :
min_val f ≤ b :=
by {cases min_spec f with X hX, rw [←hX.1], apply h_ub}
/-- a given lower bound for f is at most the min of f -/
lemma lb_le_min {f : α → β} {b : β} (h_lb : ∀ x : α, b ≤ f x) :
b ≤ min_val f :=
by {cases min_spec f with X hX, rw [←hX.1], apply h_lb}
/-- an upper bound that is attained by f must be the max -/
lemma attained_ub_is_max (f : α → β) (a : α) :
(∀ x, f x ≤ f a) → max_val f = f a :=
begin
intros h_ub,
rcases max_spec f with ⟨X, hX⟩,
apply le_antisymm (max_le_ub h_ub),
rw [←hX.1], apply hX.2,
end
lemma attained_ub_is_max' (f : α → β) (a : α) (b : β) (hba : b ≤ f a) (hub : ∀ x, f x ≤ b):
max_val f = b :=
by {rw ← le_antisymm (hub a) hba, exact attained_ub_is_max _ _ (λ x, le_trans (hub x) hba)}
/-- a lower bound attained by f must be the min -/
lemma attained_lb_is_min (f : α → β) (a : α) :
(∀ x, f a ≤ f x) → min_val f = f a :=
begin
intros h_lb,
rcases min_spec f with ⟨X, hX⟩,
refine le_antisymm _ (lb_le_min h_lb),
rw [←hX.1], apply hX.2,
end
lemma attained_lb_is_min' (f : α → β) (a : α) (b : β) (hab : f a ≤ b) (hlb : ∀ x, b ≤ f x) :
min_val f = b :=
by {rw ← le_antisymm hab (hlb a), exact attained_lb_is_min _ _ (λ x, le_trans hab (hlb x))}
/-- the max of a constant function -/
lemma max_const (b : β) :
max_val (λ (x : α), b) = b :=
by {rcases max_spec (λ (x : α), b) with ⟨x, hx⟩, rw ←hx.1 }
/-- the min of a constant function -/
lemma min_const (b : β) :
min_val (λ (x : α), b) = b :=
by {rcases min_spec (λ (x : α), b) with ⟨x, hx⟩, rw ←hx.1 }
/-- given a bound f(x) ≤ f'(x') for all x,x', a pair a,a' for which f(a) = f'(a') determines
the max of f and the min of f' (which then coincide) -/
lemma minmax_eq_cert [nonempty α'] [fintype α'] (f : α → β) (f' : α' → β) :
(∃ a a', f a = f' a') → (∀ x x', f x ≤ f' x') → max_val f = min_val f' :=
begin
rintros ⟨a, a', heq⟩ hbound,
rcases max_spec f with ⟨x,hx⟩,
rcases min_spec f' with ⟨y,hy'⟩,
have hub := attained_ub_is_max' f a (f' a') heq.symm.le (λ x, hbound x a'),
have hlb := attained_lb_is_min' f' a' (f a) heq.symm.le (λ x', hbound a x'),
rw [hub, hlb, heq],
end
lemma max_compose_le_max [non_empt : nonempty α] [fintype α'] (φ : α → α') (f : α' → β) :
max_val (f ∘ φ) ≤ @max_val _ _ _ (nonempty.map φ non_empt) _ f :=
begin
rcases max_spec (f ∘ φ) with ⟨a, ⟨ha₁, ha₂⟩⟩,
rcases @max_spec _ _ _ (nonempty.map φ non_empt) _ f with ⟨a', ⟨ha'₁, ha'₂⟩⟩,
rw [←ha₁, ←ha'₁],
apply ha'₂,
end
lemma min_le_min_compose [non_empt : nonempty α] [fintype α'] (φ : α → α') (f : α' → β) :
@min_val _ _ _ (nonempty.map φ non_empt) _ f ≤ min_val (f ∘ φ) :=
begin
rcases min_spec (f ∘ φ) with ⟨a, ⟨ha₁, ha₂⟩⟩,
rcases @min_spec _ _ _ (nonempty.map φ non_empt) _ f with ⟨a', ⟨ha'₁, ha'₂⟩⟩,
rw [←ha₁, ←ha'₁],
apply ha'₂,
end
--instance prod_fin [fintype α']: nonempty (fintype (α × α')) :=
--by { apply_instance, }
/-- a bimonotone function of two maxima is a maximum over a product type -/
lemma max_zip [nonempty α'] [fintype α'] (f : α → β) (f' : α' → β) (g : β × β → β')
(g_mono : ∀ b₁ b₂ b₁' b₂', b₁ ≤ b₁' → b₂ ≤ b₂' → g ⟨b₁,b₂⟩ ≤ g ⟨b₁',b₂'⟩) :
g ⟨max_val f, max_val f'⟩ = max_val (λ a : α × α', g ⟨f a.1,f' a.2⟩) :=
let f_prod := (λ a : α × α', g ⟨f a.1,f' a.2⟩) in
begin
rcases max_spec f with ⟨a, ⟨ha₁, ha₂⟩⟩,
rcases max_spec f' with ⟨a', ⟨ha'₁, ha'₂⟩⟩,
rcases max_spec f_prod with ⟨⟨x,x'⟩,⟨hx₁, hx₂⟩⟩,
rw [←ha₁,←ha'₁,←hx₁],
apply le_antisymm,
from hx₂ ⟨a,a'⟩, apply g_mono, apply ha₂, apply ha'₂,
end
/-- a bimonotone function of two minima is a minimum over a product type -/
lemma min_zip [nonempty α'] [fintype α'] (f : α → β) (f' : α' → β) (g : β × β → β')
(g_mono : ∀ b₁ b₂ b₁' b₂', b₁ ≤ b₁' → b₂ ≤ b₂' → g ⟨b₁,b₂⟩ ≤ g ⟨b₁',b₂'⟩) :
g ⟨min_val f, min_val f'⟩ = min_val (λ a : α × α', g ⟨f a.1,f' a.2⟩) :=
let f_prod := (λ a : α × α', g ⟨f a.1,f' a.2⟩) in
begin
obtain ⟨a, ⟨ha₁, ha₂⟩⟩ := min_spec f,
rcases min_spec f' with ⟨a', ⟨ha'₁, ha'₂⟩⟩,
rcases min_spec f_prod with ⟨⟨x,x'⟩,⟨hx₁, hx₂⟩⟩,
rw [←ha₁,←ha'₁,←hx₁],
apply le_antisymm,
apply g_mono, apply ha₂, apply ha'₂, from hx₂ ⟨a,a'⟩,
end
end general_fintype
section adding -- lemmas with a little more structure (eg addition) on β
variables {α α' β : Type*} [nonempty α] [fintype α] [nonempty α'] [fintype α'] [linear_ordered_semiring β]
lemma max_add_commute (f : α → β) (s : β) :
(max_val f) + s = max_val (λ x, f x + s) :=
begin
set g : β → β := λ x, x + s with hg,
have hg_mono : monotone g :=
λ x y hxy, by {rw hg, dsimp only, apply add_le_add_right hxy},
have := max_compose_mono f g hg_mono,
congr',
end
lemma min_add_commute (f : α → β) (s : β) :
(min_val f) + s = min_val (λ x, f x + s) :=
begin
set g : β → β := λ x, x + s with hg,
have hg_mono : monotone g :=
λ x y hxy, by {rw hg, dsimp only, apply add_le_add_right hxy},
have := min_compose_mono f g hg_mono,
congr',
end
lemma sum_of_max (f : α → β) (f' : α' → β) :
max_val f + max_val f' = max_val (λ a : α × α', f a.1 + f' a.2) :=
max_zip f f' (λ (b : β × β), b.1+b.2) (λ _ _ _ _ h₁ h₂, add_le_add h₁ h₂)
lemma sum_of_min (f : α → β) (f' : α' → β) :
min_val f + min_val f' = min_val (λ a : α × α', f a.1 + f' a.2) :=
min_zip f f' (λ (b : β × β), b.1+b.2) (λ _ _ _ _ h₁ h₂, add_le_add h₁ h₂)
end adding
/- sums, intersections, unions etc over finite types, as opposed to sets/finsets.
emphasis is on being able to reindex -/
/-
variables {α β γ: Type*} [fintype α] (op: β → β → β)[is_commutative β op] [is_associative β op]
/-- returns a list of elements of pairs (a,f(a)) for a in α -/
def as_list (f : α → β) : list (α × β) :=
let s := (infer_instance : fintype α).elems.val in (s.map (λ a, (⟨a,f a⟩ : α × β))).to_list
def val_list (f : α → β) : list β :=
(as_list f).unzip.2
/-- folds the commutative, associative operation op over the elements of β indexed by α
starting at b -/
def fold (b : β) (f : α → β) : β :=
let fs := (infer_instance : fintype α).elems in fs.fold op b f
/-- fintype folding can be written as a list folding (for induction)-/
lemma fold_eq_fold_list (b : β) (f : α → β) :
fold op b f = (as_list f).foldr (λ p q, op p.2 q) b :=
begin
simp only [fold, as_list, finset.fold],
set ms := (multiset.map (λ (a : α), (a, f a)) (fintype.elems α).val),
have hl := ms.coe_to_list, rw ←hl,
have := multiset.coe_fold_r op b (val_list f), --b ms.to_list,
sorry,
end
def Sum [add_comm_monoid β] (f : α → β) : β :=
fold (+) (0 : β) f
def Prod [comm_monoid β] (f : α → β) : β :=
fold (*) (1 : β) f
def Union (f : α → set γ) : set γ :=
fold (∪) ∅ f
def Inter (f : α → set γ) : set γ :=
fold (∩) univ f
lemma Inter_eq_setInter (f : α → set γ) :
Inter f = set.Inter f :=
begin
rw [Inter, fold_eq_fold_list],
--induction as_list f,
dsimp, sorry,
end
-/
/-
def fin.induction {n : ℕ} {C : fin (n + 1) → Sort u_1} (h0 : C 0)
(hs : Π (i : fin n), C (⇑fin.cast_succ i) → C i.succ) (i : fin (n + 1)) :
C i
Define C i by induction on i : fin (n + 1) via induction on the underlying nat value. This function has two arguments: h0 handles the base case on C 0, and hs defines the inductive step using C i.cast_succ.
-/
-- def list.foldr {α : Type*} {β : Type*} (f : α → β → β) (b : β) :
--list α → β
--def union_over {n : ℕ} (Xs : fin n → set α) : set α :=
-- finset.fold (λ a b, a ∪ b) (∅ : set α) Xs (fin n)
|
module Depling.Replace
import Depling
import Depling.Incr
import Utils
import Data.Fin
import Data.Vect
mutual
total
dReplace' : DAST n -> DAST n -> DAST n -> DAST n
dReplace' rp ra (ʌ v) = ʌ v
dReplace' rp ra (λ at b) = λ (dReplace rp ra at) (dReplace (incr FZ rp) (incr FZ ra) b)
dReplace' rp ra (λT at rt) = λT (dReplace rp ra at) (dReplace (incr FZ rp) (incr FZ ra) rt)
dReplace' rp ra (f =!= a) = dReplace rp ra f =!= dReplace rp ra a
dReplace' rp ra 𝕋 = 𝕋
dReplace' rp ra (𝔽 at rt b) =
𝔽
(dReplace ( rp) ( ra) at)
(dReplace ( incr FZ $ rp) ( incr FZ $ ra) rt)
(dReplace (incr FZ $ incr FZ $ rp) (incr FZ $ incr FZ $ ra) b )
dReplace' rp ra (𝕌 n t) = 𝕌 n $ dReplace rp ra t
dReplace' rp ra (ℂ cn as) = assert_total $ ℂ cn $ map (dReplace rp ra) as
dReplace' rp ra (ℙ v cn t f) = ℙ (dReplace rp ra v) cn (dReplace (incr' rp) (incr' ra) t) (dReplace rp ra f)
dReplace' rp ra (𝔹 t) = 𝔹 $ dReplace rp ra t
total
export
dReplace : DAST n -> DAST n -> DAST n -> DAST n
dReplace rp ra a = if a == rp then ra else dReplace' rp ra a
|
lemma analytic_on_id [analytic_intros]: "id analytic_on S" |
## Author: Sergio García Prado
## Title: Exercises with Solutions 4
rm(list = ls())
Q <- matrix(c(-4/10, 4/10, 0, 0, 0,
1/2, -4/5, 3/10, 0, 0,
0, 3/4, -19/20, 2/10, 0,
0, 0, 1, -11/10, 1/10,
0 , 0, 0, 1, -1),
5, 5, byrow = TRUE)
A <- cbind(Q[, 1:(nrow(Q) - 1)], rep(1, nrow(Q)))
## Should be solved via detailed balance equations...
(stationary <- solve(A)[nrow(Q), ])
# 0.456537618699781 0.365230094959825 0.14609203798393 0.029218407596786 0.00292184075967864
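## Detailed-balance sketch for the same birth-death chain, as hinted above
## (names `ratios`, `stationary.db` are illustrative): pi_i * q(i,i+1) =
## pi_(i+1) * q(i+1,i), so the unnormalized weights are cumulative products
## of the ratios q(i,i+1) / q(i+1,i), normalized at the end.
ratios <- c(1, (4/10) / (1/2), (3/10) / (3/4), (2/10) / 1, (1/10) / 1)
unnorm <- cumprod(ratios)
(stationary.db <- unnorm / sum(unnorm))
# matches `stationary` computed above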
(machines.mean <- sum(4:0 * stationary))
# 3.24324324324324
(all.busy.ratio <- sum(stationary[3] / 2, stationary[4:5]))
# 0.10518626734843
|
<ompts:test>
      <ompts:testdescription>Test which checks the omp_get_wtime function. It compares the requested sleep duration with the elapsed time obtained by measuring the difference between omp_get_wtime before the sleep call and after it ends.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp_get_wtime</ompts:directive>
<ompts:testcode>
INTEGER FUNCTION <ompts:testcode:functionname>omp_wtime</ompts:testcode:functionname>()
IMPLICIT NONE
<ompts:orphan:vars>
DOUBLE PRECISION start
DOUBLE PRECISION endtime
COMMON start, endtime
include "omp_lib.h"
</ompts:orphan:vars>
INTEGER wait_time
DOUBLE PRECISION measured_time
INTEGER fileunit
wait_time=1
start = 0;
endtime = 0;
<ompts:orphan>
<ompts:check>
start=omp_get_wtime()
</ompts:check>
</ompts:orphan>
CALL sleep(wait_time)
<ompts:orphan>
<ompts:check>
endtime=omp_get_wtime()
</ompts:check>
</ompts:orphan>
measured_time=endtime-start
WRITE(1,*) "work took",measured_time,"sec. time."
IF(measured_time.GT.0.99*wait_time .AND.
& measured_time .LT. 1.01*wait_time) THEN
<testfunctionname></testfunctionname>=1
ELSE
<testfunctionname></testfunctionname>=0
END IF
END
</ompts:testcode>
</ompts:test>
|
[STATEMENT]
lemma Says_certificate_valid [simp]:
"[| Says A B (sign SK \<lbrace>lid, xid, cc, cm,
cert C EK onlyEnc (priSK RCA)\<rbrace>) \<in> set evs;
evs \<in> set_pur |]
==> EK = pubEK C"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>Says A B (sign SK \<lbrace>lid, xid, cc, cm, cert C EK onlyEnc (priSK RCA)\<rbrace>) \<in> set evs; evs \<in> set_pur\<rbrakk> \<Longrightarrow> EK = pubEK C
[PROOF STEP]
by (unfold sign_def, auto) |
# Copyright (c) 2018-2021, Carnegie Mellon University
# See LICENSE for details
# ==========================================================================
# ColDirectSum
# ==========================================================================
Class(ColDirectSum, BaseOverlap, rec(
#-----------------------------------------------------------------------
abbrevs := [
function(arg)
arg:=Flat(arg);
return [arg[1], arg{[2..Length(arg)]}];
end ],
#-----------------------------------------------------------------------
new := meth(self, overlap, spls)
return self._new(0, overlap, spls);
end,
#-----------------------------------------------------------------------
dims := meth(self)
local ovdim;
ovdim := self.ovDim(self.overlap,
List(self._children, t -> t.dimensions[1]));
return [ ovdim[3] - ovdim[2] + 1, # max - min + 1
Sum(self._children, t -> t.dimensions[2]) ];
end,
#-----------------------------------------------------------------------
toAMat := meth(self)
return
AMatSPL(
Sparse(
self._coloverlap(
List(self._children, t -> t.dimensions[1]),
self.overlap
)
)
) *
DirectSumAMat(List(self._children, AMatSPL));
end,
));
ColDirectSum._transpose_class := RowDirectSum;
RowDirectSum._transpose_class := ColDirectSum;
|
theory Lmap_lappend
imports Main "$HIPSTER_HOME/IsaHipster"
"Lappend"
"Lmap"
begin
setup Tactic_Data.set_coinduct_sledgehammer
setup Misc_Data.set_noisy
(* cohipster lmap lappend *)
(* Discovers and proves the following in ca. 80 seconds *)
lemma lemma_af [thy_expl]: "ltl (lappend z (lmap y z)) = lappend (ltl z) (lmap y z)"
by (coinduction arbitrary: y z rule: Llist.Llist.coinduct_strong)
(metis Lappend.lemma_a lappend.simps(4) lmap.ctr(1))
lemma lemma_ag [thy_expl]: "ltl (lappend (lmap y z) z) = lappend (lmap y (ltl z)) z"
by (coinduction arbitrary: y z rule: Llist.Llist.coinduct_strong)
(smt Llist.sel(2) Lmap.lemma_ab lappend.disc_iff(1) lappend.simps(4) lmap.disc_iff(2) lnull_def)
lemma lemma_ah [thy_expl]: "ltl (lappend (lmap y z) (ltl z)) = lappend (lmap y (ltl z)) (ltl z)"
by (coinduction arbitrary: y z rule: Llist.Llist.coinduct_strong)
(smt Llist.sel(2) Lmap.lemma_ab lappend.ctr(1) lappend.simps(4) lmap.disc_iff(2) lnull_def)
(* lmap distributes over lappend *)
lemma lemma_ai [thy_expl]: "lappend (lmap y z) (lmap y x2) = lmap y (lappend z x2)"
by (coinduction arbitrary: x2 y z rule: Llist.Llist.coinduct_strong)
(smt Llist.case_eq_if Lmap.lemma_ab lappend.disc_iff(1) lappend.simps(3) lappend.simps(4) lmap.disc_iff(2) lmap.sel(1))
end |
(*
* Copyright 2014, NICTA
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
* See "LICENSE_BSD2.txt" for details.
*
* @TAG(NICTA_BSD)
*)
theory longlong
imports "../CTranslation"
begin
install_C_file "longlong.c"
ML {* NameGeneration.return_var_name (Absyn.Signed Absyn.LongLong) *}
context longlong
begin
thm f_body_def
thm shifts1_body_def
thm shifts2_body_def
lemma "(ucast :: 16 word \<Rightarrow> 8 word) 32768 = 0"
apply simp
done
lemma "(scast :: 16 word \<Rightarrow> 8 word) 32768 = 0"
by simp
lemma "(scast :: 16 word \<Rightarrow> 8 word) 65535 = 255"
by simp
lemma "(ucast :: 16 word \<Rightarrow> 8 word) 65535 = 255"
by simp
lemma "(ucast :: 16 word \<Rightarrow> 8 word) 32767 = 255" by simp
lemma "(scast :: 16 word \<Rightarrow> 8 word) 32767 = 255" by simp
lemma "(scast :: 8 word \<Rightarrow> 16 word) 255 = 65535" by simp
lemma "(ucast :: 8 word \<Rightarrow> 16 word) 255 = 255" by simp
lemma g_result:
"\<Gamma> \<turnstile> \<lbrace> True \<rbrace> \<acute>ret__int :== CALL callg() \<lbrace> \<acute>ret__int = 0 \<rbrace>"
apply vcg
apply (simp add: max_word_def)
done
thm literals_body_def
lemma literals_result:
"\<Gamma> \<turnstile> \<lbrace> True \<rbrace> \<acute>ret__int :== CALL literals() \<lbrace> \<acute>ret__int = 31 \<rbrace>"
apply vcg
apply simp
done
end (* context *)
end (* theory *)
|
function x = circle01_to_circle ( alpha, r, c, n, x )
%*****************************************************************************80
%
%% CIRCLE01_TO_CIRCLE maps points from the unit circle to a general one.
%
% Discussion:
%
% To map data, defined in the unit circle, to a circle with radius R
% and center C, and to rotate the original data by an angle of ALPHA first,
%
% X = R * ( cos(alpha) -sin(alpha) ) * X + CX
% Y ( sin(alpha) cos(alpha) ) Y CY
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 21 May 2013
%
% Author:
%
% John Burkardt
%
% Parameters:
%
% Input, real ALPHA, the angle, in radians, by which the unit circle
% data should be rotated.
%
% Input, real R, the scale factor by which the unit circle data
% should be stretched.
%
% Input, real C(2,1), the translation to be applied.
%
% Input, integer N, the number of points to transform.
%
% Input/output, real X(2,N), the points to be transformed.
%
%
% Rotation:
%
% Don't write "cos ( alpha )" instead of "cos(alpha)" because Matlab's
% bebuggered automatic array creator thinks blanks separate matrix elements!
%
rot = [ cos(alpha), -sin(alpha); ...
sin(alpha), cos(alpha) ];
  x(1:2,1:n) = rot(1:2,1:2) * x(1:2,1:n);
%
% Dilation:
%
x(1:2,1:n) = r * x(1:2,1:n);
%
% Translation:
%
x(1:2,1:n) = x(1:2,1:n) + repmat ( c(1:2,1), 1, n );
return
end
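%
%  Possible usage sketch (hypothetical values, kept as a comment so the
%  file remains a single function):
%
%    n = 4;
%    t = 2.0 * pi * ( 0 : n - 1 ) / n;
%    x = [ cos(t); sin(t) ];
%    c = [ 1.0; 2.0 ];
%    x = circle01_to_circle ( pi / 6.0, 3.0, c, n, x );
%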
|
If $S$ is a convex bounded set with a point $a$ in its relative interior, then the relative frontier of $S$ is homotopy equivalent to the affine hull of $S$ with $a$ removed. |
(* Case study.
We look at the automaton that recognizes the words ending in "aab".
We start by defining this automaton using the LIFLF lab (TP).
 *)
(* include the LF lab (TP) here *)
(* Automaton that recognizes the words ending in "aab" *)
Definition gaab := [ ]. (* replace here *)
Definition Aaab := false. (* replace here *)
(* Write, as a comment, the regular grammar producing the language of the automaton *)
(* Source : X1
*)
(* We can define the predicate "being generated by this grammar" *)
(* Indeed: a rule "N -> c M" means that a word generated from N
may consist of a c followed by a word generated from M, so
for every word w, the word cw is generated from N if w is generated from M.
Put differently: "for every word w, if w is generated from M then cw
is generated from N". That is exactly what we set out to
write. *)
(* We therefore define an *inductive* predicate, parameterized by a word and
a state (seen as a non-terminal of the grammar), in which each
construction rule captures one grammar rule *)
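(* Illustrative sketch, for a different and purely hypothetical grammar
   S -> a S | b T, T -> epsilon (with S encoded as 1 and T as 2); this is
   not the answer to the exercise, only an example of how each grammar
   rule becomes one constructor:
   Inductive Pex : list Alphabet -> nat -> Prop :=
   | Pex_T_nil : Pex [] 2
   | Pex_S_a   : forall w, Pex w 1 -> Pex (a :: w) 1
   | Pex_S_b   : forall w, Pex w 2 -> Pex (b :: w) 1.
*)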
(* Define this inductive predicate Paab, of type list Alphabet -> nat -> Prop *)
Inductive Paab : list Alphabet -> nat -> Prop := False (* replace here *)
.
(* To show that a word is generated from a given state, it suffices
to apply (apply) the construction rules until reaching a base
case. *)
(* Show that the word abaaab is indeed generated from the non-terminal 1 *)
Lemma exemple : Paab [a;b;a;a;a;b] 1.
Proof.
Admitted. (* replace here *)
(* DOES THIS PREDICATE (DERIVED FROM THE GRAMMAR) REALLY CHARACTERIZE THE WORDS
RECOGNIZED BY THE AUTOMATON? *)
(* To show this, we will state an intermediate lemma *)
(* This lemma, call it PmimeA, states that for every word generated
from a state/non-terminal of the grammar, say q, reading
this word from q in the automaton reaches a state, say e, and this
state e is accepting. *)
(* Define the lemma PmimeA *)
Lemma PmimeA : False. (* replace here *)
Proof.
Admitted. (* replace here *)
(* To prove it, a new tactic will come in handy:
inversion.
- The tactic "inversion", applied to the name of an inductive hypothesis, enumerates the
*possible* cases of rules that could have produced it.
- Absurd cases are eliminated; in particular, if a hypothesis could
only have appeared through absurd cases, the goal is proved.
- We will use this tactic to place ourselves in the different
cases of the predicate. *)
(* State and prove the main theorem: every word generated from the non-terminal 1 is recognized by the automaton. *)
Theorem PA : False. (* replace here *)
Proof.
Admitted. (* replace here *)
|
module Shader ( sampleBRDF, probability, reflectance
, sampleVec
, sampleCol
, sampleVal
) where
import Types
import Numeric.Vector
import Numeric.Scalar (fromScalar, scalar)
import Control.Monad.State
import VecUtil (unpack3, unpack2)
import qualified SceneRandom as R
import VecUtil (vec3Of)
-- UTIL STUFF
fmod :: (RealFrac a) => a -> a -> a
fmod n d = n - (fromIntegral $ truncate $ n / d) * d
-- END UTIL STUFF
-- BRDF STUFF
sampleBRDF :: BRDF -> State SceneContext (Maybe Ray)
sampleBRDF BRDFEmpty = return Nothing
sampleBRDF (Emission _ _) = return Nothing
sampleBRDF (Diffuse _) = do
ctx <- get
let normal = rh_getNormal $ ss_getHit ctx
let pos = (rh_getPos (ss_getHit ctx)) + (normal * (vec3Of 0.01))
dir <- R.randVecHemisphere normal
return $ Just $ Ray pos dir
probability :: BRDF -> Vec3d -> State SceneContext Double
probability BRDFEmpty _ = return 0
probability (Emission _ _) _ = return 0.15915494309
probability (Diffuse _) _ = return 0.15915494309 -- 1 / (2 * PI)
reflectance :: BRDF -> State SceneContext Vec3d
reflectance BRDFEmpty = return $ vec3 0 0 0
reflectance (Emission col str) = do
col' <- sampleCol col
str' <- sampleVal str
return $ col' * (fromScalar (scalar str'))
reflectance (Diffuse col) = do
col' <- sampleCol col
return $ col'
reflectance (Glossy col roughness) = do
col' <- sampleCol col
return col'
-- END BRDF STUFF
-- VECTOR STUFF
sampleVec :: SVector -> State SceneContext Vec3d
sampleVec UV = do
ss <- get
let (u, v) = unpack2 $ rh_getTexCoord $ ss_getHit ss
return $ vec3 u v 0
sampleVec Position = do
ss <- get
return $ rh_getPos $ ss_getHit ss
sampleVec Normal = do
ss <- get
return $ rh_getNormal $ ss_getHit ss
sampleVec (VecConst x y z) = return $ vec3 x y z
sampleVec (VecMath op l r) = do
l' <- sampleVec l
r' <- sampleVec r
case op of
VAdd -> return $ l' + r'
VSub -> return $ l' - r'
VMul -> return $ l' * r'
VDiv -> return $ l' / r'
VMod -> let (x,y,z) = unpack3 l'; (i,j,k) = unpack3 r' in return $ vec3 (fmod x i) (fmod y j) (fmod z k)
VAbs -> return $ abs l'
-- VDot -> return $ l' · r'
-- VCross -> return $ l' × r'
sampleVec (CombineXYZ x y z) = do
x' <- sampleVal x
y' <- sampleVal y
z' <- sampleVal z
return $ vec3 x' y' z'
-- END VECTOR STUFF
-- COLOR STUFF
sampleCol :: SColor -> State SceneContext Vec3d
sampleCol ColVertex = do
ss <- get
return $ rh_getColor $ ss_getHit ss
sampleCol (ColVector v) = sampleVec v
sampleCol (ColConst r g b) = return $ vec3 r g b
sampleCol (CombineRGB vr vg vb) = do
r <- sampleVal vr
g <- sampleVal vg
b <- sampleVal vb
return $ vec3 r g b
-- END COLOR STUFF
-- VALUE STUFF
sampleVal :: SValue -> State SceneContext Double
sampleVal (ValConst x) = return x
sampleVal RayLength = do
ss <- get
return $ rh_getDistance $ ss_getHit ss
sampleVal (ValMath op l r) = do
l' <- sampleVal l
r' <- sampleVal r
case op of
Add -> return $ l' + r'
Sub -> return $ l' - r'
Mul -> return $ l' * r'
Div -> return $ l' / r'
Abs -> return $ abs l'
Mod -> return $ fmod l' r'
LessThan -> return $ if l' < r' then 1 else 0
GreaterThan -> return $ if l' > r' then 1 else 0
sampleVal (SeparateX v) = sampleVec v >>= (return . unpack3) >>= \(x,_,_) -> return x
sampleVal (SeparateY v) = sampleVec v >>= (return . unpack3) >>= \(_,y,_) -> return y
sampleVal (SeparateZ v) = sampleVec v >>= (return . unpack3) >>= \(_,_,z) -> return z
sampleVal (SeparateR v) = sampleCol v >>= (return . unpack3) >>= \(r,_,_) -> return r
sampleVal (SeparateG v) = sampleCol v >>= (return . unpack3) >>= \(_,g,_) -> return g
sampleVal (SeparateB v) = sampleCol v >>= (return . unpack3) >>= \(_,_,b) -> return b
-- END VALUE STUFF
|
/-
Copyright (c) 2022 Kevin Buzzard. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kevin Buzzard
-/
import field_theory.normal
import field_theory.is_alg_closed.algebraic_closure
/-!
# Normal closures
Let L/K be a finite field extension. The normal closure N/L of L/K is a finite extension
N/L such that N/K is normal and which is informally "the smallest extension with this property".
More formally we could say that if M/L is algebraic and M/K is normal then there exists
a morphism of K-algebras `N →ₐ[K] M`. Note that this morphism may not be unique, and
indeed `N` is only defined up to a non-unique isomorphism in general.
## Main Definitions
- `normal_closure K L` where `L` is a field extension of the field `K`, of finite degree.
-/
--universe u
section field_range
/- def alg_hom.field_range {F K L : Type*} [field F] [field K] [field L] [algebra F K]
[algebra F L] (φ : K →ₐ[F] L) : intermediate_field F L :=
{ ..φ.range,
..φ.to_ring_hom.field_range } -/
/-- Restrict the codomain of a alg_hom `f` to `f.range`.
This is the bundled version of `set.range_factorization`. -/
@[reducible] def alg_hom.field_range_restrict {F K L : Type*} [field F] [field K] [field L] [algebra F K]
[algebra F L] (φ : K →ₐ[F] L) : K →ₐ[F] φ.field_range :=
φ.cod_restrict φ.range φ.mem_range_self
end field_range
/- section inclusion
noncomputable lemma intermediate_field.inclusion {K L : Type*} [field K] [field L] [algebra K L]
{S T : intermediate_field K L} (h : S ≤ T) : (↥S →ₐ[K] ↥T) :=
{ to_fun := set.inclusion h,
map_one' := rfl,
map_add' := λ _ _, rfl,
map_mul' := λ _ _, rfl,
map_zero' := rfl,
commutes' := λ _, rfl }
end inclusion -/
variables (K L : Type*) [field K] [field L] [algebra K L]
noncomputable! def normal_closure : intermediate_field K (algebraic_closure L) :=
supr (λ φ : (L →ₐ[K] algebraic_closure L), φ.field_range)
namespace normal_closure
lemma le_closure : (is_scalar_tower.to_alg_hom K L (algebraic_closure L)).field_range ≤
normal_closure K L :=
le_supr _ _
noncomputable instance : algebra L (normal_closure K L) := ring_hom.to_algebra
((intermediate_field.inclusion (le_closure K L)).comp
((is_scalar_tower.to_alg_hom K L (algebraic_closure L)).field_range_restrict))
lemma is_normal (h : algebra.is_algebraic K L) : normal K (normal_closure K L) := sorry
lemma is_algebraic (h : algebra.is_algebraic K L) : algebra.is_algebraic K (normal_closure K L) :=
sorry
instance : is_scalar_tower K L (normal_closure K L) := sorry
lemma is_finite_dimensional [finite_dimensional K L] : finite_dimensional K (normal_closure K L) :=
sorry
end normal_closure |
/-
Copyright (c) 2020 Markus Himmel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Markus Himmel
-/
import category_theory.limits.shapes.binary_products
import category_theory.limits.shapes.equalizers
import category_theory.limits.shapes.pullbacks
/-!
# Constructing pullbacks from binary products and equalizers
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
If a category has binary products and equalizers, then it has pullbacks.
Also, if a category has binary coproducts and coequalizers, then it has pushouts.
-/
universes v u
open category_theory
namespace category_theory.limits
/-- If the product `X ⨯ Y` and the equalizer of `π₁ ≫ f` and `π₂ ≫ g` exist, then the
pullback of `f` and `g` exists: It is given by composing the equalizer with the projections. -/
lemma has_limit_cospan_of_has_limit_pair_of_has_limit_parallel_pair
{C : Type u} [𝒞 : category.{v} C] {X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z) [has_limit (pair X Y)]
[has_limit (parallel_pair (prod.fst ≫ f) (prod.snd ≫ g))] : has_limit (cospan f g) :=
let π₁ : X ⨯ Y ⟶ X := prod.fst, π₂ : X ⨯ Y ⟶ Y := prod.snd, e := equalizer.ι (π₁ ≫ f) (π₂ ≫ g) in
has_limit.mk
{ cone := pullback_cone.mk (e ≫ π₁) (e ≫ π₂) $ by simp only [category.assoc, equalizer.condition],
is_limit := pullback_cone.is_limit.mk _
(λ s, equalizer.lift (prod.lift (s.π.app walking_cospan.left)
(s.π.app walking_cospan.right)) $ by
rw [←category.assoc, limit.lift_π, ←category.assoc, limit.lift_π];
exact pullback_cone.condition _)
(by simp) (by simp) $ λ s m h₁ h₂, by { ext,
{ simpa using h₁ },
{ simpa using h₂ } } }
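/- Informal picture (in `Type`/`Set`): the pullback of `f : X ⟶ Z` and `g : Y ⟶ Z` is
`{(x, y) ∈ X × Y | f x = g y}`, which is exactly the equalizer of the two maps `prod.fst ≫ f` and
`prod.snd ≫ g` out of `X ⨯ Y`; the lemma above is the categorical form of this observation. -/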
section
local attribute [instance] has_limit_cospan_of_has_limit_pair_of_has_limit_parallel_pair
/-- If a category has all binary products and all equalizers, then it also has all pullbacks.
As usual, this is not an instance, since there may be a more direct way to construct
pullbacks. -/
lemma has_pullbacks_of_has_binary_products_of_has_equalizers
(C : Type u) [𝒞 : category.{v} C] [has_binary_products C] [has_equalizers C] :
has_pullbacks C :=
{ has_limit := λ F, has_limit_of_iso (diagram_iso_cospan F).symm }
end
/-- If the coproduct `Y ⨿ Z` and the coequalizer of `f ≫ ι₁` and `g ≫ ι₂` exist, then the
pushout of `f` and `g` exists: It is given by composing the inclusions with the coequalizer. -/
lemma has_colimit_span_of_has_colimit_pair_of_has_colimit_parallel_pair
{C : Type u} [𝒞 : category.{v} C] {X Y Z : C} (f : X ⟶ Y) (g : X ⟶ Z) [has_colimit (pair Y Z)]
[has_colimit (parallel_pair (f ≫ coprod.inl) (g ≫ coprod.inr))] : has_colimit (span f g) :=
let ι₁ : Y ⟶ Y ⨿ Z := coprod.inl, ι₂ : Z ⟶ Y ⨿ Z := coprod.inr,
c := coequalizer.π (f ≫ ι₁) (g ≫ ι₂) in
has_colimit.mk
{ cocone := pushout_cocone.mk (ι₁ ≫ c) (ι₂ ≫ c) $
by rw [←category.assoc, ←category.assoc, coequalizer.condition],
is_colimit := pushout_cocone.is_colimit.mk _
(λ s, coequalizer.desc (coprod.desc (s.ι.app walking_span.left)
(s.ι.app walking_span.right)) $ by
rw [category.assoc, colimit.ι_desc, category.assoc, colimit.ι_desc];
exact pushout_cocone.condition _)
(by simp) (by simp) $ λ s m h₁ h₂, by { ext,
{ simpa using h₁ },
{ simpa using h₂ } } }
section
local attribute [instance] has_colimit_span_of_has_colimit_pair_of_has_colimit_parallel_pair
/-- If a category has all binary coproducts and all coequalizers, then it also has all pushouts.
As usual, this is not an instance, since there may be a more direct way to construct
pushouts. -/
lemma has_pushouts_of_has_binary_coproducts_of_has_coequalizers
(C : Type u) [𝒞 : category.{v} C] [has_binary_coproducts C] [has_coequalizers C] :
has_pushouts C :=
has_pushouts_of_has_colimit_span C
end
end category_theory.limits
|
-- Not using any Floats so shouldn't need ieee754 installed.
-- .flags file contains
-- -c --ghc-flag="-hide-package ieee754"
module _ where
open import Agda.Builtin.IO
open import Agda.Builtin.Unit
postulate
return : {A : Set} → A → IO A
{-# COMPILE GHC return = \ _ -> return #-}
main : IO ⊤
main = return _
|
[GOAL]
α : Type u_1
𝕜 : Type u_2
inst✝¹ : IsROrC 𝕜
inst✝ : MeasurableSpace α
f : α → 𝕜
μ : MeasureTheory.Measure α
hre : Measurable fun x => ↑IsROrC.re (f x)
him : Measurable fun x => ↑IsROrC.im (f x)
⊢ Measurable f
[PROOFSTEP]
convert
Measurable.add (M := 𝕜) (IsROrC.measurable_ofReal.comp hre) ((IsROrC.measurable_ofReal.comp him).mul_const IsROrC.I)
[GOAL]
case h.e'_5.h
α : Type u_1
𝕜 : Type u_2
inst✝¹ : IsROrC 𝕜
inst✝ : MeasurableSpace α
f : α → 𝕜
μ : MeasureTheory.Measure α
hre : Measurable fun x => ↑IsROrC.re (f x)
him : Measurable fun x => ↑IsROrC.im (f x)
x✝ : α
⊢ f x✝ = (IsROrC.ofReal ∘ fun x => ↑IsROrC.re (f x)) x✝ + (IsROrC.ofReal ∘ fun x => ↑IsROrC.im (f x)) x✝ * IsROrC.I
[PROOFSTEP]
exact (IsROrC.re_add_im _).symm
[GOAL]
α : Type u_1
𝕜 : Type u_2
inst✝¹ : IsROrC 𝕜
inst✝ : MeasurableSpace α
f : α → 𝕜
μ : MeasureTheory.Measure α
hre : AEMeasurable fun x => ↑IsROrC.re (f x)
him : AEMeasurable fun x => ↑IsROrC.im (f x)
⊢ AEMeasurable f
[PROOFSTEP]
convert
AEMeasurable.add (M := 𝕜) (IsROrC.measurable_ofReal.comp_aemeasurable hre)
((IsROrC.measurable_ofReal.comp_aemeasurable him).mul_const IsROrC.I)
[GOAL]
case h.e'_5.h
α : Type u_1
𝕜 : Type u_2
inst✝¹ : IsROrC 𝕜
inst✝ : MeasurableSpace α
f : α → 𝕜
μ : MeasureTheory.Measure α
hre : AEMeasurable fun x => ↑IsROrC.re (f x)
him : AEMeasurable fun x => ↑IsROrC.im (f x)
x✝ : α
⊢ f x✝ = (IsROrC.ofReal ∘ fun x => ↑IsROrC.re (f x)) x✝ + (IsROrC.ofReal ∘ fun x => ↑IsROrC.im (f x)) x✝ * IsROrC.I
[PROOFSTEP]
exact (IsROrC.re_add_im _).symm
|
!RUN: %flang -g -S -emit-llvm %s -o - | FileCheck %s
!CHECK: !DILocalVariable(name: "arr", scope: {{![0-9]+}}, file: {{![0-9]+}}, type: [[TYPE:![0-9]+]])
!CHECK: [[TYPE]] = !DICompositeType(tag: DW_TAG_array_type, baseType: {{![0-9]+}}, size: 32, align: 32, elements: [[ELEM:![0-9]+]], dataLocation: {{![0-9]+}}, allocated: {{![0-9]+}})
!CHECK: [[ELEM]] = !{[[ELEM1:![0-9]+]], [[ELEM2:![0-9]+]]}
!CHECK: [[ELEM1]] = !DISubrange(lowerBound: !DIExpression(DW_OP_push_object_address, DW_OP_plus_uconst, 80, DW_OP_deref), upperBound: !DIExpression(DW_OP_push_object_address, DW_OP_plus_uconst, 120, DW_OP_deref), stride: !DIExpression(DW_OP_push_object_address, DW_OP_plus_uconst, 112, DW_OP_deref, DW_OP_push_object_address, DW_OP_plus_uconst, 24, DW_OP_deref, DW_OP_mul))
!CHECK: [[ELEM2]] = !DISubrange(lowerBound: !DIExpression(DW_OP_push_object_address, DW_OP_plus_uconst, 128, DW_OP_deref), upperBound: !DIExpression(DW_OP_push_object_address, DW_OP_plus_uconst, 168, DW_OP_deref), stride: !DIExpression(DW_OP_push_object_address, DW_OP_plus_uconst, 160, DW_OP_deref, DW_OP_push_object_address, DW_OP_plus_uconst, 24, DW_OP_deref, DW_OP_mul))
program main
integer(kind=4), allocatable :: arr(:, :)
allocate (arr(10,10))
arr(1,1) = 99
print *, arr(1,1)
end program main
|
constant f (x y : Nat) : Nat
constant g (x : Nat) : Nat
theorem ex1 (x : Nat) (h₁ : f x x = g x) (h₂ : g x = x) : f x (f x x) = x := by
simp
simp [*]
theorem ex2 (x : Nat) (h₁ : f x x = g x) (h₂ : g x = x) : f x (f x x) = x := by
simp [*]
axiom g_ax (x : Nat) : g x = 0
theorem ex3 (x y : Nat) (h₁ : f x x = g x) (h₂ : f x x < 5) : f x x + f x x = 0 := by
simp [*] at *
trace_state
have aux₁ : f x x = g x := h₁
have aux₂ : g x < 5 := h₂
simp [g_ax]
|
module Day9
export get_inputs, get_solution1, get_solution2
## Input getting
read_input(f_name) = parse.(Int, readlines(joinpath(@__DIR__, f_name)))
function get_inputs()
test_input1 = test_input2 = (read_input("test_input1.txt"), 5)
test_output1 = 127
test_output2 = 62
data = (read_input("input.txt"), 25)
return (; test_input1, test_output1, test_input2, test_output2, data)
end
## Solution functions
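"""
    is_valid(number, preamble)

Return `true` if `number` equals `preamble[i] + preamble[j]` for some indices `i ≤ j`
(so twice a single entry also counts), and `false` otherwise.
"""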
function is_valid(number, preamble)
for i in eachindex(preamble), j in i:length(preamble)
if preamble[i] + preamble[j] == number return true end
end
return false
end
# Part 1
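"""
    get_solution1((data, preamble_length))

Slide a window of `preamble_length` values over `data` and return the first value that is not the
sum of two entries of the window immediately preceding it.
"""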
function get_solution1((data, preamble_length))
for i in eachindex(data)
j = i + preamble_length
preamble = @view data[i:j-1]
val = data[j]
if !is_valid(val, preamble) return val end
end
return -1
end
# Part 2
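"""
    get_solution2((data, preamble_length))

Return the sum of the smallest and largest values (`sum(extrema(...))`) in the first contiguous
run of `data` that sums to the part-1 answer.
"""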
function get_solution2((data, preamble_length))
sol1 = get_solution1((data, preamble_length))
for i in eachindex(data)
val = 0
for j in i:length(data)
val += data[j]
if val==sol1
this_range = @view data[i:j]
return sum(extrema(this_range))
elseif val > sol1
break
end
end
end
return -1
end
end |
import Base: show
function show(io::IO, h::History)
if h.converged
printstyled(io, "Converged", color = :green)
else
printstyled(io, "Not converged", color = :red)
end
print(io, ": ", h.nconverged, " of ", h.nev, " eigenvalues in ",
h.mvproducts, " matrix-vector products")
end
function show(io::IO, mime::MIME{Symbol("text/plain")}, s::PartialSchur)
println(io, "PartialSchur decomposition (", eltype(s.Q) ,") of dimension ", size(s.Q, 2), "\neigenvalues:")
show(io, mime, s.eigenvalues)
end
|
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝² : Group α
inst✝¹ : Monoid β
inst✝ : MulAction β α
H : Subgroup α
x✝³ x✝² x✝¹ : α
x✝ : x✝²⁻¹ * x✝¹ ∈ H
⊢ (x✝³ • x✝²)⁻¹ * x✝³ • x✝¹ ∈ H
[PROOFSTEP]
rwa [smul_eq_mul, smul_eq_mul, mul_inv_rev, mul_assoc, inv_mul_cancel_left]
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝² : Group α
inst✝¹ : Monoid β
inst✝ : MulAction β α
H : Subgroup α
b : { x // x ∈ ↑opposite (normalizer H) }
c x✝¹ : α
x✝ : c⁻¹ * x✝¹ ∈ H
⊢ (b • c)⁻¹ * b • x✝¹ ∈ H
[PROOFSTEP]
rwa [smul_def, smul_def, smul_eq_mul_unop, smul_eq_mul_unop, mul_inv_rev, ← mul_assoc, mem_normalizer_iff'.mp b.prop,
mul_assoc, mul_inv_cancel_left]
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝² : Group α
inst✝¹ : Monoid β
inst✝ : MulAction β α
H : Subgroup α
hH : Normal H
x✝³ : αᵐᵒᵖ
x✝² x✝¹ : α
x✝ : x✝²⁻¹ * x✝¹ ∈ H
⊢ (x✝³ • x✝²)⁻¹ * x✝³ • x✝¹ ∈ H
[PROOFSTEP]
rwa [smul_eq_mul_unop, smul_eq_mul_unop, mul_inv_rev, mul_assoc, hH.mem_comm_iff, mul_assoc, mul_inv_cancel_right]
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝³ : Group α
inst✝² : Monoid β
inst✝¹ : MulAction β α
H : Subgroup α
inst✝ : QuotientAction β H
b : β
q : α ⧸ H
⊢ ↑(b • Quotient.out' q) = b • q
[PROOFSTEP]
rw [← Quotient.smul_mk, QuotientGroup.out_eq']
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝² : Group α
inst✝¹ : Monoid β
inst✝ : MulAction β α
H : Subgroup α
a : α
q : α ⧸ H
⊢ (Quotient.out' q)⁻¹ * a ^ minimalPeriod ((fun x x_1 => x • x_1) a) q * Quotient.out' q ∈ H
[PROOFSTEP]
rw [mul_assoc, ← QuotientGroup.eq', QuotientGroup.out_eq', ← smul_eq_mul, Quotient.mk_smul_out', eq_comm,
pow_smul_eq_iff_minimalPeriod_dvd]
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝² inst✝¹ : Group α
inst✝ : MulAction α β
x : β
g : α ⧸ stabilizer α x
g1 g2 : α
H : Setoid.r g1 g2
⊢ g1 • (g1⁻¹ * g2) • x = g2 • x
[PROOFSTEP]
rw [smul_smul, mul_inv_cancel_left]
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝² inst✝¹ : Group α
inst✝ : MulAction α β
x : β
y₁ y₂ : α ⧸ stabilizer α x
g₁ g₂ : α
H : g₁ • x = g₂ • x
⊢ Setoid.r g₁ g₂
[PROOFSTEP]
rw [leftRel_apply]
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝² inst✝¹ : Group α
inst✝ : MulAction α β
x : β
y₁ y₂ : α ⧸ stabilizer α x
g₁ g₂ : α
H : g₁ • x = g₂ • x
⊢ g₁⁻¹ * g₂ ∈ stabilizer α x
[PROOFSTEP]
show (g₁⁻¹ * g₂) • x = x
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝² inst✝¹ : Group α
inst✝ : MulAction α β
x : β
y₁ y₂ : α ⧸ stabilizer α x
g₁ g₂ : α
H : g₁ • x = g₂ • x
⊢ (g₁⁻¹ * g₂) • x = x
[PROOFSTEP]
rw [mul_smul, ← H, inv_smul_smul]
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝² inst✝¹ : Group α
inst✝ : MulAction α β
x✝ b : β
x y : α ⧸ stabilizer α b
hxy :
(fun g => { val := ofQuotientStabilizer α b g, property := (_ : ofQuotientStabilizer α b g ∈ orbit α b) }) x =
(fun g => { val := ofQuotientStabilizer α b g, property := (_ : ofQuotientStabilizer α b g ∈ orbit α b) }) y
⊢ ofQuotientStabilizer α b x = ofQuotientStabilizer α b y
[PROOFSTEP]
convert congr_arg Subtype.val hxy
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝⁵ inst✝⁴ : Group α
inst✝³ : MulAction α β
x b : β
inst✝² : Fintype α
inst✝¹ : Fintype ↑(orbit α b)
inst✝ : Fintype { x // x ∈ stabilizer α b }
⊢ Fintype.card ↑(orbit α b) * Fintype.card { x // x ∈ stabilizer α b } = Fintype.card α
[PROOFSTEP]
rw [← Fintype.card_prod, Fintype.card_congr (orbitProdStabilizerEquivGroup α b)]
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝³ inst✝² : Group α
inst✝¹ : MulAction α β
x : β
G : Type u_1
inst✝ : Group G
H : Subgroup G
⊢ stabilizer G ↑1 = H
[PROOFSTEP]
ext
[GOAL]
case h
α : Type u
β : Type v
γ : Type w
inst✝³ inst✝² : Group α
inst✝¹ : MulAction α β
x : β
G : Type u_1
inst✝ : Group G
H : Subgroup G
x✝ : G
⊢ x✝ ∈ stabilizer G ↑1 ↔ x✝ ∈ H
[PROOFSTEP]
simp [QuotientGroup.eq]
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝⁶ inst✝⁵ : Group α
inst✝⁴ : MulAction α β
x : β
inst✝³ : Fintype α
inst✝² : Fintype β
inst✝¹ : Fintype (Quotient (orbitRel α β))
inst✝ : (b : β) → Fintype { x // x ∈ stabilizer α b }
φ : Quotient (orbitRel α β) → β
hφ : LeftInverse Quotient.mk'' φ
⊢ Fintype.card β = ∑ ω : Quotient (orbitRel α β), Fintype.card α / Fintype.card { x // x ∈ stabilizer α (φ ω) }
[PROOFSTEP]
classical
have : ∀ ω : Ω, Fintype.card α / Fintype.card (stabilizer α (φ ω)) = Fintype.card (α ⧸ stabilizer α (φ ω)) :=
by
intro ω
rw [Fintype.card_congr (@Subgroup.groupEquivQuotientProdSubgroup α _ (stabilizer α <| φ ω)), Fintype.card_prod,
Nat.mul_div_cancel]
exact Fintype.card_pos_iff.mpr (by infer_instance)
simp_rw [this, ← Fintype.card_sigma, Fintype.card_congr (selfEquivSigmaOrbitsQuotientStabilizer' α β hφ)]
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝⁶ inst✝⁵ : Group α
inst✝⁴ : MulAction α β
x : β
inst✝³ : Fintype α
inst✝² : Fintype β
inst✝¹ : Fintype (Quotient (orbitRel α β))
inst✝ : (b : β) → Fintype { x // x ∈ stabilizer α b }
φ : Quotient (orbitRel α β) → β
hφ : LeftInverse Quotient.mk'' φ
⊢ Fintype.card β = ∑ ω : Quotient (orbitRel α β), Fintype.card α / Fintype.card { x // x ∈ stabilizer α (φ ω) }
[PROOFSTEP]
have : ∀ ω : Ω, Fintype.card α / Fintype.card (stabilizer α (φ ω)) = Fintype.card (α ⧸ stabilizer α (φ ω)) :=
by
intro ω
rw [Fintype.card_congr (@Subgroup.groupEquivQuotientProdSubgroup α _ (stabilizer α <| φ ω)), Fintype.card_prod,
Nat.mul_div_cancel]
exact Fintype.card_pos_iff.mpr (by infer_instance)
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝⁶ inst✝⁵ : Group α
inst✝⁴ : MulAction α β
x : β
inst✝³ : Fintype α
inst✝² : Fintype β
inst✝¹ : Fintype (Quotient (orbitRel α β))
inst✝ : (b : β) → Fintype { x // x ∈ stabilizer α b }
φ : Quotient (orbitRel α β) → β
hφ : LeftInverse Quotient.mk'' φ
⊢ ∀ (ω : Quotient (orbitRel α β)),
Fintype.card α / Fintype.card { x // x ∈ stabilizer α (φ ω) } = Fintype.card (α ⧸ stabilizer α (φ ω))
[PROOFSTEP]
intro ω
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝⁶ inst✝⁵ : Group α
inst✝⁴ : MulAction α β
x : β
inst✝³ : Fintype α
inst✝² : Fintype β
inst✝¹ : Fintype (Quotient (orbitRel α β))
inst✝ : (b : β) → Fintype { x // x ∈ stabilizer α b }
φ : Quotient (orbitRel α β) → β
hφ : LeftInverse Quotient.mk'' φ
ω : Quotient (orbitRel α β)
⊢ Fintype.card α / Fintype.card { x // x ∈ stabilizer α (φ ω) } = Fintype.card (α ⧸ stabilizer α (φ ω))
[PROOFSTEP]
rw [Fintype.card_congr (@Subgroup.groupEquivQuotientProdSubgroup α _ (stabilizer α <| φ ω)), Fintype.card_prod,
Nat.mul_div_cancel]
[GOAL]
case H
α : Type u
β : Type v
γ : Type w
inst✝⁶ inst✝⁵ : Group α
inst✝⁴ : MulAction α β
x : β
inst✝³ : Fintype α
inst✝² : Fintype β
inst✝¹ : Fintype (Quotient (orbitRel α β))
inst✝ : (b : β) → Fintype { x // x ∈ stabilizer α b }
φ : Quotient (orbitRel α β) → β
hφ : LeftInverse Quotient.mk'' φ
ω : Quotient (orbitRel α β)
⊢ 0 < Fintype.card { x // x ∈ stabilizer α (φ ω) }
[PROOFSTEP]
exact Fintype.card_pos_iff.mpr (by infer_instance)
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝⁶ inst✝⁵ : Group α
inst✝⁴ : MulAction α β
x : β
inst✝³ : Fintype α
inst✝² : Fintype β
inst✝¹ : Fintype (Quotient (orbitRel α β))
inst✝ : (b : β) → Fintype { x // x ∈ stabilizer α b }
φ : Quotient (orbitRel α β) → β
hφ : LeftInverse Quotient.mk'' φ
ω : Quotient (orbitRel α β)
⊢ Nonempty { x // x ∈ stabilizer α (φ ω) }
[PROOFSTEP]
infer_instance
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝⁶ inst✝⁵ : Group α
inst✝⁴ : MulAction α β
x : β
inst✝³ : Fintype α
inst✝² : Fintype β
inst✝¹ : Fintype (Quotient (orbitRel α β))
inst✝ : (b : β) → Fintype { x // x ∈ stabilizer α b }
φ : Quotient (orbitRel α β) → β
hφ : LeftInverse Quotient.mk'' φ
this :
∀ (ω : Quotient (orbitRel α β)),
Fintype.card α / Fintype.card { x // x ∈ stabilizer α (φ ω) } = Fintype.card (α ⧸ stabilizer α (φ ω))
⊢ Fintype.card β = ∑ ω : Quotient (orbitRel α β), Fintype.card α / Fintype.card { x // x ∈ stabilizer α (φ ω) }
[PROOFSTEP]
simp_rw [this, ← Fintype.card_sigma, Fintype.card_congr (selfEquivSigmaOrbitsQuotientStabilizer' α β hφ)]
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝⁵ inst✝⁴ : Group α
inst✝³ : MulAction α β
x : β
inst✝² : Fintype α
inst✝¹ : (a : α) → Fintype ↑(fixedBy α β a)
inst✝ : Fintype (Quotient (orbitRel α β))
⊢ ∑ a : α, Fintype.card ↑(fixedBy α β a) = Fintype.card (Quotient (orbitRel α β)) * Fintype.card α
[PROOFSTEP]
rw [← Fintype.card_prod, ← Fintype.card_sigma, Fintype.card_congr (sigmaFixedByEquivOrbitsProdGroup α β)]
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝³ inst✝² : Group α
inst✝¹ : MulAction α β
x : β
G : Type ?u.117012
inst✝ : Group G
H : Subgroup G
⊢ ∀ (x y : G ⧸ H), ∃ g, g • x = y
[PROOFSTEP]
{ rintro ⟨x⟩ ⟨y⟩
refine' ⟨y * x⁻¹, QuotientGroup.eq.mpr _⟩
simp only [smul_eq_mul, H.one_mem, mul_left_inv, inv_mul_cancel_right]
}
[GOAL]
α : Type u
β : Type v
γ : Type w
inst✝³ inst✝² : Group α
inst✝¹ : MulAction α β
x : β
G : Type ?u.117012
inst✝ : Group G
H : Subgroup G
⊢ ∀ (x y : G ⧸ H), ∃ g, g • x = y
[PROOFSTEP]
rintro ⟨x⟩ ⟨y⟩
[GOAL]
case mk.mk
α : Type u
β : Type v
γ : Type w
inst✝³ inst✝² : Group α
inst✝¹ : MulAction α β
x✝¹ : β
G : Type ?u.117012
inst✝ : Group G
H : Subgroup G
x✝ : G ⧸ H
x : G
y✝ : G ⧸ H
y : G
⊢ ∃ g, g • Quot.mk Setoid.r x = Quot.mk Setoid.r y
[PROOFSTEP]
refine' ⟨y * x⁻¹, QuotientGroup.eq.mpr _⟩
[GOAL]
case mk.mk
α : Type u
β : Type v
γ : Type w
inst✝³ inst✝² : Group α
inst✝¹ : MulAction α β
x✝¹ : β
G : Type ?u.117012
inst✝ : Group G
H : Subgroup G
x✝ : G ⧸ H
x : G
y✝ : G ⧸ H
y : G
⊢ ((fun x x_1 => x • x_1) (y * x⁻¹) x)⁻¹ * y ∈ H
[PROOFSTEP]
simp only [smul_eq_mul, H.one_mem, mul_left_inv, inv_mul_cancel_right]
[GOAL]
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝³ : Group G
inst✝² : Fintype G
g : G
inst✝¹ : Fintype ↑(carrier (ConjClasses.mk g))
inst✝ : Fintype { x // x ∈ MulAction.stabilizer (ConjAct G) g }
⊢ Fintype.card ↑(carrier (ConjClasses.mk g)) =
Fintype.card G / Fintype.card { x // x ∈ MulAction.stabilizer (ConjAct G) g }
[PROOFSTEP]
classical
rw [Fintype.card_congr <| ConjAct.toConjAct (G := G) |>.toEquiv]
rw [← MulAction.card_orbit_mul_card_stabilizer_eq_card_group (ConjAct G) g, Nat.mul_div_cancel]
simp_rw [ConjAct.orbit_eq_carrier_conjClasses]
exact Fintype.card_pos_iff.mpr inferInstance
[GOAL]
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝³ : Group G
inst✝² : Fintype G
g : G
inst✝¹ : Fintype ↑(carrier (ConjClasses.mk g))
inst✝ : Fintype { x // x ∈ MulAction.stabilizer (ConjAct G) g }
⊢ Fintype.card ↑(carrier (ConjClasses.mk g)) =
Fintype.card G / Fintype.card { x // x ∈ MulAction.stabilizer (ConjAct G) g }
[PROOFSTEP]
rw [Fintype.card_congr <| ConjAct.toConjAct (G := G) |>.toEquiv]
[GOAL]
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝³ : Group G
inst✝² : Fintype G
g : G
inst✝¹ : Fintype ↑(carrier (ConjClasses.mk g))
inst✝ : Fintype { x // x ∈ MulAction.stabilizer (ConjAct G) g }
⊢ Fintype.card ↑(carrier (ConjClasses.mk g)) =
Fintype.card (ConjAct G) / Fintype.card { x // x ∈ MulAction.stabilizer (ConjAct G) g }
[PROOFSTEP]
rw [← MulAction.card_orbit_mul_card_stabilizer_eq_card_group (ConjAct G) g, Nat.mul_div_cancel]
[GOAL]
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝³ : Group G
inst✝² : Fintype G
g : G
inst✝¹ : Fintype ↑(carrier (ConjClasses.mk g))
inst✝ : Fintype { x // x ∈ MulAction.stabilizer (ConjAct G) g }
⊢ Fintype.card ↑(carrier (ConjClasses.mk g)) = Fintype.card ↑(MulAction.orbit (ConjAct G) g)
case H
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝³ : Group G
inst✝² : Fintype G
g : G
inst✝¹ : Fintype ↑(carrier (ConjClasses.mk g))
inst✝ : Fintype { x // x ∈ MulAction.stabilizer (ConjAct G) g }
⊢ 0 < Fintype.card { x // x ∈ MulAction.stabilizer (ConjAct G) g }
[PROOFSTEP]
simp_rw [ConjAct.orbit_eq_carrier_conjClasses]
[GOAL]
case H
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝³ : Group G
inst✝² : Fintype G
g : G
inst✝¹ : Fintype ↑(carrier (ConjClasses.mk g))
inst✝ : Fintype { x // x ∈ MulAction.stabilizer (ConjAct G) g }
⊢ 0 < Fintype.card { x // x ∈ MulAction.stabilizer (ConjAct G) g }
[PROOFSTEP]
exact Fintype.card_pos_iff.mpr inferInstance
[GOAL]
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
H : Subgroup G
⊢ normalCore H = MonoidHom.ker (MulAction.toPermHom G (G ⧸ H))
[PROOFSTEP]
apply le_antisymm
[GOAL]
case a
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
H : Subgroup G
⊢ normalCore H ≤ MonoidHom.ker (MulAction.toPermHom G (G ⧸ H))
[PROOFSTEP]
intro g hg
[GOAL]
case a
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
H : Subgroup G
g : G
hg : g ∈ normalCore H
⊢ g ∈ MonoidHom.ker (MulAction.toPermHom G (G ⧸ H))
[PROOFSTEP]
apply Equiv.Perm.ext
[GOAL]
case a.H
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
H : Subgroup G
g : G
hg : g ∈ normalCore H
⊢ ∀ (x : G ⧸ H), ↑(↑(MulAction.toPermHom G (G ⧸ H)) g) x = ↑1 x
[PROOFSTEP]
refine' fun q ↦ QuotientGroup.induction_on q _
[GOAL]
case a.H
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
H : Subgroup G
g : G
hg : g ∈ normalCore H
q : G ⧸ H
⊢ ∀ (z : G), ↑(↑(MulAction.toPermHom G (G ⧸ H)) g) ↑z = ↑1 ↑z
[PROOFSTEP]
refine' fun g' => (MulAction.Quotient.smul_mk H g g').trans (QuotientGroup.eq.mpr _)
[GOAL]
case a.H
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
H : Subgroup G
g : G
hg : g ∈ normalCore H
q : G ⧸ H
g' : G
⊢ (g • g')⁻¹ * g' ∈ H
[PROOFSTEP]
rw [smul_eq_mul, mul_inv_rev, ← inv_inv g', inv_inv]
[GOAL]
case a.H
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
H : Subgroup G
g : G
hg : g ∈ normalCore H
q : G ⧸ H
g' : G
⊢ g'⁻¹ * g⁻¹ * g'⁻¹⁻¹ ∈ H
[PROOFSTEP]
exact H.normalCore.inv_mem hg g'⁻¹
[GOAL]
case a
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
H : Subgroup G
⊢ MonoidHom.ker (MulAction.toPermHom G (G ⧸ H)) ≤ normalCore H
[PROOFSTEP]
refine' (Subgroup.normal_le_normalCore.mpr fun g hg => _)
[GOAL]
case a
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
H : Subgroup G
g : G
hg : g ∈ MonoidHom.ker (MulAction.toPermHom G (G ⧸ H))
⊢ g ∈ H
[PROOFSTEP]
rw [← H.inv_mem_iff, ← mul_one g⁻¹, ← QuotientGroup.eq, ← mul_one g]
[GOAL]
case a
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
H : Subgroup G
g : G
hg : g ∈ MonoidHom.ker (MulAction.toPermHom G (G ⧸ H))
⊢ ↑(g * 1) = ↑1
[PROOFSTEP]
exact (MulAction.Quotient.smul_mk H g 1).symm.trans (Equiv.Perm.ext_iff.mp hg (1 : G))
[GOAL]
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
⊢ Nat.card { p // Commute p.fst p.snd } = Nat.card (ConjClasses G) * Nat.card G
[PROOFSTEP]
classical
rcases fintypeOrInfinite G; swap
· rw [mul_comm, Nat.card_eq_zero_of_infinite, Nat.card_eq_zero_of_infinite, zero_mul]
simp only [Nat.card_eq_fintype_card]
-- Porting note: Changed `calc` proof into a `rw` proof.
rw [card_congr (Equiv.subtypeProdEquivSigmaSubtype Commute), card_sigma,
sum_equiv ConjAct.toConjAct.toEquiv (fun a ↦ card { b // Commute a b })
(fun g ↦ card (MulAction.fixedBy (ConjAct G) G g)) fun g ↦
card_congr' <| congr_arg _ <| funext fun h ↦ mul_inv_eq_iff_eq_mul.symm.to_eq,
MulAction.sum_card_fixedBy_eq_card_orbits_mul_card_group]
congr 1; apply card_congr'; congr; ext; exact (Setoid.comm' _).trans isConj_iff.symm
[GOAL]
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
⊢ Nat.card { p // Commute p.fst p.snd } = Nat.card (ConjClasses G) * Nat.card G
[PROOFSTEP]
rcases fintypeOrInfinite G
[GOAL]
case inl
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
val✝ : Fintype G
⊢ Nat.card { p // Commute p.fst p.snd } = Nat.card (ConjClasses G) * Nat.card G
case inr
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
val✝ : Infinite G
⊢ Nat.card { p // Commute p.fst p.snd } = Nat.card (ConjClasses G) * Nat.card G
[PROOFSTEP]
swap
[GOAL]
case inr
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
val✝ : Infinite G
⊢ Nat.card { p // Commute p.fst p.snd } = Nat.card (ConjClasses G) * Nat.card G
[PROOFSTEP]
rw [mul_comm, Nat.card_eq_zero_of_infinite, Nat.card_eq_zero_of_infinite, zero_mul]
[GOAL]
case inl
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
val✝ : Fintype G
⊢ Nat.card { p // Commute p.fst p.snd } = Nat.card (ConjClasses G) * Nat.card G
[PROOFSTEP]
simp only [Nat.card_eq_fintype_card]
-- Porting note: Changed `calc` proof into a `rw` proof.
[GOAL]
case inl
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
val✝ : Fintype G
⊢ card { p // Commute p.fst p.snd } = card (ConjClasses G) * card G
[PROOFSTEP]
rw [card_congr (Equiv.subtypeProdEquivSigmaSubtype Commute), card_sigma,
sum_equiv ConjAct.toConjAct.toEquiv (fun a ↦ card { b // Commute a b })
(fun g ↦ card (MulAction.fixedBy (ConjAct G) G g)) fun g ↦
card_congr' <| congr_arg _ <| funext fun h ↦ mul_inv_eq_iff_eq_mul.symm.to_eq,
MulAction.sum_card_fixedBy_eq_card_orbits_mul_card_group]
[GOAL]
case inl
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
val✝ : Fintype G
⊢ card (Quotient (MulAction.orbitRel (ConjAct G) G)) * card (ConjAct G) = card (ConjClasses G) * card G
[PROOFSTEP]
congr 1
[GOAL]
case inl.e_a
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
val✝ : Fintype G
⊢ card (Quotient (MulAction.orbitRel (ConjAct G) G)) = card (ConjClasses G)
[PROOFSTEP]
apply card_congr'
[GOAL]
case inl.e_a.h
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
val✝ : Fintype G
⊢ Quotient (MulAction.orbitRel (ConjAct G) G) = ConjClasses G
[PROOFSTEP]
congr
[GOAL]
case inl.e_a.h.e_s
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
val✝ : Fintype G
⊢ MulAction.orbitRel (ConjAct G) G = IsConj.setoid G
[PROOFSTEP]
ext
[GOAL]
case inl.e_a.h.e_s.H
α : Type u
β : Type v
γ : Type w
G : Type u_1
inst✝ : Group G
val✝ : Fintype G
a✝ b✝ : G
⊢ Setoid.Rel (MulAction.orbitRel (ConjAct G) G) a✝ b✝ ↔ Setoid.Rel (IsConj.setoid G) a✝ b✝
[PROOFSTEP]
exact (Setoid.comm' _).trans isConj_iff.symm
|
import M4R.Algebra.Ring.Matsumura
namespace M4R
namespace Ideal
def chain (α : Type _) [Ring α] := Nat → Ideal α
namespace chain
variable [Ring α] (c : chain α)
protected theorem ext {c₁ c₂ : chain α} : c₁ = c₂ ↔ ∀ n, c₁ n = c₂ n :=
⟨fun h _ => h ▸ rfl, funext⟩
def ascending : Prop := ∀ n, c n ⊆ c n.succ
def descending : Prop := ∀ n, c n.succ ⊆ c n
def is_prime : Prop := ∀ n, (c n).is_prime
def base (I : Ideal α) : Prop := c 0 = I
theorem base_self : c.base (c 0) := rfl
def is_stable : Prop := ∃ N, ∀ n, N ≤ n → c n = c N
def strict_infinite : Prop := ∀ n, c n ≠ c n.succ
def strict_stable : Prop := ∃ N, (∀ n, n < N → c n ≠ c n.succ) ∧ (∀ n, N ≤ n → c n = c N)
noncomputable def stable_length (hc : c.strict_stable) : Nat := Classical.choose hc
theorem stable_length_spec (hc : c.strict_stable) : (∀ n, n < c.stable_length hc → c n ≠ c n.succ) ∧
∀ n, c.stable_length hc ≤ n → c n = c (c.stable_length hc) := Classical.choose_spec hc
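-- Illustrative sketch: if `c 0 ⊋ c 1 ⊋ c 2` and `c n = c 2` for all `n ≥ 2`, then `c` is
-- `strict_stable` with witness `N = 2`, and `stable_length_eq` below shows `c.stable_length _ = 2`.
-- A `strict_infinite` chain, by contrast, changes at every step and is never `strict_stable`.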
def shift (n : Nat) : chain α := fun k => c (n + k)
variable {c}
theorem stable_length_eq (N : Nat) (hN₁ : ∀ n, n < N → c n ≠ c n.succ) (hN₂ : ∀ n, N ≤ n → c n = c N) :
c.stable_length ⟨N, hN₁, hN₂⟩ = N :=
let hc : c.strict_stable := ⟨N, hN₁, hN₂⟩
Nat.le_antisymm (Nat.not_lt.mp (mt ((c.stable_length_spec hc).left N) (iff_not_not.mpr ((hN₂ N.succ (Nat.le_succ N)).symm))))
(Nat.not_lt.mp (mt (hN₁ (c.stable_length hc)) (iff_not_not.mpr (((c.stable_length_spec hc).right _ (Nat.le_succ _)).symm))))
theorem shift_ascending (n : Nat) (hc : c.ascending) : (c.shift n).ascending :=
fun m => (Nat.add_succ n m ▸ hc (n + m) : c (n + m) ⊆ c (n + m.succ))
theorem shift_descending (n : Nat) (hc : c.descending) : (c.shift n).descending :=
fun m => (Nat.add_succ n m ▸ hc (n + m) : c (n + m.succ) ⊆ c (n + m))
theorem shift_prime (n : Nat) (hc : c.is_prime) : (c.shift n).is_prime :=
fun m => hc (n + m)
theorem shift_base (c : chain α) (n : Nat) : (c.shift n).base (c n) := rfl
theorem shift_is_stable (n : Nat) (hc : c.is_stable) : (c.shift n).is_stable := by
let ⟨N, hN⟩ := hc
byCases h : n < N
{ exact ⟨N - n, fun m hm => (hN (n + m) (Nat.add_comm m n ▸ Nat.sub_le_iff_right.mp hm)).trans
(congrArg c (Nat.add_sub_of_le (Nat.le_of_lt h))).symm⟩ }
{ exact ⟨0, fun m hm => have h := Nat.not_lt.mp h
(hN (n + m) (Nat.le_trans h (Nat.le_add_right n m))).trans (hN n h).symm⟩ }
theorem shift_strict_infinite (n : Nat) (hc : c.strict_infinite) : (c.shift n).strict_infinite :=
fun m => hc (n + m)
theorem shift_strict_stable (n : Nat) (hc : c.strict_stable) : (c.shift n).strict_stable := by
let ⟨N, hN₁, hN₂⟩ := hc
byCases h : n < N
{ exact ⟨N - n, fun m hm => hN₁ (n + m) (Nat.add_comm m n ▸ (Nat.lt_sub_iff_right (Nat.le_of_lt h)).mp hm),
fun m hm => (hN₂ (n + m) (Nat.add_comm n m ▸ Nat.sub_le_iff_right.mp hm)).trans (congrArg c
(Nat.add_sub_of_le (Nat.le_of_lt h))).symm⟩ }
{ exact ⟨0, fun m hm => absurd hm (Nat.not_lt_zero m), fun m hm =>
have h := Nat.not_lt.mp h
(hN₂ (n + m) (Nat.le_trans h (Nat.le_add_right n m))).trans (hN₂ n h).symm⟩ }
theorem shift_stable_length (n : Nat) (hc : c.strict_stable) : (c.shift n).stable_length (shift_strict_stable n hc) =
c.stable_length hc - n := by
byCases h : n < c.stable_length hc
{ exact stable_length_eq _ (fun m hm => (c.stable_length_spec hc).left (n + m) (Nat.add_comm m n ▸
(Nat.lt_sub_iff_right (Nat.le_of_lt h)).mp hm)) fun m hm => by
have := (c.stable_length_spec hc).right _ (Nat.sub_le_iff_right.mp hm)
rw [Nat.add_comm, ←Nat.sub_add_cancel (Nat.le_of_lt h), Nat.add_comm _ n] at this
exact this }
{ exact Nat.sub_eq_zero_of_le (Nat.not_lt.mp h) ▸ stable_length_eq _ (fun m hm => absurd hm (Nat.not_lt_zero m))
fun m hm => have h := Nat.not_lt.mp h; ((c.stable_length_spec hc).right (n + m) (Nat.le_trans h
(Nat.le_add_right n m))).trans ((c.stable_length_spec hc).right n h).symm }
theorem subset_ascending (hc : c.ascending) {m n : Nat} (h : m ≤ n) : c m ⊆ c n :=
have : ∀ n, c m ⊆ c (m + n) := fun n => by
induction n with
| zero => exact Subset.refl _
| succ n ih => exact Nat.add_succ m n ▸ Subset.trans ih (hc (m + n))
let ⟨k, hk⟩ := Nat.le.dest h
hk ▸ this k
theorem subset_descending (hc : c.descending) {m n : Nat} (h : m ≤ n) : c n ⊆ c m :=
have : ∀ n, c (m + n) ⊆ c m := fun n => by
induction n with
| zero => exact Subset.refl _
| succ n ih => exact Nat.add_succ m n ▸ Subset.trans (hc (m + n)) ih
let ⟨k, hk⟩ := Nat.le.dest h
hk ▸ this k
theorem subset_base {I : Ideal α} (hc₁ : c.base I) (hc₂ : c.descending) (n : Nat) : c n ⊆ I :=
hc₁ ▸ subset_descending hc₂ (Nat.zero_le n)
theorem base_subset {I : Ideal α} (hc₁ : c.base I) (hc₂ : c.ascending) (n : Nat) : I ⊆ c n :=
hc₁ ▸ subset_ascending hc₂ (Nat.zero_le n)
theorem base_prime {I : Ideal α} (hc₁ : c.base I) (hc₂ : c.is_prime) : I.is_prime :=
hc₁ ▸ hc₂ 0
theorem subsetneq_succ_of_strict_infinite_ascending (hc₁ : c.strict_infinite) (hc₂ : c.ascending) (n : Nat) :
c n ⊊ c n.succ := Ideal.subsetneq.mpr ⟨hc₂ n, hc₁ n⟩
theorem subsetneq_lt_of_strict_infinite_ascending (hc₁ : c.strict_infinite) (hc₂ : c.ascending) {m n : Nat} (h : m < n) :
c m ⊊ c n := ProperSubset.trans_left (subsetneq_succ_of_strict_infinite_ascending hc₁ hc₂ m) (subset_ascending hc₂ h)
theorem subsetneq_succ_of_strict_infinite_descending (hc₁ : c.strict_infinite) (hc₂ : c.descending) (n : Nat) :
c n.succ ⊊ c n := Ideal.subsetneq.mpr ⟨hc₂ n, (hc₁ n).symm⟩
theorem subsetneq_lt_of_strict_infinite_descending (hc₁ : c.strict_infinite) (hc₂ : c.descending) {m n : Nat} (h : m < n) :
c n ⊊ c m := ProperSubset.trans_right (subset_descending hc₂ h) (subsetneq_succ_of_strict_infinite_descending hc₁ hc₂ m)
theorem is_stable_of_strict_stable : c.strict_stable → c.is_stable :=
fun ⟨N, hN⟩ => ⟨N, hN.right⟩
theorem length_zero_of_strict_stable_0_eq_1 (hc : c.strict_stable) (h01 : c 0 = c 1) : c.stable_length hc = 0 :=
Nat.le_zero.mp (Nat.not_lt.mp (mt ((c.stable_length_spec hc).left 0) (iff_not_not.mpr h01)))
def length_infinite (S : Set (chain α)) : Prop := (∃ c ∈ S, c.strict_infinite) ∨
∀ n, ∃ (c : chain α) (hc : c.strict_stable), c ∈ S ∧ n ≤ c.stable_length hc
def length_finite (S : Set (chain α)) : Prop :=
(∀ c ∈ S, ¬c.strict_infinite) ∧ ∃ (c : chain α) (hc : c.strict_stable), c ∈ S ∧
∀ (d : chain α) (hd : d.strict_stable), d ∈ S → d.stable_length hd ≤ c.stable_length hc
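-- In words: `length_infinite S` says that either some chain in `S` is strictly infinite, or `S`
-- contains strictly stable chains of arbitrarily large `stable_length`; `length_finite S` says that
-- no chain in `S` is strictly infinite and some strictly stable chain in `S` attains the maximal
-- `stable_length`.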
theorem length_finite_iff_not_infinite {S : Set (chain α)} (hS : ∃ c ∈ S, c.strict_stable) :
length_finite S ↔ ¬length_infinite S := by
simp only [chain.length_finite, chain.length_infinite, not_or_iff_and_not, not_exists, not_forall, not_and, Nat.not_le]
exact Iff.and (forall_congr' fun _ => Iff.rfl)
⟨fun ⟨c, hstab, _, hmax⟩ => ⟨(c.stable_length hstab).succ, fun d hd₁ hd₂ => Nat.lt_succ_of_le (hmax d hd₁ hd₂)⟩,
fun ⟨N, hN⟩ =>
let ⟨⟨c, hc⟩, hcS, hmax⟩ := maximal.max_exists {c : Subtype chain.strict_stable | c.val ∈ S}
(let ⟨c, hcS, hstab⟩ := hS; ⟨⟨c, hstab⟩, hcS⟩) (fun ⟨c, hc⟩ => c.stable_length hc)
⟨N, fun ⟨c, hc⟩ hcS => Nat.le_of_lt (hN c hc hcS)⟩
⟨c, hc, hcS, fun d hd => hmax ⟨d, hd⟩⟩⟩
open NCSemiring
theorem NonTrivial_of_length_infinite {S : Set (chain α)} (h : length_infinite S) : Ring.is_NonTrivial α :=
fun h10 =>
have : ∀ I J : Ideal α, I = J := fun I J =>
Ideal.ext.mp (Set.ext.mp fun _ => by simp only [all_trivial h10, I.has_zero, J.has_zero])
h.elim (fun ⟨c, hcS, hc⟩ => absurd (this _ _) (hc 0)) fun hc =>
let ⟨c, hc, hcS, hc'⟩ := hc 1; absurd (this _ _) ((c.stable_length_spec hc).left 0 hc')
theorem field_not_strict_infinite_of_descending (h : Ring.is_Field α) {c : chain α}
(hdesc : c.descending) : ¬c.strict_infinite := fun hc => by
have : ∀ n, c n = 1 := fun n => (Ring.ideal_0_or_1 h (c n)).resolve_left
fun h => hc n (in_zero_ideal (h ▸ hdesc n : c n.succ ⊆ 0) ▸ h)
apply hc 0; rw [this 0, this 1]
theorem field_strict_stable_length_le_1_of_descending (h : Ring.is_Field α) {c : chain α}
(hdesc : c.descending) (hc : c.strict_stable) : c.stable_length hc ≤ 1 :=
Nat.not_lt.mp (mt ((c.stable_length_spec hc).left 1) (iff_not_not.mpr ((Ring.ideal_0_or_1 h (c 0)).elim
(fun h₀₀ => have : ∀ n, c n = 0 := fun n => in_zero_ideal (h₀₀ ▸ c.subset_base c.base_self hdesc n)
(this 1).trans (this 2).symm)
fun h₀₁ => (Ring.ideal_0_or_1 h (c 1)).elim
(fun h₁₀ => h₁₀.trans (in_zero_ideal (h₁₀ ▸ hdesc 1)).symm)
fun h₁₁ => have := length_zero_of_strict_stable_0_eq_1 hc (h₀₁.trans h₁₁.symm)
h₁₁.trans (h₀₁.symm.trans (this ▸ ((c.stable_length_spec hc).right 2 (this ▸ Nat.zero_le 2)).symm)))))
theorem field_length_finite_of_descending (h : Ring.is_Field α) {S : Set (chain α)} (hS₁ : ∃ c ∈ S, c.strict_stable)
(hS₂ : S ⊆ {c | c.descending}) : length_finite S :=
(length_finite_iff_not_infinite hS₁).mpr fun hinf => hinf.elim
(fun ⟨c, hcS, hc⟩ => absurd hc (field_not_strict_infinite_of_descending h (hS₂ hcS)))
fun h' => by
let ⟨c, hc, hcS, hc2⟩ := h' 2
have := Nat.le_trans hc2 (field_strict_stable_length_le_1_of_descending h (hS₂ hcS) hc)
contradiction
open Classical
noncomputable def length_of_finite {S : Set (chain α)} (h : length_finite S) : Nat :=
(choose h.right).stable_length (choose (choose_spec h.right))
theorem length_spec_of_finite {S : Set (chain α)} (h : length_finite S) :
(∃ (c : chain α) (hc : c.strict_stable), c ∈ S ∧ c.stable_length hc = length_of_finite h) ∧
∀ (d : chain α) (hd : d.strict_stable), d ∈ S → d.stable_length hd ≤ length_of_finite h :=
have ⟨hc, hcS, h'⟩ := choose_spec h.right
⟨⟨choose h.right, hc, hcS, rfl⟩, h'⟩
theorem length_eq_of_finite {S : Set (chain α)} (h₁ : ∀ c ∈ S, ¬c.strict_infinite) {c : chain α} (hc : c.strict_stable) (hcS : c ∈ S)
(h₂ : ∀ (d : chain α) (hd : d.strict_stable), d ∈ S → d.stable_length hd ≤ c.stable_length hc) :
length_of_finite ⟨h₁, c, hc, hcS, h₂⟩ = c.stable_length hc :=
have h := choose_spec (choose_spec (⟨h₁, c, hc, hcS, h₂⟩ : length_finite S).right)
Nat.le_antisymm (h₂ _ _ h.left) (h.right c hc hcS)
def const_chain (I : Ideal α) : chain α := fun _ => I
theorem const_chain.ascending (I : Ideal α) : (const_chain I).ascending :=
fun _ => Subset.refl I
theorem const_chain.descending (I : Ideal α) : (const_chain I).descending :=
fun _ => Subset.refl I
theorem const_chain.is_prime {I : Ideal α} (hI : I.is_prime) : (const_chain I).is_prime :=
fun _ => hI
theorem const_chain.base (I : Ideal α) : (const_chain I).base I := rfl
theorem const_chain.strict_stable (I : Ideal α) : (const_chain I).strict_stable :=
⟨0, fun _ => (absurd · (Nat.not_lt_zero _)), fun _ _ => rfl⟩
theorem const_chain.is_stable (I : Ideal α) : (const_chain I).is_stable :=
is_stable_of_strict_stable (const_chain.strict_stable I)
theorem const_chain.length (I : Ideal α) : (const_chain I).stable_length (const_chain.strict_stable I) = 0 :=
byContradiction fun h => ((const_chain I).stable_length_spec
(const_chain.strict_stable I)).left 0 (Nat.zero_lt_iff_neq_zero.mpr h) rfl
theorem const_chain.of_length_zero {hc₁ : c.strict_stable} (hc₂ : c.stable_length hc₁ = 0) : c = const_chain (c 0) :=
chain.ext.mpr fun n => hc₂ ▸ (c.stable_length_spec hc₁).right n (hc₂ ▸ Nat.zero_le n)
theorem const_chain.of_0_eq_1 (hc : c.strict_stable) (h01 : c 0 = c 1) : c = const_chain (c 0) :=
of_length_zero (length_zero_of_strict_stable_0_eq_1 hc h01)
section contract_chain
variable [Ring α] [Ring β] {f : α →₊ β} (hf : Ideal.preserve_mul_right f)
def contract_chain (c : chain β) : chain α := fun n => contraction hf (c n)
theorem contract_chain.ascending {c : chain β} (hc : c.ascending) : (contract_chain hf c).ascending :=
fun n => contraction.subset hf (hc n)
theorem contract_chain.descending {c : chain β} (hc : c.descending) : (contract_chain hf c).descending :=
fun n => contraction.subset hf (hc n)
theorem contract_chain.is_prime {f : α →ᵣ₁ β} {c : chain β} (hc : c.is_prime) :
(contract_chain f.preserve_mul_right c).is_prime := fun n => contraction_prime f (hc n)
theorem contract_chain.is_stable {c : chain β} : c.is_stable → (contract_chain hf c).is_stable :=
fun ⟨N, hN⟩ => ⟨N, fun n hn => congrArg (contraction hf) (hN n hn)⟩
theorem contract_chain.base {c : chain β} {I : Ideal β} (hc : c.base I) :
(contract_chain hf c).base (contraction hf I) := congrArg (contraction hf) hc
variable (hfs : Function.surjective f.hom)
theorem contract_chain.is_stable_of_surjective {c : chain β} : (contract_chain hf c).is_stable → c.is_stable :=
fun ⟨N, hN⟩ => ⟨N, fun n hn => contraction_injective_of_surjective hf hfs (hN n hn)⟩
theorem contract_chain.strict_infinite_of_surjective {c : chain β} (hc : c.strict_infinite) :
(contract_chain hf c).strict_infinite :=
fun n h => hc n (contraction_injective_of_surjective hf hfs h)
theorem contract_chain.strict_stable_of_surjective {c : chain β} (hc : c.strict_stable) :
(contract_chain hf c).strict_stable :=
let ⟨N, hN₁, hN₂⟩ := hc
⟨N, fun n hn h => hN₁ n hn (contraction_injective_of_surjective hf hfs h),
fun n hn => congrArg (contraction hf) (hN₂ n hn)⟩
theorem contract_chain.stable_length_eq_of_surjective {c : chain β} (hc : c.strict_stable) :
c.stable_length hc = (contract_chain hf c).stable_length (contract_chain.strict_stable_of_surjective hf hfs hc) :=
have hc' := contract_chain.strict_stable_of_surjective hf hfs hc
Nat.le_antisymm (Nat.not_lt.mp (mt ((c.stable_length_spec hc).left ((contract_chain hf c).stable_length hc'))
(iff_not_not.mpr (contraction_injective_of_surjective hf hfs (((contract_chain hf c).stable_length_spec hc').right
_ (Nat.le_succ _)).symm))))
(Nat.not_lt.mp (mt (((contract_chain hf c).stable_length_spec hc').left (c.stable_length hc)) (iff_not_not.mpr
(congrArg (contraction hf) ((c.stable_length_spec hc).right _ (Nat.le_succ _)).symm))))
theorem contract_chain.length_infinite {S : Set (chain β)}
(hS : length_infinite S) : length_infinite (Function.image' (contract_chain hf) S) :=
Or.imp (fun ⟨c, hcS, hc⟩ => ⟨contract_chain hf c, ⟨c, hcS, rfl⟩,
contract_chain.strict_infinite_of_surjective hf hfs hc⟩)
(fun h n => let ⟨c, hc, hcS, hcn⟩ := h n
⟨contract_chain hf c, contract_chain.strict_stable_of_surjective hf hfs hc,
⟨c, hcS, rfl⟩, contract_chain.stable_length_eq_of_surjective hf hfs hc ▸ hcn⟩) hS
end contract_chain
section extend_chain
variable [Ring α] [Ring β] (f : α → β)
noncomputable def extend_chain (c : chain α) : chain β := fun n => extension f (c n)
theorem extend_chain.ascending {c : chain α} (hc : c.ascending) : (extend_chain f c).ascending :=
fun n => extension.subset f (hc n)
theorem extend_chain.descending {c : chain α} (hc : c.descending) : (extend_chain f c).descending :=
fun n => extension.subset f (hc n)
theorem extend_chain.base {c : chain α} {I : Ideal α} (hc : c.base I) :
(extend_chain f c).base (extension f I) := congrArg (extension f) hc
theorem extension_contraction_of_isomorphism (c : chain α) (f : α ≅ᵣ β) :
contract_chain f.preserve_mul_right (extend_chain f.hom c) = c :=
chain.ext.mpr fun n => extension_contraction_eq_of_isomorphism f (c n)
end extend_chain
noncomputable def strict_increasing_index (h : ∀ N, ∃ (n : Nat) (hn : N ≤ n), c n ≠ c N) : Nat → Nat
| 0 => 0
| n+1 => choose (h (strict_increasing_index h n))
theorem strict_increasing_index.idx_strict (h : ∀ N, ∃ (n : Nat) (hn : N ≤ n), c n ≠ c N) (n : Nat) :
strict_increasing_index h n < strict_increasing_index h n.succ :=
Nat.lt_of_le_and_ne (choose (choose_spec (h (strict_increasing_index h n))))
fun h' => choose_spec (choose_spec (h (strict_increasing_index h n))) (congrArg _ h'.symm)
theorem strict_increasing_index.strict (h : ∀ N, ∃ (n : Nat) (hn : N ≤ n), c n ≠ c N) (n : Nat) :
c (strict_increasing_index h n) ≠ c (strict_increasing_index h n.succ) :=
(choose_spec (choose_spec (h (strict_increasing_index h n)))).symm
noncomputable def strictified (h : ∀ N, ∃ (n : Nat) (hn : N ≤ n), c n ≠ c N) : chain α :=
fun n => match n with
| 0 => 1
| n+1 => c (strict_increasing_index h (if c 0 = 1 then n.succ else n))
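-- Sketch of the behaviour: `strictified h` starts at the unit ideal `1` and then runs through `c`
-- along the strictly increasing indices chosen by `strict_increasing_index h`; when `c 0 = 1` the
-- index is shifted by one so that the leading `1` is not listed twice. The next lemmas record that
-- the result is based at `1`, descending when `c` is, and strictly infinite.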
theorem strictified.base (h : ∀ N, ∃ (n : Nat) (hn : N ≤ n), c n ≠ c N) : (strictified h).base 1 := rfl
theorem strictified.base1 (h : ∀ N, ∃ (n : Nat) (hn : N ≤ n), c n ≠ c N) (h0 : c 0 = 1) :
∀ n, strictified h n = c (strict_increasing_index h n)
| 0 => base h ▸ h0.symm
| n+1 => by simp only [strictified, h0, ite_true]; rfl
theorem strictified.not_base1 (h : ∀ N, ∃ (n : Nat) (hn : N ≤ n), c n ≠ c N) (h0 : c 0 ≠ 1) (n : Nat) :
strictified h n.succ = c (strict_increasing_index h n) := by simp only [strictified, h0, ite_false]
theorem strictified.descending (h : ∀ N, ∃ (n : Nat) (hn : N ≤ n), c n ≠ c N) (hc : c.descending) : (strictified h).descending :=
fun n => by
byCases h0 : c 0 = 1
{ simp only [base1 h h0]; exact chain.subset_descending hc (Nat.le_of_lt (strict_increasing_index.idx_strict h n)) }
{ match n with
| 0 => exact in_unit_ideal _
| n+1 => simp only [not_base1 h h0]; exact chain.subset_descending hc (Nat.le_of_lt
(strict_increasing_index.idx_strict h n)) }
theorem strictified.strict_infinite (h : ∀ N, ∃ (n : Nat) (hn : N ≤ n), c n ≠ c N) : (strictified h).strict_infinite :=
fun n => by
byCases h0 : c 0 = 1
{ simp only [base1 h h0]; exact strict_increasing_index.strict h n }
{ simp only [not_base1 h h0]
match n with
| 0 => exact Ne.symm h0
| n+1 => simp only [not_base1 h h0]; exact strict_increasing_index.strict h n }
section strict_index
-- `next_term c m n` asserts that there is an index `k > n` whose ideal differs from `c n`, with
-- `k < m` (so we do not jump past `m`) and with `c l ≠ c m` for every `l ≤ k` (so we eventually
-- land on `m` itself rather than on an earlier occurrence of the same ideal; we assume the same
-- ideal never reappears with a distinct ideal in between).
abbrev next_term (c : chain α) (m n : Nat) : Prop := Nonempty ({k | n < k ∧ c n ≠ c k ∧ k < m ∧ ∀ l, l ≤ k → c l ≠ c m} : Set Nat)
theorem next_term_ext {m n N : Nat} (h : n < N ∧ c n ≠ c N ∧ N < m ∧ ∀ l, l ≤ N → c l ≠ c m) (hmin : ∀ k, n < k ∧ c n ≠ c k ∧
k < m ∧ (∀ l, l ≤ k → c l ≠ c m) → N ≤ k) : choose (minimal.min_exists _ (⟨N, h⟩ : next_term c m n) id) = N :=
let m := minimal.min_exists _ (⟨N, h⟩ : next_term c m n) id
Nat.le_antisymm ((choose_spec m).right N h) (hmin (choose m) (choose_spec m).left)
theorem not_next_term_m (c : chain α) (m : Nat) : ¬ next_term c m m := fun ⟨_, h₁, _, h₂, _⟩ => Nat.lt_not_symm ⟨h₁, h₂⟩
-- `strict_index c m n` is the index of the `n`ᵗʰ distinct ideal in the chain `c`, treating `c` as
-- stabilised at `m` (i.e. `c (m + k) = c m` for all `k`); once the distinct ideals before `m` are
-- exhausted it is defined to be `m`.
noncomputable def strict_index (c : chain α) (m : Nat) : Nat → Nat
| 0 => if c 0 = c m then m else 0
| n+1 => if h : next_term c m (strict_index c m n) then choose (minimal.min_exists _ h id) else m
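-- Informal worked example: take `m = 4` and a chain with `c 0 = c 1 = A`, `c 2 = B`,
-- `c 3 = c 4 = C`, where `A`, `B`, `C` are pairwise distinct ideals. Then `strict_index c 4 0 = 0`,
-- `strict_index c 4 1 = 2` (the least index past `0` carrying a new ideal that is not yet `c 4`),
-- and `strict_index c 4 n = 4` for all `n ≥ 2`; consequently `strict_index.length c 4 = 2` and the
-- chain `strict_stabilised c 4` defined below runs through `A, B, C` and then stabilises.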
theorem strict_index.def_zero_m {m : Nat} (h : c 0 = c m) : strict_index c m 0 = m := by
simp only [strict_index, h, ite_true]
theorem strict_index.def_zero_0 {m : Nat} (h : c 0 ≠ c m) : strict_index c m 0 = 0 := by
simp only [strict_index, h, ite_false]
theorem strict_index.zero_or_m (c : chain α) (m : Nat) : strict_index c m 0 = 0 ∨ strict_index c m 0 = m := by
byCases h : c 0 = c m
{ exact Or.inr (def_zero_m h) }
{ exact Or.inl (def_zero_0 h) }
theorem strict_index.c_zero (c : chain α) (m : Nat) : c (strict_index c m 0) = c 0 := by
byCases h : c 0 = c m
{ exact (congrArg (c ·) (def_zero_m h)).trans h.symm }
{ exact (congrArg (c ·) (def_zero_0 h)) }
theorem strict_index.def_exists {m n : Nat} (h : next_term c m (strict_index c m n)) :
strict_index c m n.succ = choose (minimal.min_exists _ h id) := by simp only [strict_index, h, dite_true]
theorem strict_index.neq_m_of_exists {m n : Nat} (h : next_term c m (strict_index c m n)) :
strict_index c m n.succ ≠ m :=
def_exists h ▸ Nat.ne_of_lt (choose_spec (minimal.min_exists _ h id)).left.right.right.left
theorem strict_index.def_not_exists {m n : Nat} (h : ¬next_term c m (strict_index c m n)) :
strict_index c m n.succ = m := by simp only [strict_index, h, dite_false]
theorem strict_index.spec (c : chain α) (m n : Nat) : strict_index c m n.succ = m ∨
(strict_index c m n < strict_index c m n.succ ∧ c (strict_index c m n) ≠ c (strict_index c m n.succ) ∧
strict_index c m n.succ < m ∧ (∀ l, l ≤ strict_index c m n.succ → c l ≠ c m) ∧
∀ k, strict_index c m n ≤ k → k < strict_index c m n.succ → c k = c (strict_index c m n)) := by
byCases h : next_term c m (strict_index c m n)
{ apply Or.inr
have hmin := minimal.min_exists _ h id
have ⟨⟨h₁, h₂, h₃, h₄⟩, h₅⟩ := choose_spec hmin;
rw [def_exists h]
exact ⟨h₁, h₂, h₃, h₄, fun k hk₁ hk₂ => (Nat.lt_or_eq_of_le hk₁).elim
(fun hk₁ => byContradiction fun h => have hkm := Nat.lt_trans hk₂ h₃; absurd (h₅ k ⟨hk₁, Ne.symm h, hkm, fun l hl =>
h₄ l (Nat.le_trans hl (Nat.le_of_lt hk₂))⟩) (Nat.not_le.mpr hk₂))
fun h => by rw [h]⟩ }
{ exact Or.inl (strict_index.def_not_exists h) }
theorem strict_index.const_after_m {m n : Nat} (h : strict_index c m n = m) : ∀ k, n ≤ k → strict_index c m k = m := fun k hk =>
have : ∀ k, strict_index c m (n + k) = m := fun k => by
induction k with
| zero => exact h
| succ k ih => exact def_not_exists fun ⟨_, h₁, _, h₂, _⟩ => absurd ih (Nat.ne_of_lt (Nat.lt_trans h₁ h₂))
let ⟨k, hk⟩ := Nat.le.dest hk
hk ▸ this k
theorem strict_index.le_m (c : chain α) (m : Nat) : (n : Nat) → strict_index c m n ≤ m
| 0 => (zero_or_m c m).elim (Eq.symm · ▸ Nat.zero_le m) (Eq.symm · ▸ Nat.le_refl m)
| n+1 => (spec c m n).elim Nat.le_of_eq (fun h => Nat.le_of_lt h.right.right.left)
theorem strict_index.increasing (c : chain α) (m n : Nat) : strict_index c m n ≤ strict_index c m n.succ :=
(spec c m n).elim (Eq.symm · ▸ le_m c m n) fun h => Nat.le_of_lt h.left
theorem strict_index.m_of_stable {m n : Nat} (h : strict_index c m n = strict_index c m n.succ) :
strict_index c m n = m := h ▸ (spec c m n).resolve_right fun h' => absurd h (Nat.ne_of_lt h'.left)
theorem strict_index.stable_of_m {m n : Nat} (h : strict_index c m n = m) : strict_index c m n = strict_index c m n.succ :=
h.trans (const_after_m h n.succ (Nat.le_succ _)).symm
theorem strict_index.stable_after {m n : Nat} (h : strict_index c m n = strict_index c m n.succ) :
∀ k, n ≤ k → strict_index c m k = m := const_after_m (m_of_stable h)
theorem strict_index.strict_before {m n : Nat} (h : strict_index c m n ≠ strict_index c m n.succ) :
∀ k, k ≤ n → strict_index c m k ≠ strict_index c m k.succ :=
fun k hk h' => absurd (stable_of_m (stable_after h' n hk)) h
theorem strict_index.strict_increasing {m n : Nat} (h : strict_index c m n ≠ strict_index c m n.succ) :
∀ k, k ≤ n.succ → k ≤ strict_index c m k := fun k hk => by
induction k with
| zero => exact Nat.zero_le _
| succ k ih => exact Nat.succ_le_of_lt (Nat.lt_of_le_of_lt (ih (Nat.le_trans (Nat.le_succ k) hk))
(Nat.lt_of_le_and_ne (increasing c m k) (strict_before h k (Nat.le_of_succ_le_succ hk))))
theorem strict_index.m_eq_m (c : chain α) (m : Nat) : strict_index c m m = m := by
byCases h : strict_index c m m = strict_index c m m.succ
{ exact m_of_stable h }
{ exact Nat.le_antisymm (le_m c m m) (strict_increasing h m (Nat.le_succ m)) }
theorem strict_index.strict_stable (c : chain α) (m : Nat) : ∃ N, (∀ n, n < N → strict_index c m n ≠ strict_index c m n.succ)
∧ ∀ n, N ≤ n → strict_index c m n = m :=
let ⟨N, hN, hNmax⟩ := minimal.min_exists {k | strict_index c m k = m} ⟨m, m_eq_m c m⟩ id
⟨N, fun n hn => match N with
| 0 => absurd hn (Nat.not_lt_zero n)
| N+1 => by
have : strict_index c m N ≠ strict_index c m N.succ := fun h =>
absurd (hNmax N (m_of_stable h)) (Nat.not_le.mpr (Nat.lt.base N))
exact strict_before this n (Nat.le_of_lt_succ hn), (const_after_m hN ·)⟩
noncomputable abbrev strict_index.length (c : chain α) (m : Nat) : Nat := choose (strict_stable c m)
theorem strict_index.length_spec (c : chain α) (m : Nat) : (∀ n, n < length c m → strict_index c m n ≠ strict_index c m n.succ)
∧ ∀ n, length c m ≤ n → strict_index c m n = m := choose_spec (strict_stable c m)
theorem strict_index.stable_length_le (c : chain α) (m : Nat) : length c m ≤ m :=
Nat.not_lt.mp (mt ((choose_spec (strict_stable c m)).left m) (iff_not_not.mpr (stable_of_m (m_eq_m c m))))
theorem strict_index.neq_m_lt_length {m n : Nat} (hn : n < length c m) : strict_index c m n ≠ m :=
fun h => absurd (stable_of_m h) ((length_spec c m).left n hn)
theorem strict_index.eq_m_ge_length {m n : Nat} (hn : length c m ≤ n) : strict_index c m n = m :=
(length_spec c m).right n hn
theorem strict_index.length_zero {m : Nat} : length c m = 0 ↔ c m = c 0 :=
⟨fun h => c_zero c m ▸ (congrArg (c ·) (eq_m_ge_length (h ▸ Nat.le_refl 0)).symm),
fun h => Nat.le_zero.mp (Nat.le_of_succ_le_succ (Nat.not_le.mp (mt neq_m_lt_length (iff_not_not.mpr (def_zero_m h.symm)))))⟩
abbrev monotone_chain (c : chain α) : Prop := ∀ i k, i ≤ k → c i = c k → ∀ j, i ≤ j → j ≤ k → c j = c i
theorem monotone_of_ascending (hc : c.ascending) : monotone_chain c :=
fun i k hik hcik j hij hjk => Ideal.antisymm (hcik ▸ subset_ascending hc hjk) (subset_ascending hc hij)
theorem monotone_of_descending (hc : c.descending) : monotone_chain c :=
fun i k hik hcik j hij hjk => Ideal.antisymm (subset_descending hc hij) (hcik ▸ subset_descending hc hjk)
theorem strict_index.next_term_m_succ {m n : Nat} (hmonotone : monotone_chain c) (h : strict_index c m n = strict_index c m.succ n)
(nt : next_term c m (strict_index c m n)) : ∃ nt' : next_term c m.succ (strict_index c m.succ n), choose (minimal.min_exists _ nt id) = choose (minimal.min_exists _ nt' id) :=
let h' := minimal.min_exists _ nt id
have : strict_index c m.succ n < choose h' ∧ c (strict_index c m.succ n) ≠ c (choose h') ∧ choose h' < m.succ ∧ ∀ l, l ≤ choose h' → c l ≠ c m.succ :=
⟨h ▸ (choose_spec h').left.left, h ▸ (choose_spec h').left.right.left, Nat.lt_trans (choose_spec h').left.right.right.left (Nat.lt.base m),
fun l hl h'' => absurd (hmonotone l m.succ (Nat.le_trans hl (Nat.le_of_lt (Nat.lt.step (choose_spec h').left.right.right.left))) h'' m
(Nat.le_trans hl (Nat.le_of_lt (choose_spec h').left.right.right.left)) (Nat.le_succ m)) ((choose_spec h').left.right.right.right l hl).symm⟩
⟨⟨choose h', this⟩, (next_term_ext this fun k hk => by
byCases hk' : k = m
{ exact hk'.symm ▸ Nat.le_of_lt (choose_spec h').left.right.right.left }
{ byCases hk'' : ∃ l, l ≤ k ∧ c l = c m;
{ let ⟨l, hl, h⟩ := hk''; exact Nat.le_trans (Nat.le_of_lt (Nat.not_le.mp (imp_not_comm.mp
((choose_spec h').left.right.right.right l) h))) hl }
{ simp only [not_exists, not_and] at hk''
exact (choose_spec h').right k ⟨h ▸ hk.left, h ▸ hk.right.left, Nat.lt_of_le_and_ne
(Nat.le_of_succ_le_succ hk.right.right.left) hk', hk''⟩} } ).symm⟩
theorem strict_index.lt_length_m_succ {m n : Nat} (hmonotone : monotone_chain c)
(hn : n < length c m) : strict_index c m n = strict_index c m.succ n := by
induction n with
| zero =>
have h0 := mt def_zero_m (neq_m_lt_length hn)
have h0' := def_zero_0 fun h' => absurd (hmonotone 0 m.succ (Nat.zero_le _) h' m (Nat.zero_le m) (Nat.le_succ m)).symm h0
exact (def_zero_0 h0).trans h0'.symm
| succ n ih =>
have : next_term c m (strict_index c m n) := of_not_not (mt def_not_exists (neq_m_lt_length hn))
let ⟨k, hk⟩ := next_term_m_succ hmonotone (ih (Nat.lt_trans (Nat.lt.base n) hn)) this
exact (def_exists this).trans (hk.trans (def_exists k).symm)
theorem strict_index.ge_length_of_m_eq_m_succ {m n : Nat} (hmonotone : monotone_chain c)
(hm : c m = c m.succ) (hn : length c m ≤ n) : strict_index c m.succ n = m.succ := by
induction n with
| zero => exact def_zero_m ((length_zero.mp (Nat.le_zero.mp hn)).symm.trans hm)
| succ n ih =>
exact (Nat.lt_or_eq_of_le hn).elim (fun hn => stable_after (stable_of_m (ih (Nat.le_of_succ_le_succ hn))) n.succ (Nat.le_succ n))
fun hn =>
have he := lt_length_m_succ hmonotone (hn ▸ Nat.lt.base n : n < length c m)
have : ¬next_term c m (strict_index c m n) := fun h => absurd (eq_m_ge_length (Nat.le_of_eq hn)) (neq_m_of_exists h)
have : ¬next_term c m.succ (strict_index c m.succ n) := fun ⟨k, h₁, h₂, h₃, h₄⟩ =>
absurd ⟨k, he ▸ h₁, he ▸ h₂, Nat.lt_of_le_and_ne (Nat.le_of_succ_le_succ h₃) fun h =>
absurd hm (h ▸ h₄ k (Nat.le_refl k)), fun l hl => hm ▸ h₄ l hl⟩ this
def_not_exists this
theorem strict_index.neq_succ_injective {m n : Nat} (h : strict_index c m n ≠ strict_index c m n.succ) : c (strict_index c m n) ≠ c (strict_index c m n.succ) := by
byCases h' : strict_index c m n.succ = m
{ match n with
| 0 =>
rw [h'] at h ⊢; have := mt def_zero_m h
rw [def_zero_0 this]; exact this
| n+1 => rw [h']; exact ((spec c m n).resolve_left (fun h' => h (stable_of_m h'))).right.right.right.left _ (Nat.le_refl _) }
{ exact ((spec c m n).resolve_left h').right.left }
theorem strict_index.eq_succ_injective {m n : Nat} (h : c (strict_index c m n) = c (strict_index c m n.succ)) : strict_index c m n = strict_index c m n.succ :=
of_not_not (mt neq_succ_injective (iff_not_not.mpr h))
theorem strict_index.m_injective {m n : Nat} (h : c (strict_index c m n) = c m) : strict_index c m n = m :=
match n with
| 0 => def_zero_m (c_zero c m ▸ h)
| n+1 => by
byCases h' : next_term c m (strict_index c m n)
{ have h'' := minimal.min_exists _ h' id;
exact absurd (def_exists h' ▸ h) ((choose_spec h'').left.right.right.right
(choose h'') (Nat.le_refl _)); }
{ exact def_not_exists h' }
theorem strict_index.eq_length_of_m_neq_m_succ {m : Nat} (hmonotone : monotone_chain c)
(hm : c m ≠ c m.succ) : c (strict_index c m.succ (length c m)) = c m := by
byCases hl : length c m = 0
{ exact hl ▸ c_zero c m.succ ▸ (length_zero.mp hl).symm }
{ let ⟨k, hk⟩ := Nat.ne_zero_dest hl;
rw [hk]
have hk' : k < length c m := hk ▸ Nat.lt.base k
have he := lt_length_m_succ hmonotone hk'
let ⟨n, hn, hnmin⟩ := minimal.min_exists {n | c n = c m} ⟨m, rfl⟩ id
have hm' := hnmin m rfl
have hn' : strict_index c m.succ k < n ∧ c (strict_index c m.succ k) ≠ c n ∧ n < m.succ ∧
∀ l, l ≤ n → c l ≠ c m.succ := ⟨he ▸ byContradiction fun h => absurd (m_injective (hn ▸
hmonotone n m hm' hn (strict_index c m k) (Nat.not_lt.mp h) (le_m c m k)))
(neq_m_lt_length hk'), fun h => absurd (m_injective (hn ▸ he ▸ h)) (neq_m_lt_length hk'),
Nat.succ_le_succ hm', fun l hl hl' => absurd ((hmonotone l m.succ (Nat.le_trans hl
(Nat.le_step hm')) hl' m (Nat.le_trans hl hm') (Nat.le_succ m)).trans hl') hm⟩;
rw [def_exists ⟨n, hn'⟩, next_term_ext hn' fun x ⟨h₁, h₂, h₃, h₄⟩ => byContradiction fun h =>
have h := Nat.not_le.mp h
have : next_term c m (strict_index c m k) := ⟨x, he ▸ h₁, he ▸ h₂,
Nat.lt_of_le_and_ne (Nat.le_of_succ_le_succ h₃) (fun h' => absurd hm' (Nat.not_le.mpr (h' ▸ h))),
fun l hl h' => absurd (Nat.lt_of_le_of_lt hl (Nat.lt_of_lt_of_le h (hnmin l h'))) (Nat.lt_irrefl l)⟩
absurd (eq_m_ge_length (Nat.le_refl (length c m))) (hk ▸ neq_m_of_exists this)]; exact hn }
theorem strict_index.length_succ_of_m_neq_m_succ {m : Nat} (hmonotone : monotone_chain c)
(hm : c m ≠ c m.succ) : strict_index c m.succ (length c m).succ = m.succ :=
have : ¬ next_term c m.succ (strict_index c m.succ (length c m)) := fun ⟨x, h₁, h₂, h₃, h₄⟩ => absurd (hmonotone (strict_index c m.succ
(length c m)) m (Nat.le_trans (Nat.le_of_lt h₁) (Nat.le_of_succ_le_succ h₃)) (eq_length_of_m_neq_m_succ hmonotone hm) x (Nat.le_of_lt h₁)
(Nat.le_of_succ_le_succ h₃)).symm h₂
def_not_exists this
theorem strict_index.succ_length_le (hmonotone : monotone_chain c) (m : Nat) : length c m ≤ length c m.succ :=
Nat.not_lt.mp fun h => by
have := le_m c m (length c m.succ)
rw [lt_length_m_succ hmonotone h, eq_m_ge_length (Nat.le_refl _)] at this
exact absurd this (Nat.not_le.mpr (Nat.lt.base m))
theorem strict_index.succ_length_succ {m : Nat} (hc : c m ≠ c m.succ) (hmonotone : monotone_chain c) :
length c m.succ = (length c m).succ :=
Nat.le_antisymm
(Nat.not_lt.mp (mt ((length_spec c m.succ).left (length c m).succ) (iff_not_not.mpr
(stable_of_m (length_succ_of_m_neq_m_succ hmonotone hc)))))
(Nat.succ_le_of_lt (Nat.not_le.mp (mt ((length_spec c m.succ).right (length c m)) fun h =>
absurd ((eq_length_of_m_neq_m_succ hmonotone hc).symm.trans (congrArg (c ·) h)) hc)))
theorem strict_index.succ_length_eq {m : Nat} (hc : c m = c m.succ) (hmonotone : monotone_chain c) :
length c m.succ = length c m :=
Nat.le_antisymm
(Nat.not_lt.mp (mt ((length_spec c m.succ).left (length c m)) (iff_not_not.mpr ((ge_length_of_m_eq_m_succ
hmonotone hc (Nat.le_refl _)).trans (ge_length_of_m_eq_m_succ hmonotone hc (Nat.le_succ _)).symm))))
(succ_length_le hmonotone m)
end strict_index
noncomputable def strict_stabilised (c : chain α) (m : Nat) : chain α := fun n => c (strict_index c m n)
theorem strict_stabilised.base (c : chain α) (m : Nat) : (strict_stabilised c m).base (c 0) := strict_index.c_zero c m
theorem strict_stabilised.descending (hc : c.descending) (m : Nat) : (strict_stabilised c m).descending :=
fun n => subset_descending hc (strict_index.increasing c m n)
theorem strict_stabilised.strict_stable (c : chain α) (m : Nat) : (strict_stabilised c m).strict_stable :=
let ⟨N, hN₁, hN₂⟩ := strict_index.strict_stable c m
⟨N, fun n hn => strict_index.neq_succ_injective (hN₁ n hn),
fun n hn => congrArg (c ·) ((hN₂ n hn).trans (hN₂ N (Nat.le_refl _)).symm)⟩
noncomputable abbrev strict_stabilised.length (c : chain α) (m : Nat) : Nat :=
(strict_stabilised c m).stable_length (strict_stable c m)
theorem strict_stabilised.length_eq_length (c : chain α) (m : Nat) : length c m = strict_index.length c m :=
Nat.le_antisymm (Nat.not_lt.mp (mt (((strict_stabilised c m).stable_length_spec (strict_stable c m)).left (strict_index.length c m))
(iff_not_not.mpr (congrArg (c ·) (((strict_index.length_spec c m).right (strict_index.length c m) (Nat.le_refl _)).trans
((strict_index.length_spec c m).right (strict_index.length c m).succ (Nat.le_succ _)).symm)))))
(Nat.not_lt.mp (mt ((strict_index.length_spec c m).left (length c m)) (iff_not_not.mpr (strict_index.eq_succ_injective
(((strict_stabilised c m).stable_length_spec (strict_stable c m)).right (length c m).succ (Nat.le_succ _)).symm))))
theorem strict_stabilised.length_le (c : chain α) (m : Nat) : length c m ≤ m :=
length_eq_length c m ▸ strict_index.stable_length_le c m
theorem strict_stabilised.succ_length_le (hmonotone : monotone_chain c) (m : Nat) :
length c m ≤ length c m.succ := length_eq_length c m ▸ length_eq_length c m.succ ▸ strict_index.succ_length_le hmonotone m
theorem strict_stabilised.succ_length_succ {m : Nat} (hmonotone : monotone_chain c) (hc : c m ≠ c m.succ) :
length c m.succ = (length c m).succ := length_eq_length c m ▸ length_eq_length c m.succ ▸ strict_index.succ_length_succ hc hmonotone
theorem strict_stabilised.succ_length_eq {m : Nat} (hmonotone : monotone_chain c) (hc : c m = c m.succ) :
length c m.succ = length c m := length_eq_length c m ▸ length_eq_length c m.succ ▸ strict_index.succ_length_eq hc hmonotone
def prefix_chain (I : Ideal α) (c : chain α) : chain α := fun n => match n with | 0 => I | n+1 => c n
namespace prefix_chain
theorem ascending {I : Ideal α} (hI : I ⊆ c 0) (hc : c.ascending) : (prefix_chain I c).ascending :=
fun n => match n with | 0 => hI | n+1 => hc n
theorem descending {I : Ideal α} (hI : c 0 ⊆ I) (hc : c.descending) : (prefix_chain I c).descending :=
fun n => match n with | 0 => hI | n+1 => hc n
theorem is_prime {I : Ideal α} (hI : I.is_prime) (hc : c.is_prime) : (prefix_chain I c).is_prime :=
fun n => match n with | 0 => hI | n+1 => hc n
theorem strict_stable {I : Ideal α} (hc : c.strict_stable) (hI : I ≠ c 0) : (prefix_chain I c).strict_stable :=
⟨(c.stable_length hc).succ, fun k hk => match k with | 0 => hI | k+1 => (c.stable_length_spec hc).left k (Nat.lt_of_succ_lt_succ hk),
fun k hk => match k with | k+1 => (c.stable_length_spec hc).right k (Nat.le_of_succ_le_succ hk)⟩
theorem stable_length {I : Ideal α} (hc : c.strict_stable) (hI : I ≠ c 0) :
(prefix_chain I c).stable_length (strict_stable hc hI) = (c.stable_length hc).succ :=
stable_length_eq _ (fun k hk => match k with | 0 => by exact hI | k+1 => (c.stable_length_spec hc).left k (Nat.lt_of_succ_lt_succ hk))
fun k hk => match k with | k+1 => (c.stable_length_spec hc).right k (Nat.le_of_succ_le_succ hk)
theorem strict_infinite {I : Ideal α} (hc : c.strict_infinite) (hI : I ≠ c 0) : (prefix_chain I c).strict_infinite :=
fun n => match n with | 0 => hI | n+1 => hc n
end prefix_chain
end chain
end Ideal
end M4R
|
function cx = jacobi_poly ( n, alpha, beta, x )
%*****************************************************************************80
%
%% JACOBI_POLY evaluates the Jacobi polynomials at X.
%
% Differential equation:
%
% (1-X*X) Y'' + (BETA-ALPHA-(ALPHA+BETA+2) X) Y' + N (N+ALPHA+BETA+1) Y = 0
%
% Recursion:
%
% P(0,ALPHA,BETA,X) = 1,
%
% P(1,ALPHA,BETA,X) = ( (2+ALPHA+BETA)*X + (ALPHA-BETA) ) / 2
%
% P(N,ALPHA,BETA,X) =
% (
% (2*N+ALPHA+BETA-1)
% * ((ALPHA**2-BETA**2)+(2*N+ALPHA+BETA)*(2*N+ALPHA+BETA-2)*X)
% * P(N-1,ALPHA,BETA,X)
% -2*(N-1+ALPHA)*(N-1+BETA)*(2*N+ALPHA+BETA) * P(N-2,ALPHA,BETA,X)
%      ) / ( 2*N*(N+ALPHA+BETA)*(2*N-2+ALPHA+BETA) )
%
% Restrictions:
%
% -1 < ALPHA
% -1 < BETA
%
% Norm:
%
% Integral ( -1 <= X <= 1 ) ( 1 - X )**ALPHA * ( 1 + X )**BETA
% * P(N,ALPHA,BETA,X)**2 dX
%    = 2**(ALPHA+BETA+1) / ( 2 * N + ALPHA + BETA + 1 )
%      * Gamma ( N + ALPHA + 1 ) * Gamma ( N + BETA + 1 )
%      / ( N! * Gamma ( N + ALPHA + BETA + 1 ) )
%
% Special values:
%
% P(N,ALPHA,BETA)(1) = (N+ALPHA)!/(N!*ALPHA!) for integer ALPHA.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 24 July 2004
%
% Author:
%
% John Burkardt
%
% Reference:
%
% Milton Abramowitz and Irene Stegun,
% Handbook of Mathematical Functions,
% US Department of Commerce, 1964.
%
% Parameters:
%
% Input, integer N, the highest order polynomial to compute. Note
% that polynomials 0 through N will be computed.
%
% Input, real ALPHA, one of the parameters defining the Jacobi
% polynomials, ALPHA must be greater than -1.
%
% Input, real BETA, the second parameter defining the Jacobi
% polynomials, BETA must be greater than -1.
%
% Input, real X, the point at which the polynomials are to be evaluated.
%
% Output, real CX(1:N+1), the values of the first N+1 Jacobi
% polynomials at the point X.
%
if ( alpha <= -1.0 )
fprintf ( 1, '\n' );
fprintf ( 1, 'JACOBI_POLY - Fatal error!\n' );
fprintf ( 1, ' Illegal input value of ALPHA = %f\n', alpha );
fprintf ( 1, ' But ALPHA must be greater than -1.\n' );
error ( 'JACOBI_POLY - Fatal error!' );
end
if ( beta <= -1.0 )
fprintf ( 1, '\n' );
fprintf ( 1, 'JACOBI_POLY - Fatal error!\n' );
fprintf ( 1, ' Illegal input value of BETA = %f\n', beta );
fprintf ( 1, ' But BETA must be greater than -1.\n' );
error ( 'JACOBI_POLY - Fatal error!' );
end
if ( n < 0 )
cx = [];
return
end
cx(1) = 1.0;
if ( n == 0 )
return
end
cx(2) = ( 1.0 + 0.5 * ( alpha + beta ) ) * x + 0.5 * ( alpha - beta );
for i = 2 : n
c1 = 2 * i * ( i + alpha + beta ) * ( 2 * i - 2 + alpha + beta );
c2 = ( 2 * i - 1 + alpha + beta ) * ( 2 * i + alpha + beta ) ...
* ( 2 * i - 2 + alpha + beta );
c3 = ( 2 * i - 1 + alpha + beta ) * ( alpha + beta ) * ( alpha - beta );
c4 = - 2 * ( i - 1 + alpha ) * ( i - 1 + beta ) * ( 2 * i + alpha + beta );
cx(i+1) = ( ( c3 + c2 * x ) * cx(i) + c4 * cx(i-1) ) / c1;
end
return
end
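%
%  Usage example (an editorial sketch, not part of the original code; it
%  assumes this file is saved as jacobi_poly.m on the MATLAB path).  With
%  ALPHA = BETA = 0 the Jacobi polynomials reduce to the Legendre
%  polynomials, which gives a quick sanity check of the recursion:
%
%    cx = jacobi_poly ( 2, 0.0, 0.0, 0.5 )
%
%  returns cx = [ 1.0000, 0.5000, -0.1250 ], matching P0(0.5) = 1,
%  P1(0.5) = 0.5 and P2(0.5) = ( 3 * 0.5^2 - 1 ) / 2 = -0.125.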
|
IHACKEDIT.COM Robux Tix Hack Tool is designed to assist you while playing Roblox very easily. It sets up resources for Robux and Tix, with a large amount available each day. We are sorry that we cannot provide an unlimited amount yet. However, don't worry: the available quantity is still more than enough to play Roblox without ordering any of the in-app purchase services that the developers offer.
This IHACKEDIT.COM Hack Tool was designed and tested by our own exclusive squad. It runs properly on Android phones and tablets (any operating system version), iPhone, iPad, iPad Mini and all other devices. As we described above, you do not need to modify your device's software (i.e. root or jailbreak it). It is built with an excellent, user-friendly interface that makes the IHACKEDIT.COM Hack Tool easy to use. Along with Anti Ban™ security, your Roblox profile will be as safe as when you play normally. The host runs 24/7, so don't worry about when you are going to use this IHACKEDIT.COM Hack Tool.
Stats: 340904 Robux and 340904 Tix generated for free today. | This awesome hack was created by our developers; it allows you to unlock or get in-app purchases in your game completely free. |
[STATEMENT]
lemma efficient_pell_power_correct [simp]:
"efficient_pell_power D z n = (pell_mul_nat D z ^^ n) (1, 0)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. efficient_pell_power D z n = (pell_mul_nat D z ^^ n) (1, 0)
[PROOF STEP]
unfolding efficient_pell_power_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. efficient_power (pell_mul_nat D) (1, 0) z n = (pell_mul_nat D z ^^ n) (1, 0)
[PROOF STEP]
by (intro efficient_power_correct) (auto simp: algebra_simps) |
Formal statement is: lemma norm_cos_sin [simp]: "norm (Complex (cos t) (sin t)) = 1" Informal statement is: $\|\cos(t) + i\sin(t)\| = 1$. |
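A quick check of the statement above (an editorial sketch, not part of the original row): by the Pythagorean identity,
$$\lVert \cos(t) + i\sin(t) \rVert = \sqrt{\cos^2(t) + \sin^2(t)} = \sqrt{1} = 1.$$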
[STATEMENT]
lemma not_prim_pow': assumes "\<not> primitive u" obtains k r where "r\<^sup>@Suc (Suc k) = u"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>r k. r \<^sup>@ Suc (Suc k) = u \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
proof (cases)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>\<And>r k. r \<^sup>@ Suc (Suc k) = u \<Longrightarrow> thesis; ?P2\<rbrakk> \<Longrightarrow> thesis
2. \<lbrakk>\<And>r k. r \<^sup>@ Suc (Suc k) = u \<Longrightarrow> thesis; \<not> ?P2\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
assume "u \<noteq> \<epsilon>"
[PROOF STATE]
proof (state)
this:
u \<noteq> \<epsilon>
goal (2 subgoals):
1. \<lbrakk>\<And>r k. r \<^sup>@ Suc (Suc k) = u \<Longrightarrow> thesis; ?P2\<rbrakk> \<Longrightarrow> thesis
2. \<lbrakk>\<And>r k. r \<^sup>@ Suc (Suc k) = u \<Longrightarrow> thesis; \<not> ?P2\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
from not_prim_primroot_expE[OF assms this that]
[PROOF STATE]
proof (chain)
picking this:
(\<And>k. \<rho> u \<^sup>@ Suc (Suc k) = u \<Longrightarrow> ?r1 k \<^sup>@ Suc (Suc (?k1 k)) = u) \<Longrightarrow> thesis
[PROOF STEP]
show thesis
[PROOF STATE]
proof (prove)
using this:
(\<And>k. \<rho> u \<^sup>@ Suc (Suc k) = u \<Longrightarrow> ?r1 k \<^sup>@ Suc (Suc (?k1 k)) = u) \<Longrightarrow> thesis
goal (1 subgoal):
1. thesis
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
thesis
goal (1 subgoal):
1. \<lbrakk>\<And>r k. r \<^sup>@ Suc (Suc k) = u \<Longrightarrow> thesis; \<not> u \<noteq> \<epsilon>\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
qed (simp add: that[of \<epsilon> 2]) |
import MyNat.Definition
namespace MyNat
open MyNat
/-!
# Advanced proposition world.
## Level 3: and_trans.
With this proof we can use the first `cases` tactic to extract hypotheses `p : P` `q : Q` from
`hpq : P ∧ Q` and then we can use another `cases` tactic to extract hypotheses `q' : Q` and `r : R` from
`hpr : Q ∧ R` then we can split the resulting goal `⊢ P ∧ R` using `constructor` and easily pick off the
resulting sub-goals `⊢ P` and `⊢ R` using our given hypotheses.
## Lemma
If `P`, `Q` and `R` are true/false statements, then `P ∧ Q` and
`Q ∧ R` together imply `P ∧ R`.
-/
lemma and_trans (P Q R : Prop) : P ∧ Q → Q ∧ R → P ∧ R := by
intro hpq
intro hqr
cases hpq with
| intro p q =>
cases hqr with
| intro q' r =>
constructor
assumption
assumption
/-!
Next up [Level 4](./Level4.lean.md)
-/ |
# -*- coding: utf-8 -*-
import sys
sys.path.insert(0,".")
import unittest
import neuroml
import neuroml.writers as writers
import PyOpenWorm
from PyOpenWorm import *
import networkx
import rdflib
import rdflib as R
import pint as Q
import os
import subprocess as SP
import subprocess
import tempfile
import doctest
from glob import glob
from nose import with_setup
class Quantity_Test(unittest.TestCase):
def test_string_init_short(self):
q = Quantity.parse("23 mL")
assert "milliliter" == q.unit
assert 23 == q.value
def test_string_init_volume(self):
q = Quantity.parse("23 inches^3")
assert "inch ** 3" == q.unit
assert 23 == q.value
def test_string_init_compound(self):
q = Quantity.parse("23 inches/second")
assert "inch / second" == q.unit
assert 23 == q.value
def test_atomic_short(self):
q = Quantity(23, "mL")
assert "milliliter" == q.unit
assert 23 == q.value
def test_atomic_long(self):
q = Quantity(23, "milliliters")
assert "milliliter" == q.unit
assert 23 == q.value
|
informal statement Let $x$ and $y$ be elements of $G$. Prove that $xy=yx$ if and only if $y^{-1}xy=x$ if and only if $x^{-1}y^{-1}xy=1$.formal statement theorem exercise_1_1_18 {G : Type*} [group G]
(x y : G) : x * y = y * x ↔ y⁻¹ * x * y = x ↔ x⁻¹ * y⁻¹ * x * y = 1 := |
/-
Copyright (c) 2022 Damiano Testa. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Damiano Testa
! This file was ported from Lean 3 source module data.polynomial.laurent
! leanprover-community/mathlib commit 831c494092374cfe9f50591ed0ac81a25efc5b86
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Data.Polynomial.AlgebraMap
import Mathbin.RingTheory.Localization.Basic
/-! # Laurent polynomials
We introduce Laurent polynomials over a semiring `R`. Mathematically, they are expressions of the
form
$$
\sum_{i \in \mathbb{Z}} a_i T ^ i
$$
where the sum extends over a finite subset of `ℤ`. Thus, negative exponents are allowed. The
coefficients come from the semiring `R` and the variable `T` commutes with everything.
Since we are going to convert back and forth between polynomials and Laurent polynomials, we
decided to maintain some distinction by using the symbol `T`, rather than `X`, as the variable for
Laurent polynomials
## Notation
The symbol `R[T;T⁻¹]` stands for `laurent_polynomial R`. We also define
* `C : R →+* R[T;T⁻¹]` the inclusion of constant polynomials, analogous to the one for `R[X]`;
* `T : ℤ → R[T;T⁻¹]` the sequence of powers of the variable `T`.
## Implementation notes
We define Laurent polynomials as `add_monoid_algebra R ℤ`.
Thus, they are essentially `finsupp`s `ℤ →₀ R`.
This choice differs from the current irreducible design of `polynomial`, that instead shields away
the implementation via `finsupp`s. It is closer to the original definition of polynomials.
As a consequence, `laurent_polynomial` plays well with polynomials, but there is a little roughness
in establishing the API, since the `finsupp` implementation of `R[X]` is well-shielded.
Unlike the case of polynomials, I felt that the exponent notation was not too easy to use, as only
natural exponents would be allowed. Moreover, in the end, it seems likely that we should aim to
perform computations on exponents in `ℤ` anyway and separating this via the symbol `T` seems
convenient.
I made a *heavy* use of `simp` lemmas, aiming to bring Laurent polynomials to the form `C a * T n`.
Any comments or suggestions for improvements are greatly appreciated!
## Future work
Lots is missing!
-- (Riccardo) add inclusion into Laurent series.
-- (Riccardo) giving a morphism (as `R`-alg, so in the commutative case)
from `R[T,T⁻¹]` to `S` is the same as choosing a unit of `S`.
-- A "better" definition of `trunc` would be as an `R`-linear map. This works:
-- ```
-- def trunc : R[T;T⁻¹] →[R] R[X] :=
-- begin
-- refine (_ : add_monoid_algebra R ℕ →[R] R[X]).comp _,
-- { exact ⟨(to_finsupp_iso R).symm, by simp⟩ },
-- { refine ⟨λ r, comap_domain _ r (set.inj_on_of_injective (λ a b ab, int.of_nat.inj ab) _), _⟩,
-- exact λ r f, comap_domain_smul _ _ _ }
-- end
-- ```
-- but it would make sense to bundle the maps better, for a smoother user experience.
-- I (DT) did not have the strength to embark on this (possibly short!) journey, after getting to
-- this stage of the Laurent process!
-- This would likely involve adding a `comap_domain` analogue of
-- `add_monoid_algebra.map_domain_alg_hom` and an `R`-linear version of
-- `polynomial.to_finsupp_iso`.
-- Add `degree, int_degree, int_trailing_degree, leading_coeff, trailing_coeff,...`.
-/
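/- An illustrative example (editorial sketch, not part of the original file): under the
`C a * T n` normal form described above, the Laurent polynomial "3·T⁻² + 5 + 2·T" over `ℤ`
is written as

  C 3 * T (-2) + C 5 * T 0 + C 2 * T 1

(with `C 5 * T 0 = C 5`, since `T 0 = 1`), i.e. it is the finitely supported function
`ℤ →₀ ℤ` sending `-2 ↦ 3`, `0 ↦ 5`, `1 ↦ 2`; compare `single_eq_c_mul_t` below. -/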
open Polynomial BigOperators
open Polynomial AddMonoidAlgebra Finsupp
noncomputable section
variable {R : Type _}
/-- The semiring of Laurent polynomials with coefficients in the semiring `R`.
We denote it by `R[T;T⁻¹]`.
The ring homomorphism `C : R →+* R[T;T⁻¹]` includes `R` as the constant polynomials. -/
abbrev LaurentPolynomial (R : Type _) [Semiring R] :=
AddMonoidAlgebra R ℤ
#align laurent_polynomial LaurentPolynomial
-- mathport name: «expr [T;T⁻¹]»
local notation:9000 R "[T;T⁻¹]" => LaurentPolynomial R
/-- The ring homomorphism, taking a polynomial with coefficients in `R` to a Laurent polynomial
with coefficients in `R`. -/
def Polynomial.toLaurent [Semiring R] : R[X] →+* R[T;T⁻¹] :=
(mapDomainRingHom R Int.ofNatHom).comp (toFinsuppIso R)
#align polynomial.to_laurent Polynomial.toLaurent
/-- This is not a simp lemma, as it is usually preferable to use the lemmas about `C` and `X`
instead. -/
theorem Polynomial.toLaurent_apply [Semiring R] (p : R[X]) :
p.toLaurent = p.toFinsupp.mapDomain coe :=
rfl
#align polynomial.to_laurent_apply Polynomial.toLaurent_apply
/-- The `R`-algebra map, taking a polynomial with coefficients in `R` to a Laurent polynomial
with coefficients in `R`. -/
def Polynomial.toLaurentAlg [CommSemiring R] : R[X] →ₐ[R] R[T;T⁻¹] :=
by
refine' AlgHom.comp _ (to_finsupp_iso_alg R).toAlgHom
exact map_domain_alg_hom R R Int.ofNatHom
#align polynomial.to_laurent_alg Polynomial.toLaurentAlg
@[simp]
theorem Polynomial.toLaurentAlg_apply [CommSemiring R] (f : R[X]) : f.toLaurentAlg = f.toLaurent :=
rfl
#align polynomial.to_laurent_alg_apply Polynomial.toLaurentAlg_apply
namespace LaurentPolynomial
section Semiring
variable [Semiring R]
theorem single_zero_one_eq_one : (single 0 1 : R[T;T⁻¹]) = (1 : R[T;T⁻¹]) :=
rfl
#align laurent_polynomial.single_zero_one_eq_one LaurentPolynomial.single_zero_one_eq_one
/-! ### The functions `C` and `T`. -/
/-- The ring homomorphism `C`, including `R` into the ring of Laurent polynomials over `R` as
the constant Laurent polynomials. -/
def c : R →+* R[T;T⁻¹] :=
singleZeroRingHom
#align laurent_polynomial.C LaurentPolynomial.c
theorem algebraMap_apply {R A : Type _} [CommSemiring R] [Semiring A] [Algebra R A] (r : R) :
algebraMap R (LaurentPolynomial A) r = c (algebraMap R A r) :=
rfl
#align laurent_polynomial.algebra_map_apply LaurentPolynomial.algebraMap_apply
/-- When we have `[comm_semiring R]`, the function `C` is the same as `algebra_map R R[T;T⁻¹]`.
(But note that `C` is defined when `R` is not necessarily commutative, in which case
`algebra_map` is not available.)
-/
theorem c_eq_algebraMap {R : Type _} [CommSemiring R] (r : R) : c r = algebraMap R R[T;T⁻¹] r :=
rfl
#align laurent_polynomial.C_eq_algebra_map LaurentPolynomial.c_eq_algebraMap
theorem single_eq_c (r : R) : single 0 r = c r :=
rfl
#align laurent_polynomial.single_eq_C LaurentPolynomial.single_eq_c
/-- The function `n ↦ T ^ n`, implemented as a sequence `ℤ → R[T;T⁻¹]`.
Using directly `T ^ n` does not work, since we want the exponents to be of Type `ℤ` and there
is no `ℤ`-power defined on `R[T;T⁻¹]`. Using that `T` is a unit introduces extra coercions.
For these reasons, the definition of `T` is as a sequence. -/
def t (n : ℤ) : R[T;T⁻¹] :=
single n 1
#align laurent_polynomial.T LaurentPolynomial.t
@[simp]
theorem t_zero : (t 0 : R[T;T⁻¹]) = 1 :=
rfl
#align laurent_polynomial.T_zero LaurentPolynomial.t_zero
theorem t_add (m n : ℤ) : (t (m + n) : R[T;T⁻¹]) = t m * t n :=
by
convert single_mul_single.symm
simp [T]
#align laurent_polynomial.T_add LaurentPolynomial.t_add
theorem t_sub (m n : ℤ) : (t (m - n) : R[T;T⁻¹]) = t m * t (-n) := by rw [← T_add, sub_eq_add_neg]
#align laurent_polynomial.T_sub LaurentPolynomial.t_sub
@[simp]
theorem t_pow (m : ℤ) (n : ℕ) : (t m ^ n : R[T;T⁻¹]) = t (n * m) := by
rw [T, T, single_pow n, one_pow, nsmul_eq_mul]
#align laurent_polynomial.T_pow LaurentPolynomial.t_pow
/-- The `simp` version of `mul_assoc`, in the presence of `T`'s. -/
@[simp]
theorem mul_t_assoc (f : R[T;T⁻¹]) (m n : ℤ) : f * t m * t n = f * t (m + n) := by
simp [← T_add, mul_assoc]
#align laurent_polynomial.mul_T_assoc LaurentPolynomial.mul_t_assoc
@[simp]
theorem single_eq_c_mul_t (r : R) (n : ℤ) : (single n r : R[T;T⁻¹]) = (c r * t n : R[T;T⁻¹]) := by
convert single_mul_single.symm <;> simp
#align laurent_polynomial.single_eq_C_mul_T LaurentPolynomial.single_eq_c_mul_t
-- This lemma locks in the right changes and is what Lean proved directly.
-- The actual `simp`-normal form of a Laurent monomial is `C a * T n`, whenever it can be reached.
@[simp]
theorem Polynomial.toLaurent_c_mul_t (n : ℕ) (r : R) :
((Polynomial.monomial n r).toLaurent : R[T;T⁻¹]) = c r * t n :=
show mapDomain coe (monomial n r).toFinsupp = (c r * t n : R[T;T⁻¹]) by
rw [to_finsupp_monomial, map_domain_single, single_eq_C_mul_T]
#align polynomial.to_laurent_C_mul_T Polynomial.toLaurent_c_mul_t
@[simp]
theorem Polynomial.toLaurent_c (r : R) : (Polynomial.C r).toLaurent = c r :=
by
convert Polynomial.toLaurent_c_mul_t 0 r
simp only [Int.ofNat_zero, T_zero, mul_one]
#align polynomial.to_laurent_C Polynomial.toLaurent_c
@[simp]
theorem Polynomial.toLaurent_x : (Polynomial.X.toLaurent : R[T;T⁻¹]) = t 1 :=
by
have : (Polynomial.X : R[X]) = monomial 1 1 := by simp [← C_mul_X_pow_eq_monomial]
simp [this, Polynomial.toLaurent_c_mul_t]
#align polynomial.to_laurent_X Polynomial.toLaurent_x
@[simp]
theorem Polynomial.toLaurent_one : (Polynomial.toLaurent : R[X] → R[T;T⁻¹]) 1 = 1 :=
map_one Polynomial.toLaurent
#align polynomial.to_laurent_one Polynomial.toLaurent_one
@[simp]
theorem Polynomial.toLaurent_c_mul_eq (r : R) (f : R[X]) :
(Polynomial.C r * f).toLaurent = c r * f.toLaurent := by
simp only [_root_.map_mul, Polynomial.toLaurent_c]
#align polynomial.to_laurent_C_mul_eq Polynomial.toLaurent_c_mul_eq
@[simp]
theorem Polynomial.toLaurent_x_pow (n : ℕ) : (X ^ n : R[X]).toLaurent = t n := by
simp only [map_pow, Polynomial.toLaurent_x, T_pow, mul_one]
#align polynomial.to_laurent_X_pow Polynomial.toLaurent_x_pow
@[simp]
theorem Polynomial.toLaurent_c_mul_x_pow (n : ℕ) (r : R) :
(Polynomial.C r * X ^ n).toLaurent = c r * t n := by
simp only [_root_.map_mul, Polynomial.toLaurent_c, Polynomial.toLaurent_x_pow]
#align polynomial.to_laurent_C_mul_X_pow Polynomial.toLaurent_c_mul_x_pow
instance invertibleT (n : ℤ) : Invertible (t n : R[T;T⁻¹])
where
invOf := t (-n)
invOf_mul_self := by rw [← T_add, add_left_neg, T_zero]
mul_invOf_self := by rw [← T_add, add_right_neg, T_zero]
#align laurent_polynomial.invertible_T LaurentPolynomial.invertibleT
@[simp]
theorem invOf_t (n : ℤ) : ⅟ (t n : R[T;T⁻¹]) = t (-n) :=
rfl
#align laurent_polynomial.inv_of_T LaurentPolynomial.invOf_t
theorem isUnit_t (n : ℤ) : IsUnit (t n : R[T;T⁻¹]) :=
isUnit_of_invertible _
#align laurent_polynomial.is_unit_T LaurentPolynomial.isUnit_t
@[elab_as_elim]
protected theorem induction_on {M : R[T;T⁻¹] → Prop} (p : R[T;T⁻¹]) (h_C : ∀ a, M (c a))
(h_add : ∀ {p q}, M p → M q → M (p + q))
(h_C_mul_T : ∀ (n : ℕ) (a : R), M (c a * t n) → M (c a * t (n + 1)))
(h_C_mul_T_Z : ∀ (n : ℕ) (a : R), M (c a * t (-n)) → M (c a * t (-n - 1))) : M p :=
by
have A : ∀ {n : ℤ} {a : R}, M (C a * T n) :=
by
intro n a
apply n.induction_on
· simpa only [T_zero, mul_one] using h_C a
· exact fun m => h_C_mul_T m a
· exact fun m => h_C_mul_T_Z m a
have B : ∀ s : Finset ℤ, M (s.Sum fun n : ℤ => C (p.to_fun n) * T n) :=
by
apply Finset.induction
· convert h_C 0
simp only [Finset.sum_empty, _root_.map_zero]
· intro n s ns ih
rw [Finset.sum_insert ns]
exact h_add A ih
convert B p.support
ext a
simp_rw [← single_eq_C_mul_T, Finset.sum_apply', single_apply, Finset.sum_ite_eq']
split_ifs with h h
· rfl
· exact finsupp.not_mem_support_iff.mp h
#align laurent_polynomial.induction_on LaurentPolynomial.induction_on
/-- To prove something about Laurent polynomials, it suffices to show that
* the condition is closed under taking sums, and
* it holds for monomials.
-/
@[elab_as_elim]
protected theorem induction_on' {M : R[T;T⁻¹] → Prop} (p : R[T;T⁻¹])
(h_add : ∀ p q, M p → M q → M (p + q)) (h_C_mul_T : ∀ (n : ℤ) (a : R), M (c a * t n)) : M p :=
by
refine' p.induction_on (fun a => _) h_add _ _ <;> try exact fun n f _ => h_C_mul_T _ f
convert h_C_mul_T 0 a
exact (mul_one _).symm
#align laurent_polynomial.induction_on' LaurentPolynomial.induction_on'
theorem commute_t (n : ℤ) (f : R[T;T⁻¹]) : Commute (t n) f :=
f.inductionOn' (fun p q Tp Tq => Commute.add_right Tp Tq) fun m a =>
show t n * _ = _
by
rw [T, T, ← single_eq_C, single_mul_single, single_mul_single, single_mul_single]
simp [add_comm]
#align laurent_polynomial.commute_T LaurentPolynomial.commute_t
@[simp]
theorem t_mul (n : ℤ) (f : R[T;T⁻¹]) : t n * f = f * t n :=
(commute_t n f).Eq
#align laurent_polynomial.T_mul LaurentPolynomial.t_mul
/-- `trunc : R[T;T⁻¹] →+ R[X]` maps a Laurent polynomial `f` to the polynomial whose terms of
nonnegative degree coincide with the ones of `f`. The terms of negative degree of `f` "vanish".
`trunc` is a left-inverse to `polynomial.to_laurent`. -/
def trunc : R[T;T⁻¹] →+ R[X] :=
(toFinsuppIso R).symm.toAddMonoidHom.comp <| comapDomain.addMonoidHom fun a b => Int.ofNat.inj
#align laurent_polynomial.trunc LaurentPolynomial.trunc
@[simp]
theorem trunc_c_mul_t (n : ℤ) (r : R) : trunc (c r * t n) = ite (0 ≤ n) (monomial n.toNat r) 0 :=
by
apply (to_finsupp_iso R).Injective
rw [← single_eq_C_mul_T, Trunc, AddMonoidHom.coe_comp, Function.comp_apply,
comap_domain.add_monoid_hom_apply, to_finsupp_iso_apply]
by_cases n0 : 0 ≤ n
· lift n to ℕ using n0
erw [comap_domain_single, to_finsupp_iso_symm_apply]
simp only [Int.coe_nat_nonneg, Int.toNat_coe_nat, if_true, to_finsupp_iso_apply,
to_finsupp_monomial]
· lift -n to ℕ using (neg_pos.mpr (not_le.mp n0)).le with m
rw [to_finsupp_iso_apply, to_finsupp_inj, if_neg n0]
erw [to_finsupp_iso_symm_apply]
ext a
have := ((not_le.mp n0).trans_le (Int.ofNat_zero_le a)).ne'
simp only [coeff, comap_domain_apply, Int.ofNat_eq_coe, coeff_zero, single_apply_eq_zero, this,
IsEmpty.forall_iff]
#align laurent_polynomial.trunc_C_mul_T LaurentPolynomial.trunc_c_mul_t
@[simp]
theorem leftInverse_trunc_toLaurent :
Function.LeftInverse (trunc : R[T;T⁻¹] → R[X]) Polynomial.toLaurent :=
by
refine' fun f => f.inductionOn' _ _
· exact fun f g hf hg => by simp only [hf, hg, _root_.map_add]
·
exact fun n r => by
simp only [Polynomial.toLaurent_c_mul_t, trunc_C_mul_T, Int.coe_nat_nonneg, Int.toNat_coe_nat,
if_true]
#align laurent_polynomial.left_inverse_trunc_to_laurent LaurentPolynomial.leftInverse_trunc_toLaurent
@[simp]
theorem Polynomial.trunc_toLaurent (f : R[X]) : trunc f.toLaurent = f :=
leftInverse_trunc_toLaurent _
#align polynomial.trunc_to_laurent Polynomial.trunc_toLaurent
theorem Polynomial.toLaurent_injective :
Function.Injective (Polynomial.toLaurent : R[X] → R[T;T⁻¹]) :=
leftInverse_trunc_toLaurent.Injective
#align polynomial.to_laurent_injective Polynomial.toLaurent_injective
@[simp]
theorem Polynomial.toLaurent_inj (f g : R[X]) : f.toLaurent = g.toLaurent ↔ f = g :=
⟨fun h => Polynomial.toLaurent_injective h, congr_arg _⟩
#align polynomial.to_laurent_inj Polynomial.toLaurent_inj
theorem Polynomial.toLaurent_ne_zero {f : R[X]} : f ≠ 0 ↔ f.toLaurent ≠ 0 :=
(map_ne_zero_iff _ Polynomial.toLaurent_injective).symm
#align polynomial.to_laurent_ne_zero Polynomial.toLaurent_ne_zero
theorem exists_t_pow (f : R[T;T⁻¹]) : ∃ (n : ℕ)(f' : R[X]), f'.toLaurent = f * t n :=
by
apply f.induction_on' _ fun n a => _ <;> clear f
· rintro f g ⟨m, fn, hf⟩ ⟨n, gn, hg⟩
refine' ⟨m + n, fn * X ^ n + gn * X ^ m, _⟩
simp only [hf, hg, add_mul, add_comm (n : ℤ), map_add, map_mul, Polynomial.toLaurent_x_pow,
mul_T_assoc, Int.ofNat_add]
· cases' n with n n
· exact ⟨0, Polynomial.C a * X ^ n, by simp⟩
· refine' ⟨n + 1, Polynomial.C a, _⟩
simp only [Int.negSucc_eq, Polynomial.toLaurent_c, Int.ofNat_succ, mul_T_assoc, add_left_neg,
T_zero, mul_one]
#align laurent_polynomial.exists_T_pow LaurentPolynomial.exists_t_pow
/-- This is a version of `exists_T_pow` stated as an induction principle. -/
@[elab_as_elim]
theorem induction_on_mul_t {Q : R[T;T⁻¹] → Prop} (f : R[T;T⁻¹])
(Qf : ∀ {f : R[X]} {n : ℕ}, Q (f.toLaurent * t (-n))) : Q f :=
by
rcases f.exists_T_pow with ⟨n, f', hf⟩
rw [← mul_one f, ← T_zero, ← Nat.cast_zero, ← Nat.sub_self n, Nat.cast_sub rfl.le, T_sub, ←
mul_assoc, ← hf]
exact Qf
#align laurent_polynomial.induction_on_mul_T LaurentPolynomial.induction_on_mul_t
/-- Suppose that `Q` is a statement about Laurent polynomials such that
* `Q` is true on *ordinary* polynomials;
* `Q (f * T)` implies `Q f`;
it follows that `Q` is true on all Laurent polynomials. -/
theorem reduce_to_polynomial_of_mul_t (f : R[T;T⁻¹]) {Q : R[T;T⁻¹] → Prop}
(Qf : ∀ f : R[X], Q f.toLaurent) (QT : ∀ f, Q (f * t 1) → Q f) : Q f :=
by
induction' f using LaurentPolynomial.induction_on_mul_t with f n
induction' n with n hn
· simpa only [Int.ofNat_zero, neg_zero, T_zero, mul_one] using Qf _
· convert QT _ _
simpa using hn
#align laurent_polynomial.reduce_to_polynomial_of_mul_T LaurentPolynomial.reduce_to_polynomial_of_mul_t
section Support
theorem support_c_mul_t (a : R) (n : ℤ) : (c a * t n).support ⊆ {n} := by
simpa only [← single_eq_C_mul_T] using support_single_subset
#align laurent_polynomial.support_C_mul_T LaurentPolynomial.support_c_mul_t
theorem support_c_mul_t_of_ne_zero {a : R} (a0 : a ≠ 0) (n : ℤ) : (c a * t n).support = {n} :=
by
rw [← single_eq_C_mul_T]
exact support_single_ne_zero _ a0
#align laurent_polynomial.support_C_mul_T_of_ne_zero LaurentPolynomial.support_c_mul_t_of_ne_zero
/-- The support of a polynomial `f` is a finset in `ℕ`. The lemma `to_laurent_support f`
shows that the support of `f.to_laurent` is the same finset, but viewed in `ℤ` under the natural
inclusion `ℕ ↪ ℤ`. -/
theorem toLaurent_support (f : R[X]) : f.toLaurent.support = f.support.map Nat.castEmbedding :=
by
generalize hd : f.support = s
revert f
refine' Finset.induction_on s _ _ <;> clear s
·
simp (config := { contextual := true }) only [Polynomial.support_eq_empty, map_zero,
Finsupp.support_zero, eq_self_iff_true, imp_true_iff, Finset.map_empty]
· intro a s as hf f fs
have : (erase a f).toLaurent.support = s.map Nat.castEmbedding :=
hf (f.erase a)
(by
simp only [fs, Finset.erase_eq_of_not_mem as, Polynomial.support_erase,
Finset.erase_insert_eq_erase])
rw [← monomial_add_erase f a, Finset.map_insert, ← this, map_add, Polynomial.toLaurent_c_mul_t,
support_add_eq, Finset.insert_eq]
· congr
exact support_C_mul_T_of_ne_zero (polynomial.mem_support_iff.mp (by simp [fs])) _
· rw [this]
exact Disjoint.mono_left (support_C_mul_T _ _) (by simpa)
#align laurent_polynomial.to_laurent_support LaurentPolynomial.toLaurent_support
end Support
section Degrees
/-- The degree of a Laurent polynomial takes values in `with_bot ℤ`.
If `f : R[T;T⁻¹]` is a Laurent polynomial, then `f.degree` is the maximum of the support of `f`,
or `⊥`, if `f = 0`. -/
def degree (f : R[T;T⁻¹]) : WithBot ℤ :=
f.support.max
#align laurent_polynomial.degree LaurentPolynomial.degree
@[simp]
theorem degree_zero : degree (0 : R[T;T⁻¹]) = ⊥ :=
rfl
#align laurent_polynomial.degree_zero LaurentPolynomial.degree_zero
@[simp]
theorem degree_eq_bot_iff {f : R[T;T⁻¹]} : f.degree = ⊥ ↔ f = 0 :=
by
refine' ⟨fun h => _, fun h => by rw [h, degree_zero]⟩
rw [degree, Finset.max_eq_sup_withBot] at h
ext n
refine' not_not.mp fun f0 => _
simp_rw [Finset.sup_eq_bot_iff, Finsupp.mem_support_iff, Ne.def, WithBot.coe_ne_bot] at h
exact h n f0
#align laurent_polynomial.degree_eq_bot_iff LaurentPolynomial.degree_eq_bot_iff
section ExactDegrees
open Classical
@[simp]
theorem degree_c_mul_t (n : ℤ) (a : R) (a0 : a ≠ 0) : (c a * t n).degree = n :=
by
rw [degree]
convert Finset.max_singleton
refine' support_eq_singleton.mpr _
simp only [← single_eq_C_mul_T, single_eq_same, a0, Ne.def, not_false_iff, eq_self_iff_true,
and_self_iff]
#align laurent_polynomial.degree_C_mul_T LaurentPolynomial.degree_c_mul_t
theorem degree_c_mul_t_ite (n : ℤ) (a : R) : (c a * t n).degree = ite (a = 0) ⊥ n := by
split_ifs with h h <;>
simp only [h, map_zero, MulZeroClass.zero_mul, degree_zero, degree_C_mul_T, Ne.def,
not_false_iff]
#align laurent_polynomial.degree_C_mul_T_ite LaurentPolynomial.degree_c_mul_t_ite
@[simp]
theorem degree_t [Nontrivial R] (n : ℤ) : (t n : R[T;T⁻¹]).degree = n :=
by
rw [← one_mul (T n), ← map_one C]
exact degree_C_mul_T n 1 (one_ne_zero : (1 : R) ≠ 0)
#align laurent_polynomial.degree_T LaurentPolynomial.degree_t
theorem degree_c {a : R} (a0 : a ≠ 0) : (c a).degree = 0 :=
by
rw [← mul_one (C a), ← T_zero]
exact degree_C_mul_T 0 a a0
#align laurent_polynomial.degree_C LaurentPolynomial.degree_c
theorem degree_c_ite (a : R) : (c a).degree = ite (a = 0) ⊥ 0 := by
split_ifs with h h <;> simp only [h, map_zero, degree_zero, degree_C, Ne.def, not_false_iff]
#align laurent_polynomial.degree_C_ite LaurentPolynomial.degree_c_ite
end ExactDegrees
section DegreeBounds
theorem degree_c_mul_t_le (n : ℤ) (a : R) : (c a * t n).degree ≤ n :=
by
by_cases a0 : a = 0
· simp only [a0, map_zero, MulZeroClass.zero_mul, degree_zero, bot_le]
· exact (degree_C_mul_T n a a0).le
#align laurent_polynomial.degree_C_mul_T_le LaurentPolynomial.degree_c_mul_t_le
theorem degree_t_le (n : ℤ) : (t n : R[T;T⁻¹]).degree ≤ n :=
(le_of_eq (by rw [map_one, one_mul])).trans (degree_c_mul_t_le n (1 : R))
#align laurent_polynomial.degree_T_le LaurentPolynomial.degree_t_le
theorem degree_c_le (a : R) : (c a).degree ≤ 0 :=
(le_of_eq (by rw [T_zero, mul_one])).trans (degree_c_mul_t_le 0 a)
#align laurent_polynomial.degree_C_le LaurentPolynomial.degree_c_le
end DegreeBounds
end Degrees
instance : Module R[X] R[T;T⁻¹] :=
Module.compHom _ Polynomial.toLaurent
instance (R : Type _) [Semiring R] : IsScalarTower R[X] R[X] R[T;T⁻¹]
where smul_assoc x y z := by simp only [SMul.smul, SMul.comp.smul, map_mul, mul_assoc]
end Semiring
section CommSemiring
variable [CommSemiring R]
instance algebraPolynomial (R : Type _) [CommSemiring R] : Algebra R[X] R[T;T⁻¹] :=
{ Polynomial.toLaurent with
commutes' := fun f l => by simp [mul_comm]
smul_def' := fun f l => rfl }
#align laurent_polynomial.algebra_polynomial LaurentPolynomial.algebraPolynomial
theorem algebraMap_x_pow (n : ℕ) : algebraMap R[X] R[T;T⁻¹] (X ^ n) = t n :=
Polynomial.toLaurent_x_pow n
#align laurent_polynomial.algebra_map_X_pow LaurentPolynomial.algebraMap_x_pow
@[simp]
theorem algebraMap_eq_toLaurent (f : R[X]) : algebraMap R[X] R[T;T⁻¹] f = f.toLaurent :=
rfl
#align laurent_polynomial.algebra_map_eq_to_laurent LaurentPolynomial.algebraMap_eq_toLaurent
theorem isLocalization : IsLocalization (Submonoid.closure ({X} : Set R[X])) R[T;T⁻¹] :=
{ map_units := fun t => by
cases' t with t ht
rcases submonoid.mem_closure_singleton.mp ht with ⟨n, rfl⟩
simp only [is_unit_T n, [anonymous], algebra_map_eq_to_laurent, Polynomial.toLaurent_x_pow]
surj := fun f =>
by
induction' f using LaurentPolynomial.induction_on_mul_t with f n
have := (Submonoid.closure ({X} : Set R[X])).pow_mem Submonoid.mem_closure_singleton_self n
refine' ⟨(f, ⟨_, this⟩), _⟩
simp only [[anonymous], algebra_map_eq_to_laurent, Polynomial.toLaurent_x_pow, mul_T_assoc,
add_left_neg, T_zero, mul_one]
eq_iff_exists := fun f g =>
by
rw [algebra_map_eq_to_laurent, algebra_map_eq_to_laurent, Polynomial.toLaurent_inj]
refine' ⟨_, _⟩
· rintro rfl
exact ⟨1, rfl⟩
· rintro ⟨⟨h, hX⟩, h⟩
rcases submonoid.mem_closure_singleton.mp hX with ⟨n, rfl⟩
exact mul_X_pow_injective n h }
#align laurent_polynomial.is_localization LaurentPolynomial.isLocalization
end CommSemiring
end LaurentPolynomial
|
[GOAL]
a : ℤ
n : ℕ
⊢ succ^[n + 1] a = a + ↑(n + 1)
[PROOFSTEP]
rw [Function.iterate_succ', Int.ofNat_succ, ← add_assoc]
[GOAL]
a : ℤ
n : ℕ
⊢ (succ ∘ succ^[n]) a = a + ↑n + 1
[PROOFSTEP]
exact congr_arg _ (succ_iterate a n)
[GOAL]
a : ℤ
n : ℕ
⊢ pred^[n + 1] a = a - ↑(n + 1)
[PROOFSTEP]
rw [Function.iterate_succ', Int.ofNat_succ, ← sub_sub]
[GOAL]
a : ℤ
n : ℕ
⊢ (pred ∘ pred^[n]) a = a - ↑n - 1
[PROOFSTEP]
exact congr_arg _ (pred_iterate a n)
[GOAL]
a b : ℤ
h : a ≤ b
⊢ Order.succ^[toNat (b - a)] a = b
[PROOFSTEP]
rw [succ_eq_succ, succ_iterate, toNat_sub_of_le h, ← add_sub_assoc, add_sub_cancel']
[GOAL]
a b : ℤ
h : a ≤ b
⊢ Order.pred^[toNat (b - a)] b = a
[PROOFSTEP]
rw [pred_eq_pred, pred_iterate, toNat_sub_of_le h, sub_sub_cancel]
[GOAL]
z : ℤ
⊢ z - 1 ⋖ z
[PROOFSTEP]
rw [Int.covby_iff_succ_eq, sub_add_cancel]
[GOAL]
a b : ℕ
⊢ ↑a ⋖ ↑b ↔ a ⋖ b
[PROOFSTEP]
rw [Nat.covby_iff_succ_eq, Int.covby_iff_succ_eq]
[GOAL]
a b : ℕ
⊢ ↑a + 1 = ↑b ↔ a + 1 = b
[PROOFSTEP]
exact Int.coe_nat_inj'
|
header {* \isaheader{Return and their corresponding call nodes} *}
theory ReturnAndCallNodes imports CFG begin
context CFG begin
subsection {* Defining @{text "return_node"} *}
definition return_node :: "'node \<Rightarrow> bool"
where "return_node n \<equiv> \<exists>a a'. valid_edge a \<and> n = targetnode a \<and>
valid_edge a' \<and> a \<in> get_return_edges a'"
lemma return_node_determines_call_node:
assumes "return_node n"
shows "\<exists>!n'. \<exists>a a'. valid_edge a \<and> n' = sourcenode a \<and> valid_edge a' \<and>
a' \<in> get_return_edges a \<and> n = targetnode a'"
proof(rule ex_ex1I)
from `return_node n`
show "\<exists>n' a a'. valid_edge a \<and> n' = sourcenode a \<and> valid_edge a' \<and>
a' \<in> get_return_edges a \<and> n = targetnode a'"
by(simp add:return_node_def) blast
next
fix n' nx
assume "\<exists>a a'. valid_edge a \<and> n' = sourcenode a \<and> valid_edge a' \<and>
a' \<in> get_return_edges a \<and> n = targetnode a'"
and "\<exists>a a'. valid_edge a \<and> nx = sourcenode a \<and> valid_edge a' \<and>
a' \<in> get_return_edges a \<and> n = targetnode a'"
then obtain a a' ax ax' where "valid_edge a" and "n' = sourcenode a"
and "valid_edge a'" and "a' \<in> get_return_edges a"
and "n = targetnode a'" and "valid_edge ax" and "nx = sourcenode ax"
and "valid_edge ax'" and "ax' \<in> get_return_edges ax"
and "n = targetnode ax'"
by blast
from `valid_edge a` `a' \<in> get_return_edges a` have "valid_edge a'"
by(rule get_return_edges_valid)
from `valid_edge a` `a' \<in> get_return_edges a` obtain a''
where intra_edge1:"valid_edge a''" "sourcenode a'' = sourcenode a"
"targetnode a'' = targetnode a'" "kind a'' = (\<lambda>cf. False)\<^sub>\<surd>"
by(fastforce dest:call_return_node_edge)
from `valid_edge ax` `ax' \<in> get_return_edges ax` obtain ax''
where intra_edge2:"valid_edge ax''" "sourcenode ax'' = sourcenode ax"
"targetnode ax'' = targetnode ax'" "kind ax'' = (\<lambda>cf. False)\<^sub>\<surd>"
by(fastforce dest:call_return_node_edge)
from `valid_edge a` `a' \<in> get_return_edges a`
obtain Q r p fs where "kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs"
by(fastforce dest!:only_call_get_return_edges)
with `valid_edge a` `a' \<in> get_return_edges a` obtain Q' p f'
where "kind a' = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'" by(fastforce dest!:call_return_edges)
with `valid_edge a'`
have "\<exists>!a''. valid_edge a'' \<and> targetnode a'' = targetnode a' \<and> intra_kind(kind a'')"
by(rule return_only_one_intra_edge)
with intra_edge1 intra_edge2 `n = targetnode a'` `n = targetnode ax'`
have "a'' = ax''" by(fastforce simp:intra_kind_def)
with `sourcenode a'' = sourcenode a` `sourcenode ax'' = sourcenode ax`
`n' = sourcenode a` `nx = sourcenode ax`
show "n' = nx" by simp
qed
lemma return_node_THE_call_node:
"\<lbrakk>return_node n; valid_edge a; valid_edge a'; a' \<in> get_return_edges a;
n = targetnode a'\<rbrakk>
\<Longrightarrow> (THE n'. \<exists>a a'. valid_edge a \<and> n' = sourcenode a \<and> valid_edge a' \<and>
a' \<in> get_return_edges a \<and> n = targetnode a') = sourcenode a"
by(fastforce intro!:the1_equality return_node_determines_call_node)
subsection {* Defining call nodes belonging to a certain @{text "return_node"} *}
definition call_of_return_node :: "'node \<Rightarrow> 'node \<Rightarrow> bool"
where "call_of_return_node n n' \<equiv> \<exists>a a'. return_node n \<and>
valid_edge a \<and> n' = sourcenode a \<and> valid_edge a' \<and>
a' \<in> get_return_edges a \<and> n = targetnode a'"
lemma return_node_call_of_return_node:
"return_node n \<Longrightarrow> \<exists>!n'. call_of_return_node n n'"
by -(frule return_node_determines_call_node,unfold call_of_return_node_def,simp)
lemma call_of_return_nodes_det [dest]:
assumes "call_of_return_node n n'" and "call_of_return_node n n''"
shows "n' = n''"
proof -
from `call_of_return_node n n'` have "return_node n"
by(simp add:call_of_return_node_def)
hence "\<exists>!n'. call_of_return_node n n'" by(rule return_node_call_of_return_node)
with `call_of_return_node n n'` `call_of_return_node n n''`
show ?thesis by auto
qed
lemma get_return_edges_call_of_return_nodes:
"\<lbrakk>valid_call_list cs m; valid_return_list rs m;
\<forall>i < length rs. rs!i \<in> get_return_edges (cs!i); length rs = length cs\<rbrakk>
\<Longrightarrow> \<forall>i<length cs. call_of_return_node (targetnodes rs!i) (sourcenode (cs!i))"
proof(induct cs arbitrary:m rs)
case Nil thus ?case by fastforce
next
case (Cons c' cs')
note IH = `\<And>m rs. \<lbrakk>valid_call_list cs' m; valid_return_list rs m;
\<forall>i<length rs. rs ! i \<in> get_return_edges (cs' ! i); length rs = length cs'\<rbrakk>
\<Longrightarrow> \<forall>i<length cs'. call_of_return_node (targetnodes rs ! i) (sourcenode (cs'!i))`
from `length rs = length (c' # cs')` obtain r' rs' where "rs = r' # rs'"
and "length rs' = length cs'" by(cases rs) auto
with `\<forall>i<length rs. rs ! i \<in> get_return_edges ((c' # cs') ! i)`
have "\<forall>i<length rs'. rs' ! i \<in> get_return_edges (cs' ! i)"
and "r' \<in> get_return_edges c'" by auto
from `valid_call_list (c'#cs') m` have "valid_edge c'"
by(fastforce simp:valid_call_list_def)
from this `r' \<in> get_return_edges c'`
have "get_proc (sourcenode c') = get_proc (targetnode r')"
by(rule get_proc_get_return_edge)
from `valid_call_list (c'#cs') m`
have "valid_call_list cs' (sourcenode c')"
apply(clarsimp simp:valid_call_list_def)
apply(hypsubst_thin)
apply(erule_tac x="c'#cs'" in allE) apply clarsimp
by(case_tac cs')(auto simp:sourcenodes_def)
from `valid_return_list rs m` `rs = r' # rs'`
`get_proc (sourcenode c') = get_proc (targetnode r')`
have "valid_return_list rs' (sourcenode c')"
apply(clarsimp simp:valid_return_list_def)
apply(erule_tac x="r'#cs'" in allE) apply clarsimp
by(case_tac cs')(auto simp:targetnodes_def)
from IH[OF `valid_call_list cs' (sourcenode c')`
`valid_return_list rs' (sourcenode c')`
`\<forall>i<length rs'. rs' ! i \<in> get_return_edges (cs' ! i)` `length rs' = length cs'`]
have all:"\<forall>i<length cs'.
call_of_return_node (targetnodes rs' ! i) (sourcenode (cs' ! i))" .
from `valid_edge c'` `r' \<in> get_return_edges c'` have "valid_edge r'"
by(rule get_return_edges_valid)
from `valid_edge r'` `valid_edge c'` `r' \<in> get_return_edges c'`
have "return_node (targetnode r')" by(fastforce simp:return_node_def)
with `valid_edge c'` `r' \<in> get_return_edges c'` `valid_edge r'`
have "call_of_return_node (targetnode r') (sourcenode c')"
by(simp add:call_of_return_node_def) blast
with all `rs = r' # rs'` show ?case
by auto(case_tac i,auto simp:targetnodes_def)
qed
end
end
|
lemma (in first_countable_topology) countable_basis: obtains A :: "nat \<Rightarrow> 'a set" where "\<And>i. open (A i)" "\<And>i. x \<in> A i" "\<And>F. (\<forall>n. F n \<in> A n) \<Longrightarrow> F \<longlonglongrightarrow> x" |