Dataset: AI4M
Column: text (string, lengths 0 to 3.34M characters)
If $f$ is a measure-preserving map from a measure space $(X, \mathcal{A}, \mu)$ to a measure space $(Y, \mathcal{B}, \nu)$, then for any $B \in \mathcal{B}$, the measure of $f^{-1}(B)$ in $(X, \mathcal{A}, \mu)$ equals the measure of $B$ in $(Y, \mathcal{B}, \nu)$, i.e. $\mu(f^{-1}(B)) = \nu(B)$. (Measurability of $f$ alone does not give this; the equality is precisely what it means for $f$ to be measure-preserving.)
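A minimal Lean 4 sketch of the corrected statement, assuming Mathlib's `MeasureTheory.MeasurePreserving` and `Measure.map_apply` (names taken from Mathlib; the example is illustrative, not a row of the dataset):

import Mathlib

open MeasureTheory

-- Sketch: `MeasurePreserving f μ ν` packages `Measurable f` with
-- `Measure.map f μ = ν`, which yields `μ (f ⁻¹' B) = ν B` for measurable `B`.
example {X Y : Type*} [MeasurableSpace X] [MeasurableSpace Y]
    {μ : Measure X} {ν : Measure Y} {f : X → Y}
    (hf : MeasurePreserving f μ ν) {B : Set Y} (hB : MeasurableSet B) :
    μ (f ⁻¹' B) = ν B := by
  -- Rewrite `ν` as the pushforward of `μ`, then unfold the pushforward on `B`.
  rw [← hf.map_eq, Measure.map_apply hf.measurable hB]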
[STATEMENT] lemma empty_literal_pat [def_pat_rules]: "String.empty_literal \<equiv> UNPROTECT String.empty_literal" [PROOF STATE] proof (prove) goal (1 subgoal): 1. STR '''' \<equiv> UNPROTECT STR '''' [PROOF STEP] by simp
# This file holds the definition of the Personnel type. This type is used to
# define members of a military organisation, in this case, the Belgian Army.

# The Personnel type requires no additional types.
requiredTypes = [ ]

for reqType in requiredTypes
    if !isdefined( Symbol( uppercase( string( reqType[ 1 ] ) ) * reqType[ 2:end ] ) )
        include( joinpath( typePath, reqType * ".jl" ) )
    end  # if !isdefined( Symbol( ...
end  # for reqType in requiredTypes

export Personnel

type Personnel
    # Information about the personnel member.
    persData::Dict{Symbol, Any}

    function Personnel( id::String = "-----" )
        newPers = new()
        newPers.persData = Dict{Symbol, Any}( :id => id )
        return newPers
    end  # Personnel( id )

    function Personnel( idKey::Symbol, id::String = "-----" )
        newPers = new()
        newPers.persData = Dict{Symbol, Any}( idKey => id )
        return newPers
    end  # Personnel( idKey, id )

    function Personnel( idKey::String, id::String )
        newPers = new()
        newPers.persData = Dict{Symbol, Any}( Symbol( idKey ) => id )
        return newPers
    end  # Personnel( idKey, id )
end  # type Personnel
lemma closed_components: "\<lbrakk>closed s; c \<in> components s\<rbrakk> \<Longrightarrow> closed c"
(************************************************************************** * TLC: A library for Coq * * Reflection : Basic Operations on Booleans * **************************************************************************) Set Implicit Arguments. Require Import LibTactics LibLogic LibOperation. (* ********************************************************************** *) (* ################################################################# *) (** * Boolean type *) (* ---------------------------------------------------------------------- *) (* ================================================================= *) (** ** Definition *) (** From the Prelude: Inductive bool : Type := | true : bool | false : bool. *) (* ---------------------------------------------------------------------- *) (* ================================================================= *) (** ** Inhabited *) Instance Inhab_bool : Inhab bool. Proof using. constructor. apply (Inhab_of_val true). Qed. (* ---------------------------------------------------------------------- *) (* ================================================================= *) (** ** Extensionality *) (** See [LibReflect] for extensionality of booleans. *) (* ********************************************************************** *) (* ################################################################# *) (** * Boolean Operations *) (* ================================================================= *) (** ** Comparison *) Definition eqb (x y:bool) : bool := match x, y with | true, true => true | false, false => true | _, _ => false end. (** Negation *) Definition neg (x:bool) : bool := match x with | true => false | false => true end. (** Conjunction *) Definition and (x y:bool) : bool := match x, y with | true, true => true | _, _ => false end. (** Disjunction *) Definition or (x y:bool) : bool := match x, y with | false, false => false | _, _ => true end. (** Implication ([x] implies [y], i.e. [!x || y]) *) Definition impl (x y:bool) : bool := or (neg x) y. (** Exclusive or *) Definition xor (x y:bool) : bool := neg (eqb x y). (** Notations *) Bind Scope Bool_scope with bool. Open Scope Bool_scope. Notation "! x" := (neg x) (at level 35, right associativity) : Bool_scope. Infix "&&" := and (at level 40, left associativity) : Bool_scope. Infix "||" := or (at level 50, left associativity) : Bool_scope. (* ********************************************************************** *) (** * Boolean Decision Procedure : tactic working by exponential case analysis on all variables of type bool. *) (* ---------------------------------------------------------------------- *) (* ================================================================= *) (** ** Tactic [tautob] *) (** [tautob] introduces all variables that it can, performs a case analysis on all the boolean variables, then splits all subgoals and attempts resolution using [intuition]. *) Ltac tautob_post tt := simpls; try split; intros; try discriminate; try solve [ intuition auto_false ]. Ltac tautob_core tt := let rec aux tt := (try intros_all); match goal with | b : bool |- _ => destruct b; clear b; aux tt | _ => tautob_post tt end in aux tt. Tactic Notation "tautob" := tautob_core tt.
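(* A hedged usage sketch, not part of the TLC source: [tautob] destructs every
   boolean variable and leaves 2^n trivial subgoals that [intuition] closes. *)
Goal forall x y : bool, x && y = y && x.
Proof using. tautob. Qed.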
(* ********************************************************************** *) (* ################################################################# *) (** * Properties of boolean operators *) (* ---------------------------------------------------------------------- *) (* ================================================================= *) (** ** Properties of [eqb] *) Lemma eqb_same : forall x, eqb x x = true. Proof using. tautob. Qed. Lemma eqb_true_l : neutral_l eqb true. (* forall x, eqb true x = x. *) Proof using. tautob. Qed. Lemma eqb_true_r : neutral_r eqb true. Proof using. tautob. Qed. Lemma eqb_false_l : forall x, eqb false x = neg x. Proof using. tautob. Qed. Lemma eqb_false_r : forall x, eqb x false = neg x. Proof using. tautob. Qed. Lemma eqb_comm : comm eqb. Proof using. tautob. Qed. (* ---------------------------------------------------------------------- *) (* ================================================================= *) (** ** Properties of [and] *) Lemma and_same : idempotent2 and. Proof using. tautob. Qed. Lemma and_true_l : neutral_l and true. Proof using. tautob. Qed. Lemma and_true_r : neutral_r and true. Proof using. tautob. Qed. Lemma and_false_l : absorb_l and false. Proof using. tautob. Qed. Lemma and_false_r : absorb_r and false. Proof using. tautob. Qed. Lemma and_comm : comm and. Proof using. tautob. Qed. Lemma and_assoc : assoc and. Proof using. tautob. Qed. Lemma and_or_l : distrib_r and or. (* forall x y z, (x || y) && z = x && z || y && z. *) Proof using. tautob. Qed. Lemma and_or_r : distrib_l and or. (* forall x y z, x && (y || z) = x && y || x && z. *) Proof using. tautob. Qed. (* ---------------------------------------------------------------------- *) (* ================================================================= *) (** ** Properties of [or] *) Lemma or_same : idempotent2 or. Proof using. tautob. Qed. Lemma or_false_l : neutral_l or false. Proof using. tautob. Qed. Lemma or_false_r : neutral_r or false. Proof using. tautob. Qed. Lemma or_true_l : absorb_l or true. Proof using. tautob. Qed. Lemma or_true_r : absorb_r or true. Proof using. tautob. Qed. Lemma or_comm : comm or. Proof using. tautob. Qed. Lemma or_assoc : assoc or. Proof using. tautob. Qed. Lemma or_and_l : distrib_r or and. (* forall x y z, (x && y) || z = (x || z) && (y || z). *) Proof using. tautob. Qed. Lemma or_and_r : distrib_l or and. (* forall x y z, x || (y && z) = (x || y) && (x || z). *) Proof using. tautob. Qed. (* ---------------------------------------------------------------------- *) (* ================================================================= *) (** ** Properties of [neg] *) Lemma neg_false : ! false = true. Proof using. auto. Qed. Lemma neg_true : ! true = false. Proof using. auto. Qed. (* --LATER: fix coq display of goals below *) Lemma neg_and : automorphism neg and or. (* forall x y, ! (x && y) = (! x) || (! y) *) Proof using. tautob. Qed. Lemma neg_or : automorphism neg or and. (* forall x y, ! (x || y) = (! x) && (! y) *) Proof using. tautob. Qed. Lemma neg_neg : involutive neg. (* forall x, ! (! x) = x *) Proof using. tautob. Qed. (* ---------------------------------------------------------------------- *) (* ================================================================= *) (** ** Properties of [if then else] *) Section PropertiesIf. Implicit Types x y z : bool. Lemma if_true : forall x y, (if true then x else y) = x. Proof using. auto. Qed. Lemma if_false : forall x y, (if false then x else y) = y. Proof using. auto. Qed. 
Lemma if_then_else_same : forall x y, (if x then y else y) = y. Proof using. tautob. Qed. Lemma if_then_true_else_false : forall x, (if x then true else false) = x. Proof using. tautob. Qed. Lemma if_then_false_else_true : forall x, (if x then false else true) = !x. Proof using. tautob. Qed. Lemma if_then_true : forall x y, (if x then true else y) = x || y. Proof using. tautob. Qed. Lemma if_then_false : forall x y, (if x then false else y) = (!x) && y. Proof using. tautob. Qed. Lemma if_else_false : forall x y, (if x then y else false) = x && y. Proof using. tautob. Qed. Lemma if_else_true : forall x y, (if x then y else true) = (!x) || y. Proof using. tautob. Qed. End PropertiesIf. (* ---------------------------------------------------------------------- *) (* ================================================================= *) (** ** Properties of [impl] and [xor] *) (** We do not provide lemmas for [impl] and [xor] because these functions can be easily expressed in terms of the other operators. *) (* ---------------------------------------------------------------------- *) (* ================================================================= *) (** ** Opacity *) Opaque eqb neg and or. (* ********************************************************************** *) (* ################################################################# *) (** * Tactics *) (* ---------------------------------------------------------------------- *) (* ================================================================= *) (** ** Tactic [rew_neg_neg] *) (** [rew_neg_neg] is a tactic that simplifies all double negations of booleans, i.e. replaces [!!b] with [b]. *) Hint Rewrite neg_neg : rew_neg_neg. Tactic Notation "rew_neg_neg" := autorewrite with rew_neg_neg. Tactic Notation "rew_neg_neg" "~" := rew_neg_neg; auto_tilde. Tactic Notation "rew_neg_neg" "*" := rew_neg_neg; auto_star. Tactic Notation "rew_neg_neg" "in" hyp(H) := autorewrite with rew_neg_neg in H. Tactic Notation "rew_neg_neg" "~" "in" hyp(H) := rew_neg_neg in H; auto_tilde. Tactic Notation "rew_neg_neg" "*" "in" hyp(H) := rew_neg_neg in H; auto_star. Tactic Notation "rew_neg_neg" "in" "*" := autorewrite_in_star_patch ltac:(fun tt => autorewrite with rew_neg_neg). (* autorewrite with rew_neg_neg in *. *) Tactic Notation "rew_neg_neg" "~" "in" "*" := rew_neg_neg in *; auto_tilde. Tactic Notation "rew_neg_neg" "*" "in" "*" := rew_neg_neg in *; auto_star. (* ---------------------------------------------------------------------- *) (* ================================================================= *) (** ** Tactic [rew_bool] *) (** [rew_bool] simplifies boolean expressions, using rewriting lemmas in the database [rew_bool] defined below. *) Hint Rewrite eqb_same eqb_true_l eqb_true_r eqb_false_l eqb_false_r neg_false neg_true neg_neg neg_and neg_or and_true_l and_true_r and_false_l and_false_r or_false_l or_false_r or_true_l or_true_r if_true if_false if_then_else_same if_then_true_else_false if_then_false_else_true if_then_true if_else_false if_then_false if_else_true : rew_bool. Tactic Notation "rew_bool" := autorewrite with rew_bool. Tactic Notation "rew_bool" "~" := rew_bool; auto_tilde. Tactic Notation "rew_bool" "*" := rew_bool; auto_star. Tactic Notation "rew_bool" "in" hyp(H) := autorewrite with rew_bool in H. Tactic Notation "rew_bool" "~" "in" hyp(H) := rew_bool in H; auto_tilde. Tactic Notation "rew_bool" "*" "in" hyp(H) := rew_bool in H; auto_star. 
Tactic Notation "rew_bool" "in" "*" := autorewrite_in_star_patch ltac:(fun tt => autorewrite with rew_bool). (* autorewrite with rew_bool in *. *) Tactic Notation "rew_bool" "~" "in" "*" := rew_bool in *; auto_tilde. Tactic Notation "rew_bool" "*" "in" "*" := rew_bool in *; auto_star.
State Before: ι : Sort ?u.109089 α : Type u β : Type v inst✝ : PseudoEMetricSpace α δ✝ ε : ℝ s t : Set α x : α δ : ℝ E : Set α ⊢ thickening δ E ⊆ cthickening δ E State After: ι : Sort ?u.109089 α : Type u β : Type v inst✝ : PseudoEMetricSpace α δ✝ ε : ℝ s t : Set α x✝ : α δ : ℝ E : Set α x : α hx : x ∈ thickening δ E ⊢ x ∈ cthickening δ E Tactic: intro x hx State Before: ι : Sort ?u.109089 α : Type u β : Type v inst✝ : PseudoEMetricSpace α δ✝ ε : ℝ s t : Set α x✝ : α δ : ℝ E : Set α x : α hx : x ∈ thickening δ E ⊢ x ∈ cthickening δ E State After: ι : Sort ?u.109089 α : Type u β : Type v inst✝ : PseudoEMetricSpace α δ✝ ε : ℝ s t : Set α x✝ : α δ : ℝ E : Set α x : α hx : infEdist x E < ENNReal.ofReal δ ⊢ x ∈ cthickening δ E Tactic: rw [thickening, mem_setOf_eq] at hx State Before: ι : Sort ?u.109089 α : Type u β : Type v inst✝ : PseudoEMetricSpace α δ✝ ε : ℝ s t : Set α x✝ : α δ : ℝ E : Set α x : α hx : infEdist x E < ENNReal.ofReal δ ⊢ x ∈ cthickening δ E State After: no goals Tactic: exact hx.le
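Assembled, the tactic trace above corresponds to the following short Lean 4 proof; a sketch against Mathlib, where this fact is provided as `Metric.thickening_subset_cthickening`:

import Mathlib

open Metric Set

-- The open δ-thickening requires `infEdist x E < ENNReal.ofReal δ`; the closed
-- thickening only asks for `≤`, so weakening the strict inequality suffices.
example {α : Type*} [PseudoEMetricSpace α] (δ : ℝ) (E : Set α) :
    thickening δ E ⊆ cthickening δ E := by
  intro x hx
  rw [thickening, mem_setOf_eq] at hx
  exact hx.le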
= Lock Haven, Pennsylvania =
> env <- new.env()
> env[["x"]] <- 123
> env[["x"]]
[1] 123
Require Import Coqlib. Require Import Errors. Require Import Maps. Require Import AST. Require Import Integers. Require Import Values. Require Import Memory. Require Import Globalenvs. Require Import Events. Require Import Smallstep. Require Import Op. Require Import Registers. Require Import Inlining. Require Import Inliningspec. Require Import RTL. Require Import mem_lemmas. Require Import semantics. Require Import reach. Require Import effect_semantics. Require Import structured_injections. Require Import simulations. Require Import effect_properties. Require Import simulations_lemmas. Require Export Axioms. Require Import RTL_coop. Require Import RTL_eff. (*Load Santiago_tactics.*) Ltac open_Hyp:= match goal with | [H: and _ _ |- _] => destruct H | [H: exists _, _ |- _] => destruct H end. (* The rewriters *) Section PRESERVATION. Hint Rewrite vis_restrict_sm: restrict. Hint Rewrite restrict_sm_all: restrict. Hint Rewrite restrict_sm_frgnBlocksSrc: restrict. Variable SrcProg: program. Variable TrgProg: program. Hypothesis TRANSF: transf_program SrcProg = OK TrgProg. Let ge : genv := Genv.globalenv SrcProg. Let tge : genv := Genv.globalenv TrgProg. Let fenv := funenv_program SrcProg. Lemma symbols_preserved: forall (s: ident), Genv.find_symbol tge s = Genv.find_symbol ge s. Proof. intros. apply Genv.find_symbol_transf_partial with (transf_fundef fenv). apply TRANSF. Qed. Lemma varinfo_preserved: forall b, Genv.find_var_info tge b = Genv.find_var_info ge b. Proof. intros. apply Genv.find_var_info_transf_partial with (transf_fundef fenv). apply TRANSF. Qed. Lemma functions_translated: forall (v: val) (f: fundef), Genv.find_funct ge v = Some f -> exists f', Genv.find_funct tge v = Some f' /\ transf_fundef fenv f = OK f'. Proof. eapply (Genv.find_funct_transf_partial (transf_fundef fenv) _ TRANSF). Qed. Lemma function_ptr_translated: forall (b: block) (f: fundef), Genv.find_funct_ptr ge b = Some f -> exists f', Genv.find_funct_ptr tge b = Some f' /\ transf_fundef fenv f = OK f'. Proof. eapply (Genv.find_funct_ptr_transf_partial (transf_fundef fenv) _ TRANSF). Qed. Lemma sig_function_translated: forall f f', transf_fundef fenv f = OK f' -> funsig f' = funsig f. Proof. intros. destruct f; Errors.monadInv H. exploit transf_function_spec; eauto. intros SP; inv SP. auto. auto. Qed. Lemma GDE_lemma: genvs_domain_eq ge tge. Proof. (* OLD unfold genvs_domain_eq, genv2blocks. simpl; split; intros. split; intros; destruct H as [id Hid]. rewrite <- symbols_preserved in Hid. exists id; trivial. rewrite symbols_preserved in Hid. exists id; trivial. ad_it. (*rewrite varinfo_preserved. intuition.*) *) unfold genvs_domain_eq, genv2blocks. simpl; split; intros. split; intros; destruct H as [id Hid]. rewrite <- symbols_preserved in Hid. exists id; trivial. rewrite symbols_preserved in Hid. exists id; trivial. split; intros. rewrite varinfo_preserved; intuition. split. intros [f H]. apply function_ptr_translated in H. destruct H as [? [? _]]. eexists; eassumption. intros [f H]. apply (@Genv.find_funct_ptr_rev_transf_partial _ _ _ _ _ _ TRANSF) in H. destruct H as [? [? _]]. eexists; eassumption. Qed. Hint Resolve GDE_lemma: trans_correct. (** ** Properties of contexts and relocations *) Remark sreg_below_diff: forall ctx r r', Plt r' ctx.(dreg) -> sreg ctx r <> r'. Proof. intros. zify. unfold sreg; rewrite shiftpos_eq. xomega. Qed. Remark context_below_diff: forall ctx1 ctx2 r1 r2, context_below ctx1 ctx2 -> Ple r1 ctx1.(mreg) -> sreg ctx1 r1 <> sreg ctx2 r2. Proof. intros. red in H. zify. 
unfold sreg; rewrite ! shiftpos_eq. xomega. Qed. Remark context_below_lt: forall ctx1 ctx2 r, context_below ctx1 ctx2 -> Ple r ctx1.(mreg) -> Plt (sreg ctx1 r) ctx2.(dreg). Proof. intros. red in H. unfold Plt; zify. unfold sreg; rewrite shiftpos_eq. xomega. Qed. (** ** Agreement between register sets before and after inlining. *) Definition agree_regs (F: meminj) (ctx: context) (rs rs': regset) := (forall r, Ple r ctx.(mreg) -> val_inject F rs#r rs'#(sreg ctx r)) /\(forall r, Plt ctx.(mreg) r -> rs#r = Vundef). Definition val_reg_charact (F: meminj) (ctx: context) (rs': regset) (v: val) (r: reg) := (Plt ctx.(mreg) r /\ v = Vundef) \/ (Ple r ctx.(mreg) /\ val_inject F v rs'#(sreg ctx r)). Remark Plt_Ple_dec: forall p q, {Plt p q} + {Ple q p}. Proof. intros. destruct (plt p q). left; auto. right; xomega. Qed. Lemma agree_val_reg_gen: forall F ctx rs rs' r, agree_regs F ctx rs rs' -> val_reg_charact F ctx rs' rs#r r. Proof. intros. destruct H as [A B]. destruct (Plt_Ple_dec (mreg ctx) r). left. rewrite B; auto. right. auto. Qed. Lemma agree_val_regs_gen: forall F ctx rs rs' rl, agree_regs F ctx rs rs' -> list_forall2 (val_reg_charact F ctx rs') rs##rl rl. Proof. induction rl; intros; constructor; auto. apply agree_val_reg_gen; auto. Qed. Lemma agree_val_reg: forall F ctx rs rs' r, agree_regs F ctx rs rs' -> val_inject F rs#r rs'#(sreg ctx r). Proof. intros. exploit agree_val_reg_gen; eauto. instantiate (1 := r). intros [[A B] | [A B]]. rewrite B; auto. auto. Qed. Lemma agree_val_regs: forall F ctx rs rs' rl, agree_regs F ctx rs rs' -> val_list_inject F rs##rl rs'##(sregs ctx rl). Proof. induction rl; intros; simpl. constructor. constructor; auto. apply agree_val_reg; auto. Qed. Lemma agree_set_reg: forall F ctx rs rs' r v v', agree_regs F ctx rs rs' -> val_inject F v v' -> Ple r ctx.(mreg) -> agree_regs F ctx (rs#r <- v) (rs'#(sreg ctx r) <- v'). Proof. unfold agree_regs; intros. destruct H. split; intros. repeat rewrite Regmap.gsspec. destruct (peq r0 r). subst r0. rewrite peq_true. auto. rewrite peq_false. auto. apply shiftpos_diff; auto. rewrite Regmap.gso. auto. xomega. Qed. Lemma agree_set_reg_undef: forall F ctx rs rs' r v', agree_regs F ctx rs rs' -> agree_regs F ctx (rs#r <- Vundef) (rs'#(sreg ctx r) <- v'). Proof. unfold agree_regs; intros. destruct H. split; intros. repeat rewrite Regmap.gsspec. destruct (peq r0 r). subst r0. rewrite peq_true. auto. rewrite peq_false. auto. apply shiftpos_diff; auto. rewrite Regmap.gsspec. destruct (peq r0 r); auto. Qed. Lemma agree_set_reg_undef': forall F ctx rs rs' r, agree_regs F ctx rs rs' -> agree_regs F ctx (rs#r <- Vundef) rs'. Proof. unfold agree_regs; intros. destruct H. split; intros. rewrite Regmap.gsspec. destruct (peq r0 r). subst r0. auto. auto. rewrite Regmap.gsspec. destruct (peq r0 r); auto. Qed. Lemma agree_regs_invariant: forall F ctx rs rs1 rs2, agree_regs F ctx rs rs1 -> (forall r, Ple ctx.(dreg) r -> Plt r (ctx.(dreg) + ctx.(mreg)) -> rs2#r = rs1#r) -> agree_regs F ctx rs rs2. Proof. unfold agree_regs; intros. destruct H. split; intros. rewrite H0. auto. apply shiftpos_above. eapply Plt_le_trans. apply shiftpos_below. xomega. apply H1; auto. Qed. Lemma agree_regs_incr: forall F ctx rs1 rs2 F', agree_regs F ctx rs1 rs2 -> inject_incr F F' -> agree_regs F' ctx rs1 rs2. Proof. intros. destruct H. split; intros. eauto. auto. Qed. Remark agree_regs_init: forall F ctx rs, agree_regs F ctx (Regmap.init Vundef) rs. Proof. intros; split; intros. rewrite Regmap.gi; auto. rewrite Regmap.gi; auto. Qed. 
Lemma agree_regs_init_regs: forall F ctx rl vl vl', val_list_inject F vl vl' -> (forall r, In r rl -> Ple r ctx.(mreg)) -> agree_regs F ctx (init_regs vl rl) (init_regs vl' (sregs ctx rl)). Proof. induction rl; simpl; intros. apply agree_regs_init. inv H. apply agree_regs_init. apply agree_set_reg; auto. Qed. (** ** Executing sequences of moves *) Lemma tr_moves_init_regs: forall F stk f sp m ctx1 ctx2, context_below ctx1 ctx2 -> forall rdsts rsrcs vl pc1 pc2 rs1, tr_moves f.(fn_code) pc1 (sregs ctx1 rsrcs) (sregs ctx2 rdsts) pc2 -> (forall r, In r rdsts -> Ple r ctx2.(mreg)) -> list_forall2 (val_reg_charact F ctx1 rs1) vl rsrcs -> exists rs2, star step tge (State stk f sp pc1 rs1 m) E0 (State stk f sp pc2 rs2 m) /\ agree_regs F ctx2 (init_regs vl rdsts) rs2 /\ forall r, Plt r ctx2.(dreg) -> rs2#r = rs1#r. Proof. induction rdsts; simpl; intros. (* rdsts = nil *) inv H0. exists rs1; split. apply star_refl. split. apply agree_regs_init. auto. (* rdsts = a :: rdsts *) inv H2. inv H0. exists rs1; split. apply star_refl. split. apply agree_regs_init. auto. simpl in H0. inv H0. exploit IHrdsts; eauto. intros [rs2 [A [B C]]]. exists (rs2#(sreg ctx2 a) <- (rs2#(sreg ctx1 b1))). split. eapply star_right. eauto. eapply exec_Iop; eauto. traceEq. split. destruct H3 as [[P Q] | [P Q]]. subst a1. eapply agree_set_reg_undef; eauto. eapply agree_set_reg; eauto. rewrite C; auto. apply context_below_lt; auto. intros. rewrite Regmap.gso. auto. apply sym_not_equal. eapply sreg_below_diff; eauto. destruct H2; discriminate. Qed. Lemma tr_moves_init_regs': forall F hf stk f sp m ctx1 ctx2, context_below ctx1 ctx2 -> forall rdsts rsrcs vl pc1 pc2 rs1, tr_moves f.(fn_code) pc1 (sregs ctx1 rsrcs) (sregs ctx2 rdsts) pc2 -> (forall r, In r rdsts -> Ple r ctx2.(mreg)) -> list_forall2 (val_reg_charact F ctx1 rs1) vl rsrcs -> exists rs2, semantics_lemmas.corestep_star (rtl_eff_sem hf) tge (RTL_State stk f sp pc1 rs1) m (RTL_State stk f sp pc2 rs2) m /\ agree_regs F ctx2 (init_regs vl rdsts) rs2 /\ forall r, Plt r ctx2.(dreg) -> rs2#r = rs1#r. Proof. induction rdsts; simpl; intros. (* rdsts = nil *) inv H0. exists rs1; split. apply semantics_lemmas.corestep_star_zero. split. apply agree_regs_init. auto. (* rdsts = a :: rdsts *) inv H2. inv H0. exists rs1; split. apply semantics_lemmas.corestep_star_zero. split. apply agree_regs_init. auto. simpl in H0. inv H0. exploit IHrdsts; eauto. intros [rs2 [A [B C]]]. exists (rs2#(sreg ctx2 a) <- (rs2#(sreg ctx1 b1))). split. eapply semantics_lemmas.corestep_star_trans; eauto. eapply semantics_lemmas.corestep_star_one. eapply rtl_corestep_exec_Iop; eauto. split. destruct H3 as [[P Q] | [P Q]]. subst a1. eapply agree_set_reg_undef; eauto. eapply agree_set_reg; eauto. rewrite C; auto. apply context_below_lt; auto. intros. rewrite Regmap.gso. auto. apply sym_not_equal. eapply sreg_below_diff; eauto. destruct H2; discriminate. Qed. Lemma tr_moves_init_regs_eff: forall F hf stk f sp m ctx1 ctx2, context_below ctx1 ctx2 -> forall rdsts rsrcs vl pc1 pc2 rs1, tr_moves f.(fn_code) pc1 (sregs ctx1 rsrcs) (sregs ctx2 rdsts) pc2 -> (forall r, In r rdsts -> Ple r ctx2.(mreg)) -> list_forall2 (val_reg_charact F ctx1 rs1) vl rsrcs -> exists rs2, effstep_star (rtl_eff_sem hf) tge EmptyEffect (RTL_State stk f sp pc1 rs1) m (RTL_State stk f sp pc2 rs2) m /\ agree_regs F ctx2 (init_regs vl rdsts) rs2 /\ forall r, Plt r ctx2.(dreg) -> rs2#r = rs1#r. Proof. induction rdsts; simpl; intros. (* rdsts = nil *) inv H0. exists rs1; split. apply effstep_star_zero. split. apply agree_regs_init. auto. 
(* rdsts = a :: rdsts *) inv H2. inv H0. exists rs1; split. apply effstep_star_zero. split. apply agree_regs_init. auto. simpl in H0. inv H0. exploit IHrdsts; eauto. intros [rs2 [A [B C]]]. exists (rs2#(sreg ctx2 a) <- (rs2#(sreg ctx1 b1))). split. eapply effstep_star_trans'; eauto. eapply effstep_star_one. eapply rtl_effstep_exec_Iop; eauto. extensionality x. reflexivity. split. destruct H3 as [[P Q] | [P Q]]. subst a1. eapply agree_set_reg_undef; eauto. eapply agree_set_reg; eauto. rewrite C; auto. apply context_below_lt; auto. intros. rewrite Regmap.gso. auto. apply sym_not_equal. eapply sreg_below_diff; eauto. destruct H2; discriminate. Qed. (** ** Memory invariants *) (** A stack location is private if it is not the image of a valid location and we have full rights on it. *) Definition loc_private (F: meminj) (m m': mem) (sp: block) (ofs: Z) : Prop := Mem.perm m' sp ofs Cur Freeable /\ (forall b delta, F b = Some(sp, delta) -> ~Mem.perm m b (ofs - delta) Max Nonempty). (** Likewise, for a range of locations. *) Definition range_private (F: meminj) (m m': mem) (sp: block) (lo hi: Z) : Prop := forall ofs, lo <= ofs < hi -> loc_private F m m' sp ofs. Lemma range_private_invariant: forall F m m' sp lo hi F1 m1 m1', range_private F m m' sp lo hi -> (forall b delta ofs, F1 b = Some(sp, delta) -> Mem.perm m1 b ofs Max Nonempty -> lo <= ofs + delta < hi -> F b = Some(sp, delta) /\ Mem.perm m b ofs Max Nonempty) -> (forall ofs, Mem.perm m' sp ofs Cur Freeable -> Mem.perm m1' sp ofs Cur Freeable) -> range_private F1 m1 m1' sp lo hi. Proof. intros; red; intros. exploit H; eauto. intros [A B]. split; auto. intros; red; intros. exploit H0; eauto. omega. intros [P Q]. eelim B; eauto. Qed. Lemma range_private_perms: forall F m m' sp lo hi, range_private F m m' sp lo hi -> Mem.range_perm m' sp lo hi Cur Freeable. Proof. intros; red; intros. eapply H; eauto. Qed. Lemma range_private_alloc_left: forall F m m' sp' base hi sz m1 sp F1, range_private F m m' sp' base hi -> Mem.alloc m 0 sz = (m1, sp) -> F1 sp = Some(sp', base) -> (forall b, b <> sp -> F1 b = F b) -> range_private F1 m1 m' sp' (base + Zmax sz 0) hi. Proof. intros; red; intros. exploit (H ofs). generalize (Zmax2 sz 0). omega. intros [A B]. split; auto. intros; red; intros. exploit Mem.perm_alloc_inv; eauto. destruct (eq_block b sp); intros. subst b. rewrite H1 in H4; inv H4. rewrite Zmax_spec in H3. destruct (zlt 0 sz); omega. rewrite H2 in H4; auto. eelim B; eauto. Qed. Lemma range_private_free_left: forall F m m' sp base sz hi b m1, range_private F m m' sp (base + Zmax sz 0) hi -> Mem.free m b 0 sz = Some m1 -> F b = Some(sp, base) -> Mem.inject F m m' -> range_private F m1 m' sp base hi. Proof. intros; red; intros. destruct (zlt ofs (base + Zmax sz 0)) as [z|z]. red; split. replace ofs with ((ofs - base) + base) by omega. eapply Mem.perm_inject; eauto. eapply Mem.free_range_perm; eauto. rewrite Zmax_spec in z. destruct (zlt 0 sz); omega. intros; red; intros. destruct (eq_block b b0). subst b0. rewrite H1 in H4; inv H4. eelim Mem.perm_free_2; eauto. rewrite Zmax_spec in z. destruct (zlt 0 sz); omega. exploit Mem.mi_no_overlap; eauto. apply Mem.perm_cur_max. apply Mem.perm_implies with Freeable; auto with mem. eapply Mem.free_range_perm. eauto. instantiate (1 := ofs - base). rewrite Zmax_spec in z. destruct (zlt 0 sz); omega. eapply Mem.perm_free_3; eauto. intros [A | A]. congruence. omega. exploit (H ofs). omega. intros [A B]. split. auto. intros; red; intros. eelim B; eauto. eapply Mem.perm_free_3; eauto. Qed. 
Lemma range_private_extcall: forall F F' m1 m2 m1' m2' sp base hi, range_private F m1 m1' sp base hi -> (forall b ofs p, Mem.valid_block m1 b -> Mem.perm m2 b ofs Max p -> Mem.perm m1 b ofs Max p) -> Mem.unchanged_on (loc_out_of_reach F m1) m1' m2' -> Mem.inject F m1 m1' -> inject_incr F F' -> inject_separated F F' m1 m1' -> Mem.valid_block m1' sp -> range_private F' m2 m2' sp base hi. Proof. intros until hi; intros RP PERM UNCH INJ INCR SEP VB. red; intros. exploit RP; eauto. intros [A B]. split. eapply Mem.perm_unchanged_on; eauto. intros. red in SEP. destruct (F b) as [[sp1 delta1] |] eqn:?. exploit INCR; eauto. intros EQ; rewrite H0 in EQ; inv EQ. red; intros; eelim B; eauto. eapply PERM; eauto. red. destruct (plt b (Mem.nextblock m1)); auto. exploit Mem.mi_freeblocks; eauto. congruence. exploit SEP; eauto. tauto. Qed. (*NEW*) Lemma range_private_extcall_sm: forall F F' m1 m2 m1' m2' sp base hi (WDF: SM_wd F) (WDF': SM_wd F'), range_private (as_inj F) m1 m1' sp base hi -> (forall b ofs p, Mem.valid_block m1 b -> Mem.perm m2 b ofs Max p -> Mem.perm m1 b ofs Max p) -> Mem.unchanged_on (local_out_of_reach F m1) m1' m2' -> Mem.inject (as_inj F) m1 m1' -> extern_incr F F' -> sm_inject_separated F F' m1 m1' -> Mem.valid_block m1' sp -> (*NEW*) locBlocksTgt F sp = true -> range_private (as_inj F') m2 m2' sp base hi. Proof. intros until hi; intros WDF WDF' RP PERM UNCH INJ INCR SEP VB LBT. red; intros. exploit RP; eauto. intros [A B]. split. eapply Mem.perm_unchanged_on; eauto. split; trivial. intros. left. apply local_in_all in H0; eauto. intros. destruct SEP as [SEPa [SEPb SEPc]]. destruct (as_inj F b) as [[sp1 delta1] |] eqn:?. exploit (extern_incr_as_inj _ _ INCR); eauto. intros EQ; rewrite H0 in EQ; inv EQ. red; intros. eelim B; eauto. eapply PERM; eauto. red. destruct (plt b (Mem.nextblock m1)); auto. exploit Mem.mi_freeblocks; eauto. congruence. destruct (SEPa _ _ _ Heqo H0). elim (SEPc _ H2). unfold DomTgt. assert (LT: locBlocksTgt F = locBlocksTgt F') by eapply INCR. rewrite <- LT, LBT. trivial. eapply Mem.perm_valid_block; eassumption. Qed. (** ** Relating global environments *) Inductive match_globalenvs mu (bound: block): Prop := | mk_match_globalenvs (DOMAIN: forall b, Plt b bound -> ((*frgnBlocksSrc mu b = true /\*) as_inj mu b = Some(b, 0))) (IMAGE: forall b1 b2 delta gv (GV: Genv.find_var_info ge b2 = Some gv), as_inj mu b1 = Some(b2, delta) -> Plt b2 bound -> b1 = b2) (SYMBOLS: forall id b, Genv.find_symbol ge id = Some b -> Plt b bound) (FUNCTIONS: forall b fd, Genv.find_funct_ptr ge b = Some fd -> Plt b bound) (VARINFOS: forall b gv, Genv.find_var_info ge b = Some gv -> Plt b bound). Lemma find_function_agree: forall ros rs fd F ctx rs' bound, find_function ge ros rs = Some fd -> agree_regs (as_inj F) ctx rs rs' -> match_globalenvs F bound -> exists fd', find_function tge (sros ctx ros) rs' = Some fd' /\ transf_fundef fenv fd = OK fd'. Proof. intros. destruct ros as [r | id]; simpl in *. (* register *) assert (rs'#(sreg ctx r) = rs#r). exploit Genv.find_funct_inv; eauto. intros [b EQ]. assert (A: val_inject (as_inj F) rs#r rs'#(sreg ctx r)). eapply agree_val_reg; eauto. rewrite EQ in A; inv A. inv H1. assert (HH: Plt b bound). apply FUNCTIONS with fd. rewrite EQ in H; rewrite Genv.find_funct_find_funct_ptr in H. auto. (*destruct*) specialize (DOMAIN b HH). rewrite DOMAIN in H5; inv H5. rewrite Int.add_zero. rewrite EQ. trivial. eapply functions_translated; eauto. rewrite <- H2 in H. trivial. (* symbol *) rewrite symbols_preserved. 
destruct (Genv.find_symbol ge id); try discriminate. eapply function_ptr_translated; eauto. Qed. (* Lemma find_function_agree': forall ros rs fd F ctx rs' bound, find_function ge ros rs = Some fd -> agree_regs (as_inj F) ctx rs rs' -> match_globalenvs F bound -> exists fd', find_function tge (sros ctx ros) rs' = Some fd' /\ transf_fundef fenv fd = OK fd'. Proof. intros. destruct ros as [r | id]; simpl in *. (* register *) assert (rs'#(sreg ctx r) = rs#r). exploit Genv.find_funct_inv; eauto. intros [b EQ]. assert (A: val_inject (as_inj F) rs#r rs'#(sreg ctx r)). eapply agree_val_reg; eauto. rewrite EQ in A; inv A. inv H1. destruct (DOMAIN b). apply FUNCTIONS with fd. rewrite EQ in H; rewrite Genv.find_funct_find_funct_ptr in H. auto. rewrite H2 in H5; inv H5. rewrite Int.add_zero. rewrite EQ. trivial. eapply functions_translated; eauto. rewrite <- H2 in H. trivial. (* symbol *) rewrite symbols_preserved. destruct (Genv.find_symbol ge id); try discriminate. eapply function_ptr_translated; eauto. Qed.*) (** ** Relating stacks *) Inductive match_stacks (mu: SM_Injection) (m m': mem): list stackframe -> list stackframe -> block -> Prop := | match_stacks_nil: forall bound1 bound (MG: match_globalenvs mu bound1) (BELOW: Ple bound1 bound), match_stacks mu m m' nil nil bound | match_stacks_cons: forall res (f:function) sp pc rs stk (f':function) sp' rs' stk' bound ctx (MS: match_stacks_inside mu m m' stk stk' f' ctx sp' rs') (FB: tr_funbody fenv f'.(fn_stacksize) ctx f f'.(fn_code)) (AG: agree_regs (as_inj mu) ctx rs rs') (SP: (as_inj mu) sp = Some(sp', ctx.(dstk))) (SL: locBlocksTgt mu sp' = true ) (PRIV: range_private (as_inj mu) m m' sp' (ctx.(dstk) + ctx.(mstk)) f'.(fn_stacksize)) (SSZ1: 0 <= f'.(fn_stacksize) < Int.max_unsigned) (SSZ2: forall ofs, Mem.perm m' sp' ofs Max Nonempty -> 0 <= ofs <= f'.(fn_stacksize)) (RES: Ple res ctx.(mreg)) (BELOW: Plt sp' bound), match_stacks (mu) m m' (Stackframe res f (Vptr sp Int.zero) pc rs :: stk) (Stackframe (sreg ctx res) f' (Vptr sp' Int.zero) (spc ctx pc) rs' :: stk') bound | match_stacks_untailcall: forall stk res f' sp' rpc rs' stk' bound ctx (MS: match_stacks_inside (mu) m m' stk stk' f' ctx sp' rs') (PRIV: range_private (as_inj mu) m m' sp' ctx.(dstk) f'.(fn_stacksize)) (SSZ1: 0 <= f'.(fn_stacksize) < Int.max_unsigned) (SSZ2: forall ofs, Mem.perm m' sp' ofs Max Nonempty -> 0 <= ofs <= f'.(fn_stacksize)) (RET: ctx.(retinfo) = Some (rpc, res)) (SL: locBlocksTgt mu sp' = true ) (BELOW: Plt sp' bound), match_stacks (mu) m m' stk (Stackframe res f' (Vptr sp' Int.zero) rpc rs' :: stk') bound with match_stacks_inside (mu: SM_Injection) (m m': mem): list stackframe -> list stackframe -> function -> context -> block -> regset -> Prop := | match_stacks_inside_base: forall stk stk' f' ctx sp' rs' (MS: match_stacks (mu) m m' stk stk' sp') (SL: locBlocksTgt mu sp' = true ) (RET: ctx.(retinfo) = None) (DSTK: ctx.(dstk) = 0), match_stacks_inside (mu) m m' stk stk' f' ctx sp' rs' | match_stacks_inside_inlined: forall res f sp pc rs stk stk' f' ctx sp' rs' ctx' (MS: match_stacks_inside (mu) m m' stk stk' f' ctx' sp' rs') (FB: tr_funbody fenv f'.(fn_stacksize) ctx' f f'.(fn_code)) (AG: agree_regs (as_inj mu) ctx' rs rs') (SP: (local_of mu) sp = Some(sp', ctx'.(dstk))) (SL: locBlocksTgt mu sp' = true ) (PAD: range_private (as_inj mu) m m' sp' (ctx'.(dstk) + ctx'.(mstk)) ctx.(dstk)) (RES: Ple res ctx'.(mreg)) (RET: ctx.(retinfo) = Some (spc ctx' pc, sreg ctx' res)) (BELOW: context_below ctx' ctx) (SBELOW: context_stack_call ctx' ctx), match_stacks_inside (mu) m m' 
(Stackframe res f (Vptr sp Int.zero) pc rs :: stk) stk' f' ctx sp' rs'. (** Properties of match_stacks *) (*NEW*) Section MATCH_STACKS_replace_externs. Variable mu: SM_Injection. Variables FS FT: block -> bool. Hypothesis HFS: forall b, frgnBlocksSrc mu b = true -> FS b = true. Variables m m': mem. Lemma match_stacks_replace_externs: forall stk stk' bound, match_stacks mu m m' stk stk' bound -> match_stacks (replace_externs mu FS FT) m m' stk stk' bound with match_stacks_inside_replace_externs: forall stk stk' f ctx sp rs', match_stacks_inside mu m m' stk stk' f ctx sp rs' -> match_stacks_inside (replace_externs mu FS FT) m m' stk stk' f ctx sp rs'. Proof. induction 1; eauto. { econstructor; try rewrite replace_externs_as_inj; try eassumption. destruct MG. econstructor; try rewrite replace_externs_as_inj; eauto. (* intros. rewrite replace_externs_frgnBlocksSrc. destruct (DOMAIN _ H). split; eauto. *) } { econstructor; try rewrite replace_externs_as_inj; eauto. rewrite replace_externs_locBlocksTgt. assumption. } { econstructor; try rewrite replace_externs_as_inj; eauto. rewrite replace_externs_locBlocksTgt; trivial. } induction 1; eauto. { econstructor; eauto. rewrite replace_externs_locBlocksTgt; trivial. } { eapply match_stacks_inside_inlined; try rewrite replace_externs_as_inj; eauto. rewrite replace_externs_local; trivial. rewrite replace_externs_locBlocksTgt; trivial. } Qed. End MATCH_STACKS_replace_externs. (*NEW*) Section MATCH_STACKS_replace_locals. Variable mu: SM_Injection. Variables PS PT: block -> bool. Variables m m': mem. Lemma match_stacks_replace_locals: forall stk stk' bound, match_stacks mu m m' stk stk' bound -> match_stacks (replace_locals mu PS PT) m m' stk stk' bound with match_stacks_inside_replace_locals: forall stk stk' f ctx sp rs', match_stacks_inside mu m m' stk stk' f ctx sp rs' -> match_stacks_inside (replace_locals mu PS PT) m m' stk stk' f ctx sp rs'. Proof. induction 1; eauto. { econstructor; try eassumption. destruct MG. constructor; try rewrite replace_locals_as_inj; eauto. (*rewrite replace_locals_frgnBlocksSrc; assumption.*) } { econstructor; try rewrite replace_locals_as_inj; eauto. rewrite replace_locals_locBlocksTgt; trivial. } { econstructor; try rewrite replace_locals_as_inj; eauto. rewrite replace_locals_locBlocksTgt; trivial. } induction 1; eauto. { econstructor; eauto. rewrite replace_locals_locBlocksTgt; trivial. } { eapply match_stacks_inside_inlined; try rewrite replace_locals_as_inj; eauto. rewrite replace_locals_local; trivial. rewrite replace_locals_locBlocksTgt; trivial. } Qed. Lemma match_stacks_replace_locals_restrict: forall stk stk' bound, match_stacks (restrict_sm mu (vis mu)) m m' stk stk' bound -> match_stacks (restrict_sm (replace_locals mu PS PT) (vis mu)) m m' stk stk' bound with match_stacks_inside_replace_locals_restrict: forall stk stk' f ctx sp rs', match_stacks_inside (restrict_sm mu (vis mu)) m m' stk stk' f ctx sp rs' -> match_stacks_inside (restrict_sm (replace_locals mu PS PT) (vis mu)) m m' stk stk' f ctx sp rs'. Proof. induction 1; eauto. { econstructor; try eassumption. destruct MG. constructor; eauto. rewrite (*restrict_sm_frgnBlocksSrc,*) restrict_sm_all, (*replace_locals_frgnBlocksSrc,*) replace_locals_as_inj; rewrite restrict_sm_all (*, restrict_sm_frgnBlocksSrc*) in DOMAIN; assumption. rewrite restrict_sm_all, replace_locals_as_inj; rewrite restrict_sm_all in IMAGE; assumption. } { econstructor; try rewrite restrict_sm_all, replace_locals_as_inj in *; eauto. 
rewrite restrict_sm_locBlocksTgt, replace_locals_locBlocksTgt in *; trivial. } { econstructor; try rewrite restrict_sm_all, replace_locals_as_inj in *; eauto. rewrite restrict_sm_locBlocksTgt, replace_locals_locBlocksTgt in *; trivial. } induction 1; eauto. { econstructor; eauto. rewrite restrict_sm_locBlocksTgt, replace_locals_locBlocksTgt in *; trivial. } { eapply match_stacks_inside_inlined; try rewrite restrict_sm_all, restrict_sm_local, replace_locals_as_inj in *; eauto. rewrite restrict_sm_local, replace_locals_local in *; trivial. rewrite restrict_sm_locBlocksTgt, replace_locals_locBlocksTgt in *; trivial. } Qed. End MATCH_STACKS_replace_locals. Lemma match_globalenvs_intern_incr mu mu' b: forall (MG: match_globalenvs mu b) (INC: intern_incr mu mu') (HJ: forall b1 b2 d, as_inj mu' b1 = Some (b2, d) -> Plt b2 b -> as_inj mu b1 = Some (b2, d)) (WD: SM_wd mu'), match_globalenvs mu' b. Proof. intros. inv MG. constructor; eauto. assert (FBS: frgnBlocksSrc mu = frgnBlocksSrc mu') by eapply INC. (*NEW*) intros. eapply (intern_incr_as_inj _ _ INC); auto; apply DOMAIN. (*intros. destruct (DOMAIN _ H). split. trivial. eapply (intern_incr_as_inj _ _ INC); trivial.*) Qed. Lemma match_globalenvs_extern_incr mu mu' b: forall (MG: match_globalenvs mu b) (INC: extern_incr mu mu') (HJ: forall b1 b2 d, as_inj mu' b1 = Some (b2, d) -> Plt b2 b -> as_inj mu b1 = Some (b2, d)) (WD: SM_wd mu'), match_globalenvs mu' b. Proof. intros. inv MG. constructor; eauto. assert (FBS: frgnBlocksSrc mu = frgnBlocksSrc mu') by eapply INC. (*NEW*) intros. eapply (extern_incr_as_inj _ _ INC); auto; apply DOMAIN. (* rewrite <- FBS; intros. destruct (DOMAIN _ H). split. trivial. eapply (extern_incr_as_inj _ _ INC); trivial.*) Qed. Section MATCH_STACKS. Variable F: SM_Injection. Variables m m': mem. Let Finj := as_inj F. Lemma match_stacks_globalenvs: forall stk stk' bound, match_stacks F m m' stk stk' bound -> exists b, match_globalenvs F b with match_stacks_inside_globalenvs: forall stk stk' f ctx sp rs', match_stacks_inside F m m' stk stk' f ctx sp rs' -> exists b, match_globalenvs F b. Proof. induction 1; eauto. induction 1; eauto. Qed. Lemma match_globalenvs_preserves_globals: forall b, match_globalenvs F b -> meminj_preserves_globals ge Finj. Proof. intros. inv H. red. split. intros. eapply (DOMAIN _ (SYMBOLS _ _ H)). split. intros. eapply (DOMAIN _ (VARINFOS _ _ H)). intros. symmetry. eapply IMAGE; eauto. Qed. Lemma match_stacks_inside_globals: forall stk stk' f ctx sp rs', match_stacks_inside F m m' stk stk' f ctx sp rs' -> meminj_preserves_globals ge Finj. Proof. intros. exploit match_stacks_inside_globalenvs; eauto. intros [b A]. eapply match_globalenvs_preserves_globals; eauto. Qed. Lemma match_stacks_bound: forall stk stk' bound bound1, match_stacks F m m' stk stk' bound -> Ple bound bound1 -> match_stacks F m m' stk stk' bound1. Proof. intros. inv H. apply match_stacks_nil with bound0. auto. eapply Ple_trans; eauto. eapply match_stacks_cons; eauto. eapply Plt_le_trans; eauto. eapply match_stacks_untailcall; eauto. eapply Plt_le_trans; eauto. Qed. Variable F1: SM_Injection. Let Finj1 := as_inj F1. Variables m1 m1': mem. (*Hypothesis INCR: inject_incr Finj Finj1.*) Hypothesis INCR: intern_incr F F1. Hypothesis WDF: SM_wd F. Hypothesis WDF1: SM_wd F1. Lemma INCR': inject_incr Finj Finj1. eapply intern_incr_as_inj; auto. Qed. Lemma incre_local_of: forall mu mu' (INCR0: intern_incr mu mu') b b' delta, local_of mu b = Some (b', delta) -> local_of mu' b = Some (b', delta). intros. apply intern_incr_local in INCR0. 
apply INCR0; auto. Qed. Lemma match_stacks_invariant: forall stk stk' bound, match_stacks F m m' stk stk' bound -> forall (INJ: forall b1 b2 delta, Finj1 b1 = Some(b2, delta) -> Plt b2 bound -> Finj b1 = Some(b2, delta)) (PERM1: forall b1 b2 delta ofs, Finj1 b1 = Some(b2, delta) -> Plt b2 bound -> Mem.perm m1 b1 ofs Max Nonempty -> Mem.perm m b1 ofs Max Nonempty) (PERM2: forall b ofs, Plt b bound -> Mem.perm m' b ofs Cur Freeable -> Mem.perm m1' b ofs Cur Freeable) (PERM3: forall b ofs k p, Plt b bound -> Mem.perm m1' b ofs k p -> Mem.perm m' b ofs k p), match_stacks F1 m1 m1' stk stk' bound with match_stacks_inside_invariant: forall stk stk' f' ctx sp' rs1, match_stacks_inside F m m' stk stk' f' ctx sp' rs1 -> forall rs2 (RS: forall r, Plt r ctx.(dreg) -> rs2#r = rs1#r) (INJ: forall b1 b2 delta, Finj1 b1 = Some(b2, delta) -> Ple b2 sp' -> Finj b1 = Some(b2, delta)) (PERM1: forall b1 b2 delta ofs, Finj1 b1 = Some(b2, delta) -> Ple b2 sp' -> Mem.perm m1 b1 ofs Max Nonempty -> Mem.perm m b1 ofs Max Nonempty) (PERM2: forall b ofs, Ple b sp' -> Mem.perm m' b ofs Cur Freeable -> Mem.perm m1' b ofs Cur Freeable) (PERM3: forall b ofs k p, Ple b sp' -> Mem.perm m1' b ofs k p -> Mem.perm m' b ofs k p), match_stacks_inside F1 m1 m1' stk stk' f' ctx sp' rs2. Proof. assert (INCR': inject_incr Finj Finj1) by (exact INCR'). induction 1; intros. (* nil *) apply match_stacks_nil with (bound1 := bound1). inv MG. constructor; auto. (*intros. destruct (DOMAIN _ H). split. assert (frgnBlocksSrc F = frgnBlocksSrc F1) by eapply INCR. rewrite <- H2; trivial. eapply (intern_incr_as_inj _ _ INCR); trivial.*) intros. eapply (IMAGE _ _ delta _ GV). eapply INJ; eauto. eapply Plt_le_trans; eauto. auto. auto. (* cons *) apply match_stacks_cons with (ctx := ctx); auto. eapply match_stacks_inside_invariant; eauto. intros; eapply INJ; eauto; xomega. intros; eapply PERM1; eauto; xomega. intros; eapply PERM2; eauto; xomega. intros; eapply PERM3; eauto; xomega. eapply agree_regs_incr; eauto. destruct INCR; repeat open_Hyp; apply H2; assumption. eapply range_private_invariant; eauto. (* untailcall *) apply match_stacks_untailcall with (ctx := ctx); auto. eapply match_stacks_inside_invariant; eauto. intros; eapply INJ; eauto; xomega. intros; eapply PERM1; eauto; xomega. intros; eapply PERM2; eauto; xomega. intros; eapply PERM3; eauto; xomega. eapply range_private_invariant; eauto. destruct INCR; repeat open_Hyp; apply H2; assumption. assert (INCR': inject_incr Finj Finj1) by (exact INCR'). induction 1; intros. (* base *) eapply match_stacks_inside_base; eauto. eapply match_stacks_invariant; eauto. intros; eapply INJ; eauto; xomega. intros; eapply PERM1; eauto; xomega. intros; eapply PERM2; eauto; xomega. intros; eapply PERM3; eauto; xomega. destruct INCR; repeat open_Hyp; apply H2; assumption. (* inlined *) apply match_stacks_inside_inlined with (ctx' := ctx'); auto. apply IHmatch_stacks_inside; auto. intros. apply RS. red in BELOW. xomega. apply agree_regs_incr with Finj; auto. apply agree_regs_invariant with rs'; auto. intros. apply RS. red in BELOW. xomega. eapply (incre_local_of F F1); auto. destruct INCR; repeat open_Hyp. apply H3; assumption. eapply range_private_invariant; eauto. intros. split. eapply INJ; eauto. xomega. eapply PERM1; eauto. xomega. intros. eapply PERM2; eauto. xomega. Qed. 
Lemma match_stacks_empty: forall stk stk' bound, match_stacks F m m' stk stk' bound -> stk = nil -> stk' = nil with match_stacks_inside_empty: forall stk stk' f ctx sp rs, match_stacks_inside F m m' stk stk' f ctx sp rs -> stk = nil -> stk' = nil /\ ctx.(retinfo) = None. Proof. induction 1; intros. auto. discriminate. exploit match_stacks_inside_empty; eauto. intros [A B]. congruence. induction 1; intros. split. eapply match_stacks_empty; eauto. auto. discriminate. Qed. End MATCH_STACKS. (** Preservation by assignment to a register *) Hint Immediate intern_incr_refl. Lemma match_stacks_inside_set_reg: forall F m m' stk stk' f' ctx sp' rs' r v, SM_wd F -> match_stacks_inside F m m' stk stk' f' ctx sp' rs' -> match_stacks_inside F m m' stk stk' f' ctx sp' (rs'#(sreg ctx r) <- v). Proof. intros. eapply match_stacks_inside_invariant; eauto. intros. apply Regmap.gso. zify. unfold sreg; rewrite shiftpos_eq. xomega. Qed. (** Preservation by a memory store *) Lemma match_stacks_inside_store: forall F m m' stk stk' f' ctx sp' rs' chunk b ofs v m1 chunk' b' ofs' v' m1', SM_wd F -> match_stacks_inside F m m' stk stk' f' ctx sp' rs' -> Mem.store chunk m b ofs v = Some m1 -> Mem.store chunk' m' b' ofs' v' = Some m1' -> match_stacks_inside F m1 m1' stk stk' f' ctx sp' rs'. Proof. intros. eapply match_stacks_inside_invariant; eauto with mem. Qed. (** Preservation by an allocation *) Lemma match_stacks_inside_alloc_left: forall F m m' stk stk' f' ctx sp' rs', SM_wd F -> match_stacks_inside F m m' stk stk' f' ctx sp' rs' -> forall sz m1 b F1 delta, SM_wd F1 -> Mem.alloc m 0 sz = (m1, b) -> (intern_incr F F1) -> (as_inj F1) b = Some(sp', delta) -> (forall b1, b1 <> b -> (as_inj F1) b1 = (as_inj F) b1) -> delta >= ctx.(dstk) -> match_stacks_inside F1 m1 m' stk stk' f' ctx sp' rs'. Proof. induction 2; intros. (* base *) eapply match_stacks_inside_base; eauto. eapply (match_stacks_invariant F m m' F1); eauto. intros. destruct (eq_block b1 b). subst b1. rewrite H3 in H6; inv H6. eelim Plt_strict; eauto. rewrite H4 in H6; auto. intros. exploit Mem.perm_alloc_inv; eauto. destruct (eq_block b1 b); intros; auto. subst b1. rewrite H3 in H6; inv H6. eelim Plt_strict; eauto. destruct H2; repeat open_Hyp. apply H8; assumption. (* inlined *) assert (INCR': inject_incr (as_inj F) (as_inj F1)). eapply intern_incr_as_inj; auto. eapply match_stacks_inside_inlined; eauto. eapply IHmatch_stacks_inside; eauto. destruct SBELOW. omega. eapply agree_regs_incr; eauto. apply intern_incr_local in H3. apply H3; auto. destruct H3; repeat open_Hyp. apply H9; assumption. eapply range_private_invariant; eauto. intros. exploit Mem.perm_alloc_inv; eauto. destruct (eq_block b0 b); intros. subst b0. rewrite H4 in H7; inv H7. elimtype False; xomega. rewrite H5 in H7; auto. Qed. (** Preservation by freeing *) Lemma match_stacks_free_left: forall F m m' stk stk' sp b lo hi m1, SM_wd F -> match_stacks F m m' stk stk' sp -> Mem.free m b lo hi = Some m1 -> match_stacks F m1 m' stk stk' sp. Proof. intros. eapply match_stacks_invariant; eauto. intros. eapply Mem.perm_free_3; eauto. Qed. Lemma match_stacks_free_right: forall F m m' stk stk' sp lo hi m1', SM_wd F -> match_stacks F m m' stk stk' sp -> Mem.free m' sp lo hi = Some m1' -> match_stacks F m m1' stk stk' sp. Proof. intros. eapply match_stacks_invariant; eauto. intros. eapply Mem.perm_free_1; eauto. intros. eapply Mem.perm_free_3; eauto. Qed. Lemma min_alignment_sound: forall sz n, (min_alignment sz | n) -> Mem.inj_offset_aligned n sz. Proof. intros; red; intros. unfold min_alignment in H. 
assert (2 <= sz -> (2 | n)). intros. destruct (zle sz 1). omegaContradiction. destruct (zle sz 2). auto. destruct (zle sz 4). apply Zdivides_trans with 4; auto. exists 2; auto. apply Zdivides_trans with 8; auto. exists 4; auto. assert (4 <= sz -> (4 | n)). intros. destruct (zle sz 1). omegaContradiction. destruct (zle sz 2). omegaContradiction. destruct (zle sz 4). auto. apply Zdivides_trans with 8; auto. exists 2; auto. assert (8 <= sz -> (8 | n)). intros. destruct (zle sz 1). omegaContradiction. destruct (zle sz 2). omegaContradiction. destruct (zle sz 4). omegaContradiction. auto. destruct chunk; simpl in *; auto. apply Zone_divide. apply Zone_divide. apply H2; omega. Qed. (** Preservation by external calls *) Section EXTCALL. Variables F1 F2: SM_Injection. Hypothesis WDF1: SM_wd F1. Hypothesis WDF2: SM_wd F2. Let Finj1 := as_inj F1. Let Finj2 := as_inj F2. Variables m1 m2 m1' m2': mem. Hypothesis MAXPERM: forall b ofs p, Mem.valid_block m1 b -> Mem.perm m2 b ofs Max p -> Mem.perm m1 b ofs Max p. Hypothesis MAXPERM': forall b ofs p, Mem.valid_block m1' b -> Mem.perm m2' b ofs Max p -> Mem.perm m1' b ofs Max p. Hypothesis UNCHANGED: Mem.unchanged_on (loc_out_of_reach Finj1 m1) m1' m2'. Hypothesis INJ: Mem.inject Finj1 m1 m1'. Hypothesis INCR: intern_incr F1 F2. Hypothesis SEP: inject_separated Finj1 Finj2 m1 m1'. Hypothesis SMV: sm_valid F1 m1 m1'. Lemma match_stacks_extcall: forall stk stk' bound, match_stacks F1 m1 m1' stk stk' bound -> Ple bound (Mem.nextblock m1') -> match_stacks F2 m2 m2' stk stk' bound with match_stacks_inside_extcall: forall stk stk' f' ctx sp' rs', match_stacks_inside F1 m1 m1' stk stk' f' ctx sp' rs' -> Plt sp' (Mem.nextblock m1') -> match_stacks_inside F2 m2 m2' stk stk' f' ctx sp' rs'. Proof. assert (INCR': inject_incr Finj1 Finj2) by (apply INCR'; auto). induction 1; intros. apply match_stacks_nil with bound1; auto. inv MG. constructor; intros; eauto. (*destruct (DOMAIN _ H0). split. assert (F12: frgnBlocksSrc F1 = frgnBlocksSrc F2) by eapply INCR. rewrite <- F12; trivial. eapply (intern_incr_as_inj _ _ INCR); trivial.*) remember (Finj1 b1) as d; apply eq_sym in Heqd. destruct d. destruct p. rewrite (intern_incr_as_inj _ _ INCR WDF2 _ _ _ Heqd) in H0. inv H0. apply (IMAGE _ _ _ _ GV Heqd H1). destruct (SEP _ _ _ Heqd H0). destruct (DOMAIN _ H1). elim H3. apply SMV. eapply (as_inj_DomRng); eauto. eapply match_stacks_cons; eauto. eapply match_stacks_inside_extcall; eauto. xomega. eapply agree_regs_incr; eauto. destruct INCR; repeat open_Hyp. apply H3; assumption. eapply range_private_extcall; eauto. red; xomega. intros. apply SSZ2; auto. apply MAXPERM'; auto. red; xomega. eapply match_stacks_untailcall; eauto. eapply match_stacks_inside_extcall; eauto. xomega. eapply range_private_extcall; eauto. red; xomega. intros. apply SSZ2; auto. apply MAXPERM'; auto. red; xomega. destruct INCR; repeat open_Hyp; apply H3; assumption. assert (INCR': inject_incr Finj1 Finj2) by (apply INCR'; auto). induction 1; intros. eapply match_stacks_inside_base; eauto. eapply match_stacks_extcall; eauto. xomega. destruct INCR; repeat open_Hyp; apply H3; assumption. eapply match_stacks_inside_inlined; eauto. eapply agree_regs_incr; eauto. eapply (incre_local_of F1); auto. destruct INCR; repeat open_Hyp; apply H4; assumption. eapply range_private_extcall; eauto. Qed. End EXTCALL. (*NEW*) Section MATCH_STACK_restrict_locals. Variable mu : SM_Injection. Variable m1 m2: mem. Variable vals1 vals2 : list val. Hypothesis WD : SM_wd mu. Hypothesis PG: meminj_preserves_globals ge (as_inj mu). 
Let mu1 := restrict_sm mu (fun b => locBlocksSrc mu b || frgnBlocksSrc mu b). Let mu2 := (replace_locals mu (fun b => locBlocksSrc mu b && REACH m1 (exportedSrc mu vals1) b) (fun b => locBlocksTgt mu b && REACH m2 (exportedTgt mu vals2) b)). Lemma MGE_restrict_local bnd: match_globalenvs mu1 bnd -> match_globalenvs mu2 bnd. Proof. intros. inv H. econstructor; eauto. intros. specialize (DOMAIN _ H). unfold mu2. rewrite replace_locals_as_inj. (*replace_locals_frgnBlocksSrc. *) (*split. unfold mu1 in H0. *) (*rewrite restrict_sm_frgnBlocksSrc in H0. trivial. *) (*unfold mu1 in H1. rewrite restrict_sm_all in H1.*) unfold mu1 in DOMAIN. rewrite restrict_sm_all in DOMAIN. apply (restrictD_Some _ _ _ _ _ DOMAIN). intros. unfold mu2 in H. rewrite replace_locals_as_inj in H. symmetry. eapply PG; eassumption. Qed. Lemma range_private_restrict_locals sp' n sz : forall (PRIV : range_private (as_inj mu1) m1 m2 sp' n sz) (SL : locBlocksTgt mu1 sp' = true), range_private (as_inj mu2) m1 m2 sp' n sz. Proof. intros. red; intros ? HH. destruct (PRIV _ HH). split; trivial. unfold mu2; rewrite replace_locals_as_inj. unfold mu1 in H0; rewrite restrict_sm_all in H0. intros. eapply (H0 b delta). unfold mu1 in SL; rewrite restrict_sm_locBlocksTgt in SL. apply restrictI_Some; trivial. rewrite (as_inj_locBlocks _ _ _ _ WD H1), SL. trivial. Qed. Lemma agree_regs_restrict_locals rs rs' ctx: agree_regs (as_inj mu1) ctx rs rs' -> agree_regs (as_inj mu2) ctx rs rs'. Proof. intros AG; destruct AG. split; intros. unfold mu2; rewrite replace_locals_as_inj. eapply val_inject_incr; try eapply H. unfold mu1; rewrite restrict_sm_all. apply restrict_incr. trivial. apply (H0 _ H1). Qed. Lemma match_stacks_restrict_locals: forall stk stk' bnd, match_stacks mu1 m1 m2 stk stk' bnd -> match_stacks mu2 m1 m2 stk stk' bnd with match_stacks_inside_restrict_locals: forall stk stk' f' ctx sp' rs', match_stacks_inside mu1 m1 m2 stk stk' f' ctx sp' rs' -> match_stacks_inside mu2 m1 m2 stk stk' f' ctx sp' rs'. Proof. induction 1; intros. { eapply match_stacks_nil; auto. eapply MGE_restrict_local; eassumption. assumption. } { eapply match_stacks_cons; eauto. eapply agree_regs_restrict_locals; eassumption. unfold mu2; rewrite replace_locals_as_inj. unfold mu1 in SP; rewrite restrict_sm_all in SP. eapply (restrictD_Some _ _ _ _ _ SP). unfold mu2; rewrite replace_locals_locBlocksTgt. unfold mu1 in SL; rewrite restrict_sm_locBlocksTgt in SL. trivial. eapply range_private_restrict_locals; eassumption. } { eapply match_stacks_untailcall; eauto. eapply range_private_restrict_locals; eassumption. unfold mu2; rewrite replace_locals_locBlocksTgt. unfold mu1 in SL; rewrite restrict_sm_locBlocksTgt in SL. trivial. } induction 1; intros. { eapply match_stacks_inside_base; eauto. unfold mu2; rewrite replace_locals_locBlocksTgt. unfold mu1 in SL; rewrite restrict_sm_locBlocksTgt in SL. trivial. } { eapply match_stacks_inside_inlined; eauto. eapply agree_regs_restrict_locals; eassumption. unfold mu2; rewrite replace_locals_local. unfold mu1 in SP; rewrite restrict_sm_local in SP. apply (restrictD_Some _ _ _ _ _ SP). unfold mu2; rewrite replace_locals_locBlocksTgt. unfold mu1 in SL; rewrite restrict_sm_locBlocksTgt in SL. trivial. eapply range_private_restrict_locals; eassumption. } Qed. End MATCH_STACK_restrict_locals. (** Change of context corresponding to an inlined tailcall *) Lemma align_unchanged: forall n amount, amount > 0 -> (amount | n) -> align n amount = n. Proof. intros. destruct H0 as [p EQ]. subst n. unfold align. decEq. 
apply Zdiv_unique with (b := amount - 1). omega. omega. Qed. Lemma match_stacks_inside_inlined_tailcall: forall F m m' stk stk' f' ctx sp' rs' ctx' f, match_stacks_inside F m m' stk stk' f' ctx sp' rs' -> context_below ctx ctx' -> context_stack_tailcall ctx f ctx' -> ctx'.(retinfo) = ctx.(retinfo) -> range_private (as_inj F) m m' sp' ctx.(dstk) f'.(fn_stacksize) -> tr_funbody fenv f'.(fn_stacksize) ctx' f f'.(fn_code) -> match_stacks_inside F m m' stk stk' f' ctx' sp' rs'. Proof. intros. inv H. (* base *) eapply match_stacks_inside_base; eauto. congruence. rewrite H1. rewrite DSTK. apply align_unchanged. apply min_alignment_pos. apply Zdivide_0. (* inlined *) assert (dstk ctx <= dstk ctx'). rewrite H1. apply align_le. apply min_alignment_pos. eapply match_stacks_inside_inlined; eauto. red; intros. destruct (zlt ofs (dstk ctx)). apply PAD; omega. apply H3. inv H4. xomega. congruence. unfold context_below in *. xomega. unfold context_stack_call in *. omega. Qed. (** ** Relating states *) Inductive match_states: SM_Injection -> RTL_core -> mem -> RTL_core -> mem -> Prop := | match_regular_states: forall mu stk f sp pc rs m stk' f' sp' rs' m' ctx (MS: match_stacks_inside mu m m' stk stk' f' ctx sp' rs') (FB: tr_funbody fenv f'.(fn_stacksize) ctx f f'.(fn_code)) (AG: agree_regs (as_inj mu) ctx rs rs') (SP: (as_inj mu) sp = Some(sp', ctx.(dstk))) (MINJ: Mem.inject (as_inj mu) m m') (VB: Mem.valid_block m' sp') (PRIV: range_private (as_inj mu) m m' sp' (ctx.(dstk) + ctx.(mstk)) f'.(fn_stacksize)) (SSZ1: 0 <= f'.(fn_stacksize) < Int.max_unsigned) (SSZ2: forall ofs, Mem.perm m' sp' ofs Max Nonempty -> 0 <= ofs <= f'.(fn_stacksize)), match_states mu (RTL_State stk f (Vptr sp Int.zero) pc rs) m (RTL_State stk' f' (Vptr sp' Int.zero) (spc ctx pc) rs') m' | match_call_states: forall (mu: SM_Injection) stk fd args m stk' fd' args' m' (MS: match_stacks mu m m' stk stk' (Mem.nextblock m')) (FD: transf_fundef fenv fd = OK fd') (VINJ: val_list_inject (as_inj mu) args args') (MINJ: Mem.inject (as_inj mu) m m'), match_states mu (RTL_Callstate stk fd args) m (RTL_Callstate stk' fd' args') m' | match_call_regular_states: forall (mu: SM_Injection) stk f vargs m stk' f' sp' rs' m' ctx ctx' pc' pc1' rargs (MS: match_stacks_inside mu m m' stk stk' f' ctx sp' rs') (FB: tr_funbody fenv f'.(fn_stacksize) ctx f f'.(fn_code)) (BELOW: context_below ctx' ctx) (NOP: f'.(fn_code)!pc' = Some(Inop pc1')) (MOVES: tr_moves f'.(fn_code) pc1' (sregs ctx' rargs) (sregs ctx f.(fn_params)) (spc ctx f.(fn_entrypoint))) (VINJ: list_forall2 (val_reg_charact (as_inj mu) ctx' rs') vargs rargs) (MINJ: Mem.inject (as_inj mu) m m') (VB: Mem.valid_block m' sp') (PRIV: range_private (as_inj mu) m m' sp' ctx.(dstk) f'.(fn_stacksize)) (SSZ1: 0 <= f'.(fn_stacksize) < Int.max_unsigned) (SSZ2: forall ofs, Mem.perm m' sp' ofs Max Nonempty -> 0 <= ofs <= f'.(fn_stacksize)), match_states mu (RTL_Callstate stk (Internal f) vargs) m (RTL_State stk' f' (Vptr sp' Int.zero) pc' rs') m' | match_return_states: forall (mu: SM_Injection) stk v m stk' v' m' (MS: match_stacks mu m m' stk stk' (Mem.nextblock m')) (VINJ: val_inject (as_inj mu) v v') (MINJ: Mem.inject (as_inj mu) m m'), match_states mu (RTL_Returnstate stk v) m (RTL_Returnstate stk' v') m' | match_return_regular_states: forall (mu: SM_Injection)stk v m stk' f' sp' rs' m' ctx pc' or rinfo (MS: match_stacks_inside mu m m' stk stk' f' ctx sp' rs') (RET: ctx.(retinfo) = Some rinfo) (AT: f'.(fn_code)!pc' = Some(inline_return ctx or rinfo)) (VINJ: match or with None => v = Vundef | Some r => val_inject 
(as_inj mu) v rs'#(sreg ctx r) end) (MINJ: Mem.inject (as_inj mu) m m') (VB: Mem.valid_block m' sp') (PRIV: range_private (as_inj mu) m m' sp' ctx.(dstk) f'.(fn_stacksize)) (SSZ1: 0 <= f'.(fn_stacksize) < Int.max_unsigned) (SSZ2: forall ofs, Mem.perm m' sp' ofs Max Nonempty -> 0 <= ofs <= f'.(fn_stacksize)), match_states mu (RTL_Returnstate stk v) m (RTL_State stk' f' (Vptr sp' Int.zero) pc' rs') m'.
Definition MATCH (d:RTL_core) mu c1 m1 c2 m2:Prop := match_states (restrict_sm mu (vis mu)) c1 m1 c2 m2 /\ REACH_closed m1 (vis mu) /\ meminj_preserves_globals ge (as_inj mu) /\ globalfunction_ptr_inject ge (as_inj mu) /\ (forall b, isGlobalBlock ge b = true -> frgnBlocksSrc mu b = true) /\ sm_valid mu m1 m2 /\ SM_wd mu /\ Mem.inject (as_inj mu) m1 m2.
(** ** Forward simulation *)
Definition RTL_measure (S: RTL_core) : nat := match S with | RTL_State _ _ _ _ _ => 1%nat | RTL_Callstate _ _ _ => 0%nat | RTL_Returnstate _ _ => 0%nat end.
Lemma tr_funbody_inv: forall sz cts f c pc i, tr_funbody fenv sz cts f c -> f.(fn_code)!pc = Some i -> tr_instr fenv sz cts pc i c. Proof. intros. inv H. eauto. Qed.
(*COMMENT: Entry points look suspicious; we might not need them. CORRECT: Not needed. Will remove. *) Definition entry_points_ok entrypoints:= forall (v1 v2 : val) (sig : signature), In (v1, v2, sig) entrypoints -> exists b f1 f2, v1 = Vptr b Int.zero /\ v2 = Vptr b Int.zero /\ Genv.find_funct_ptr ge b = Some f1 /\ Genv.find_funct_ptr tge b = Some f2.
(*NEW*) Variable hf : I64Helpers.helper_functions.
(*COMMENT: This lemma might belong in another file*) Lemma forall_length: forall A B vals1 vals2 (F: A -> B -> Prop), Forall2 F vals1 vals2 -> Zlength vals1 = Zlength vals2. Lemma forall_length_aux: forall A B vals1 vals2 (F: A -> B -> Prop), Forall2 F vals1 vals2 -> forall z, Zlength_aux z A vals1 = Zlength_aux z B vals2. intros A B vals1 vals2 F HH. induction HH. reflexivity. simpl; intros. remember (Z.succ z) as z'. apply IHHH. Qed. unfold Zlength; intros. eapply forall_length_aux. eassumption. Qed.
Lemma MATCH_wd: forall (d : RTL_core) (mu : SM_Injection) (c1 : RTL_core) (m1 : mem) (c2 : RTL_core) (m2 : mem) (MC:MATCH d mu c1 m1 c2 m2), SM_wd mu. intros. eapply MC. Qed. Hint Resolve MATCH_wd: trans_correct.
Lemma MATCH_RC: forall (d : RTL_core) (mu : SM_Injection) (c1 : RTL_core) (m1 : mem) (c2 : RTL_core) (m2 : mem) (MC: MATCH d mu c1 m1 c2 m2), REACH_closed m1 (vis mu). intros. eapply MC. Qed. Hint Resolve MATCH_RC: trans_correct.
Lemma MATCH_restrict: forall (d : RTL_core) (mu : SM_Injection) (c1 : RTL_core) (m1 : mem) (c2 : RTL_core) (m2 : mem) (X : block -> bool) (MC: MATCH d mu c1 m1 c2 m2)(HX: forall b : block, vis mu b = true -> X b = true)(RC0:REACH_closed m1 X), MATCH d (restrict_sm mu X) c1 m1 c2 m2. intros. destruct MC as [MS [RC [PG [GF [Glob [SMV [WD INJ]]]]]]]. assert (WDR: SM_wd (restrict_sm mu X)). apply restrict_sm_WD; assumption. split; try rewrite vis_restrict_sm; try rewrite restrict_sm_all; try rewrite restrict_sm_frgnBlocksSrc. rewrite restrict_sm_nest; assumption. intuition. (*meminj_preserves_globals*) rewrite <- restrict_sm_all. eapply restrict_sm_preserves_globals; auto. intros. apply HX. unfold vis. rewrite Glob; auto. apply orb_true_r. (* globalfunction_ptr_inject *) apply restrict_preserves_globalfun_ptr. assumption. intros b isGlob. apply HX. unfold vis. rewrite Glob; auto. apply orb_true_r. (* sm_valid *) unfold sm_valid; split; intros; red in SMV; destruct SMV as [H0 H1]. apply H0; unfold DOM; erewrite <- restrict_sm_DomSrc; eauto.
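(* The RNG half of [sm_valid] is symmetric, via [restrict_sm_DomTgt]: *)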
apply H1; unfold RNG; erewrite <- restrict_sm_DomTgt; eauto. (* Mem.inject *) apply inject_restrict; try assumption. Qed. Hint Resolve MATCH_restrict: trans_correct. Lemma MATCH_valid: forall (d : RTL_core) (mu : SM_Injection) (c1 : RTL_core) (m1 : mem) (c2 : RTL_core) (m2 : mem) (MC: MATCH d mu c1 m1 c2 m2), sm_valid mu m1 m2. intros. apply MC. Qed. Hint Resolve MATCH_valid: trans_correct. Lemma MATCH_PG: forall (d : RTL_core) (mu : SM_Injection) (c1 : RTL_core) (m1 : mem) (c2 : RTL_core) (m2 : mem)( MC: MATCH d mu c1 m1 c2 m2), meminj_preserves_globals ge (extern_of mu) /\ (forall b : block, isGlobalBlock ge b = true -> frgnBlocksSrc mu b = true). Proof. intros. assert (GF: forall b, isGlobalBlock ge b = true -> frgnBlocksSrc mu b = true). apply MC. split; trivial. rewrite <- match_genv_meminj_preserves_extern_iff_all; trivial. apply MC. apply MC. Qed. Hint Resolve MATCH_PG: trans_correct. Lemma MATCH_initial_core: forall (v : val) (vals1 : list val) (c1 : RTL_core) (m1 : mem) (j : meminj) (vals2 : list val) (m2 : mem) (DomS DomT : block -> bool) (R : list_norepet (map fst (prog_defs SrcProg))) (*entrypoints : list (val * val * signature)*) (*entry_ok : entry_points_ok entrypoints*) (*init_mem : exists m0 : mem, Genv.init_mem SrcProg = Some m0*) (Ini: initial_core (rtl_eff_sem hf) ge v vals1 = Some c1) (MINJ: Mem.inject j m1 m2) (VInj: Forall2 (val_inject j) vals1 vals2) (PG: meminj_preserves_globals ge j) (J: forall (b1 b2 : block) (d : Z), j b1 = Some (b2, d) -> DomS b1 = true /\ DomT b2 = true) (RCH:forall b : block, REACH m2 (fun b' : block => isGlobalBlock tge b' || getBlocks vals2 b') b = true -> DomT b = true) (*InitMem : exists m0 : mem, Genv.init_mem SrcProg = Some m0 /\ Ple (Mem.nextblock m0) (Mem.nextblock m1) (*Not needed/ Can remove just this one ineq*) /\ Ple (Mem.nextblock m0) (Mem.nextblock m2)*) (GFI: globalfunction_ptr_inject ge j) (*GDE: genvs_domain_eq ge tge*) (HDomS: forall b : block, DomS b = true -> Mem.valid_block m1 b) (HDomT: forall b : block, DomT b = true -> Mem.valid_block m2 b), exists c2 : RTL_core, initial_core (rtl_eff_sem hf) tge v vals2 = Some c2 /\ MATCH c1 (initial_SM DomS DomT (REACH m1 (fun b : block => isGlobalBlock ge b || getBlocks vals1 b)) (REACH m2 (fun b : block => isGlobalBlock tge b || getBlocks vals2 b)) j) c1 m1 c2 m2. Proof. intros. inversion Ini. unfold RTL_initial_core in H0. unfold ge in *. unfold tge in *. destruct v; inv H0. remember (Int.eq_dec i Int.zero) as z; destruct z; inv H1. clear Heqz. remember (Genv.find_funct_ptr (Genv.globalenv SrcProg) b) as zz; destruct zz; inv H0. apply eq_sym in Heqzz. destruct f; try discriminate. case_eq (val_casted.val_has_type_list_func vals1 (sig_args (funsig (Internal f))) && val_casted.vals_defined vals1). 2: solve[intros H2; rewrite H2 in H1; inv H1]. intros H2; rewrite H2 in H1. inv H1. exploit function_ptr_translated; eauto. intros [tf [FP TF]]. exploit sig_function_translated; try eassumption. intros SIG. assert (FF: exists f', tf = Internal f'). Errors.monadInv TF. eexists; reflexivity. destruct FF as [f' ?]. subst tf. unfold rtl_eff_sem, rtl_coop_sem. simpl. case_eq (Int.eq_dec Int.zero Int.zero). intros ? e. unfold tge in FP; rewrite FP. assert (val_casted.val_has_type_list_func vals2 (sig_args (funsig (Internal f')))=true) as ->. { eapply val_casted.val_list_inject_hastype; eauto. eapply forall_inject_val_list_inject; eauto. destruct (val_casted.vals_defined vals1); auto. rewrite andb_comm in H2; simpl in H2. solve[inv H2]. 
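(* Transport the argument-typing check from [vals1] to [vals2]: first rewrite the target signature into the source one. *)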
assert (sig_args (funsig (Internal f')) = sig_args (funsig (Internal f))) as ->. { rewrite SIG. simpl. reflexivity. } destruct (val_casted.val_has_type_list_func vals1 (sig_args (funsig (Internal f)))); auto. } assert (val_casted.vals_defined vals2=true) as ->. { eapply val_casted.val_list_inject_defined. eapply forall_inject_val_list_inject; eauto. destruct (val_casted.vals_defined vals1); auto. rewrite andb_comm in H2; inv H2. } simpl. eexists; split. erewrite <- forall_length; eauto. destruct (proj_sumbool (zlt match match Zlength vals1 with | 0 => 0 | Z.pos y' => Z.pos y'~0 | Z.neg y' => Z.neg y'~0 end with | 0 => 0 | Z.pos y' => Z.pos y'~0~0 | Z.neg y' => Z.neg y'~0~0 end Int.max_unsigned)); try discriminate. reflexivity. Focus 2. intros CONTRA. solve[elimtype False; auto]. clear e e0. destruct (core_initial_wd ge tge _ _ _ _ _ _ _ MINJ VInj J RCH PG GDE_lemma HDomS HDomT _ (eq_refl _)) as [AA [BB [CC [DD [EE [FF GG]]]]]]. remember (val_casted.val_has_type_list_func vals1 (sig_args (funsig (Internal f))) && val_casted.vals_defined vals1) as vc. destruct vc; inv H2. split. { specialize (Genv.find_funct_ptr_not_fresh SrcProg). intros FFP. (*destruct init_mem as [m0 INIT_MEM]. specialize (FFP _ _ _ INIT_MEM Heqzz). destruct (valid_init_is_global _ R _ INIT_MEM _ FFP) as [id Hid].*) destruct (proj_sumbool (zlt match match Zlength vals1 with | 0 => 0 | Z.pos y' => Z.pos y'~0 | Z.neg y' => Z.neg y'~0 end with | 0 => 0 | Z.pos y' => Z.pos y'~0~0 | Z.neg y' => Z.neg y'~0~0 end Int.max_unsigned)); try discriminate. inv H0. econstructor; try rewrite restrict_sm_all, initial_SM_as_inj. 2: assumption. { clear GG FF. econstructor; try rewrite restrict_sm_all, initial_SM_as_inj. unfold initial_SM in *; simpl in *. unfold vis; simpl. clear CC DD Ini. exploit @restrict_preserves_globals. eapply PG. instantiate (1:=(fun b : block => REACH m1 (fun b1 : block => isGlobalBlock (Genv.globalenv SrcProg) b1 || getBlocks vals1 b1) b)). simpl; intros. apply EE; assumption. intros PGR. destruct PGR as [A [B CC]]. (*TODO: move*) Lemma genv_next_symbol_exists' b (ge0 : genv) l : list_norepet (map fst l) -> (Plt b (Genv.genv_next ge0) -> exists id, ~List.In id (map fst l) /\ Genv.find_symbol ge0 id = Some b) -> Plt b (Genv.genv_next (Genv.add_globals ge0 l)) -> exists id, Genv.find_symbol (Genv.add_globals ge0 l) id = Some b. Proof. revert ge0 b. induction l; simpl; auto. intros ge0 b ? ? H2. destruct (H0 H2) as [? [? ?]]. solve[eexists; eauto]. intros ge0 b H H2 H3. inv H. destruct a; simpl in *. eapply IHl; eauto. intros Hplt. destruct (ident_eq b (Genv.genv_next ge0)). * subst b. exists i. unfold Genv.add_global, Genv.find_symbol; simpl. rewrite PTree.gss; auto. * unfold Genv.add_global, Genv.find_symbol; simpl. destruct H2 as [x H2]. unfold Genv.add_global in Hplt; simpl in Hplt; xomega. exists x. destruct H2 as [A B]. split; auto. rewrite PTree.gso; auto. Qed. Lemma genv_next_symbol_exists b : list_norepet (map fst (prog_defs SrcProg)) -> Plt b (Genv.genv_next ge) -> exists id, Genv.find_symbol ge id = Some b. Proof. intros Hnorepet H. exploit genv_next_symbol_exists'; eauto. simpl; xomega. Qed. Lemma match_globalenvs_init2: forall (R: list_norepet (map fst (prog_defs SrcProg))) j, meminj_preserves_globals ge (as_inj j) -> match_globalenvs j (Genv.genv_next ge). Proof. intros. destruct H as [A [B C]]. constructor. intros b D. cut (exists id, Genv.find_symbol (Genv.globalenv SrcProg) id = Some b). intros [id ID]. (*split. *) solve[eapply A; eauto]. exploit genv_next_symbol_exists; eauto. intros. symmetry. 
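(* Block injectivity on globals follows from the third component of [meminj_preserves_globals]. *)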
solve [eapply (C _ _ _ _ GV); eauto]. intros. eapply Genv.genv_symb_range; eauto. intros. eapply Genv.genv_funs_range; eauto. intros. eapply Genv.genv_vars_range; eauto. Qed.
apply match_globalenvs_init2; eauto. unfold as_inj; simpl.
Lemma restrict_empty: forall X, restrict (fun _ : block => None) X = (fun _ : block => None). Proof. intros X. extensionality b. unfold restrict. destruct (X b); auto. Qed.
Lemma join_empty: forall j, join j (fun _ : block => None) = j. Proof. intros j. extensionality b. unfold join. destruct (j b) as [[b' d]|]; auto. Qed.
rewrite restrict_empty, join_empty. eapply restrict_preserves_globals. assumption. intuition.
Lemma genv_next_symbol_exists2 b : list_norepet (map fst (prog_defs SrcProg)) -> Psucc b = Genv.genv_next ge -> exists id, Genv.find_symbol ge id = Some b. Proof. intros Hnorepet H. apply genv_next_symbol_exists; auto. xomega. Qed.
(*Ple (Genv.genv_next ge) (Mem.nextblock m2)*) { destruct PG as [XX [Y Z]]. unfold Ple. rewrite <-Pos.leb_le. destruct (Pos.leb (Genv.genv_next ge) (Mem.nextblock m2)) eqn:?; auto. rewrite Pos.leb_nle in Heqb0. assert (Heqb': (Genv.genv_next ge > Mem.nextblock m2)%positive) by xomega. assert (exists b0, Psucc b0 = Genv.genv_next ge). { destruct (Genv.genv_next ge). exists ((b0~1)-1)%positive. simpl. auto. exists (Pos.pred (b0~0))%positive. rewrite Pos.succ_pred. auto. xomega. xomega. } destruct H0 as [b0 H0]. generalize H0 as H'; intro. apply genv_next_symbol_exists2 in H0. destruct H0 as [id H0]. apply XX in H0. apply J in H0. destruct H0 as [H0 H3]. specialize (HDomT _ H3). unfold Mem.valid_block in HDomT. clear - Heqb' HDomT H'. xomega. auto. } }
unfold initial_SM, vis; simpl. clear - VInj. eapply forall_inject_val_list_inject. apply restrict_forall_vals_inject; try eassumption. intros. apply REACH_nil.
apply orb_true_iff; right. trivial. eapply inject_restrict; eassumption. } rewrite initial_SM_as_inj. intuition. Qed. Lemma MATCH_halted: forall (cd : RTL_core) (mu : SM_Injection) (c1 : RTL_core) (m1 : mem) (c2 : RTL_core) (m2 : mem) (v1 : val) (MC: MATCH cd mu c1 m1 c2 m2)(HALT: halted (rtl_eff_sem hf) c1 = Some v1), exists v2 : val, Mem.inject (as_inj mu) m1 m2 /\ val_inject (restrict (as_inj mu) (vis mu)) v1 v2 /\ halted (rtl_eff_sem hf) c2 = Some v2. Proof. intros. unfold MATCH in MC; destruct MC as [H0 H1]. inv H0; simpl in *; inv HALT. inv MS. exists v'; split; try assumption. eapply H1. inv H0. split; trivial. rewrite <- restrict_sm_all; assumption. inv H0. inv MS0. rewrite RET in RET0; inv RET0. inv H0. inv MS. rewrite RET in RET0; inv RET0. inv H0. Qed. Hint Resolve MATCH_halted: trans_correct. Lemma MATCH_atExternal: forall (mu : SM_Injection) (c1 : RTL_core) (m1 : mem) (c2 : RTL_core) (m2 : mem) (e : external_function) (vals1 : list val) (ef_sig : signature) (MC: MATCH c1 mu c1 m1 c2 m2) (ATE: at_external (rtl_eff_sem hf) c1 = Some (e, ef_sig, vals1)), Mem.inject (as_inj mu) m1 m2 /\ (exists vals2 : list val, Forall2 (val_inject (restrict (as_inj mu) (vis mu))) vals1 vals2 /\ at_external (rtl_eff_sem hf) c2 = Some (e, ef_sig, vals2) /\ (forall pubSrc' pubTgt' : block -> bool, pubSrc' = (fun b : block => locBlocksSrc mu b && REACH m1 (exportedSrc mu vals1) b) -> pubTgt' = (fun b : block => locBlocksTgt mu b && REACH m2 (exportedTgt mu vals2) b) -> forall nu : SM_Injection, nu = replace_locals mu pubSrc' pubTgt' -> MATCH c1 nu c1 m1 c2 m2 /\ Mem.inject (shared_of nu) m1 m2)). intros. split. inv MC; apply H0. inv MC; simpl in *. inv H; inv ATE. destruct H0 as [RC [ MPG [GFP [GLOB_FRGN [ SMV [ SMWD MINJ']]]]]]. destruct fd; inv H1. inv FD; simpl in *. destruct (BuiltinEffects.observableEF_dec hf e0); inv H0. exists args'. split. apply val_list_inject_forall_inject. autorewrite with restrict in VINJ; assumption. split; intros. trivial. specialize (val_list_inject_forall_inject _ _ _ VINJ); intros ValsInj. autorewrite with restrict in ValsInj. specialize (forall_vals_inject_restrictD _ _ _ _ ValsInj); intros. exploit replace_locals_wd_AtExternal; try eassumption. intros SMWD_replace_locals. subst. split; auto. split; auto. rewrite replace_locals_vis. constructor; eauto. apply match_stacks_replace_locals_restrict; auto. rewrite restrict_sm_all, replace_locals_as_inj in *; auto. rewrite restrict_sm_all, replace_locals_as_inj in *; auto. repeat open_Hyp. split; auto. solve[rewrite replace_locals_vis; auto ]. split; auto. solve[rewrite replace_locals_as_inj; auto]. Lemma globalfunction_ptr_inject_replace_locals: forall mu ls lt (PG : globalfunction_ptr_inject ge (as_inj mu)), globalfunction_ptr_inject ge (as_inj (replace_locals mu ls lt)). unfold globalfunction_ptr_inject; intros. rewrite replace_locals_as_inj. eapply PG; eauto. Qed. split. apply globalfunction_ptr_inject_replace_locals; assumption. split; auto. solve[rewrite replace_locals_frgnBlocksSrc; auto]. split. unfold sm_valid. rewrite replace_locals_DOM, replace_locals_RNG. assumption. split; auto. solve[rewrite replace_locals_as_inj; auto]. eapply inject_shared_replace_locals; eauto. extensionality b; eauto. extensionality b; eauto. Qed. Hint Resolve MATCH_atExternal: trans_correct. Section MS_RSI. (* Match Stacks: restricted Structured injections*) Variable mu nu: SM_Injection. Hypothesis WDmu : SM_wd mu. Hypothesis WDnu : SM_wd nu. Hypothesis PG: meminj_preserves_globals ge (as_inj mu). 
Hypothesis INC: inject_incr (as_inj mu) (as_inj nu). Variables X Y: block -> bool. Hypothesis HX: forall b, vis mu b = true -> X b = true. Hypothesis HY: forall b, vis nu b = true -> Y b = true. Hypothesis H_mu_nu: forall b, vis mu b = true -> vis nu b = true. Hypothesis HXY: inject_incr (restrict (local_of mu) X) (restrict (local_of nu) Y). Hypothesis LBTmu: forall b, locBlocksTgt mu b = true -> locBlocksTgt nu b = true. Variables m1 m1' m2 m2' :mem. Variables PS PT: block -> bool. Let muR:= replace_locals mu PS PT. Hypothesis MAXPERM: forall b ofs p, Mem.valid_block m1 b -> Mem.perm m2 b ofs Max p -> Mem.perm m1 b ofs Max p. Hypothesis MAXPERM': forall b ofs p, Mem.valid_block m1' b -> Mem.perm m2' b ofs Max p -> Mem.perm m1' b ofs Max p. Hypothesis UNCHANGED: Mem.unchanged_on (local_out_of_reach muR m1) m1' m2'. Let muV:= restrict_sm mu (vis mu). Let nuY:= restrict_sm nu Y. Hypothesis FrgnSrcPres: forall b, frgnBlocksSrc mu b = true -> frgnBlocksSrc nu b = true. Hypothesis PGnu: meminj_preserves_globals ge (as_inj nu). (*Hypothesis SEP : globals_separate tge muR nu.*) (*Hypothesis SEP: sm_inject_separated muR nu m1 m1'.*) Hypothesis HAI: local_of mu = local_of nu. Hypothesis SMVmu: sm_valid mu m1 m1'.
Lemma MGE_RSI bnd : match_globalenvs muV bnd -> match_globalenvs nuY bnd. Proof. intros. inv H. constructor; eauto. intros. specialize (DOMAIN _ H). unfold muV in DOMAIN. rewrite restrict_sm_all in DOMAIN. unfold nuY. rewrite restrict_sm_all. destruct (restrictD_Some _ _ _ _ _ DOMAIN); clear DOMAIN. apply restrictI_Some. eapply INC. trivial. auto. intros. symmetry. eapply PGnu; eauto. unfold nuY in H. rewrite restrict_sm_all in H. destruct (restrictD_Some _ _ _ _ _ H) as [AA BB]; exact AA. Qed.
Lemma range_private_RSI sp' n sz : forall (PRIV : range_private (as_inj muV) m1 m1' sp' n sz) (SL : locBlocksTgt muV sp' = true), range_private (as_inj nuY) m2 m2' sp' n sz. Proof. intros. red; intros ? HH. destruct (PRIV _ HH). split. eapply UNCHANGED. red; intros. unfold muV in SL; rewrite restrict_sm_locBlocksTgt in SL. unfold muR. split. rewrite replace_locals_locBlocksTgt. trivial. rewrite replace_locals_local, replace_locals_pubBlocksSrc. intros. left. eapply H0. unfold muV; rewrite restrict_sm_all. apply restrictI_Some. apply local_in_all; eassumption. unfold vis. destruct (local_DomRng _ WDmu _ _ _ H1); intuition.
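(* Second conjunct of [loc_private]: no [nuY]-preimage of [sp'] may retain the permission in [m2]. *)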
eapply Mem.perm_valid_block; eassumption. eassumption. intros. intros N. unfold nuY in H1; rewrite restrict_sm_all in H1. unfold muV in SL; rewrite restrict_sm_locBlocksTgt in SL. destruct (restrictD_Some _ _ _ _ _ H1); clear H1. apply LBTmu in SL. destruct (joinD_Some _ _ _ _ _ H2) as [EXT | [EXT LOC]]; clear H2. destruct (extern_DomRng _ WDnu _ _ _ EXT). rewrite (extBlocksTgt_locBlocksTgt _ WDnu _ H2) in SL. discriminate. rewrite <- HAI in LOC. apply MAXPERM in N. eapply (H0 b delta); trivial. unfold muV; rewrite restrict_sm_all. apply restrictI_Some. apply local_in_all; eassumption. unfold vis. destruct (local_DomRng _ WDmu _ _ _ LOC). rewrite H1; trivial. eapply SMVmu. apply local_in_all in LOC; trivial. eapply (as_inj_DomRng _ _ _ _ LOC WDmu). Qed. Lemma agree_regs_RSI rs rs' ctx: agree_regs (as_inj muV) ctx rs rs' -> agree_regs (as_inj nuY) ctx rs rs'. Proof. intros AG; destruct AG. split; intros. eapply val_inject_incr; try eapply H. unfold nuY, muV; repeat rewrite restrict_sm_all. red; intros. destruct (restrictD_Some _ _ _ _ _ H2); clear H2. apply restrictI_Some; eauto. trivial. apply (H0 _ H1). Qed. Hypothesis BV: forall b1 b1' d, Mem.valid_block m1' b1' -> as_inj nu b1 = Some(b1',d) -> Mem.valid_block m1 b1. Lemma match_stacks_RSI: forall stk stk' bnd (MS: match_stacks muV m1 m1' stk stk' bnd), match_stacks nuY m2 m2' stk stk' bnd with match_stacks_inside_RSI: forall stk stk' f' ctx sp' rs', match_stacks_inside muV m1 m1' stk stk' f' ctx sp' rs' -> match_stacks_inside nuY m2 m2' stk stk' f' ctx sp' rs'. Proof. induction 1; intros. { eapply match_stacks_nil; auto. eapply MGE_RSI. eapply MG. assumption. } { eapply match_stacks_cons; eauto. eapply agree_regs_RSI; eassumption. unfold nuY; rewrite restrict_sm_all. unfold muV in SP; rewrite restrict_sm_all in SP. destruct (restrictD_Some _ _ _ _ _ SP). eapply restrictI_Some; eauto. unfold nuY; rewrite restrict_sm_locBlocksTgt. unfold muV in SL; rewrite restrict_sm_locBlocksTgt in SL. auto. eapply range_private_RSI; eassumption. intros. apply MAXPERM' in H. apply (SSZ2 _ H). eapply SMVmu. unfold muV in SL. rewrite restrict_sm_locBlocksTgt in SL. unfold RNG, DomTgt. rewrite SL; trivial. } { eapply match_stacks_untailcall; eauto. eapply range_private_RSI; try eassumption. intros. apply MAXPERM' in H. apply (SSZ2 _ H). eapply SMVmu. unfold muV in SL. rewrite restrict_sm_locBlocksTgt in SL. unfold RNG, DomTgt. rewrite SL; trivial. unfold nuY; rewrite restrict_sm_locBlocksTgt. unfold muV in SL; rewrite restrict_sm_locBlocksTgt in SL. eauto. } induction 1; intros. { unfold muV in SL; rewrite restrict_sm_locBlocksTgt in SL. eapply match_stacks_inside_base; eauto. unfold nuY. rewrite restrict_sm_locBlocksTgt. auto. } { eapply match_stacks_inside_inlined; eauto. eapply agree_regs_RSI; try eassumption. unfold nuY. rewrite restrict_sm_local. eapply HXY. unfold muV in SP; rewrite restrict_sm_local in SP. destruct (restrictD_Some _ _ _ _ _ SP). apply restrictI_Some; trivial. auto. unfold muV in SL; rewrite restrict_sm_locBlocksTgt in SL. unfold nuY. rewrite restrict_sm_locBlocksTgt. auto. red; intros. destruct (PAD _ H0). split; intros. eapply UNCHANGED. split; intros. unfold muV in SL; rewrite restrict_sm_locBlocksTgt in SL. unfold muR. rewrite replace_locals_locBlocksTgt. trivial. unfold muR in H3. rewrite replace_locals_local in H3. left. eapply H2. unfold muV. rewrite restrict_sm_all. apply restrictI_Some. apply local_in_all; eassumption. unfold vis. destruct (local_DomRng _ WDmu _ _ _ H3). rewrite H4; trivial. 
eapply Mem.perm_valid_block; eassumption. assumption. assert (VB: Mem.valid_block m1' sp'). eapply Mem.perm_valid_block; eassumption. unfold muV in SL. rewrite restrict_sm_locBlocksTgt in SL. intros N. apply MAXPERM in N. eapply H2; try eassumption. unfold nuY in H3; rewrite restrict_sm_all in H3. destruct (restrictD_Some _ _ _ _ _ H3); clear H3. destruct (joinD_Some _ _ _ _ _ H4) as [EXT | [_ LOC]]; clear H4. destruct (extern_DomRng _ WDnu _ _ _ EXT). apply (extBlocksTgt_locBlocksTgt _ WDnu) in H4. apply LBTmu in SL. rewrite SL in H4. discriminate. unfold muV; rewrite restrict_sm_all. rewrite <- HAI in LOC. apply restrictI_Some. apply local_in_all; try eassumption. unfold vis. destruct (local_DomRng _ WDmu _ _ _ LOC). rewrite H3; trivial. apply Mem.perm_valid_block in H1. assert (as_inj mu b = Some (sp', delta)). {subst muV. rewrite restrict_sm_local' in SP; eauto. rewrite HAI in SP. apply WDnu in SP. destruct SP as [locnusp Locnusp']. assert (HH:= H3). apply as_inj_locBlocks in H3. unfold nuY in H3. rewrite restrict_sm_locBlocksTgt, restrict_sm_locBlocksSrc in H3. rewrite Locnusp' in H3. unfold nuY in HH. rewrite restrict_sm_all in HH. unfold restrict in HH. destruct (Y b); try discriminate. rewrite locBlocksSrc_as_inj_local in HH; eauto. rewrite <- HAI in HH. unfold as_inj, join. assert (HH':=HH). apply WDmu in HH'; destruct HH' as [locmub ?]. destruct WDmu as [disjoint_Src WDmu']. destruct (disjoint_Src b); try congruence. destruct (extern_of mu b) eqn:extern_of_b; auto. destruct p. eapply WDmu in extern_of_b; destruct extern_of_b as [? ?]. congruence. unfold nuY. apply restrict_sm_WD; eauto. } apply SMVmu. unfold DOM. eapply as_inj_DomRng; eauto. } Qed.
End MS_RSI.
Lemma Match_AfterExternal: forall (mu : SM_Injection) (st1 : RTL_core) (st2 : RTL_core) (m1 : mem) (e : external_function) (vals1 : list val) (m2 : mem) (ef_sig : signature) (vals2 : list val) (e' : external_function) (ef_sig' : signature) (MemInjMu : Mem.inject (as_inj mu) m1 m2) (MatchMu : MATCH st1 mu st1 m1 st2 m2) (AtExtSrc : at_external (rtl_eff_sem hf) st1 = Some (e, ef_sig, vals1)) (AtExtTgt : at_external (rtl_eff_sem hf) st2 = Some (e', ef_sig', vals2)) (ValInjMu : Forall2 (val_inject (restrict (as_inj mu) (vis mu))) vals1 vals2) (pubSrc' : block -> bool) (pubSrcHyp : pubSrc' = (fun b : block => locBlocksSrc mu b && REACH m1 (exportedSrc mu vals1) b)) (pubTgt' : block -> bool) (pubTgtHyp : pubTgt' = (fun b : block => locBlocksTgt mu b && REACH m2 (exportedTgt mu vals2) b)) (nu : SM_Injection) (NuHyp : nu = replace_locals mu pubSrc' pubTgt') (nu' : SM_Injection) (ret1 : val) (m1' : mem) (ret2 : val) (m2' : mem) (INC : extern_incr nu nu') (SEP : globals_separate tge nu nu') (WDnu' : SM_wd nu') (SMvalNu' : sm_valid nu' m1' m2') (MemInjNu' : Mem.inject (as_inj nu') m1' m2') (RValInjNu' : val_inject (as_inj nu') ret1 ret2) (FwdSrc : mem_forward m1 m1') (FwdTgt : mem_forward m2 m2') (frgnSrc' : block -> bool) (frgnSrcHyp : frgnSrc' = (fun b : block => DomSrc nu' b && (negb (locBlocksSrc nu' b) && REACH m1' (exportedSrc nu' (ret1 :: nil)) b))) (frgnTgt' : block -> bool) (frgnTgtHyp : frgnTgt' = (fun b : block => DomTgt nu' b && (negb (locBlocksTgt nu' b) && REACH m2' (exportedTgt nu' (ret2 :: nil)) b))) (mu' : SM_Injection) (Mu'Hyp : mu' = replace_externs nu' frgnSrc' frgnTgt') (UnchPrivSrc : Mem.unchanged_on (fun (b : block) (_ : Z) => locBlocksSrc nu b = true /\ pubBlocksSrc nu b = false) m1 m1') (UnchLOOR : Mem.unchanged_on (local_out_of_reach nu m1) m2 m2'), exists (st1' st2' : RTL_core), after_external (rtl_eff_sem hf) (Some ret1) st1 = Some st1' /\ after_external (rtl_eff_sem hf) (Some ret2) st2 = Some st2' /\ MATCH st1' mu' st1' m1' st2' m2'. Proof. intros. destruct MatchMu as [MC [RC [PG [GFP [GF [VAL [WDmu INJ]]]]]]]. inv MC; simpl in *; inv AtExtSrc. destruct fd; inv H0. destruct fd'; inv AtExtTgt. inv FD. destruct (BuiltinEffects.observableEF_dec hf e1); inv H0; inv H1. rename o into OBS. exists (RTL_Returnstate stk ret1). eexists. split. reflexivity. split. reflexivity.
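(* Both after-external states are now fixed; the rest of the proof reestablishes MATCH for mu' = replace_externs nu' frgnSrc' frgnTgt'. *)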
assert (INCvisNu': inject_incr (restrict (as_inj nu') (vis (replace_externs nu' (fun b : Values.block => DomSrc nu' b && (negb (locBlocksSrc nu' b) && REACH m1' (exportedSrc nu' (ret1 :: nil)) b)) (fun b : Values.block => DomTgt nu' b && (negb (locBlocksTgt nu' b) && REACH m2' (exportedTgt nu' (ret2 :: nil)) b))))) (as_inj nu')). (*unfold vis. rewrite replace_externs_frgnBlocksSrc, replace_externs_locBlocksSrc.*) apply restrict_incr. assert (RC': REACH_closed m1' (mapped (as_inj nu'))). eapply inject_REACH_closed; eassumption. assert (PGnu': meminj_preserves_globals (Genv.globalenv SrcProg) (as_inj nu')). eapply meminj_preserves_globals_extern_incr_separate. eassumption. rewrite replace_locals_as_inj. assumption. assumption. { (*Here is the only place SEP is used*) specialize (genvs_domain_eq_isGlobal _ _ GDE_lemma). intros GL. red. unfold ge in GL. rewrite GL. apply SEP. } clear SEP. assert (RR1: REACH_closed m1' (fun b : Values.block => locBlocksSrc nu' b || DomSrc nu' b && (negb (locBlocksSrc nu' b) && REACH m1' (exportedSrc nu' (ret1 :: nil)) b))). intros b Hb. rewrite REACHAX in Hb. destruct Hb as [L HL]. generalize dependent b. induction L; simpl; intros; inv HL. assumption. specialize (IHL _ H1); clear H1. apply orb_true_iff in IHL. remember (locBlocksSrc nu' b') as l. destruct l; apply eq_sym in Heql. clear IHL. remember (pubBlocksSrc nu' b') as p. destruct p; apply eq_sym in Heqp. assert (Rb': REACH m1' (mapped (as_inj nu')) b' = true). apply REACH_nil. destruct (pubSrc _ WDnu' _ Heqp) as [bb2 [dd1 [PUB PT]]]. eapply mappedI_true. apply (pub_in_all _ WDnu' _ _ _ PUB). assert (Rb: REACH m1' (mapped (as_inj nu')) b = true). eapply REACH_cons; try eassumption. specialize (RC' _ Rb). destruct (mappedD_true _ _ RC') as [[b2 d1] AI']. remember (locBlocksSrc nu' b) as d. destruct d; simpl; trivial. apply andb_true_iff. split. eapply as_inj_DomRng; try eassumption. eapply REACH_cons; try eassumption. apply REACH_nil. unfold exportedSrc. rewrite (pubSrc_shared _ WDnu' _ Heqp). intuition. destruct (UnchPrivSrc) as [UP UV]; clear UnchLOOR. specialize (UP b' z Cur Readable). specialize (UV b' z). destruct INC as [_ [_ [_ [_ [LCnu' [_ [PBnu' [_ [FRGnu' _]]]]]]]]]. rewrite <- LCnu'. rewrite replace_locals_locBlocksSrc. rewrite <- LCnu' in Heql. rewrite replace_locals_locBlocksSrc in *. rewrite <- PBnu' in Heqp. rewrite replace_locals_pubBlocksSrc in *. clear INCvisNu'. rewrite Heql in *. simpl in *. intuition. assert (VB: Mem.valid_block m1 b'). eapply VAL. unfold DOM, DomSrc. rewrite Heql. intuition. apply (H VB) in H2. rewrite (H0 H2) in H4. clear H H0. remember (locBlocksSrc mu b) as q. destruct q; simpl; trivial; apply eq_sym in Heqq. assert (Rb : REACH m1 (vis mu) b = true). eapply REACH_cons; try eassumption. apply REACH_nil. unfold vis. rewrite Heql; trivial. specialize (RC _ Rb). unfold vis in RC. rewrite Heqq in RC; simpl in *. rewrite replace_locals_frgnBlocksSrc in FRGnu'. rewrite FRGnu' in RC. apply andb_true_iff. split. unfold DomSrc. rewrite (frgnBlocksSrc_extBlocksSrc _ WDnu' _ RC). intuition. apply REACH_nil. unfold exportedSrc. rewrite (frgnSrc_shared _ WDnu' _ RC). intuition. destruct IHL. inv H. apply andb_true_iff in H. simpl in H. destruct H as[DomNu' Rb']. clear INC INCvisNu' UnchLOOR UnchPrivSrc. remember (locBlocksSrc nu' b) as d. destruct d; simpl; trivial. apply eq_sym in Heqd. apply andb_true_iff. split. assert (RET: Forall2 (val_inject (as_inj nu')) (ret1::nil) (ret2::nil)). constructor. assumption. constructor. 
destruct (REACH_as_inj _ WDnu' _ _ _ _ MemInjNu' RET _ Rb' (fun b => true)) as [b2 [d1 [AI' _]]]; trivial. assert (REACH m1' (mapped (as_inj nu')) b = true). eapply REACH_cons; try eassumption. apply REACH_nil. eapply mappedI_true; eassumption. specialize (RC' _ H). destruct (mappedD_true _ _ RC') as [[? ?] ?]. eapply as_inj_DomRng; eassumption. eapply REACH_cons; try eassumption. assert (RRC: REACH_closed m1' (fun b : Values.block => mapped (as_inj nu') b && (locBlocksSrc nu' b || DomSrc nu' b && (negb (locBlocksSrc nu' b) && REACH m1' (exportedSrc nu' (ret1 :: nil)) b)))). eapply REACH_closed_intersection; eassumption. assert (GFnu': forall b, isGlobalBlock (Genv.globalenv SrcProg) b = true -> DomSrc nu' b && (negb (locBlocksSrc nu' b) && REACH m1' (exportedSrc nu' (ret1 :: nil)) b) = true). intros. specialize (GF _ H). assert (FSRC:= extern_incr_frgnBlocksSrc _ _ INC). rewrite replace_locals_frgnBlocksSrc in FSRC. rewrite FSRC in GF. rewrite (frgnBlocksSrc_locBlocksSrc _ WDnu' _ GF). apply andb_true_iff; simpl. split. unfold DomSrc. rewrite (frgnBlocksSrc_extBlocksSrc _ WDnu' _ GF). intuition. apply REACH_nil. unfold exportedSrc. rewrite (frgnSrc_shared _ WDnu' _ GF). intuition. rewrite restrict_sm_all in *. exploit (eff_after_check1 mu); try eassumption; try reflexivity. eapply val_list_inject_forall_inject. eapply val_list_inject_incr; try eassumption. apply restrict_incr. intros [WDnu [SMVnu [MinjNu VinjNu]]]. assert (WDR: SM_wd (restrict_sm mu (vis mu))). apply restrict_sm_WD; trivial. destruct (eff_after_check2 _ _ _ _ _ MemInjNu' RValInjNu' _ (eq_refl _) _ (eq_refl _) _ (eq_refl _) WDnu' SMvalNu'). assert (RRC1': REACH_closed m1' (fun b : block => locBlocksSrc nu' b || DomSrc nu' b && (negb (locBlocksSrc nu' b) && REACH m1' (exportedSrc nu' (ret1 :: nil)) b))). intuition. assert (WDR': SM_wd (restrict_sm nu' (fun b : block => locBlocksSrc nu' b || DomSrc nu' b && (negb (locBlocksSrc nu' b) && REACH m1' (exportedSrc nu' (ret1 :: nil)) b)))). apply restrict_sm_WD. assumption. intros. unfold vis in H1. destruct (locBlocksSrc nu' b); simpl in *; trivial. apply andb_true_iff; split. unfold DomSrc. rewrite (frgnBlocksSrc_extBlocksSrc _ WDnu' _ H1). intuition. apply REACH_nil. unfold exportedSrc. rewrite sharedSrc_iff_frgnpub, H1. intuition. trivial. split. Focus 2. unfold vis in *. rewrite replace_externs_locBlocksSrc, replace_externs_frgnBlocksSrc, replace_externs_as_inj in *. intuition. (* globalfunction_ptr_inject *) unfold globalfunction_ptr_inject; intros. apply GFP in H1; destruct H1. split; auto. move INC at bottom. apply extern_incr_as_inj in INC; auto. rewrite replace_locals_as_inj in INC. apply INC; assumption. econstructor; try rewrite restrict_sm_all; try eassumption. {(*Match_stacks*) clear UnchPrivSrc OBS INCvisNu'. eapply match_stacks_bound. instantiate (1:=Mem.nextblock m2). 2: eapply forward_nextblock; eassumption. eapply match_stacks_RSI. 15: eapply MS. 11: eapply UnchLOOR. assumption. assumption. (* assumption. *) rewrite replace_externs_as_inj. apply extern_incr_as_inj in INC. rewrite replace_locals_as_inj in INC; assumption. assumption. instantiate (1:= vis mu). trivial. trivial. rewrite replace_externs_vis. intros. exploit extern_incr_vis; try eassumption. rewrite replace_locals_vis; intros. rewrite H2 in H1. clear H2. unfold vis in H1. remember (locBlocksSrc nu' b) as q. destruct q; simpl in *; trivial. apply andb_true_iff; split. unfold DomSrc. rewrite (frgnBlocksSrc_extBlocksSrc _ WDnu' _ H1). intuition. apply REACH_nil. unfold exportedSrc. 
rewrite sharedSrc_iff_frgnpub, H1; trivial. intuition. rewrite replace_externs_local, replace_externs_vis. assert (LOC: local_of mu = local_of nu'). red in INC. rewrite replace_locals_local in INC. eapply INC. rewrite <- LOC in *. red; intros ? ? ? Hb. destruct (restrictD_Some _ _ _ _ _ Hb); clear Hb. apply restrictI_Some; trivial. destruct (local_DomRng _ WDmu _ _ _ H1) as [lS _]. assert (LS: locBlocksSrc mu = locBlocksSrc nu'). red in INC. rewrite replace_locals_locBlocksSrc in INC. eapply INC. rewrite <- LS, lS. trivial. rewrite replace_externs_locBlocksTgt. assert (LOC: locBlocksTgt mu = locBlocksTgt nu'). red in INC. rewrite replace_locals_locBlocksTgt in INC. eapply INC. rewrite LOC; trivial. intros. eapply FwdSrc; eassumption. intros. eapply FwdTgt; eassumption. (*Tried replace_externs_meminj_preserves_globals_as_inj*) rewrite replace_externs_as_inj. assumption. (*rewrite replace_externs_frgnBlocksSrc. intros. unfold DomSrc. assert (FRG: frgnBlocksSrc mu = frgnBlocksSrc nu'). red in INC. rewrite replace_locals_frgnBlocksSrc in INC. apply INC. rewrite FRG in H1. specialize (frgnBlocksSrc_extBlocksSrc _ WDnu' _ H1). intros EE. rewrite (extBlocksSrc_locBlocksSrc _ WDnu' b), EE; simpl. apply REACH_nil. apply orb_true_iff. right. apply frgnSrc_shared; trivial. trivial. clear - PGnu'. red. rewrite replace_externs_as_inj in *; assumption. *) rewrite replace_externs_local. red in INC. rewrite replace_locals_local in INC. eapply INC. assumption. } rewrite replace_externs_as_inj, replace_externs_vis. clear - RValInjNu' WDnu'. inv RValInjNu'; econstructor; eauto. apply restrictI_Some; trivial. destruct (locBlocksSrc nu' b1); simpl; trivial. destruct (as_inj_DomRng _ _ _ _ H WDnu') as [dS dT]. rewrite dS; simpl. apply REACH_nil. unfold exportedSrc. apply orb_true_iff; left. apply getBlocks_char. exists ofs1; left; eauto. rewrite replace_externs_as_inj, replace_externs_vis. eapply inject_restrict; try eassumption. Qed. Hint Resolve Match_AfterExternal: trans_correct. (*Some handy lemmas:*) Lemma as_inj_retrict: forall mu b1 b2 d, as_inj (restrict_sm mu (vis mu)) b1 = Some (b2, d) -> as_inj mu b1 = Some (b2, d). intros; autorewrite with restrict in H. unfold restrict in H; destruct (vis mu b1) eqn:eq; inv H; auto. Qed. Lemma local_of_loc_inj: forall mu b b' delta (WD: SM_wd mu) (loc: locBlocksTgt mu b' = true), as_inj mu b = Some (b', delta) -> local_of mu b = Some (b', delta). unfold as_inj. unfold join. intros. destruct WD. destruct (extern_of mu b) eqn:extern_mu_b; try assumption. destruct p. inv H. apply extern_DomRng in extern_mu_b. destruct extern_mu_b as [extDom extRng]. destruct (disjoint_extern_local_Tgt b'); [rewrite loc in H | rewrite extRng in H]; discriminate. Qed. Lemma alloc_local_restrict: forall mu mu' m1 m2 m1' m2' sp' f' (A : Mem.alloc m2 0 (fn_stacksize f') = (m2', sp')) (H15 : sm_locally_allocated mu mu' m1 m2 m1' m2') (SP: sp' = Mem.nextblock m2), locBlocksTgt (restrict_sm mu' (vis mu')) sp' = true. intros. unfold sm_locally_allocated in H15. destruct mu. destruct mu'; simpl in *. intuition. rewrite H1. assert (fl: freshloc m2 m2' sp' = true). unfold freshloc. assert (vb: ~ Mem.valid_block m2 sp'). unfold Mem.valid_block. subst sp'. xomega. assert (vb': Mem.valid_block m2' sp'). unfold Mem.valid_block. (*erewrite (Mem.nextblock_alloc m2 _ _ m2' sp').*) rewrite (Mem.nextblock_alloc m2 0 (fn_stacksize f') m2' sp'). subst sp'. xomega. subst sp'. exact A. destruct (valid_block_dec m2' sp'); destruct (valid_block_dec m2 sp'); intuition. rewrite fl; apply orb_true_r. Qed. 
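(* A standalone sanity check of the allocation pattern used repeatedly below; it merely re-wraps CompCert's [Mem.alloc_result] (the name [alloc_block_is_nextblock] is local to this sketch): the block returned by [Mem.alloc] is exactly [Mem.nextblock] of the pre-allocation memory, which is why freshly allocated stack blocks are never valid beforehand. *)
Remark alloc_block_is_nextblock:
  forall m lo hi m' b, Mem.alloc m lo hi = (m', b) -> b = Mem.nextblock m.
Proof. intros. eapply Mem.alloc_result; eauto. Qed.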
Lemma allocated_is_local: forall mu mu' stk m1 m1' m2 m2' f, Mem.alloc m1 0 (fn_stacksize f) = (m1', stk) -> sm_locally_allocated mu mu' m1 m2 m1' m2' -> locBlocksSrc mu' stk = true. intros mu mu' stk m1 m1' m2 m2' f H1 H2. rewrite (Mem.alloc_result _ _ _ _ stk H1). rewrite (Mem.alloc_result _ _ _ _ stk H1) in H1. unfold sm_locally_allocated in H2. destruct mu; destruct mu'; simpl in *. intuition. rewrite H. assert (fl: freshloc m1 m1' (Mem.nextblock m1) = true). unfold freshloc. assert (vb: ~ Mem.valid_block m1 (Mem.nextblock m1)). unfold Mem.valid_block. xomega. assert (vb': Mem.valid_block m1' (Mem.nextblock m1)). unfold Mem.valid_block. rewrite (Mem.nextblock_alloc m1 0 (fn_stacksize f) m1' (Mem.nextblock m1)). xomega. auto. destruct (valid_block_dec m1' (Mem.nextblock m1)); destruct (valid_block_dec m1 (Mem.nextblock m1)); intuition. rewrite fl; apply orb_true_r. Qed.
Lemma freshalloc_restricted_map: forall mu mu' stk m1 m1' m2 m2' f sp' delta, Mem.alloc m1 0 (fn_stacksize f) = (m1', stk) -> sm_locally_allocated mu mu' m1 m2 m1' m2' -> as_inj mu' stk = Some (sp', delta) -> as_inj (restrict_sm mu' (vis mu')) stk = Some (sp', delta). intros mu mu' stk m1 m1' m2 m2' f sp' delta alloc loc_alloc map. autorewrite with restrict. unfold restrict. rewrite map. unfold vis. erewrite allocated_is_local; eauto. Qed.
Lemma intern_incr_localloc_vis: forall mu mu' m1 m2 m1' m2', intern_incr mu mu' -> sm_locally_allocated mu mu' m1 m2 m1' m2' -> forall b, vis mu' b = vis mu b || freshloc m1 m1' b. unfold sm_locally_allocated, intern_incr, vis. intros; destruct mu, mu'; simpl in *. repeat open_Hyp. rewrite H0. rewrite H9. repeat rewrite <- orb_assoc. f_equal. apply orb_comm. Qed.
Lemma injection_almost_equality_restrict: forall mu mu' m1 m2 m1' m2' stk f, Mem.alloc m1 0 (fn_stacksize f) = (m1', stk) -> intern_incr mu mu' -> sm_locally_allocated mu mu' m1 m2 m1' m2' -> (forall b : block, (b = stk -> False) -> as_inj mu' b = as_inj mu b) -> forall b1 : block, b1 <> stk -> as_inj (restrict_sm mu' (vis mu')) b1 = as_inj (restrict_sm mu (vis mu)) b1. intros. autorewrite with restrict. unfold restrict. erewrite intern_incr_localloc_vis; eauto. erewrite (freshloc_alloc _ _ _ _ stk H). destruct (eq_block b1 stk). simpl. apply H3 in e; inversion e. simpl; rewrite orb_false_r. rewrite H2; eauto. Qed.
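(* The [HX]/[HY]/[HXY]-style hypotheses of Section [MS_RSI] instantiate a simple fact about restricted partial maps: enlarging the restriction predicate preserves every binding. A minimal standalone sketch over bare [meminj]-shaped maps (the name [restrict_incr_sketch] is local to this illustration): *)
Remark restrict_incr_sketch:
  forall (j: block -> option (block * Z)) (X Y: block -> bool)
         (HXY: forall b, X b = true -> Y b = true) b p,
  (if X b then j b else None) = Some p ->
  (if Y b then j b else None) = Some p.
Proof.
  intros j X Y HXY b p H. destruct (X b) eqn:Hx.
  - rewrite (HXY _ Hx). exact H.
  - simpl in H. discriminate.
Qed.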
Lemma local_of_restrict_vis: forall mu sp sp' delta, SM_wd mu -> local_of (restrict_sm mu (vis mu)) sp = Some (sp', delta) -> as_inj (restrict_sm mu (vis mu)) sp = Some (sp', delta). intros mu sp sp' delta SMWD SP. autorewrite with restrict. unfold restrict. rewrite restrict_sm_local in SP; auto. unfold restrict in SP. destruct (vis mu sp) eqn:vismusp; simpl in SP; try solve [inv SP]. unfold as_inj, join. rewrite SP. destruct (extern_of mu sp) eqn:extofmusp; simpl; auto. destruct p. apply SMWD in extofmusp; apply SMWD in SP. repeat open_Hyp. destruct SMWD; specialize (disjoint_extern_local_Src sp); destruct disjoint_extern_local_Src. rewrite H3 in H1; inv H1. rewrite H3 in H; inv H. Qed. Lemma loc_privete_restrict: forall mu m1 m2 sp ofs, SM_wd mu -> locBlocksTgt (restrict_sm mu (vis mu)) sp = true -> loc_private (as_inj (restrict_sm mu (vis mu))) m1 m2 sp ofs -> loc_private (as_inj mu) m1 m2 sp ofs. unfold loc_private; intros. repeat open_Hyp. split. auto. intros. apply H2. assert (SL': locBlocksTgt mu sp = true). erewrite <- restrict_sm_locBlocksTgt. eassumption. autorewrite with restrict; unfold restrict; unfold vis. erewrite <- (as_inj_locBlocks) in SL'; eauto. erewrite SL'; rewrite orb_true_l; eauto. Qed. Ltac extend_smart:= let x := fresh "x" in extensionality x. Ltac rewrite_freshloc := match goal with | H: (Mem.storev _ _ _ _ = Some _) |- _ => rewrite (storev_freshloc _ _ _ _ _ H) | H: (Mem.free _ _ _ _ = Some _) |- _ => apply freshloc_free in H; rewrite H | _ => try rewrite freshloc_irrefl end. Ltac loc_alloc_solve := apply sm_locally_allocatedChar; repeat split; try extend_smart; try rewrite_freshloc; intuition. Lemma Empty_Effect_implication: forall mu m1 (b0 : block) (ofs : Z), EmptyEffect b0 ofs = true -> visTgt mu b0 = true /\ (locBlocksTgt mu b0 = false -> exists (b1 : block) (delta1 : Z), foreign_of mu b1 = Some (b0, delta1) /\ EmptyEffect b1 (ofs - delta1) = true /\ Mem.perm m1 b1 (ofs - delta1) Max Nonempty). intros mu m1 b ofs empt; unfold EmptyEffect in empt; inv empt. Qed. Lemma step_simulation_effect: forall (st1 : RTL_core) (m1 : mem) (st1' : RTL_core) (m1' : mem) (U1 : block -> Z -> bool) (ES: effstep (rtl_eff_sem hf) ge U1 st1 m1 st1' m1'), forall (st2 : RTL_core) (mu : SM_Injection) (m2 : mem) (* (U2vis: forall (b : block) (ofs : Z), U1 b ofs = true -> vis mu b = true)*) (MC: MATCH st1 mu st1 m1 st2 m2), exists (st2' : RTL_core) (m2' : mem), (exists U2 : block -> Z -> bool, (effstep_plus (rtl_eff_sem hf) tge U2 st2 m2 st2' m2' \/ (RTL_measure st1' < RTL_measure st1)%nat /\ effstep_star (rtl_eff_sem hf) tge U2 st2 m2 st2' m2') /\ (forall (b : block) (ofs : Z), U2 b ofs = true -> visTgt mu b = true /\ (locBlocksTgt mu b = false -> exists (b1 : block) (delta1 : Z), foreign_of mu b1 = Some (b, delta1) /\ U1 b1 (ofs - delta1) = true /\ Mem.perm m1 b1 (ofs - delta1) Max Nonempty))) /\ exists (mu' : SM_Injection), intern_incr mu mu' /\ (*sm_inject_separated mu mu' m1 m2 /\*) globals_separate ge mu mu' /\ sm_locally_allocated mu mu' m1 m2 m1' m2' /\ MATCH st1' mu' st1' m1' st2' m2'. intros. simpl in *. destruct MC as [MS PRE]. inv ES; inv MS. (* Inop *) { exploit tr_funbody_inv; eauto. intros TR; inv TR. eexists. eexists. split. eexists. split. left; simpl. eapply effstep_plus_one; simpl. eapply rtl_effstep_exec_Inop. eassumption. apply Empty_Effect_implication. exists mu. intuition. apply gsep_refl. loc_alloc_solve. unfold MATCH. intuition. eapply match_regular_states; first [eassumption| split; eassumption]. } (* Iop *) { exploit tr_funbody_inv; eauto. 
intros TR; inv TR. repeat open_Hyp. exploit eval_operation_inject. { eapply (restrict_sm_preserves_globals _ _ (vis mu)). eauto. intros; unfold vis; rewrite H6; trivial; rewrite orb_true_r; reflexivity. } exact SP. instantiate (2 := rs##args). instantiate (1 := rs'##(sregs ctx args)). eapply agree_val_regs; eauto. eexact MINJ. eauto. fold (sop ctx op). intros [v' [A B]]. eexists. eexists. split; simpl. eexists. split. left; simpl. eapply effstep_plus_one; simpl. eapply rtl_effstep_exec_Iop. eassumption. erewrite eval_operation_preserved; eauto. exact symbols_preserved. apply Empty_Effect_implication. econstructor; eauto. split; auto. intuition. apply gsep_refl. loc_alloc_solve. unfold MATCH. intuition. eapply match_regular_states; eauto. apply match_stacks_inside_set_reg; auto. eapply restrict_sm_WD; auto. apply agree_set_reg; auto. } (* Iload *) { exploit tr_funbody_inv; eauto. intros TR; inv TR. exploit eval_addressing_inject. { destruct PRE as [A [B [C' [C D]]]]. eapply (restrict_sm_preserves_globals _ _ (vis mu)); eauto. intros; unfold vis. rewrite C; trivial; rewrite orb_true_r; reflexivity. } eexact SP. instantiate (2 := rs##args). instantiate (1 := rs'##(sregs ctx args)). eapply agree_val_regs; eauto. eauto. fold (saddr ctx addr). intros [a' [P Q]]. exploit Mem.loadv_inject; eauto. intros [v' [U V]]. assert (eval_addressing tge (Vptr sp' Int.zero) (saddr ctx addr) rs' ## (sregs ctx args) = Some a'). rewrite <- P. apply eval_addressing_preserved. exact symbols_preserved. eexists. eexists. split; simpl. eexists. split. left; simpl. eapply effstep_plus_one. eapply rtl_effstep_exec_Iload; try eassumption. apply Empty_Effect_implication. exists mu. intuition. apply gsep_refl. loc_alloc_solve. unfold MATCH; intuition. eapply match_regular_states; eauto. apply match_stacks_inside_set_reg; auto. eapply restrict_sm_WD; auto. apply agree_set_reg; auto. } (* Istore *) { exploit tr_funbody_inv; eauto. intros TR; inv TR. destruct PRE as [RC [PG [GFP [GF [SMV [WD INJ]]]]]]. exploit eval_addressing_inject. { eapply (restrict_sm_preserves_globals _ _ (vis mu)); eauto. intros; unfold vis. rewrite GF; trivial; rewrite orb_true_r; reflexivity. } eexact SP. instantiate (2 := rs##args). instantiate (1 := rs'##(sregs ctx args)). eapply agree_val_regs; eauto. eauto. fold saddr. intros [a' [P Q]]. exploit Mem.storev_mapped_inject. eexact INJ. eassumption. eapply val_inject_incr; try eapply Q. autorewrite with restrict. apply restrict_incr. eapply agree_val_reg; eauto. eapply agree_regs_incr. eassumption. autorewrite with restrict. apply restrict_incr. intros [m2' [U V]]. assert (eval_addressing tge (Vptr sp' Int.zero) (saddr ctx addr) rs' ## (sregs ctx args) = Some a'). rewrite <- P. apply eval_addressing_preserved. exact symbols_preserved. eexists. eexists. split. eexists. split. left; simpl. eapply effstep_plus_one. eapply rtl_effstep_exec_Istore; eauto. destruct a; inv H1. rewrite restrict_sm_all in Q. inv Q. intuition. apply StoreEffectD in H6. destruct H6 as [z [HI Ibounds]]. apply eq_sym in HI. inv HI. eapply visPropagateR; eassumption. eapply StoreEffect_PropagateLeft; try eassumption. econstructor. eassumption. trivial. exists mu. intuition. apply gsep_refl. loc_alloc_solve. destruct a; simpl in H1; try discriminate. destruct a'; simpl in U; try discriminate. assert (RC1': REACH_closed m1' (vis mu)). eapply REACH_Store; try eassumption. inv Q. autorewrite with restrict in H8. eapply restrictD_Some. eapply H8. intros. rewrite getBlocks_char in H5. destruct H5. destruct H5. 
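(* The stored register's value injects by [agree_val_reg], so every block it mentions is already visible. *)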
assert (val_inject (as_inj (restrict_sm mu (vis mu))) rs # src rs' # (sreg ctx src)). eapply agree_val_reg; eauto. rewrite H5 in H6. inv H6. autorewrite with restrict in H11. eapply restrictD_Some. eassumption. simpl in H5. contradiction. unfold MATCH; intuition. (*match_states*) econstructor; eauto. eapply match_stacks_inside_store; eauto. apply restrict_sm_WD; auto. autorewrite with restrict; eapply inject_restrict; try eassumption. eapply Mem.store_valid_block_1; eauto. eapply range_private_invariant; eauto. intros; split; auto. eapply Mem.perm_store_2; eauto. intros; eapply Mem.perm_store_1; eauto. intros. eapply SSZ2. eapply Mem.perm_store_2; eauto. inv H2. (* sm_valid mu m1' m2' *) split; intros. eapply Mem.store_valid_block_1; try eassumption. eapply SMV; assumption. eapply Mem.store_valid_block_1; try eassumption. eapply SMV; assumption. } (* Icall *) { exploit match_stacks_inside_globalenvs; eauto. intros [bound G]. exploit find_function_agree; eauto. intros [fd' [A B]]. exploit tr_funbody_inv; eauto. intros TR. inv TR. (* not inlined *) { destruct H as [RC [PG [GFP [Glob [SMV [WD MInj]]]]]]. Lemma find_function_translated: forall ros ls f, find_function ge ros ls = Some f -> exists tf, find_function tge ros ls = Some tf /\ transf_fundef fenv f = OK tf. Proof. unfold find_function; intros; destruct ros; simpl. apply functions_translated; auto. rewrite symbols_preserved. destruct (Genv.find_symbol ge i). apply function_ptr_translated; auto. congruence. Qed. destruct (find_function_translated _ _ _ H0) as [AA [BB CC]]. eexists. eexists. split. eexists. split. left; simpl. (* Lemma functions_translated': forall v f, Genv.find_funct ge v = Some f -> exists tf, Genv.find_funct tge v = Some tf /\ transf_fundef fenv f = OK tf. eapply Genv.find_funct_transf_partial. apply TRANSF. Qed.*) { eapply effstep_plus_one. eapply rtl_effstep_exec_Icall. - eauto. - generalize BB. unfold sros; destruct ros; eauto. - apply sig_function_translated. assumption. } (* Definition regset_inject: meminj -> regset -> regset -> Prop := fun (j : meminj) (rs rs' : regset) => forall r : positive, val_inject j rs # r rs' # r. Lemma regset_find_function_translated: forall j ros rs rs' fd ctx, meminj_preserves_globals ge j -> globalfunction_ptr_inject ge j -> regset_inject j rs rs' -> find_function ge ros rs = Some fd -> exists fd', find_function tge (sros ctx ros) rs' = Some fd' /\ transf_fundef fenv fd = OK fd'. Proof. intros until fd; destruct ros; simpl. intros. assert (RR: rs'#(sreg ctx r) = rs#(sreg ctx r)). exploit Genv.find_funct_inv; eauto. intros [b EQ]. generalize (H1 r). rewrite EQ. intro LD. inv LD. rewrite EQ in *; clear EQ. rewrite Genv.find_funct_find_funct_ptr in H2. apply H0 in H2. destruct H2. rewrite H2 in H6; inv H6. Proof (Genv.find_funct_transf_partial transf_fundef _ TRANSF). rewrite Int.add_zero. trivial. rewrite RR. apply functions_translated; auto. rewrite symbols_preserved. destruct (Genv.find_symbol ge i); intros. apply funct_ptr_translated; auto. discriminate. Lemma regset_find_function_translated: forall j ros rs rs' fd ctx, meminj_preserves_globals ge j -> globalfunction_ptr_inject ge j -> agree_regs j ctx rs rs' -> find_function ge ros rs = Some fd -> exists fd', find_function tge (sros ctx ros) rs' = Some fd' /\ transf_fundef fenv fd = OK fd'. Proof. unfold find_function; intros; destruct ros; simpl. apply functions_translated. destruct (Genv.find_funct_inv _ _ H2) as [b Hb]. destruct H1 as [H1 _]. specialize (H1 r). rewrite Hb in *. inv H1. 
rewrite Genv.find_funct_find_funct_ptr in H2. destruct (H0 _ _ H2). rewrite H1 in H6. inv H6. rewrite Int.add_zero. assumption. rewrite symbols_preserved. destruct (Genv.find_symbol ge i). apply function_ptr_translated; auto. congruence. Qed. unfold find_function; intros; destruct ros; simpl. apply functions_translated. destruct (Genv.find_funct_inv _ _ H2) as [b Hb]. destruct H1 as [AG1 AG2]. specialize (AG1 r). rewrite Hb in *. inv AG1. rewrite Genv.find_funct_find_funct_ptr in H2. destruct (H0 _ _ H2). rewrite H1 in H6. inv H6. rewrite Int.add_zero. rewrite Genv.find_funct_find_funct_ptr. assumption. rewrite symbols_preserved. destruct (Genv.find_symbol ge i). apply function_ptr_translated; auto. congruence. Qed. forall ros rs fd F ctx rs' bound, find_function ge ros rs = Some fd -> agree_regs (as_inj F) ctx rs rs' -> match_globalenvs F bound -> exists fd', find_function tge (sros ctx ros) rs' = Some fd' /\ transf_fundef fenv fd = OK fd'. simpl. eapply sig_function_translated; eauto. *) apply Empty_Effect_implication. exists mu. intuition. apply gsep_refl. loc_alloc_solve. unfold MATCH. split. econstructor; eauto. eapply match_stacks_cons; eauto. destruct MS0; assumption. eapply agree_val_regs; eauto. intuition. } (* inlined *) { assert (fd = Internal f0). simpl in H0. destruct (Genv.find_symbol ge id) as [b|] eqn:?; try discriminate. exploit (funenv_program_compat SrcProg). try eassumption. eauto. intros. unfold ge in H0. congruence. subst fd. eexists. eexists. split. eexists. split. right; split; simpl. omega. eapply effstep_star_zero. intuition. exists mu. intuition. apply gsep_refl. loc_alloc_solve. unfold MATCH; intuition. Focus 1. eapply match_call_regular_states; eauto. (* match_call_regular_states*) assert (SL: locBlocksTgt (restrict_sm mu (vis mu)) sp' = true) by (destruct MS0; assumption). eapply match_stacks_inside_inlined; eauto. apply local_of_loc_inj; auto; try (apply restrict_sm_WD); auto. red; intros. apply PRIV. inv H13. destruct H16. xomega. apply agree_val_regs_gen; auto. red; intros; apply PRIV. destruct H16. omega. } } (* Itailcall *) { exploit match_stacks_inside_globalenvs; eauto. intros [bound G]. exploit find_function_agree; eauto. intros [fd' [A B]]. assert (PRIV': range_private (as_inj (restrict_sm mu (vis mu))) m1' m2 sp' (dstk ctx) f'.(fn_stacksize)). eapply range_private_free_left; eauto. inv FB. rewrite <- H4. auto. exploit tr_funbody_inv; eauto. intros TR. inv TR. (* within the original function *) { inv MS0; try congruence. assert (X: { m1' | Mem.free m2 sp' 0 (fn_stacksize f') = Some m1'}). apply Mem.range_perm_free. red; intros. destruct (zlt ofs f.(fn_stacksize)). replace ofs with (ofs + dstk ctx) by omega. eapply Mem.perm_inject; eauto. eapply Mem.free_range_perm; eauto. omega. inv FB. eapply range_private_perms; eauto. xomega. destruct X as [m2' FREE]. eexists. eexists. split. eexists. split. left; simpl. eapply effstep_plus_one. eapply rtl_effstep_exec_Itailcall; eauto. eapply sig_function_translated; eauto. rewrite restrict_sm_all in SP. destruct (restrictD_Some _ _ _ _ _ SP). intuition. apply FreeEffectD in H14. destruct H14; subst. eapply visPropagate; try eassumption. eapply FreeEffect_PropagateLeft; try eassumption. eapply as_inj_retrict; autorewrite with restrict; rewrite <- DSTK; eassumption. apply FreeEffectD in H14. destruct H14 as [? [? ?]]; subst. rewrite restrict_sm_locBlocksTgt in *. rewrite SL in H16. inversion H16. exists mu. intuition. apply gsep_refl. loc_alloc_solve. assert (Mem.inject (as_inj mu) m1' m2'). 
eapply Mem.free_right_inject. eapply Mem.free_left_inject. eapply H13. eassumption. eassumption. intros. rewrite DSTK in PRIV'. exploit (PRIV' (ofs + delta)). omega. intros [P Q]. eelim Q. autorewrite with restrict. eapply restrictI_Some. eapply H12. rewrite restrict_sm_locBlocksTgt in SL. erewrite <- (as_inj_locBlocks _ b1 sp') in SL; try eassumption. unfold vis. rewrite SL. eapply orb_true_l. replace (ofs + delta - delta) with ofs by omega. apply Mem.perm_max with k. apply Mem.perm_implies with p; auto with mem. unfold MATCH. intuition. econstructor; eauto. eapply match_stacks_bound with (bound := sp'). eapply match_stacks_invariant; eauto. apply restrict_sm_WD; auto. intros. eapply Mem.perm_free_3; eauto. intros. eapply Mem.perm_free_1; eauto. intros. eapply Mem.perm_free_3; eauto. erewrite Mem.nextblock_free; eauto. red in VB; xomega. eapply agree_val_regs; eauto. eapply Mem.free_right_inject; eauto. eapply Mem.free_left_inject; eauto. (* show that no valid location points into the stack block being freed *) intros. rewrite DSTK in PRIV'. exploit (PRIV' (ofs + delta)). omega. intros [P Q]. eelim Q; eauto. replace (ofs + delta - delta) with ofs by omega. apply Mem.perm_max with k. apply Mem.perm_implies with p; auto with mem. eapply REACH_closed_free; eauto. (* sm_valid mu m1' m2' *) split; intros. eapply Mem.valid_block_free_1; try eassumption. eapply H10; assumption. eapply Mem.valid_block_free_1; try eassumption. eapply H10; assumption. } (* turned into a call *) { eexists. eexists. split. eexists. split. left; simpl. eapply effstep_plus_one. eapply rtl_effstep_exec_Icall; eauto. eapply sig_function_translated; eauto. intros b ofs empt; unfold EmptyEffect in empt; inv empt. exists mu. intuition. apply gsep_refl. loc_alloc_solve. unfold MATCH. intuition. econstructor; eauto. eapply match_stacks_untailcall; eauto. eapply match_stacks_inside_invariant; eauto. apply restrict_sm_WD; auto. intros. eapply Mem.perm_free_3; eauto. destruct MS0; assumption. eapply agree_val_regs; eauto. eapply Mem.free_left_inject; eauto. eapply REACH_closed_free; eauto. (* sm_valid mu m1' m2 *) split; intros. eapply Mem.valid_block_free_1; try eassumption. eapply H10; assumption. eapply H10; assumption. (* Mem.inject (as_inj mu) m1' m2' *) eapply Mem.free_left_inject; eauto. } (* inlined *) { assert (fd = Internal f0). simpl in H0. destruct (Genv.find_symbol ge id) as [b|] eqn:?; try discriminate. exploit (funenv_program_compat SrcProg); eauto. intros. unfold ge in H0. congruence. subst fd. eexists. eexists. split. eexists. split. right; split. simpl; omega. eapply effstep_star_zero. intuition. exists mu. intuition. apply gsep_refl. loc_alloc_solve. unfold MATCH; intuition. econstructor; eauto. eapply match_stacks_inside_inlined_tailcall; eauto. eapply match_stacks_inside_invariant; eauto. apply restrict_sm_WD; auto. intros. eapply Mem.perm_free_3; eauto. apply agree_val_regs_gen; auto. eapply Mem.free_left_inject; eauto. red; intros; apply PRIV'. assert (dstk ctx <= dstk ctx'). red in H14; rewrite H14. apply align_le. apply min_alignment_pos. omega. eapply REACH_closed_free; eauto. (* sm_valid mu m1' m2 *) split; intros. eapply Mem.valid_block_free_1; try eassumption. eapply H15; assumption. eapply H15; assumption. eapply Mem.free_left_inject; eauto. } } { (* builtin*) exploit tr_funbody_inv; eauto. intros TR; inv TR. rename MINJ into MINJR. destruct PRE as [RC [PG [GFP [Glob [SMV [WD MINJ]]]]]]. assert (PGR: meminj_preserves_globals ge (restrict (as_inj mu) (vis mu))). rewrite <- restrict_sm_all. 
eapply restrict_sm_preserves_globals; try eassumption. unfold vis. intuition. rewrite restrict_sm_all in *. assert (ArgsInj:= agree_val_regs _ _ _ _ args AG). exploit (BuiltinEffects.inlineable_extern_inject _ _ GDE_lemma); (*try eapply H;*) try eassumption. apply symbols_preserved. intros [mu' [vres' [tm' [EC [VINJ [MINJ' [UNMAPPED [OUTOFREACH [INCR [SEPARATED [GSEP [LOCALLOC [WD' [VAL' RC']]]]]]]]]]]]]]. exists (RTL_State stk' f' (Vptr sp' Int.zero) (spc ctx pc') (rs'#(sreg ctx res) <- vres')), tm'. split. eexists. split. left. apply effstep_plus_one. eapply rtl_effstep_exec_Ibuiltin; eauto. intros. eapply BuiltinEffects.BuiltinEffect_Propagate; eassumption. exists mu'. intuition. assert (ISEP: inject_separated (restrict (as_inj mu) (vis mu)) (restrict (as_inj mu') (vis mu')) m1 m2). red. intros ??? RAI RAI'. destruct (restrictD_Some _ _ _ _ _ RAI') as [AI' VIS']; clear RAI'. destruct (restrictD_None' _ _ _ RAI) as [AI | [bb2 [dd [AI VIS]]]]; clear RAI. apply sm_inject_separated_mem in SEPARATED. apply (SEPARATED _ _ _ AI AI'). trivial. rewrite (intern_incr_vis_inv _ _ WD WD' INCR _ _ _ AI VIS') in VIS; discriminate. split. { econstructor; eauto. { eapply match_stacks_inside_set_reg. apply restrict_sm_WD; trivial. eapply match_stacks_inside_extcall; try eapply MS0. apply restrict_sm_WD; trivial. apply restrict_sm_WD; trivial. intros; eapply external_call_max_perm; eauto. intros; eapply external_call_max_perm; eauto. rewrite restrict_sm_all. apply OUTOFREACH. rewrite restrict_sm_all. apply MINJR. apply restrict_sm_intern_incr; trivial. repeat rewrite restrict_sm_all; trivial. clear - SMV. destruct SMV. split; intros. rewrite restrict_sm_DOM in H1. apply (H _ H1). rewrite restrict_sm_RNG in H1. apply (H0 _ H1). apply VB. } rewrite restrict_sm_all. apply agree_set_reg; eauto. eapply agree_regs_incr; eauto. apply (intern_incr_restrict _ _ WD' INCR). rewrite restrict_sm_all. apply (intern_incr_restrict _ _ WD' INCR). assumption. rewrite restrict_sm_all. apply inject_restrict; assumption. eapply external_call_mem_forward; try eassumption. { rewrite restrict_sm_all. eapply range_private_extcall; try eassumption. intros. eapply external_call_mem_forward; eauto. apply (intern_incr_restrict _ _ WD' INCR). } intros. apply SSZ2. eapply external_call_max_perm; eauto. } intuition. eapply meminj_preserves_incr_sep. eapply PG. eassumption. apply intern_incr_as_inj; trivial. apply sm_inject_separated_mem; eassumption. (*globalfunction_ptr_inject ge (as_inj mu')*) red; intros b fb Hb. destruct (GFP _ _ Hb). split; trivial. eapply intern_incr_as_inj; eassumption. assert (FRG: frgnBlocksSrc mu = frgnBlocksSrc mu') by eapply INCR. rewrite <- FRG. apply Glob; assumption. } (* Icond *) { exploit tr_funbody_inv; eauto. intros TR; inv TR. assert (eval_condition cond rs'##(sregs ctx args) m2 = Some b). eapply eval_condition_inject; eauto. eapply agree_val_regs; eauto. eexists. eexists. split; simpl. eexists. split. left; simpl. eapply effstep_plus_one. eapply rtl_effstep_exec_Icond; eauto. apply Empty_Effect_implication. exists mu. intuition. apply gsep_refl. loc_alloc_solve. unfold MATCH. intuition. destruct b; econstructor; eauto. } (* jumptable *) { exploit tr_funbody_inv; eauto. intros TR; inv TR. assert (H3: val_inject (as_inj (restrict_sm mu (vis mu))) rs#arg rs'#(sreg ctx arg)). eapply agree_val_reg; eauto. rewrite H0 in H3; inv H3. eexists. eexists. split; simpl. eexists. split. left. eapply effstep_plus_one. eapply rtl_effstep_exec_Ijumptable; eauto. rewrite list_nth_z_map. rewrite H1. 
simpl; reflexivity. apply Empty_Effect_implication. exists mu. intuition. apply gsep_refl. loc_alloc_solve. unfold MATCH. intuition. econstructor; eauto. } (* return *) { exploit tr_funbody_inv; eauto. intros TR; inv TR. (* not inlined *) { inv MS0; try congruence. assert (X: { m1' | Mem.free m2 sp' 0 (fn_stacksize f') = Some m1'}). apply Mem.range_perm_free. red; intros. destruct (zlt ofs f.(fn_stacksize)). replace ofs with (ofs + dstk ctx) by omega. eapply Mem.perm_inject; eauto. eapply Mem.free_range_perm; eauto. omega. inv FB. eapply range_private_perms; eauto. generalize (Zmax_spec (fn_stacksize f) 0). destruct (zlt 0 (fn_stacksize f)); omega. destruct X as [m2' FREE]. eexists. eexists. split. eexists. split; simpl. left. eapply effstep_plus_one. eapply rtl_effstep_exec_Ireturn; eauto. (*Here is the effect: return*) rewrite restrict_sm_all in SP. destruct (restrictD_Some _ _ _ _ _ SP). destruct PRE as [RC [PG [GFP [Glob [SMV [WD MINJ']]]]]]. intuition. apply FreeEffectD in H7. destruct H7; subst. eapply visPropagate; try eassumption. eapply FreeEffect_PropagateLeft; try eassumption. eapply as_inj_retrict; autorewrite with restrict; rewrite <- DSTK; eassumption. apply FreeEffectD in H7. destruct H7 as [? [? ?]]; subst. rewrite restrict_sm_locBlocksTgt in *. rewrite SL in H8. inversion H8. exists mu. intuition. apply gsep_refl. loc_alloc_solve. unfold MATCH; intuition. econstructor; eauto. eapply match_stacks_bound with (bound := sp'). eapply match_stacks_invariant; eauto. apply restrict_sm_WD; auto. intros. eapply Mem.perm_free_3; eauto. intros. eapply Mem.perm_free_1; eauto. intros. eapply Mem.perm_free_3; eauto. erewrite Mem.nextblock_free; eauto. red in VB; xomega. destruct or; simpl. apply agree_val_reg; auto. auto. eapply Mem.free_right_inject; eauto. eapply Mem.free_left_inject; eauto. (* show that no valid location points into the stack block being freed *) intros. inversion FB; subst. assert (PRIV': range_private (as_inj (restrict_sm mu (vis mu))) m1' m2 sp' (dstk ctx) f'.(fn_stacksize)). rewrite H17 in PRIV. eapply range_private_free_left; eauto. rewrite DSTK in PRIV'. exploit (PRIV' (ofs + delta)). omega. intros [A B]. eelim B; eauto. replace (ofs + delta - delta) with ofs by omega. apply Mem.perm_max with k. apply Mem.perm_implies with p; auto with mem. eapply REACH_closed_free; eauto. (* sm_valid mu m1' m2 *) split; intros. eapply Mem.valid_block_free_1; try eassumption. eapply H9; assumption. eapply Mem.valid_block_free_1; try eassumption. eapply H9; assumption. eapply Mem.free_right_inject; eauto. eapply Mem.free_left_inject; eauto. (* show that no valid location points into the stack block being freed *) intros. inversion FB; subst. assert (PRIV': range_private (as_inj (restrict_sm mu (vis mu))) m1' m2 sp' (dstk ctx) f'.(fn_stacksize)). rewrite H17 in PRIV. eapply range_private_free_left; eauto. rewrite DSTK in PRIV'. exploit (PRIV' (ofs + delta)). omega. intros [A B]. eelim B. autorewrite with restrict. eapply restrictI_Some. apply H11. rewrite restrict_sm_locBlocksTgt in SL. erewrite <- (as_inj_locBlocks _ b1 sp') in SL; try eassumption. unfold vis. rewrite SL. eapply orb_true_l. replace (ofs + delta - delta) with ofs by omega. apply Mem.perm_max with k. apply Mem.perm_implies with p; auto with mem. } (* inlined *) { eexists. eexists. split; simpl. eexists. split. right; split; simpl. omega. eapply effstep_star_zero. intuition. exists mu. intuition. apply gsep_refl. loc_alloc_solve. unfold MATCH; intuition. econstructor; eauto. 
eapply match_stacks_inside_invariant; eauto. apply restrict_sm_WD; auto. intros. eapply Mem.perm_free_3; eauto. destruct or; simpl. apply agree_val_reg; auto. auto. eapply Mem.free_left_inject; eauto. inv FB. subst. rewrite H14 in PRIV. eapply range_private_free_left; eauto. eapply REACH_closed_free; eauto. (*sm_valid*) split; intros. eapply Mem.valid_block_free_1; try eassumption. eapply H9; assumption. eapply H9; assumption. (* Mem.inject (as_inj mu) m1' m2 *) eapply Mem.free_left_inject; eauto. } } (* internal function, not inlined *) { assert (A: exists f', tr_function fenv f f' /\ fd' = Internal f'). Errors.monadInv FD. exists x. split; auto. eapply transf_function_spec; eauto. destruct A as [f' [TR EQ]]. inversion TR; subst. repeat open_Hyp. exploit alloc_parallel_intern; eauto. apply Zle_refl. instantiate (1 := fn_stacksize f'). inv H0. xomega. intros [mu' [m2' [sp' [A [B [C [D E]]]]]]]. eexists. eexists. split; simpl. eexists. split; simpl. left. eapply effstep_plus_one. eapply rtl_effstep_exec_function_internal; eauto. apply Empty_Effect_implication. rewrite H4. exists mu'. intuition. eapply intern_incr_globals_separate; eauto. unfold MATCH; intuition. unfold globals_separate. rewrite H5. rewrite <- H4. eapply match_regular_states; eauto. assert (SP: sp' = Mem.nextblock m2) by (eapply Mem.alloc_result; eauto). apply match_stacks_inside_base. rewrite <- SP in MS0. eapply (match_stacks_invariant (restrict_sm mu (vis mu))); eauto. eapply restrict_sm_intern_incr; auto. eapply restrict_sm_WD; auto. intros. destruct (eq_block b1 stk). subst b1. apply as_inj_retrict in H21; rewrite D in H21; inv H21. subst b2. eelim Plt_strict; eauto. rewrite <- H21. autorewrite with restrict. unfold restrict. rewrite H15; auto. assert (vis mu' b1 = true ). destruct (vis mu' b1) eqn:vismu'b1; auto. autorewrite with restrict in H21. unfold restrict in H21. rewrite vismu'b1 in H21; inv H21. erewrite (intern_incr_vis_inv mu mu'); auto. rewrite H23; auto. rewrite <- H15; auto. apply as_inj_retrict in H21; eassumption. intros. exploit Mem.perm_alloc_inv. eexact H. eauto. destruct (eq_block b1 stk); intros; auto. subst b1. apply as_inj_retrict in H21. rewrite D in H21; inv H21. subst b2. eelim Plt_strict; eauto. intros. eapply Mem.perm_alloc_1; eauto. intros. exploit Mem.perm_alloc_inv. eexact A. eauto. rewrite dec_eq_false; auto. eapply alloc_local_restrict; eauto. auto. auto. auto. rewrite H4. apply agree_regs_init_regs. eapply val_list_inject_incr. autorewrite with restrict. eapply intern_incr_restrict; try (apply C); auto. autorewrite with restrict in VINJ; auto. inv H0; auto. eapply freshalloc_restricted_map; eauto. rewrite H1; auto. autorewrite with restrict. apply inject_restrict; auto. eapply Mem.valid_new_block; eauto. red; intros. split. eapply Mem.perm_alloc_2; eauto. inv H0; xomega. intros; red; intros. exploit Mem.perm_alloc_inv. eexact H. eauto. destruct (eq_block b stk); intros; apply as_inj_retrict in H22. subst. rewrite D in H22; inv H22. inv H0; xomega. rewrite H15 in H22; auto. eelim Mem.fresh_block_alloc. eexact A. eapply Mem.mi_mappedblocks. apply H14. apply H22. intros. exploit Mem.perm_alloc_3; eauto. xomega. apply (meminj_preserves_incr_sep ge (as_inj mu) H9 m1 m2); eauto. apply intern_incr_as_inj; auto. apply sm_inject_separated_mem; auto. (*globalfunction_ptr_inject ge (as_inj mu')*) red; intros b fb Hb. destruct (H10 _ _ Hb). split; trivial. eapply intern_incr_as_inj; eassumption. eapply intern_incr_meminj_preserves_globals_as_inj in H18. destruct H18 as [H00 H01]; apply H01; auto. 
eexact H21. exact H13. split; eauto. assumption. } (* internal function, inlined *) { inversion FB; subst. repeat open_Hyp. exploit alloc_left_mapped_sm_inject; try eassumption. (* sp' is local *) destruct MS0; unfold locBlocksTgt in SL; unfold restrict_sm in SL; destruct mu; simpl in *; assumption. (* offset is representable *) instantiate (1 := dstk ctx). generalize (Zmax2 (fn_stacksize f) 0). omega. (* size of target block is representable *) intros. right. exploit SSZ2; eauto with mem. inv FB; omega. (* we have full permissions on sp' at and above dstk ctx *) intros. apply Mem.perm_cur. apply Mem.perm_implies with Freeable; auto with mem. eapply range_private_perms; eauto. xomega. (* offset is aligned *) replace (fn_stacksize f - 0) with (fn_stacksize f) by omega. inv FB. apply min_alignment_sound; auto. (* nobody maps to (sp, dstk ctx...) *) intros. exploit (PRIV (ofs + delta')); eauto. xomega. intros [A B]. apply (B b delta'); eauto. assert (SL': locBlocksTgt mu sp' = true). destruct MS0; unfold locBlocksTgt in SL; unfold restrict_sm in SL; destruct mu; simpl in *; assumption. rewrite <- (as_inj_locBlocks mu b sp' delta') in SL'; auto. autorewrite with restrict. unfold restrict; unfold vis. rewrite SL'. rewrite orb_true_l; simpl; assumption. replace (ofs + delta' - delta') with ofs by omega. apply Mem.perm_max with k. apply Mem.perm_implies with p; auto with mem. intros [mu' [A [B [C D]]]]. exploit tr_moves_init_regs_eff; eauto. intros [rs'' [P [Q R]]]. eexists. eexists. split; simpl. eexists. split; simpl. left. eapply effstep_plus_star_trans. eapply effstep_plus_one. eapply rtl_effstep_exec_Inop; eauto. eapply P. apply Empty_Effect_implication. exists mu'; intuition. eapply intern_incr_globals_separate; eauto. (*First SEP*) unfold MATCH; intuition. constructor; eauto. assert (SM_wd (restrict_sm mu (vis mu))). apply restrict_sm_WD; auto. assert (SM_wd (restrict_sm mu' (vis mu'))). apply restrict_sm_WD; auto. eapply (match_stacks_inside_alloc_left (restrict_sm mu (vis mu))); eauto. eapply match_stacks_inside_invariant; eauto. eapply restrict_sm_intern_incr; eauto. eapply freshalloc_restricted_map; eauto. eapply injection_almost_equality_restrict; eauto. omega. apply agree_regs_incr with (as_inj (restrict_sm mu (vis mu))); auto. apply intern_incr_as_inj; try apply restrict_sm_intern_incr; eauto. apply restrict_sm_WD; auto. eapply freshalloc_restricted_map; eauto. autorewrite with restrict. eapply inject_restrict; eauto. rewrite H2. eapply range_private_alloc_left; eauto. eapply freshalloc_restricted_map; eauto. eapply injection_almost_equality_restrict; eauto. eapply intern_incr_meminj_preserves_globals_as_inj with (mu0:=mu); eauto. (*globalfunction_ptr_inject ge (as_inj mu')*) red; intros b fb Hb. destruct (H10 _ _ Hb). split; trivial. eapply intern_incr_as_inj; eassumption. eapply intern_incr_meminj_preserves_globals_as_inj with (mu0:=mu); eauto. } { (* nonobservable external call *) rename MINJ into MINJR. destruct PRE as [RC [PG [GFP [Glob [SMV [WD MINJ]]]]]]. assert (PGR: meminj_preserves_globals ge (restrict (as_inj mu) (vis mu))). rewrite <- restrict_sm_all. eapply restrict_sm_preserves_globals; try eassumption. unfold vis. intuition. rewrite restrict_sm_all in *. simpl in FD. inv FD. specialize (BuiltinEffects.EFhelpers _ _ OBS); intros. exploit (BuiltinEffects.inlineable_extern_inject _ _ GDE_lemma); try eapply H0; try eassumption. apply symbols_preserved. 
intros [mu' [vres' [tm' [EC [RESINJ [MINJ' [UNMAPPED [OUTOFREACH [INCR [SEPARATED [GSEP [LOCALLOC [WD' [VAL' RC']]]]]]]]]]]]]]. eexists; eexists. split. eexists. split. left. eapply effstep_plus_one. eapply rtl_effstep_exec_function_external; eauto. intros. eapply BuiltinEffects.BuiltinEffect_Propagate; eassumption. exists mu'. intuition. assert (ISEP: inject_separated (restrict (as_inj mu) (vis mu)) (restrict (as_inj mu') (vis mu')) m1 m2). red. intros ??? RAI RAI'. destruct (restrictD_Some _ _ _ _ _ RAI') as [AI' VIS']; clear RAI'. destruct (restrictD_None' _ _ _ RAI) as [AI | [bb2 [dd [AI VIS]]]]; clear RAI. apply sm_inject_separated_mem in SEPARATED. apply (SEPARATED _ _ _ AI AI'). trivial. rewrite (intern_incr_vis_inv _ _ WD WD' INCR _ _ _ AI VIS') in VIS; discriminate. split. { econstructor; try solve[rewrite restrict_sm_all; eassumption]. { (*eapply match_stacks_inside_set_reg. apply restrict_sm_WD; trivial. *) eapply match_stacks_bound. eapply match_stacks_extcall. 10: eapply MS0. apply restrict_sm_WD; trivial. apply restrict_sm_WD; trivial. intros; eapply external_call_max_perm; eauto. intros; eapply external_call_max_perm; eauto. rewrite restrict_sm_all. apply OUTOFREACH. rewrite restrict_sm_all. apply MINJR. apply restrict_sm_intern_incr; trivial. repeat rewrite restrict_sm_all; trivial. clear - SMV. destruct SMV. split; intros. rewrite restrict_sm_DOM in H1. apply (H _ H1). rewrite restrict_sm_RNG in H1. apply (H0 _ H1). xomega. eapply forward_nextblock. eapply external_call_mem_forward; eassumption. } rewrite restrict_sm_all. apply inject_restrict; assumption. } intuition. eapply meminj_preserves_incr_sep. eapply PG. eassumption. apply intern_incr_as_inj; trivial. apply sm_inject_separated_mem; eassumption. (*globalfunction_ptr_inject ge (as_inj mu')*) red; intros b fb Hb. destruct (GFP _ _ Hb). split; trivial. eapply intern_incr_as_inj; eassumption. assert (FRG: frgnBlocksSrc mu = frgnBlocksSrc mu') by eapply INCR. rewrite <- FRG. apply Glob; assumption. } (* return fron noninlined function *) { inv MS0. (* normal case *) { eexists. eexists. split; simpl. eexists. split; simpl. left. eapply effstep_plus_one. eapply rtl_effstep_exec_return. apply Empty_Effect_implication. exists mu. intuition. apply gsep_refl. loc_alloc_solve. unfold MATCH; intuition. econstructor; eauto. apply match_stacks_inside_set_reg; auto. apply restrict_sm_WD; auto. apply agree_set_reg; auto. } (* untailcall case *) { inv MS; try congruence. rewrite RET in RET0; inv RET0. eexists. eexists. split; simpl. eexists. split. left. eapply effstep_plus_one. eapply rtl_effstep_exec_return. apply Empty_Effect_implication. exists mu. intuition. apply gsep_refl. loc_alloc_solve. unfold MATCH. intuition. eapply match_regular_states; eauto. eapply match_stacks_inside_set_reg; eauto. apply restrict_sm_WD; auto. apply agree_set_reg; auto. apply local_of_restrict_vis; auto. red; intros. destruct (zlt ofs (dstk ctx)). apply PAD; omega. apply PRIV; omega. } } (* return from inlined function *) { inv MS0; try congruence. rewrite RET0 in RET; inv RET. unfold inline_return in AT. assert (PRIV': range_private (as_inj mu) m1' m2 sp' (dstk ctx' + mstk ctx') f'.(fn_stacksize)). assert (restrict_bridge: range_private (as_inj (restrict_sm mu (vis mu))) m1' m2 sp' (dstk ctx' + mstk ctx') (fn_stacksize f')). red; intros. destruct (zlt ofs (dstk ctx)). apply PAD. omega. apply PRIV. omega. red; intros. red in restrict_bridge. apply restrict_bridge in H. eapply loc_privete_restrict; repeat open_Hyp; eauto. destruct or. eexists. 
eexists. split; simpl. eexists. split; simpl. left. eapply effstep_plus_one. eapply rtl_effstep_exec_Iop; eauto. simpl. reflexivity. apply Empty_Effect_implication. exists mu. intuition. apply gsep_refl. loc_alloc_solve. unfold MATCH; intuition. econstructor; eauto. apply match_stacks_inside_set_reg; auto. apply restrict_sm_WD; auto. apply agree_set_reg; auto. (* without a result *) apply local_of_restrict_vis; auto. red; intros. destruct (zlt ofs (dstk ctx)). apply PAD; omega. apply PRIV; omega. eexists. eexists. split; simpl. eexists. split. left. eapply effstep_plus_one. eapply rtl_effstep_exec_Inop; eauto. apply Empty_Effect_implication. exists mu. intuition. eapply intern_incr_globals_separate; eauto. apply sm_locally_allocatedChar. repeat split; extensionality b0; rewrite freshloc_irrefl; intuition. unfold MATCH; intuition. econstructor; eauto. subst vres. apply agree_set_reg_undef'; auto. apply local_of_restrict_vis; auto. red; intros. destruct (zlt ofs (dstk ctx)). apply PAD; omega. apply PRIV; omega. } Qed. (** ** Behold the theorem *) Theorem transl_program_correct: forall (R: list_norepet (map fst (prog_defs SrcProg))) (*entrypoints : list (val * val * signature)*) (*entry_ok : entry_points_ok entrypoints*) (*init_mem: exists m0, Genv.init_mem SrcProg = Some m0*), SM_simulation.SM_simulation_inject (rtl_eff_sem hf) (rtl_eff_sem hf) ge tge. intros. eapply simulations_lemmas.inj_simulation_star with (match_states:= MATCH)(measure:= RTL_measure); eauto with trans_correct. (*Initial Core*) intros; eapply MATCH_initial_core; eauto. (* { (*destruct init_mem as [m0 INIT]. exists m0; split; auto. unfold meminj_preserves_globals in H2. destruct H2 as [A [B C]]. *) assert (P: forall p q, {Ple p q} + {Plt q p}). intros p q. case_eq (Pos.leb p q). intros TRUE. apply Pos.leb_le in TRUE. left; auto. intros FALSE. apply Pos.leb_gt in FALSE. right; auto. cut (forall b, Plt b (Mem.nextblock m0) -> exists id, Genv.find_symbol ge id = Some b). intro D. split. destruct (P (Mem.nextblock m0) (Mem.nextblock m1)); auto. exfalso. destruct (D _ p). apply A in H2. assert (Mem.valid_block m1 (Mem.nextblock m1)). eapply Mem.valid_block_inject_1; eauto. clear - H8; unfold Mem.valid_block in H8. xomega. destruct (P (Mem.nextblock m0) (Mem.nextblock m2)); auto. exfalso. destruct (D _ p). apply A in H2. assert (Mem.valid_block m2 (Mem.nextblock m2)). eapply Mem.valid_block_inject_2; eauto. clear - H8; unfold Mem.valid_block in H8. xomega. intros b LT. unfold ge. apply valid_init_is_global with (b0 := b) in INIT. eapply INIT; auto. apply R. apply LT. }*) {intros. exploit step_simulation_effect; eauto. intros HH; destruct HH as [st2' [m2' [[U2 ?] [mu' ?]]]]. repeat open_Hyp. exists st2', m2', mu'. intuition; exists U2;intuition. } Qed. End PRESERVATION.
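For orientation, the shape established by step_simulation_effect above is the standard measured (star) simulation diagram: each source effect step is matched either by at least one target step, or by no target step together with a strict decrease of RTL_measure. A simplified rendering of ours, writing |s| for RTL_measure s and eliding the injection (mu, mu') and effect-propagation side conditions:

\[
  \mathrm{MATCH}\; s_1\; s_2 \;\wedge\; s_1 \xrightarrow{U_1} s_1'
  \;\Longrightarrow\;
  \exists\, s_2'\, U_2.\;
  \bigl( s_2 \xrightarrow{U_2}{}^{+} s_2'
     \;\vee\; ( |s_1'| < |s_1| \;\wedge\; s_2 \xrightarrow{U_2}{}^{*} s_2' ) \bigr)
  \;\wedge\; \mathrm{MATCH}\; s_1'\; s_2'
\]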
If two predicates are equal, then the sets of all elements satisfying the respective predicates are equal.
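A minimal Lean 4 sketch of this fact (the example statement and the Mathlib import are our own; rw closes the goal by reflexivity after rewriting):

import Mathlib.Data.Set.Basic

-- If two predicates are equal, the sets of elements satisfying them are equal.
example {α : Type*} (P Q : α → Prop) (h : P = Q) :
    {x | P x} = {x | Q x} := by
  rw [h]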
""" refco(phpa, tc, rh, wl) Determine the constants A and B in the atmospheric refraction model dZ = A tan Z + B tan^3 Z. Z is the "observed" zenith distance (i.e. affected by refraction) and dZ is what to add to Z to give the "topocentric" (i.e. in vacuo) zenith distance. ### Given ### - `phpa`: Pressure at the observer (hPa = millibar) - `tc`: Ambient temperature at the observer (deg C) - `rh`: Relative humidity at the observer (range 0-1) - `wl`: Wavelength (micrometers) ### Returned ### - `refa`: tan Z coefficient (radians) - `refb`: tan^3 Z coefficient (radians) ### Notes ### 1. The model balances speed and accuracy to give good results in applications where performance at low altitudes is not paramount. Performance is maintained across a range of conditions, and applies to both optical/IR and radio. 2. The model omits the effects of (i) height above sea level (apart from the reduced pressure itself), (ii) latitude (i.e. the flattening of the Earth), (iii) variations in tropospheric lapse rate and (iv) dispersive effects in the radio. The model was tested using the following range of conditions: lapse rates 0.0055, 0.0065, 0.0075 deg/meter latitudes 0, 25, 50, 75 degrees heights 0, 2500, 5000 meters ASL pressures mean for height -10% to +5% in steps of 5% temperatures -10 deg to +20 deg with respect to 280 deg at SL relative humidity 0, 0.5, 1 wavelengths 0.4, 0.6, ... 2 micron, + radio zenith distances 15, 45, 75 degrees The accuracy with respect to raytracing through a model atmosphere was as follows: worst RMS optical/IR 62 mas 8 mas radio 319 mas 49 mas For this particular set of conditions: lapse rate 0.0065 K/meter latitude 50 degrees sea level pressure 1005 mb temperature 280.15 K humidity 80% wavelength 5740 Angstroms the results were as follows: ZD raytrace eraRefco Saastamoinen 10 10.27 10.27 10.27 20 21.19 21.20 21.19 30 33.61 33.61 33.60 40 48.82 48.83 48.81 45 58.16 58.18 58.16 50 69.28 69.30 69.27 55 82.97 82.99 82.95 60 100.51 100.54 100.50 65 124.23 124.26 124.20 70 158.63 158.68 158.61 72 177.32 177.37 177.31 74 200.35 200.38 200.32 76 229.45 229.43 229.42 78 267.44 267.29 267.41 80 319.13 318.55 319.10 deg arcsec arcsec arcsec The values for Saastamoinen's formula (which includes terms up to tan^5) are taken from Hohenkerk and Sinclair (1985). 3. A wl value in the range 0-100 selects the optical/IR case and is wavelength in micrometers. Any value outside this range selects the radio case. 4. Outlandish input parameters are silently limited to mathematically safe values. Zero pressure is permissible, and causes zeroes to be returned. 5. The algorithm draws on several sources, as follows: a) The formula for the saturation vapour pressure of water as a function of temperature and temperature is taken from Equations (A4.5-A4.7) of Gill (1982). b) The formula for the water vapour pressure, given the saturation pressure and the relative humidity, is from Crane (1976), Equation (2.5.5). c) The refractivity of air is a function of temperature, total pressure, water-vapour pressure and, in the case of optical/IR, wavelength. The formulae for the two cases are developed from Hohenkerk & Sinclair (1985) and Rueger (2002). d) The formula for beta, the ratio of the scale height of the atmosphere to the geocentric distance of the observer, is an adaption of Equation (9) from Stone (1996). The adaptations, arrived at empirically, consist of (i) a small adjustment to the coefficient and (ii) a humidity term for the radio case only. 
e) The formulae for the refraction constants as a function of n-1 and beta are from Green (1987), Equation (4.31). ### References ### - Crane, R.K., Meeks, M.L. (ed), "Refraction Effects in the Neutral Atmosphere", Methods of Experimental Physics: Astrophysics 12B, Academic Press, 1976. - Gill, Adrian E., "Atmosphere-Ocean Dynamics", Academic Press, 1982. - Green, R.M., "Spherical Astronomy", Cambridge University Press, 1987. - Hohenkerk, C.Y., & Sinclair, A.T., NAO Technical Note No. 63, 1985. - Rueger, J.M., "Refractive Index Formulae for Electronic Distance Measurement with Radio and Millimetre Waves", in Unisurv Report S-68, School of Surveying and Spatial Information Systems, University of New South Wales, Sydney, Australia, 2002. - Stone, Ronald C., P.A.S.P. 108, 1051-1058, 1996. """ function refco(phpa, tk, rh, wl) refa = Ref(0.0) refb = Ref(0.0) ccall((:eraRefco, liberfa), Cvoid, (Cdouble, Cdouble, Cdouble, Cdouble, Ref{Cdouble}, Ref{Cdouble}), phpa, tk, rh, wl, refa, refb) refa[], refb[] end """ rm2v(r) Express an r-matrix as an r-vector. ### Given ### - `r`: Rotation matrix ### Returned ### - `w`: Rotation vector (Note 1) ### Notes ### 1. A rotation matrix describes a rotation through some angle about some arbitrary axis called the Euler axis. The "rotation vector" returned by this function has the same direction as the Euler axis, and its magnitude is the angle in radians. (The magnitude and direction can be separated by means of the function eraPn.) 2. If r is null, so is the result. If r is not a rotation matrix the result is undefined; r must be proper (i.e. have a positive determinant) and real orthogonal (inverse = transpose). 3. The reference frame rotates clockwise as seen looking along the rotation vector from the origin. """ function rm2v(r) w = zeros(3) ccall((:eraRm2v, liberfa), Cvoid, (Ptr{Cdouble}, Ptr{Cdouble}), r, w) w end """ rv2m(w) Form the r-matrix corresponding to a given r-vector. ### Given ### - `w`: Rotation vector (Note 1) ### Returned ### - `r`: Rotation matrix ### Notes ### 1. A rotation matrix describes a rotation through some angle about some arbitrary axis called the Euler axis. The "rotation vector" supplied to This function has the same direction as the Euler axis, and its magnitude is the angle in radians. 2. If w is null, the unit matrix is returned. 3. The reference frame rotates clockwise as seen looking along the rotation vector from the origin. """ function rv2m(w) r = zeros((3, 3)) ccall((:eraRv2m, liberfa), Cvoid, (Ptr{Cdouble}, Ptr{Cdouble}), w, r) r end """ rxr(a, b) Multiply two r-matrices. ### Given ### - `a`: First r-matrix - `b`: Second r-matrix ### Returned ### - `atb`: A * b ### Note ### It is permissible to re-use the same array for any of the arguments. ### Called ### - `eraCr`: copy r-matrix """ function rxr(a, b) atb = zeros((3, 3)) ccall((:eraRxr, liberfa), Cvoid, (Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}), a, b, atb) atb end """ rx(phi, r) Rotate an r-matrix about the x-axis. ### Given ### - `phi`: Angle (radians) ### Given and returned ### - `r`: r-matrix, rotated ### Notes ### 1. Calling this function with positive phi incorporates in the supplied r-matrix r an additional rotation, about the x-axis, anticlockwise as seen looking towards the origin from positive x. 2. The additional rotation can be represented by this matrix: ( 1 0 0 ) ( ) ( 0 + cos(phi) + sin(phi) ) ( ) ( 0 - sin(phi) + cos(phi) ) """ rx """ ry(phi, r) Rotate an r-matrix about the y-axis. 
### Given ### - `theta`: Angle (radians) ### Given and returned ### - `r`: r-matrix, rotated ### Notes ### 1. Calling this function with positive theta incorporates in the supplied r-matrix r an additional rotation, about the y-axis, anticlockwise as seen looking towards the origin from positive y. 2. The additional rotation can be represented by this matrix: ( + cos(theta) 0 - sin(theta) ) ( ) ( 0 1 0 ) ( ) ( + sin(theta) 0 + cos(theta) ) """ ry """ rz(phi, r) Rotate an r-matrix about the z-axis. ### Given ### - `psi`: Angle (radians) ### Given and returned ### - `r`: r-matrix, rotated ### Notes ### 1. Calling this function with positive psi incorporates in the supplied r-matrix r an additional rotation, about the z-axis, anticlockwise as seen looking towards the origin from positive z. 2. The additional rotation can be represented by this matrix: ( + cos(psi) + sin(psi) 0 ) ( ) ( - sin(psi) + cos(psi) 0 ) ( ) ( 0 0 1 ) """ rz for name in ("rx", "ry", "rz") f = Symbol(name) fc = "era" * uppercasefirst(name) @eval begin function ($f)(a, r) ccall(($fc, liberfa), Cvoid, (Cdouble, Ptr{Cdouble}), a, r) r end end end """ rxpv(r, pv) Multiply a pv-vector by an r-matrix. ### Given ### - `r`: R-matrix - `pv`: Pv-vector ### Returned ### - `rpv`: R * pv ### Note ### It is permissible for pv and rpv to be the same array. ### Called ### - `eraRxp`: product of r-matrix and p-vector """ function rxpv(r, p) rp = zeros((2, 3)) ccall((:eraRxpv, liberfa), Cvoid, (Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}), r, p, rp) rp end """ rxp(r, p) Multiply a p-vector by an r-matrix. ### Given ### - `r`: R-matrix - `p`: P-vector ### Returned ### - `rp`: R * p ### Note ### It is permissible for p and rp to be the same array. ### Called ### - `eraCp`: copy p-vector """ function rxp(r, p) rp = zeros(3) ccall((:eraRxp, liberfa), Cvoid, (Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}), r, p, rp) rp end
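A hedged usage sketch for the wrappers above. The inputs reproduce the test conditions quoted in the refco docstring (sea-level pressure 1005 hPa, temperature 280.15 K ≈ 7 °C, 80 % relative humidity, wavelength 5740 Å = 0.574 µm); the variable names are ours, and note that the docstring documents the second argument in degrees Celsius despite the parameter name tk:

# Refraction constants A and B for the docstring's reference conditions.
refa, refb = refco(1005.0, 7.0, 0.8, 0.574)

# dZ = A tan Z + B tan^3 Z at an observed zenith distance of 45 degrees.
z = deg2rad(45.0)
dz = refa * tan(z) + refb * tan(z)^3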
using Pochoir using Test @testset "Pochoir.jl" begin # Write your tests here. end
[STATEMENT] lemma pref_comp_not_spref: "u \<bowtie> v \<Longrightarrow> \<not> u <p v \<Longrightarrow> v \<le>p u" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>u \<bowtie> v; \<not> u <p v\<rbrakk> \<Longrightarrow> v \<le>p u [PROOF STEP] using contrapos_np[OF _ pref_comp_not_pref] [PROOF STATE] proof (prove) using this: \<lbrakk>\<not> ?u1 <p ?v1; \<not> ?P \<Longrightarrow> ?u1 \<bowtie> ?v1; \<not> ?P \<Longrightarrow> \<not> ?v1 \<le>p ?u1\<rbrakk> \<Longrightarrow> ?P goal (1 subgoal): 1. \<lbrakk>u \<bowtie> v; \<not> u <p v\<rbrakk> \<Longrightarrow> v \<le>p u [PROOF STEP] .
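In conventional notation (our reading of the symbols: ≤p is the prefix order, <p its strict version, and ⋈ prefix-comparability), the lemma is the remaining branch of the comparability case split:

\[
  u \bowtie v \;\equiv\; u \le_p v \;\vee\; v \le_p u,
  \qquad
  u \bowtie v \;\wedge\; \neg\,(u <_p v) \;\Longrightarrow\; v \le_p u .
\]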
import data.set.basic
import data.analysis.filter

/-
# Sequences

In this file, we shall try to build some of the theory of sequences.
The aim is to get to eventually constant sequences in maximum generality.
-/

namespace seq

-- To us, given a space `α`, a sequence in `α` is a sequence of elements of `α`,
-- say a_0, a_1, ..., a_n, ... In other words, what we are looking for is a function f : ℕ → α
-- No need to define a sequence separately

-- A constant sequence has all its values the same - that is, for some constant c in `α`,
-- a_i = c for all i

variable {α : Type*} -- `α` is an implicit variable, explicit variables are given by (), and implicit variables are given by {} or []

--def lambda {α β : Type*} : α → β := λ a, _
/-- The constant sequence. -/
def const_seq (c : α) : ℕ → α := λ n, c

#check const_seq (1 : ℤ) 0 -- here Lean is able to identify that the `α` I want to use is indeed ℤ

-- An eventually constant sequence is one that takes the same value after
-- a while, that is, given a constant c, ∃ N : ℕ, ∀ i ≥ N, a_i = c.

-- Let us now try to formalize what eventually constant sequences are.
def eventually_constant_seq_1 := { f : ℕ → α | ∃ (c : α) (N : ℕ), ∀ i ≥ N, f i = c }

-- example (0 : eventually_constant_seq_1) :
-- If I have a set of the form s := {x : β | p x} - the set of all x in β such that x satisfies the property p - then, if I choose a term y of type s, y.1 is of type β, and y.2 is a proof that y satisfies the property p.
-- This gives me a set of eventually constant functions on α. Any eventually
-- constant function is a term of this type.

#print eventually_constant_seq_1

-- First create a condition for a sequence to be eventually constant.
def is_eventually_constant (f : ℕ → α) : Prop :=
set.nonempty { n | ∀ m, n ≤ m → f (nat.succ m) = f m }
-- could have chosen f m = f n as well, which def is better?

def is_eventually_constant' (f : ℕ → α) : Prop :=
set.nonempty { n | ∀ m, n ≤ m → f m = f n }

structure eventually_const_seq :=
(to_seq : ℕ → α) -- the sequence
(is_ec : is_eventually_constant to_seq) -- proof that the seq is e.c.

-- Define the notion of a limit for an eventually constant sequence

/-- The limit is the value `c` of the e.c. seq, that is, the value the sequence takes at N and at every index bigger than N. -/
noncomputable def sequence_limit' (a : @eventually_constant_seq_1 α) : α := classical.some a.2
-- a.2 says : ∃ (c : α) (N : ℕ), ∀ i ≥ N, a.1 i = c,
-- and classical.some a.2 gives a value c for which, for some N, ∀ i ≥ N, a.1 i = c.

/-- The minimum N such that a_N = c (use Inf) OR any n such that a_n = c -/
def sequence_limit_index (a : @eventually_constant_seq_1 α) : ℕ := sorry

/-- For all m ≥ index (using the Inf definition), a_m = limit = c. -/
lemma sequence_limit_eq (a : @eventually_constant_seq_1 α) (m : ℕ) (hm : sequence_limit_index a ≤ m) :
  sequence_limit' a = a.1 m := sorry

end seq
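A hedged companion example (Lean 3, matching the file's mathlib conventions; names are qualified with seq. because it sits outside the namespace): the constant sequence is eventually constant, with witness N = 0, since c = c holds at every index.

-- Every constant sequence is eventually constant.
example {α : Type*} (c : α) : seq.is_eventually_constant (seq.const_seq c) :=
⟨0, λ m _, rfl⟩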
(* This code is copyrighted by its authors; it is distributed under *) (* the terms of the LGPL license (see LICENSE and description files) *) (* Contribution to the Coq Library V6.3 (July 1999) *) (****************************************************************************) (* *) (* *) (* Solange Coupet-Grimal & Line Jakubiec *) (* *) (* *) (* Laboratoire d'Informatique de Marseille *) (* CMI-Technopole de Chateau-Gombert *) (* 39, Rue F. Joliot Curie *) (* 13453 MARSEILLE Cedex 13 *) (* e-mail:{Solange.Coupet,Line.Jakubiec}@lim.univ-mrs.fr *) (* *) (* *) (* Coq V5.10 *) (* May 30th 1996 *) (* *) (****************************************************************************) (* Comp_Synth .v *) (****************************************************************************) Require Import Factorization_Synth. Require Import Comparator_Relation. Parameter BASE : BT. Definition b := base BASE. Definition Num := num BASE. Definition Val_bound := val_bound BASE. Lemma Comparator : forall (n : nat) (o : order) (X Y : Num n), {o' : order | R (exp b n) o (Val_bound n X) (Val_bound n Y) o'}. intros n o X Y. unfold R in |- *; unfold b in |- *; unfold Val_bound in |- *. apply factorization_for_synthesis. exact is_factorizable. exact (is_proper BASE). Defined. (*************************************************************************************)
module Data.String.Iterator import Control.Monad.Identity import public Data.List.Lazy %default total -- Backend-dependent string iteration type, -- parameterised by the string that it iterates over. -- -- Beware: the index is checked only up to definitional equality. -- In theory, you could run `decEq` on two strings -- with the same content but allocated in different memory locations -- and use the obtained Refl to coerce iterators between them. -- -- The strictly correct solution is to make the iterators independent -- from the exact memory location of the string given to `uncons`. -- (For example, byte offsets satisfy this requirement.) export data StringIterator : String -> Type where [external] -- This function is private -- to avoid subverting the linearity guarantees of withString. %foreign "scheme:blodwen-string-iterator-new" "RefC:stringIteratorNew" "javascript:stringIterator:new" private fromString : (str : String) -> StringIterator str -- This function uses a linear string iterator -- so that backends can use mutating iterators. export withString : (str : String) -> ((1 it : StringIterator str) -> a) -> a withString str f = f (fromString str) ||| Runs the action `f` on the slice `res` of the original string `str` represented by the ||| iterator `it` %foreign "scheme:blodwen-string-iterator-to-string" "RefC:stringIteratorToString" "javascript:stringIterator:toString" export withIteratorString : (str : String) -> (1 it : StringIterator str) -> (f : (res : String) -> a) -> a -- We use a custom data type instead of Maybe (Char, StringIterator) -- to remove one level of pointer indirection -- in every iteration of something that's likely to be a hot loop, -- and to avoid one allocation per character. -- -- The Char field of Character is unrestricted for flexibility. public export data UnconsResult : String -> Type where EOF : UnconsResult str Character : (c : Char) -> (1 it : StringIterator str) -> UnconsResult str -- We pass the whole string to the uncons function -- to avoid yet another allocation per character -- because for many backends, StringIterator can be simply an integer -- (e.g. byte offset into an UTF-8 string). %foreign "scheme:blodwen-string-iterator-next" "RefC:stringIteratorNext" "javascript:stringIterator:next" export uncons : (str : String) -> (1 it : StringIterator str) -> UnconsResult str export foldl : (accTy -> Char -> accTy) -> accTy -> String -> accTy foldl op acc str = withString str (loop acc) where loop : accTy -> (1 it : StringIterator str) -> accTy loop acc it = case uncons str it of EOF => acc Character c it' => loop (acc `op` c) (assert_smaller it it') export unpack : String -> LazyList Char unpack str = runIdentity $ withString str unpack' where -- This is a Functor instance of Identity, but linear in second argument %inline mapId : forall a, b. (a -> b) -> (1 x : Identity a) -> Identity b mapId f (Id x) = Id (f x) unpack' : (1 it : StringIterator str) -> Identity (Lazy (LazyList Char)) unpack' it = case uncons str it of EOF => pure [] Character c it' => mapId (c ::) (unpack' $ assert_smaller it it')
#include "IMedia.h" #include <boost/filesystem.hpp> #include "Media/mediaFactory.h" namespace Limitless { std::shared_ptr<IMedia> requestLoad(std::string name, std::string location) { SharedMedia media; boost::filesystem::path filePath(location); if(!boost::filesystem::exists(filePath)) return media; std::string extension=filePath.extension().string().substr(1); std::vector<std::string> extHandlers=MediaFactory::instance().handlesExtension(extension); if(extHandlers.empty()) return media; media=MediaFactory::instance().create(extHandlers[0]); media->load(filePath.string()); return media; } }
{-# OPTIONS --cubical --no-import-sorts --safe #-} module Cubical.Algebra.Monoid.Submonoid where open import Cubical.Core.Everything open import Cubical.Foundations.Prelude open import Cubical.Foundations.HLevels open import Cubical.Data.Sigma open import Cubical.Algebra open import Cubical.Algebra.Monoid.Morphism open import Cubical.Algebra.Semigroup.Subsemigroup open import Cubical.Relation.Unary open import Cubical.Relation.Unary.Subtype open import Cubical.HITs.PropositionalTruncation record IsSubmonoid {c ℓ} (M : Monoid c) (Member : Pred ⟨ M ⟩ ℓ) : Type (ℓ-max c ℓ) where constructor issubmonoid private module M = Monoid M field preservesOp : M._•_ Preserves₂ Member preservesId : M.ε ∈ Member isSubsemigroup : IsSubsemigroup M.semigroup Member isSubsemigroup = record { closed = preservesOp } open IsSubsemigroup isSubsemigroup hiding (closed; _^_) public ε : Carrier ε = M.ε , preservesId identityˡ : LeftIdentity ε _•_ identityˡ _ = ΣPathTransport→PathΣ _ _ (M.identityˡ _ , isProp[ Member ] _ _ _) identityʳ : RightIdentity ε _•_ identityʳ _ = ΣPathTransport→PathΣ _ _ (M.identityʳ _ , isProp[ Member ] _ _ _) identity : Identity ε _•_ identity = identityˡ , identityʳ isMonoid : IsMonoid Carrier _•_ ε isMonoid = record { isSemigroup = isSemigroup ; identity = identity } monoid : Monoid _ monoid = record { isMonoid = isMonoid } open Monoid monoid using (ε-uniqueˡ; ε-uniqueʳ; _^_) public record Submonoid {c} (M : Monoid c) ℓ : Type (ℓ-max c (ℓ-suc ℓ)) where constructor mksubmonoid private module M = Monoid M field Member : Pred ⟨ M ⟩ ℓ isSubmonoid : IsSubmonoid M Member open IsSubmonoid isSubmonoid public subsemigroup : Subsemigroup M.semigroup ℓ subsemigroup = record { isSubsemigroup = isSubsemigroup } open Subsemigroup subsemigroup public using (submagma) instance SubmonoidCarrier : ∀ {c ℓ} {M : Monoid c} → HasCarrier (Submonoid M ℓ) _ SubmonoidCarrier = record { ⟨_⟩ = Submonoid.Carrier } module _ {ℓ} (M : Monoid ℓ) where open Monoid M ε-isSubmonoid : IsSubmonoid M { ε } ε-isSubmonoid = record { preservesOp = map2 λ p q → cong₂ _•_ p q ∙ identityʳ ε ; preservesId = ∣ refl ∣ } ε-submonoid : Submonoid M _ ε-submonoid = record { isSubmonoid = ε-isSubmonoid } U-isSubmonoid : IsSubmonoid M U U-isSubmonoid = record {} -- trivial U-submonoid : Submonoid M _ U-submonoid = record { isSubmonoid = U-isSubmonoid }
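In conventional notation, the two fields preservesId and preservesOp of IsSubmonoid say precisely that the carrier predicate contains the unit and is closed under the operation (our rendering, writing S for Member):

\[
  \varepsilon \in S
  \qquad\text{and}\qquad
  x \in S \;\to\; y \in S \;\to\; x \bullet y \in S .
\]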
Occasionally on the wiki you may come across pages with differently colored tables. Anybody can change their tables with a little practice. ASUCD Budget subpages ASUCD Elections Reform (woo white text) wiki:Dixon:BigNugg BBQ Co Boy Scouts Cobalt Salon & Gallery Elite Training Studios The Greenhouse Users/JenKao Lasting Impressions Hair Design lemonn lime photography tea list Users/TusharRawat Pinkberry The Pink House The Wardrobe wiki:Sacramento:Yogurtagogo
(* Title: HOL/TLA/Stfun.thy Author: Stephan Merz Copyright: 1998 University of Munich *) section {* States and state functions for TLA as an "intensional" logic *} theory Stfun imports Intensional begin typedecl state instance state :: world .. type_synonym 'a stfun = "state => 'a" type_synonym stpred = "bool stfun" consts (* Formalizing type "state" would require formulas to be tagged with their underlying state space and would result in a system that is much harder to use. (Unlike Hoare logic or Unity, TLA has quantification over state variables, and therefore one usually works with different state spaces within a single specification.) Instead, "state" is just an anonymous type whose only purpose is to provide "Skolem" constants. Moreover, we do not define a type of state variables separate from that of arbitrary state functions, again in order to simplify the definition of flexible quantification later on. Nevertheless, we need to distinguish state variables, mainly to define the enabledness of actions. The user identifies (tuples of) "base" state variables in a specification via the "meta predicate" basevars, which is defined here. *) stvars :: "'a stfun => bool" syntax "_PRED" :: "lift => 'a" ("PRED _") "_stvars" :: "lift => bool" ("basevars _") translations "PRED P" => "(P::state => _)" "_stvars" == "CONST stvars" defs (* Base variables may be assigned arbitrary (type-correct) values. Note that vs may be a tuple of variables. The correct identification of base variables is up to the user who must take care not to introduce an inconsistency. For example, "basevars (x,x)" would definitely be inconsistent. *) basevars_def: "stvars vs == range vs = UNIV" lemma basevars: "!!vs. basevars vs ==> EX u. vs u = c" apply (unfold basevars_def) apply (rule_tac b = c and f = vs in rangeE) apply auto done lemma base_pair1: "!!x y. basevars (x,y) ==> basevars x" apply (simp (no_asm) add: basevars_def) apply (rule equalityI) apply (rule subset_UNIV) apply (rule subsetI) apply (drule_tac c = "(xa, arbitrary) " in basevars) apply auto done lemma base_pair2: "!!x y. basevars (x,y) ==> basevars y" apply (simp (no_asm) add: basevars_def) apply (rule equalityI) apply (rule subset_UNIV) apply (rule subsetI) apply (drule_tac c = "(arbitrary, xa) " in basevars) apply auto done lemma base_pair: "!!x y. basevars (x,y) ==> basevars x & basevars y" apply (rule conjI) apply (erule base_pair1) apply (erule base_pair2) done (* Since the unit type has just one value, any state function can be regarded as "base". The following axiom can sometimes be useful because it gives a trivial solution for "basevars" premises. *) lemma unit_base: "basevars (v::unit stfun)" apply (unfold basevars_def) apply auto done lemma baseE: "[| basevars v; !!x. v x = c ==> Q |] ==> Q" apply (erule basevars [THEN exE]) apply blast done (* ------------------------------------------------------------------------------- The following shows that there should not be duplicates in a "stvars" tuple: *) lemma "!!v. basevars (v::bool stfun, v) ==> False" apply (erule baseE) apply (subgoal_tac "(LIFT (v,v)) x = (True, False)") prefer 2 apply assumption apply simp done end
(* Copyright (C) 2017 M.A.L. Marques This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. *) (* type: lda_exc *) a0 := 0.93222*RS_FACTOR: kk := 9.47362e-3*RS_FACTOR: f := (rs, zeta) -> -a0*(1 - kk*log(1 + rs/kk)/rs)/rs:
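Rendered as a formula, the Maple definition above computes (our notation; a_0 and k are the code's a0 and kk, both carrying the RS_FACTOR scaling, and the zeta argument is unused):

\[
  f(r_s) \;=\; -\,\frac{a_0}{r_s}\left( 1 \;-\; \frac{k}{r_s}\,
      \ln\!\left( 1 + \frac{r_s}{k} \right) \right).
\]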
Theorem pair_eq : forall (A B: Type)(a a': A)(b b': B),
  (a, b) = (a', b') <-> a = a' /\ b = b'.
Proof.
  unfold iff. intros. apply conj.
  - intros. injection H. intros. apply conj. apply H1. apply H0.
  - intros. apply proj1 in H as H1. apply proj2 in H as H2.
    rewrite H1. rewrite H2. reflexivity.
Qed.

Definition Relation (A B : Type) : Type := (prod A B -> Prop).

Axiom Relation_eq : forall {A B} (R S : Relation A B),
  (forall (a:A)(b:B), R(a, b) <-> S(a, b)) <-> R = S.

Definition dom {A B}(R : Relation A B) : (A -> Prop) :=
  (fun x => (exists y:B, R(x, y))).

Definition ran {A B}(R : Relation A B) : (B -> Prop) :=
  (fun y => (exists x:A, R(x, y))).

Definition image {A B}(R : Relation A B) : ((A -> Prop) -> (B -> Prop)) :=
  (fun A_ => (fun y => (exists x:A, A_ x /\ R(x, y)))).

Definition inv_image {A B}(R : Relation A B) : ((B -> Prop) -> (A -> Prop)) :=
  (fun B_ => (fun x => (exists y:B, B_ y /\ R(x, y)))).

Definition inv {A B}(R : Relation A B) : Relation B A :=
  (fun p => (match p with | (b,a) => R(a, b) end)).

Definition composition {A B C} (R : Relation A B) (S : Relation B C) : Relation A C :=
  (fun p => (match p with | (a, c) => exists b, R(a, b) /\ S(b, c) end)).

Definition symmetric {A} (R : Relation A A) : Prop := forall (a b : A), R(a, b) -> R(b, a).

Definition antisymmetric {A} (R : Relation A A) : Prop := forall (a b : A), R(a, b) -> R(b, a) -> a = b.

Definition asymmetric {A} (R: Relation A A) : Prop := forall (a b : A), R(a, b) -> not (R(b, a)).

Definition transitive {A} (R: Relation A A) : Prop := forall (a b c : A), R(a, b) -> R(b, c) -> R(a, c).

Definition reflexive {A} (R : Relation A A) : Prop := forall (a : A), R(a, a).

Definition injective {A B} (R : Relation A B) : Prop := forall (a1 a2 : A)(b : B), R(a1, b) -> R(a2, b) -> a1 = a2.

Definition functional {A B} (R : Relation A B) : Prop := forall (a : A)(b1 b2 : B), R(a, b1) -> R(a, b2) -> b1 = b2.

Definition one_to_one {A B} (R : Relation A B) : Prop := injective R /\ functional R.

Definition left_total {A B} (R : Relation A B) : Prop := forall (a : A), exists (b : B), R(a, b).

Definition surjective {A B} (R : Relation A B) : Prop := forall (b : B), exists (a : A), R(a, b).

Definition equiv {A} (R : Relation A A) : Prop := reflexive R /\ transitive R /\ symmetric R.
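A reading aid for composition above (the semicolon notation is ours): it is the diagrammatic composite, first R then S, i.e. S ∘ R in function-composition order:

\[
  (R \mathbin{;} S)(a, c) \;\Longleftrightarrow\; \exists\, b.\; R(a, b) \,\wedge\, S(b, c) .
\]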
In February 2010, it was reported that Jordan was seeking majority ownership of the Bobcats. As February wore on, it emerged that the leading contenders for the team were Jordan and former Houston Rockets president George <unk>. On February 27, the Bobcats announced that Johnson had reached an agreement with Jordan and his group, MJ Basketball Holdings, to buy the team pending NBA approval. On March 17, the NBA Board of Governors unanimously approved Jordan's purchase, making him the first former player ever to become the majority owner of an NBA team. It also made him the league's only African-American majority owner.
(* Title: HOL/Library/Product_Plus.thy Author: Brian Huffman *) section \<open>Additive group operations on product types\<close> theory Product_Plus imports MainRLT begin subsection \<open>Operations\<close> instantiation prod :: (zero, zero) zero begin definition zero_prod_def: "0 = (0, 0)" instance .. end instantiation prod :: (plus, plus) plus begin definition plus_prod_def: "x + y = (fst x + fst y, snd x + snd y)" instance .. end instantiation prod :: (minus, minus) minus begin definition minus_prod_def: "x - y = (fst x - fst y, snd x - snd y)" instance .. end instantiation prod :: (uminus, uminus) uminus begin definition uminus_prod_def: "- x = (- fst x, - snd x)" instance .. end lemma fst_zero [simp]: "fst 0 = 0" unfolding zero_prod_def by simp lemma snd_zero [simp]: "snd 0 = 0" unfolding zero_prod_def by simp lemma fst_add [simp]: "fst (x + y) = fst x + fst y" unfolding plus_prod_def by simp lemma snd_add [simp]: "snd (x + y) = snd x + snd y" unfolding plus_prod_def by simp lemma fst_diff [simp]: "fst (x - y) = fst x - fst y" unfolding minus_prod_def by simp lemma snd_diff [simp]: "snd (x - y) = snd x - snd y" unfolding minus_prod_def by simp lemma fst_uminus [simp]: "fst (- x) = - fst x" unfolding uminus_prod_def by simp lemma snd_uminus [simp]: "snd (- x) = - snd x" unfolding uminus_prod_def by simp lemma add_Pair [simp]: "(a, b) + (c, d) = (a + c, b + d)" unfolding plus_prod_def by simp lemma diff_Pair [simp]: "(a, b) - (c, d) = (a - c, b - d)" unfolding minus_prod_def by simp lemma uminus_Pair [simp, code]: "- (a, b) = (- a, - b)" unfolding uminus_prod_def by simp subsection \<open>Class instances\<close> instance prod :: (semigroup_add, semigroup_add) semigroup_add by standard (simp add: prod_eq_iff add.assoc) instance prod :: (ab_semigroup_add, ab_semigroup_add) ab_semigroup_add by standard (simp add: prod_eq_iff add.commute) instance prod :: (monoid_add, monoid_add) monoid_add by standard (simp_all add: prod_eq_iff) instance prod :: (comm_monoid_add, comm_monoid_add) comm_monoid_add by standard (simp add: prod_eq_iff) instance prod :: (cancel_semigroup_add, cancel_semigroup_add) cancel_semigroup_add by standard (simp_all add: prod_eq_iff) instance prod :: (cancel_ab_semigroup_add, cancel_ab_semigroup_add) cancel_ab_semigroup_add by standard (simp_all add: prod_eq_iff diff_diff_eq) instance prod :: (cancel_comm_monoid_add, cancel_comm_monoid_add) cancel_comm_monoid_add .. instance prod :: (group_add, group_add) group_add by standard (simp_all add: prod_eq_iff) instance prod :: (ab_group_add, ab_group_add) ab_group_add by standard (simp_all add: prod_eq_iff) lemma fst_sum: "fst (\<Sum>x\<in>A. f x) = (\<Sum>x\<in>A. fst (f x))" proof (cases "finite A") case True then show ?thesis by induct simp_all next case False then show ?thesis by simp qed lemma snd_sum: "snd (\<Sum>x\<in>A. f x) = (\<Sum>x\<in>A. snd (f x))" proof (cases "finite A") case True then show ?thesis by induct simp_all next case False then show ?thesis by simp qed lemma sum_prod: "(\<Sum>x\<in>A. (f x, g x)) = (\<Sum>x\<in>A. f x, \<Sum>x\<in>A. g x)" proof (cases "finite A") case True then show ?thesis by induct (simp_all add: zero_prod_def) next case False then show ?thesis by (simp add: zero_prod_def) qed end
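(* Hedged usage sketch (hypothetical; it would belong before the closing "end" of the theory): with the component-wise simp rules above, arithmetic on concrete pairs is automatic. *) lemma "((1, 2) + (3, 4)) - (0, 1) = ((4::int), (5::int))" by simp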
theory Chunkval_Interface imports Sep_Logic Chunkvals begin (* to satisfy Mem.valid_access, we demand alignment of chunkvals to their respective sizes note that this is actually more restrictive than align_chunk in the Mfloat64 and Many64 cases! *) fun align_chunkval_nat :: "chunkval \<Rightarrow> nat" where "align_chunkval_nat (CVword8 _) = 1" | "align_chunkval_nat (CVword16 _) = 2" | "align_chunkval_nat (CVword32 _) = 4" | "align_chunkval_nat (CVword64 _) = 8" | "align_chunkval_nat (CVfragments _ q) = size_quantity_nat q" | "align_chunkval_nat (CVundef l) = nat_of_num l" lemma align_chunkval_encode[simp]: "align_chunkval_nat (encode_chunkval chunk v) = size_chunk_nat chunk" apply (cases chunk; cases v) by (auto simp add: encode_chunkval.simps num_of_nat_inverse size_chunk_nat.simps) lemma chunk_fits_encoding_aligned: fixes ofs :: int assumes fits: "chunk_fits_chunkval chunk cv" assumes "align_chunkval_nat cv dvd ofs" shows "align_chunkval_nat (encode_chunkval chunk v) dvd ofs" apply (rule chunk_fits_chunkval.cases[OF fits]) using assms by (auto simp: size_chunkval_nat.simps align_chunk.simps align_chunkval_nat.simps size_chunk_nat.simps) lemma chunk_fits_aligns_agree: fixes ofs :: int assumes "chunk_fits_chunkval chunk cv" assumes "align_chunkval_nat cv dvd ofs" shows "align_chunk chunk dvd ofs" apply (rule chunk_fits_chunkval.cases[OF assms(1)]) using assms(2) apply (auto simp: align_chunk.simps) apply (cases chunk) by (auto simp: align_chunk.simps size_chunk_nat.simps) section \<open>Chunkval Assertion\<close> definition "chunkval_at \<equiv> \<lambda>(b, ofs) cv. mem.val_range (b, ofs) (chunkval_to_memvals cv) ** \<up>(int (align_chunkval_nat cv) dvd ofs)" lemma dvd_trumphs: "(4 dvd ofs \<and> 8 dvd ofs) \<longleftrightarrow> 8 dvd ofs" for ofs :: int by (meson cong_exp_iff_simps(2) dvd_trans even_numeral mod_eq_0_iff_dvd) lemma chunkval_as_memchunk: assumes "chunk_fits_chunkval chunk cv" shows "chunkval_at (b, ofs) cv = (mem.mem_chunk chunk (b, ofs) (chunkval_to_memvals cv) ** \<up>(int (align_chunkval_nat cv) dvd ofs))" unfolding chunkval_at_def mem.mem_chunk_def apply (cases "int (align_chunkval_nat cv) dvd ofs") apply (drule chunk_fits_aligns_agree[OF assms(1)]) using chunk_fits_chunkval_size assms by (auto simp add: sep_algebra_simps) fun size_chunkvals where "size_chunkvals [] = 0" | "size_chunkvals (cv # cvs) = size_chunkval_nat cv + size_chunkvals cvs" lemma size_chunkvals_append: "size_chunkvals (cvs @ [cv]) = size_chunkval_nat cv + size_chunkvals cvs" apply (induction cvs) by auto lemma size_chunkvals_replicate[simp]: "size_chunkvals (replicate l cv) = size_chunkval_nat cv * l" apply (induct l) by auto lemma size_chunkvals_some[iff]: assumes "cvs \<noteq> []" shows "0 < size_chunkvals cvs" apply (cases cvs) using assms by auto lemma size_chunkvals_some_word[iff]: assumes "cvs \<noteq> []" assumes "int (size_chunkvals cvs) < arch_max_size" shows "0 < (word_of_nat (size_chunkvals cvs) :: ptrsize word)" apply (rule ccontr) apply (simp add: word_of_nat_less_iff[of 0, simplified]) using assms unfolding arch_max_size_def using size_chunkvals_some by (metis Word_64.word_bits_conv Word_64.word_bits_len_of bot_nat_0.extremum_strict of_nat_less_imp_less real_of_nat_eq_numeral_power_cancel_iff take_bit_nat_eq_self_iff) definition "chunkvals_to_memvals cvs \<equiv> concat (map chunkval_to_memvals cvs)" lemma chunkvals_to_memvals0[simp]: "chunkvals_to_memvals [] = []" unfolding chunkvals_to_memvals_def by simp lemma length_chunkvals_to_memvals[simp]: "length (chunkvals_to_memvals cvs) = 
size_chunkvals cvs" unfolding chunkvals_to_memvals_def apply (induction cvs) by (auto) section \<open>Chunkval Range\<close> fun cv_range where "cv_range (b, ofs) [] = \<box>" | "cv_range (b, ofs) (cv # cvs) = (chunkval_at (b, ofs) cv ** cv_range (b, ofs+size_chunkval_nat cv) cvs)" lemma cv_range_append: "cv_range (b, ofs) (cvs @ [cv]) = (cv_range (b, ofs) cvs ** chunkval_at (b, ofs+size_chunkvals cvs) cv)" apply (induction cvs arbitrary: ofs) by (auto simp add: add.assoc) lemma cv_range_undef: fixes ofs :: int fixes sz :: nat assumes "0 < sz" assumes "sz dvd ofs" shows "cv_range (b, ofs) (replicate n (CVundef (num_of_nat sz))) = mem.val_range (b, ofs) (replicate (n*sz) Undef)" using assms apply (induction n arbitrary: ofs) apply (simp) apply (subst replicate_Suc)+ apply (simp add: replicate_add mem.val_range_split_concat num_of_nat_inverse) apply (rule sep_conj_trivial_strip2) unfolding chunkval_at_def by (simp add: sep_algebra_simps num_of_nat_inverse) fun cvs_aligned where "cvs_aligned (b, ofs) [] = True" | "cvs_aligned (b, ofs) (cv # cvs) = (int (align_chunkval_nat cv) dvd ofs \<and> cvs_aligned (b, ofs + size_chunkval_nat cv) cvs)" lemma cv_range_alt: "cv_range p cvs = (\<up>(cvs_aligned p cvs) ** mem.val_range p (chunkvals_to_memvals cvs))" apply (cases p) subgoal for b ofs apply (induction cvs arbitrary: p ofs) apply (simp add: sep_algebra_simps) by (simp add: chunkvals_to_memvals_def chunkval_at_def mem.val_range_split_concat sep_algebra_simps) done thm mem.val_range_split[no_vars] lemma cv_range_split: assumes "len \<le> length cvs" shows "cv_range (b, ofs) cvs = (cv_range (b, ofs) (take len cvs) \<and>* cv_range (b, ofs + (size_chunkvals (take len cvs))) (drop len cvs))" using assms proof (induction len) case 0 then show ?case by (simp add: sep_algebra_simps) next case (Suc len) then have "len \<le> length cvs" by simp note IH = Suc(1)[OF this] let ?cvs1 = "take len cvs" let ?cvs2 = "drop len cvs" let ?cv = "cvs ! len" have cvs1': "take (Suc len) cvs = ?cvs1 @ [?cv]" by (meson Suc.prems less_eq_Suc_le take_Suc_conv_app_nth) obtain cvs2' where cvs2': "?cvs2 = ?cv # cvs2'" "drop (Suc len) cvs = cvs2'" by (metis Cons_nth_drop_Suc Suc.prems Suc_le_lessD) show ?case apply (simp add: IH cvs1' cvs2' cv_range_append size_chunkvals_append) by (smt (verit, best)) qed lemma cv_range_extract: assumes "idx < length cvs" shows "cv_range (b, 0) cvs = (cv_range (b, 0) (take idx cvs) \<and>* chunkval_at (b, int (size_chunkvals (take idx cvs))) (cvs ! idx) \<and>* cv_range (b, int (size_chunkvals (take idx cvs) + size_chunkval_nat (cvs ! idx))) (drop (Suc idx) cvs))" apply (simp add: sep_algebra_simps) proof (goal_cases) case 1 have "idx \<le> length cvs" using assms by simp note split1 = cv_range_split[OF this, of b 0, simplified] obtain cv cvs' where cvs': "drop idx cvs = cv # cvs'" "cvs ! idx = cv" "drop (Suc idx) cvs = cvs'" by (metis Cons_nth_drop_Suc assms) let ?ofs_idx = "int (size_chunkvals (take idx cvs))" let ?ofs_cvs' = "int (size_chunkvals (take idx cvs)) + size_chunkval_nat cv" have split2: "cv_range (b, ?ofs_idx) (drop idx cvs) = (chunkval_at (b, ?ofs_idx) cv ** cv_range (b, ?ofs_cvs') cvs')" by (simp add: cvs') then show ?case by (simp add: split1 split2 cvs') qed definition "cvs_of_size cvs elem_size \<equiv> \<forall>cv \<in> set cvs. 
(size_chunkval_nat cv) = elem_size" lemma cvs_of_size0[iff]: "cvs_of_size [] sz" unfolding cvs_of_size_def by auto lemma cvs_of_size_undef[iff]: "cvs_of_size (replicate n (CVundef elem_size)) (nat_of_num elem_size)" unfolding cvs_of_size_def by auto lemma size_chunkvals_of_size: assumes "cvs_of_size cvs elem_size" shows "size_chunkvals cvs = elem_size * length cvs" using assms apply (induction cvs) apply simp unfolding cvs_of_size_def by auto lemma cvs_of_size_take[iff]: assumes "cvs_of_size cvs elem_size" shows "cvs_of_size (take l cvs) elem_size" using assms unfolding cvs_of_size_def by (meson in_set_takeD) lemma size_chunkvals_of_size_take: assumes "l < length cvs" assumes "cvs_of_size cvs elem_size" shows "size_chunkvals (take l cvs) = elem_size * l" using cvs_of_size_take[OF assms(2), THEN size_chunkvals_of_size] using assms(1) by auto lemma cv_range_extract_samesize: assumes "idx < length cvs" assumes "cvs_of_size cvs elem_size" shows "cv_range (b, 0) cvs = (cv_range (b, 0) (take idx cvs) \<and>* chunkval_at (b, int (elem_size * idx)) (cvs ! idx) \<and>* cv_range (b, int (elem_size * (Suc idx))) (drop (Suc idx) cvs))" apply (simp add: cv_range_extract[OF assms(1)]) apply (simp add: cv_range_extract size_chunkvals_of_size_take[OF assms]) using assms(2) cvs_of_size_def by (smt (verit, ccfv_threshold) assms(1) nth_mem) section \<open>Chunkval Array\<close> record cv_array_def = array_name :: ident array_elem_size :: nat array_length :: nat record cv_array = cv_array_def + array_block :: positive array_chunkvals :: "chunkval list" definition "array_size array \<equiv> array_elem_size array * array_length array" definition array_idx_offset :: "'a cv_array_def_scheme \<Rightarrow> nat \<Rightarrow> int" where "array_idx_offset array idx \<equiv> (array_elem_size array) * idx" definition "chunk_fits_array chunk array \<equiv> size_chunk_nat chunk = (array_elem_size array)" fun array_from_def :: "cv_array_def \<Rightarrow> positive \<Rightarrow> chunkval list \<Rightarrow> cv_array" where "array_from_def def b cvs = cv_array_def.extend def (cv_array.fields b cvs)" definition "SAME_DEF def array \<equiv> cv_array_def.truncate array = def" inductive array_def_valid :: "cv_array_def \<Rightarrow> bool" where "\<lbrakk> 0 < array_elem_size def; 0 < array_length def; array_size def < arch_max_size \<rbrakk> \<Longrightarrow> array_def_valid def" inductive_cases array_def_validE: "array_def_valid array" inductive array_valid :: "cv_array \<Rightarrow> bool" where "\<lbrakk> array_chunkvals array \<noteq> []; length (array_chunkvals array) = array_length array; cvs_of_size (array_chunkvals array) (array_elem_size array); array_size array < arch_max_size \<rbrakk> \<Longrightarrow> array_valid array" inductive_cases array_validE: "array_valid array" context fixes array assumes valid: "array_valid array" begin lemma properties[iff]: "length (array_chunkvals array) = array_length array" "cvs_of_size (array_chunkvals array) (array_elem_size array)" "array_size array < arch_max_size" using array_validE[OF valid] by auto lemma array_chunkvals_size[simp]: shows "size_chunkvals (array_chunkvals array) = array_size array" apply (rule array_validE[OF valid]) unfolding array_size_def cvs_of_size_def proof (induction "array_chunkvals array" arbitrary: array) case Nil then show ?case by simp next case (Cons a x) note [simp] = \<open>a # x = array_chunkvals array\<close>[symmetric] show ?case using Cons(1,3) apply simp by (metis Cons.hyps(2) Cons.prems cvs_of_size_def size_chunkvals.simps(2) 
size_chunkvals_of_size) qed lemma array_valid_size_g0[iff]: shows "0 < array_size array" unfolding array_size_def by (metis array_chunkvals_size array_size_def array_validE size_chunkvals_some valid) lemma array_valid_size_bounded[simp]: shows "take_bit 64 (array_size array) = array_size array" unfolding arch_max_size_def by (metis arch_max_size_def fact_2 int_ops(3) len64 of_nat_fact of_nat_less_numeral_power_cancel_iff properties(3) take_bit_nat_eq_self) lemma array_idx_offset_bounded: assumes idx: "idx < array_length array" shows "array_idx_offset array idx < array_size array" apply (rule array_validE[OF valid]) unfolding array_idx_offset_def array_size_def using idx by (metis less_imp_of_nat_less nat_0_less_mult_iff nat_mult_less_cancel_disj size_chunkvals_of_size size_chunkvals_some) lemma array_idx_offset_bounded'[simp]: assumes idx: "idx < array_length array" shows "take_bit 64 (array_idx_offset array idx) = array_idx_offset array idx" using assms array_idx_offset_bounded unfolding array_valid.simps arch_max_size_def array_idx_offset_def by (smt (verit, ccfv_threshold) array_valid_size_bounded of_nat_power_less_of_nat_cancel_iff take_bit_nat_eq_self_iff take_bit_of_nat valid) lemma chunk_fits_chunkval_if_array: assumes idx: "idx < array_length array" assumes fits: "chunk_fits_array chunk array" shows "chunk_fits_chunkval chunk (array_chunkvals array ! idx)" apply (rule array_validE[OF valid]) by (simp add: chunk_fits_chunkval_size fits[unfolded chunk_fits_array_def, symmetric] cvs_of_size_def idx) end lemma chunk_fits_cvs_of_size_update: assumes "chunk_fits_array chunk array" assumes "cvs_of_size (array_chunkvals array) (array_elem_size array)" shows "cvs_of_size ((array_chunkvals array)[idx := encode_chunkval chunk v]) (array_elem_size array)" using assms unfolding cvs_of_size_def chunk_fits_array_def apply (cases array) by (metis insertE set_update_subset_insert size_encode_chunkval subsetD) lemma array_same_def[simp]: assumes "SAME_DEF def array" shows "array_name def = array_name array" "array_elem_size def = array_elem_size array" "array_length def = array_length array" "array_idx_offset def idx = array_idx_offset array idx" "chunk_fits_array chunk def = chunk_fits_array chunk array" using assms unfolding SAME_DEF_def array_idx_offset_def chunk_fits_array_def by (auto simp: cv_array_def.defs) definition "have_array_head def array \<equiv> (\<up>(SAME_DEF def array \<and> array_valid array) \<and>* env.assn_is (array_name array) (Vptr (array_block array) 0) \<and>* chunkval_at (array_block array, -8) (CVword64 (Int64.repr (array_size array))))" definition "have_array def array \<equiv> have_array_head def array \<and>* cv_range (array_block array, 0) (array_chunkvals array)" schematic_goal cv_array_alt: "have_array def array = (\<up>(?x) \<and>* env.assn_is (array_name array) (Vptr (array_block array) 0) \<and>* mem.mem_val_fit Mptr (array_block array, - size_chunk Mptr) (Vptrofs (Int64.repr (array_size array))) \<and>* mem.val_range (array_block array, 0) (chunkvals_to_memvals (array_chunkvals array)))" unfolding have_array_def have_array_head_def chunkval_at_def Mptr_def Vptrofs_def cv_range_alt mem.mem_chunk_def by (simp add: sep_algebra_simps encode_val.simps mem.chunk_fit.simps size_chunk_nat.simps align_chunk.simps size_chunk.simps) definition array_get_val :: "cv_array \<Rightarrow> nat \<Rightarrow> memory_chunk \<Rightarrow> val" where "array_get_val array idx chunk \<equiv> decode_chunkval chunk (array_chunkvals array ! 
idx)" definition array_set_val :: "cv_array \<Rightarrow> nat \<Rightarrow> memory_chunk \<Rightarrow> val \<Rightarrow> cv_array" where "array_set_val array idx chunk v \<equiv> array_chunkvals_update (\<lambda>cvs. list_update cvs idx (encode_chunkval chunk v)) array" lemma array_same_def_set_val[iff]: assumes "SAME_DEF array_def array" shows "SAME_DEF array_def (array_set_val array idx chunk v)" using assms unfolding SAME_DEF_def array_set_val_def by (auto simp: cv_array_def.defs) section \<open>Chunkval Operations\<close> named_theorems cv_op_wrappers definition [cv_op_wrappers]: "mem_op_valid_access_cv = mem.op_valid_access" definition [cv_op_wrappers]: "mem_op_decode_chunk_cv = mem.op_decode_chunk" definition [cv_op_wrappers]: "mem_op_load_cv = mem.op_load" definition [cv_op_wrappers]: "mem_op_loadv_cv = mem.op_loadv" definition [cv_op_wrappers]: "mem_op_store_cv = mem.op_store" definition [cv_op_wrappers]: "mem_op_storev_cv = mem.op_storev" definition [cv_op_wrappers]: "Eload_cv \<equiv> Eload" definition [cv_op_wrappers]: "Sstore_cv \<equiv> Sstore" lemmas mem_wp_load_cv[vcg_decomp_rules] = mem.wp_load[folded cv_op_wrappers] lemmas mem_wp_loadv_cv[vcg_decomp_rules] = mem.wp_loadv[folded cv_op_wrappers] lemmas mem_wp_storev_cv[vcg_decomp_rules] = mem.wp_storev[folded cv_op_wrappers] lemmas wp_Eload_cv[vcg_decomp_rules] = wp_Eload[folded cv_op_wrappers] lemmas wp_Sstore_cv[vcg_decomp_rules] = wp_Sstore[folded cv_op_wrappers] print_statement mem.ht_valid_access_inv[folded cv_op_wrappers] lemma mem_ht_valid_access_cv[vcg_rules]: assumes fits: "chunk_fits_chunkval chunk cv" shows "mem.htriple (chunkval_at p cv) (mem_op_valid_access_cv chunk p perm) (\<lambda>r. \<up>r \<and>* chunkval_at p cv)" apply (rule ptrI[of p]) unfolding chunkval_as_memchunk[OF fits] mem_op_valid_access_cv_def by vcg print_statement mem.ht_decode_chunk_inv[folded cv_op_wrappers] lemma mem_ht_decode_chunk_cv[vcg_rules]: assumes fits: "chunk_fits_chunkval chunk cv" shows "mem.htriple (chunkval_at p cv) (mem_op_decode_chunk_cv chunk p) (\<lambda>r. \<up>(decode_val chunk r = decode_chunkval chunk cv) \<and>* chunkval_at p cv)" apply (rule ptrI) unfolding chunkval_as_memchunk[OF fits] mem_op_decode_chunk_cv_def unfolding mem.op_decode_chunk_def mem.mem_chunk_def using decode_chunkval_val[OF fits] by vcg print_statement mem.ht_store_inv[folded cv_op_wrappers] lemma mem_ht_store_cv_weak: assumes fits: "chunk_fits_chunkval chunk cv" shows "mem.htriple_weak (chunkval_at p cv) (mem_op_store_cv chunk p v) (\<lambda>_. chunkval_at p (encode_chunkval chunk v))" apply (rule ptrI) unfolding chunkval_as_memchunk[OF fits] mem_op_store_cv_def apply vcg unfolding chunkval_at_def mem.mem_chunk_def using chunk_fits_encoding_aligned[OF fits] chunk_fits_aligns_agree[OF fits] encode_chunkval_val by vcg lemma mem_ht_store_cv[vcg_rules]: assumes fits: "chunk_fits_chunkval chunk cv" shows "mem.htriple (chunkval_at p cv) (mem_op_store_cv chunk p v) (\<lambda>_. 
chunkval_at p (encode_chunkval chunk v))" apply (rule ptrI) apply (rule state_op.htriple_strengthen_inv[OF mem_ht_store_cv_weak[OF fits]]) apply simp unfolding mem_op_store_cv_def mem.op_store_def apply (simp split: prod.splits option.splits add: concrete_mem_lens.update_maybe_def concrete_mem_lens.laws) by (metis Mem'.store_inv concrete_mem_lens.get_put mem.invariants_def) section \<open>Array Operation\<close> definition Sarray_alloc :: "cv_array_def \<Rightarrow> stmt" where "Sarray_alloc def \<equiv> (Sbuiltin (Some (array_name def)) EF_malloc [Econst (Olongconst (Int64.repr (array_size def)))])" lemma ht_malloc_cv_array[vcg_rules]: notes ht_malloc[vcg_rules] fixes def :: cv_array_def assumes def_valid: "array_def_valid def" defines "new_array b \<equiv> array_from_def def b (replicate (array_length def) (CVundef (num_of_nat (array_elem_size def))))" shows "stmt_ht ge f sp (env.assn_is (array_name def) v) (Sarray_alloc def) (silent_op (EXS b. have_array def (new_array b)))" proof - have array_def_assms[iff]: "0 < array_elem_size def" "0 < array_length def" "int (array_size def) < arch_max_size" using def_valid[unfolded array_def_valid.simps, simplified] by auto have [simp]: "\<And>b sz. chunkval_at (b, -8) (CVword64 sz) = mem.mem_val Mptr (b, - size_chunk Mptr) (Vptrofs sz)" unfolding chunkval_at_def mem.mem_chunk_def unfolding Mptr_def Vptrofs_def by (simp add: size_chunk.simps size_chunk_nat.simps align_chunk.simps encode_val.simps mem.chunk_fit.simps sep_algebra_simps) have [simp]: "\<And>b. array_name (new_array b) = array_name def" "\<And>b. array_size (new_array b) = array_size def" "\<And>b. array_block (new_array b) = b" unfolding new_array_def by (auto simp: cv_array_def.defs cv_array.defs array_size_def) have [iff]: "\<And>x. mem.chunk_fit Mptr (Vptrofs x)" unfolding Mptr_def Vptrofs_def by (simp add: mem.chunk_fit.intros(5)) have [simp]: "take_bit 64 (array_size def) = array_size def" using \<open>int (array_size def) < arch_max_size\<close> unfolding arch_max_size_def by (simp add: take_bit_nat_eq_self_iff) have [simp]: "\<And>b. cv_range (b, 0) (array_chunkvals (new_array b)) = mem.val_range (b, 0) (replicate (nat (Int64.unsigned (Int64.repr (int (array_size def))))) Undef)" unfolding new_array_def array_size_def using cv_range_undef by (simp add: cv_array.defs cv_array_def.defs array_size_def[symmetric] mult.commute) have [iff]: "\<And>b. array_valid (new_array b)" unfolding array_valid.simps new_array_def unfolding array_size_def apply (simp add: cv_array.defs cv_array_def.defs) by (metis array_def_assms(1) array_def_assms(3) array_size_def cvs_of_size_undef num_of_nat_inverse of_nat_mult) have [iff]: "\<And>b. SAME_DEF def (new_array b)" unfolding SAME_DEF_def new_array_def by (simp add: cv_array_def.defs) then show ?thesis unfolding Sarray_alloc_def have_array_def have_array_head_def supply [simp] = sep_algebra_simps by vcg qed definition "Sarray_free def \<equiv> Sbuiltin None (EF_free) [Evar (array_name def)]" lemma ht_free_cv_array[vcg_rules]: shows "stmt_ht ge f sp (have_array def array) (Sarray_free def) (silent_op (EXS v. 
env.assn_is (array_name def) v))" unfolding Sarray_free_def cv_array_alt apply vcg apply (metis array_chunkvals_size array_valid.simps size_chunkvals_some_word) apply simp apply (simp add: sep_algebra_simps) by vcg definition Earray_idx_offset where "Earray_idx_offset array_def idx \<equiv> (Ebinop Oaddl (Evar (array_name array_def)) (Econst (Olongconst (word_of_int (array_idx_offset array_def idx)))))" lemma ht_array_idx_offset[vcg_rules]: assumes [iff]: "idx < array_length array" shows "expr_ht ge sp (have_array array_def array) (Earray_idx_offset array_def idx) (\<lambda>r. r = Vptr (array_block array) (word_of_int (array_idx_offset array idx)))" unfolding have_array_def have_array_head_def unfolding Earray_idx_offset_def by vcg definition Earray_load where "Earray_load array_def idx chunk \<equiv> Eload_cv chunk (Earray_idx_offset array_def idx)" lemma ht_array_load[vcg_rules]: assumes fits_array: "chunk_fits_array chunk array" assumes idx_ok: "idx < array_length array" shows "expr_ht ge sp (have_array array_def array) (Earray_load array_def idx chunk) (\<lambda>r. r = array_get_val array idx chunk)" unfolding have_array_def have_array_head_def apply (rule expr.htriple_pure_preI) apply (simp add: pure_part_pure_conj_eq sep_algebra_simps) apply clarify proof (goal_cases) case 1 note valid = \<open>array_valid array\<close> note array_assms = valid[simplified array_valid.simps, simplified] then have idx_inrange: "idx < length (array_chunkvals array)" using idx_ok by auto note extr = cv_range_extract[OF this] note array_offset_ok = array_idx_offset_bounded'[OF valid idx_ok] have [simp]: "int (size_chunkvals (take idx (array_chunkvals array))) = array_idx_offset array idx" by (metis array_assms array_idx_offset_def idx_ok size_chunkvals_of_size_take) have [iff]: "chunk_fits_chunkval chunk (array_chunkvals array ! idx)" by (simp add: 1 chunk_fits_chunkval_if_array fits_array idx_ok) have [simp]: "decode_chunkval chunk (array_chunkvals array ! idx) = array_get_val array idx chunk" by (simp add: array_get_val_def) show ?case apply (simp add: extr) unfolding Earray_load_def Earray_idx_offset_def using \<open>SAME_DEF array_def array\<close> apply vcg apply (simp add: array_offset_ok) by vcg qed definition Sarray_store :: "cv_array_def \<Rightarrow> nat \<Rightarrow> memory_chunk \<Rightarrow> expr \<Rightarrow> stmt" where "Sarray_store array_def idx chunk a \<equiv> Sstore_cv chunk (Earray_idx_offset array_def idx) a" lemma ht_array_store[vcg_rules]: assumes fits_array: "chunk_fits_array chunk array" assumes idx_ok: "idx < array_length array" assumes [vcg_rules]: "expr_ht ge sp F a (\<lambda>r. r = v)" shows "stmt_htF ge f sp F (have_array array_def array) (Sarray_store array_def idx chunk a) (silent_op (have_array array_def (array_set_val array idx chunk v)))" unfolding have_array_def have_array_head_def apply (rule stmt.htripleF_pure_preI) apply (simp add: pure_part_pure_conj_eq sep_algebra_simps) apply clarify proof (goal_cases) case 1 note valid = \<open>array_valid array\<close> note array_assms = valid[simplified array_valid.simps, simplified] let ?cvs = "array_chunkvals array" let ?cv' = "encode_chunkval chunk v" let ?cvs' = "?cvs[idx := ?cv']" have idx_inrange: "idx < length ?cvs" using idx_ok array_assms by simp have idx_inrange': "idx < length ?cvs'" using idx_ok array_assms by simp have [simp]: "?cvs' ! 
idx = ?cv'" using idx_inrange' by auto note extr = cv_range_extract[OF idx_inrange] note extr_new = cv_range_extract[OF idx_inrange', simplified] note array_offset_ok = array_idx_offset_bounded'[OF valid idx_ok] have [simp]: "int (size_chunkvals (take idx (array_chunkvals array))) = array_idx_offset array idx" by (metis array_assms array_idx_offset_def idx_ok size_chunkvals_of_size_take) have [iff]: "chunk_fits_chunkval chunk (array_chunkvals array ! idx)" by (simp add: valid chunk_fits_chunkval_if_array fits_array idx_ok) have [iff]: "array_valid (array_set_val array idx chunk v)" unfolding array_valid.simps array_set_val_def using array_assms apply (auto) using chunk_fits_cvs_of_size_update fits_array apply auto[1] unfolding array_size_def by simp have [simp]: "size_chunkval_nat (array_chunkvals array ! idx) = size_chunk_nat chunk" using chunk_fits_chunkval_size by auto note [simp] = array_same_def[OF \<open>SAME_DEF array_def array\<close>] show ?case unfolding Sarray_store_def Earray_idx_offset_def apply vcg apply (simp add: array_offset_ok extr) apply vcg unfolding array_set_val_def apply (simp add: sep_algebra_simps extr_new) unfolding array_size_def apply simp by vcg qed end
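(* Hedged sketch (hypothetical addition relying only on the definitions above; it would belong before the closing "end"): size_chunkvals distributes over list append, generalizing size_chunkvals_append. *) lemma size_chunkvals_append': "size_chunkvals (cvs @ cvs') = size_chunkvals cvs + size_chunkvals cvs'" by (induction cvs) auto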
(**************************************************************************) (* Mechanised Framework for Local Interactions & Distributed Algorithms *) (* T. Balabonski, P. Courtieu, L. Rieg, X. Urbain *) (* PACTOLE project *) (* *) (* This file is distributed under the terms of the CeCILL-C licence. *) (* *) (**************************************************************************) Require Import SetoidDec. Require Import Pactole.Util.SetoidDefs. Require Import Pactole.Core.State. Require Import Pactole.Observations.Definition. Set Implicit Arguments. Section PointedObservation. Context {Loc : Location}. Context {info : Type}. Context {St : State info}. Context {N : Identifiers.Names}. Context {Obs : Observation}. Instance PointedObservation : Observation := {| observation := observation * info; observation_Setoid := prod_Setoid observation_Setoid state_Setoid; observation_EqDec := prod_EqDec observation_EqDec state_EqDec; obs_from_config := fun config st => (obs_from_config config st, st); obs_from_config_compat := ltac:(repeat intro; now split; trivial; apply obs_from_config_compat); obs_is_ok := fun obs config st => obs_is_ok (fst obs) config st /\ st == snd obs; obs_from_config_spec := fun config st => conj (obs_from_config_spec config st) (reflexivity st) |}. End PointedObservation.
theory Types imports Main HOL.Real begin subsection \<open>datatype\<close> datatype ITree = Leaf int | Node ITree int ITree term Leaf term Node primrec ITreeSum :: "ITree \<Rightarrow> int" where "ITreeSum (Leaf v) = v" | "ITreeSum (Node left v right) = ITreeSum left + v + ITreeSum right" definition "ITree1 \<equiv> Node (Node (Leaf 4) 5 (Node (Leaf 1) 8 (Leaf 2))) 10 (Node (Leaf 7) 9 (Leaf 15))" value "ITreeSum ITree1" datatype Fruit = APPLE | ORANGE | BANANA term APPLE value APPLE lemma "APPLE \<noteq> ORANGE" by simp primrec showfruit :: "Fruit \<Rightarrow> string" where "showfruit APPLE = ''it is an apple''" | "showfruit ORANGE = ''it is an orange''" | "showfruit BANANA = ''it is a banana''" datatype Exp = B bool | I int | N nat term B term I term N datatype Type = Bool | Integer | Nature value "B True" primrec type :: "Exp \<Rightarrow> Type" where "type (B a) = Bool" | "type (I a) = Integer" | "type (N a) = Nature" value "type (B a)" datatype boolex = Const bool | Var nat | Neg boolex | And boolex boolex | Or boolex boolex type_synonym env = "nat \<Rightarrow> bool" primrec eval :: "boolex \<Rightarrow> env \<Rightarrow> bool" where "eval (Const a) ev = a" | "eval (Var v) ev = ev v" | "eval (Neg e) ev = (\<not> eval e ev)" | "eval (And e1 e2) ev = (eval e1 ev \<and> eval e2 ev)" | "eval (Or e1 e2) ev = (eval e1 ev \<or> eval e2 ev)" definition env1 :: "env" where "env1 n \<equiv> (if n mod 2 = 0 then True else False)" value "eval (Var 1) env1" value "eval (And (Var 2) (Var 4)) env1" value "eval (And (Or (Const True) (Var 1)) (Neg (Var 4))) env1" subsubsection \<open>mutual recursion\<close> datatype aexp = IF bexp aexp aexp | Add aexp aexp | Sub aexp aexp | Num nat and bexp = Less aexp aexp | And bexp bexp | Neg bexp primrec evala :: "aexp \<Rightarrow> nat" and evalb :: "bexp \<Rightarrow> bool" where "evala (IF b a1 a2) = (if (evalb b) then evala a1 else evala a2)" | "evala (Add a1 a2) = (evala a1) + (evala a2)" | "evala (Sub a1 a2) = (evala a1) - (evala a2)" | "evala (Num a) = a" | "evalb (Less a1 a2) = (evala a1 < evala a2)" | "evalb (And b1 b2) = (evalb b1 \<and> evalb b2)" | "evalb (Neg b) = (\<not> evalb b)" value "evala (IF (Less (Num 10) (Num 20)) (Num 1) (Num 2))" value "evalb (Less (Num 50) (Num 100))" subsection \<open>product, pair and tuple\<close> typedecl Vertex record Vert = xpos :: int ypos :: int axiomatization v1 :: Vertex and v2 :: Vertex and v3 :: Vertex and v4 :: Vertex and v5 :: Vertex and v6 :: Vertex and v7 :: Vertex where assum: "distinct [v1,v2,v3,v4,v5,v6,v7]" definition rel1 :: "Vertex \<times> Vertex" where "rel1 \<equiv> (v1,v2)" definition Agraph :: "(Vertex \<times> Vertex) set" where "Agraph \<equiv> {(v1, v2), (v1, v3), (v2, v4), (v4, v3), (v4, v5), (v3, v6), (v5, v7), (v6, v7)}" term "()" term "(v1, v2)" term "Pair v1 v2" term Pair lemma "(v1, v2) = Pair v1 v2" by simp definition "conn1 \<equiv> (v1, v2)" lemma "fst conn1 = v1" by (simp add:conn1_def) lemma "snd conn1 = v2" by (simp add:conn1_def) lemma "(v1, v2) \<noteq> (v2, v1)" using assum by auto lemma "(x,y) = (a,b) \<longleftrightarrow> x = a \<and> y = b" by auto definition h :: "nat \<Rightarrow> nat \<Rightarrow> nat" where "h x y \<equiv> x + y" term h definition h2 :: "(nat \<times> nat) \<Rightarrow> nat" where "h2 \<equiv> \<lambda>(x,y). 
x + y" lemma "case_prod h = h2" unfolding h_def h2_def by simp lemma "curry h2 = h" unfolding h_def h2_def by simp value "h2 (Pair 1 2)" value "h2 (1,2)" value "h 1 2" lemma "(a,b,c) = (a,(b,c))" by simp term "((a,b),c)" term "(a,b,c)" value "fst ((a,b),c)" value "fst (a,b,c)" value "snd ((a,b),c)" value "snd (a,b,c)" value "fst (fst ((a,b),c))" value "snd (fst ((a,b),c))" subsection \<open>record\<close> record coord2d = Xpt :: int Ypt :: int term Xpt term Ypt print_record coord2d definition coord1 :: "coord2d" where "coord1 \<equiv> \<lparr>Xpt = 20, Ypt = 30\<rparr>" value "Xpt coord1" definition coord1' :: "coord2d" where "coord1' \<equiv> coord1\<lparr>Ypt := 50\<rparr>" value "coord1'" definition incX :: "coord2d \<Rightarrow> coord2d" where "incX r \<equiv> r\<lparr>Xpt := Xpt r + 1\<rparr>" value "incX coord1" lemma "coord1\<lparr>Ypt := 50\<rparr> = \<lparr>Xpt = 20, Ypt = 50\<rparr>" unfolding coord1_def by simp record coord3d = coord2d + Zpt :: int definition coord3 :: "coord3d" where "coord3 \<equiv> \<lparr>Xpt = 20, Ypt = 30, Zpt = 40\<rparr>" value "Xpt coord3" value "Zpt coord3" definition coord3' :: "coord3d" where "coord3' \<equiv> coord3\<lparr>Zpt := 50\<rparr>" value "coord3'" value "coord2d.more coord3" term "\<lparr>Xpt = 20, Ypt = 30, Zpt = 40\<rparr>" record coord4d = coord3d + Time :: int definition coord4 :: "coord4d" where "coord4 \<equiv> \<lparr>Xpt = 20, Ypt = 30, Zpt = 40, Time = 1000\<rparr>" value "\<lparr>Xpt = 20, Ypt = 30, Zpt = 40, Time = 1000\<rparr>" value "coord4d.make 20 30 40 1000" type_synonym recT1 = "\<lparr>Xpt :: int, Ypt :: int, Time :: int\<rparr>" print_record recT1 type_synonym recT2 = "\<lparr>Xpt :: int, Ypt :: int, Zpt :: int, Time :: int\<rparr>" print_record recT2 value "coord2d.more coord4" lemma "\<lparr>Xpt = a1, Ypt = b1\<rparr> = \<lparr>Xpt = a2, Ypt = b2\<rparr> \<longleftrightarrow> a1 = a2 \<and> b1 = b2" by simp lemma "r = \<lparr>Xpt = Xpt r, Ypt = Ypt r, \<dots> = coord2d.more r\<rparr>" by simp value "coord2d.Xpt coord1" value "coord2d.make 5 5" term Time definition "coord5 \<equiv> coord4d.extend coord4 (True)" value "coord5" value "\<lparr>Xpt = 20, Ypt = 30, Zpt = 40, Time = 1000, \<dots> = True\<rparr>" value "more coord5" value "coord2d.more coord4" value "coord2d.truncate \<lparr>Xpt = Xpt coord1, Ypt = Ypt coord1, \<dots> = 50::int\<rparr>" term "\<lparr>Xpt = 10, Ypt = 20, \<dots> = 40::int \<rparr>" value "Xpt \<lparr>Xpt = 10, Ypt = 20, \<dots> = p\<rparr>" value "coord2d.truncate coord3" value "coord3d.fields 100" value "coord4d.fields 200" value "coord2d.more coord4" value "coord3d.more coord4" term "coord4d.fields" term "coord3d.more" value "coord3d.more (coord3d.extend coord3 ''hello'')" value "coord2d.extend coord1 (coord3d.fields 50)" subsection \<open>parameteric types\<close> record 'a Coord3d = xpos :: 'a ypos :: 'a zpos :: 'a record 'a Coord4d = "'a Coord3d " + Time :: 'a definition coord6 :: "int Coord3d" where "coord6 \<equiv> \<lparr>xpos = 10, ypos = 20, zpos = 30\<rparr>" definition coord7 :: "real Coord3d" where "coord7 \<equiv> \<lparr>xpos = 10.3::real, ypos = 20.3, zpos = 30.5\<rparr>" definition "coord9 \<equiv> \<lparr>xpos = 10.3::real, ypos = 20.3, zpos = 30.5, Time = 1000.0\<rparr>" value "Coord3d.extend coord6 ''hello''" value "Coord3d.extend coord7 ''good''" value "truncate coord9" record ('a, 'b, 'c) COORD3d = Xp :: 'a Yp :: 'b Zp :: 'c definition coord10 :: "(int, string, real) COORD3d" where "coord10 \<equiv> \<lparr>Xp = 10, Yp = ''20w'', Zp = 30.5\<rparr>" datatype 'a Tree = Leaf 
'a | Node "'a Tree" 'a "'a Tree" type_synonym itree = "int Tree" type_synonym stree = "string Tree" primrec count :: "'a Tree \<Rightarrow> nat" where "count (Leaf _) = 1" | "count (Node l _ r) = count l + count r + 1" definition Tree1 :: "int Tree" where "Tree1 \<equiv> Node (Node (Leaf 4) 5 (Node (Leaf 1) 8 (Leaf 2))) 10 (Node (Leaf 7) 9 (Leaf 15))" definition Tree2 :: "string Tree" where "Tree2 \<equiv> Node (Leaf ''a'') ''b'' (Leaf ''c'')" value "count Tree1" value "count Tree2" datatype 'a array = Null | Conn 'a "'a array" ("_,_") primrec length :: "'a array \<Rightarrow> nat" where "length Null = 0" | "length (Conn a arr) = 1 + length arr" primrec nth :: "'a array \<Rightarrow> nat \<Rightarrow> 'a" ("_[_]") where "nth (Conn a arr) i = (case i of 0 \<Rightarrow> a | Suc k \<Rightarrow> nth arr k)" primrec array_update :: "'a array \<Rightarrow> nat \<Rightarrow> 'a \<Rightarrow> 'a array" ("_[_] := _") where "array_update (Conn a arr) i v = (case i of 0 \<Rightarrow> Conn v arr | Suc j \<Rightarrow> Conn a (array_update arr j v))" definition arr1 :: "int array" where "arr1 \<equiv> (1,2,3,Null)" value "arr1[2]" value "length arr1" value "arr1[2] := 10" definition arr2 :: "string array" where "arr2 \<equiv>(''hello'', ''world'', ''next'', ''word'', Null)" value "arr2[1]" value "arr2[0]" value "arr2[0] := ''hi''" subsection \<open>type declare, synonym, typedef\<close> type_synonym mynat = nat definition nat1 :: "mynat \<Rightarrow> mynat" where "nat1 n \<equiv> n + 1" value "nat1 15" type_synonym 'a array_alias = "'a array" type_synonym 'a array_synonym = "'a array" type_synonym coord3d_t = coord3d definition coord8 :: "coord3d_t" where "coord8 \<equiv> coord3" value "coord8" typedecl BOOL axiomatization T :: BOOL and F :: BOOL and AND :: "BOOL \<Rightarrow> BOOL \<Rightarrow> BOOL" and OR :: "BOOL \<Rightarrow> BOOL \<Rightarrow> BOOL" and NOT :: "BOOL \<Rightarrow> BOOL" where ANDs: "AND a b = (if (a = T \<and> b = T) then T else F)" and ORs: "OR a b = (if (a = T \<or> b = T) then T else F)" and NOTs: "NOT a \<equiv> (if a = T then F else T)" (* and TF: "T \<noteq> F" *) definition xorBOOL :: "BOOL \<Rightarrow> BOOL \<Rightarrow> BOOL" where "xorBOOL a b \<equiv> OR (AND a (NOT b)) (AND (NOT a) b)" (* value T *) lemma "xorBOOL T F = T" unfolding xorBOOL_def using ANDs NOTs ORs by simp lemma "xorBOOL T T = F" unfolding xorBOOL_def using ANDs NOTs ORs by auto lemma "xorBOOL F F = F" unfolding xorBOOL_def using ANDs NOTs ORs by auto lemma "xorBOOL F T = T" unfolding xorBOOL_def using ANDs NOTs ORs by simp typedef Even = "{x::nat. x mod 2 = 0}" (* morphisms rep abs *) proof - have "(2::nat) mod 2 = 0" by simp hence "2\<in>{x::nat. x mod 2 = 0}" by simp thus ?thesis by meson qed term Rep_Even thm Rep_Even term Abs_Even thm Abs_Even_inject thm Abs_Even_inverse thm Rep_Even_inject thm Rep_Even_inverse (* lemma "(rep n) mod 2 = 0" using rep by auto lemma "rep (abs x) mod 2 = 0" using rep abs_inverse by auto lemma "m\<in>{x. x mod 2 = 0} \<Longrightarrow> n\<in>{x. x mod 2 = 0} \<Longrightarrow> abs m = abs n \<longleftrightarrow> m = n" using abs_inject by simp *) instantiation Even :: zero begin definition Zero_even_def: "0 = Abs_Even 0" instance .. end term "0::Even" instantiation Even :: one begin definition One_even_def: "1 = Abs_Even 2" instance .. 
end term "1::Even" definition SUC :: "Even \<Rightarrow> Even" where "SUC n \<equiv> Abs_Even (Suc (Suc (Rep_Even n)))" lemma "SUC 0 = 1" using SUC_def Zero_even_def One_even_def Rep_Even Abs_Even_inverse Abs_Even_inject by (metis (mono_tags, lifting) Suc_eq_plus1_left mem_Collect_eq mod_0 numeral_1_eq_Suc_0 numeral_One one_add_one) definition ADD :: "Even \<Rightarrow> Even \<Rightarrow> Even" where "ADD m n \<equiv> Abs_Even ((Rep_Even m) + (Rep_Even n))" lemma "ADD (SUC 0) (SUC 0) = SUC (SUC 0)" unfolding SUC_def ADD_def using Rep_Even Abs_Even_inverse by (metis (mono_tags, lifting) Suc_eq_plus1_left Zero_even_def add_Suc_shift mem_Collect_eq mod_0 mod_add_self1 numeral_1_eq_Suc_0 numeral_One one_add_one) typedef three = "{(True, True), (True, False), (False, True)}" by blast term Abs_three term Rep_three thm Abs_three_inject thm Abs_three_inverse definition "One = Abs_three (True, True)" definition "Two = Abs_three (True, False)" definition "Three = Abs_three (False, True)" lemma three_distinct: "One \<noteq> Two" "One \<noteq> Three" "Two \<noteq> Three" by (simp_all add: One_def Two_def Three_def Abs_three_inject) lemma three_cases: fixes x :: three obtains "x = One" | "x = Two" | "x = Three" by (cases x) (auto simp: One_def Two_def Three_def Abs_three_inject) end
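(* Hedged extension sketch (hypothetical; it would belong before the final "end" of Types.thy, and the names DOUBLE and Rep_DOUBLE are ours): doubling any natural number lands in the Even subtype, so Rep_Even recovers it directly. *) definition DOUBLE :: "nat \<Rightarrow> Even" where "DOUBLE n \<equiv> Abs_Even (2 * n)" lemma Rep_DOUBLE: "Rep_Even (DOUBLE n) = 2 * n" unfolding DOUBLE_def by (simp add: Abs_Even_inverse)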
\chapter{The emergence of American English: theories, descriptions, and models} \label{bkm:Ref523475305}\hypertarget{Toc63021204}{} This chapter lays the theoretical groundwork for the present study. Section \ref{bkm:Ref522870687} presents an overview and a discussion of theories of the emergence of new varieties of English and the way that the emergence of American English is described in these frameworks. This leads to the development of the essential argument of the study in \sectref{bkm:Ref522870698}, namely that the concept of \textit{enregisterment} provides an important perspective on the emergence of new varieties and should be incorporated in the theoretical modeling of the process as well as used to complement methodological approaches to studying it. A central aspect of the argument is the distinction between three types of varieties, namely structural varieties, perceptual varieties and discursive varieties, and in \sectref{bkm:Ref523897668}, I develop a model which illustrates the difference as well as the relationship between structural and discursive varieties based on the framework of enregisterment. Section \ref{bkm:Ref517077661} directs the focus onto the history of American English again by discussing how the development of the variety has been described in works without an underlying theory of the process of emergence and which role is assigned to the structural and the discursive level in these descriptions. \section{Theories of the emergence of American English as a new variety of English} \label{bkm:Ref522870687}\hypertarget{Toc63021205}{} There are several works which have been written on the history of American English during the last 100 years. In general, according to \citet[250]{Schneider2007}, American English is the “best researched postcolonial variety of all”. However, \citet[250]{Schneider2007} also notes a lack of a “theoretically informed history of the language” and he addresses the need for such a history by describing the emergence of American English within the framework of his Dynamic Model. This study continues this line of research, which is why I focus on describing and discussing mainly three theories and models of the emergence of new varieties and the way that they describe this process in the case of American English. In \sectref{bkm:Ref524246106}, I compare and contrast \citegen{Trudgill2004} theory of new-dialect formation, \citegen{Schneider2007} Dynamic Model, which draws heavily on \citegen{Mufwene2001} theory of the “ecology” of language evolution, and Kretzschmar’s view of the emergence of new varieties within his theory of language as a complex system presented in \citet{Kretzschmar2014, Kretzschmar2015, Kretzschmar2015b}. Based on this overview, I will discuss two issues in more detail which are crucial to the debate: the definition of the term \textit{variety} (\sectref{bkm:Ref521000690}) and the role of social factors in the emergence of new varieties (\sectref{bkm:Ref521576818}). In \sectref{bkm:Ref527366308} I finally discuss important consequences of the theoretical debate which are the foundation for my subsequent argument that studying the emergence of new varieties of English needs a careful distinction between structure and discourse in order to be able to investigate the relationship between these two dimensions. 
\subsection{Overview of theories and models of the emergence of new varieties of English} \label{bkm:Ref524246106}\hypertarget{Toc63021206}{}\label{bkm:Ref525715631} \citegen{Trudgill2004} theory of new-dialect formation and \citegen{Schneider2007} Dynamic Model have in common that they both identify a set of stages or phases that underlie the emergence or formation of new dialects or varieties. Trudgill, who develops his theory mainly based on his study of the history of New Zealand English, argues that “in tabula rasa colonial situations, dialect mixture and new-dialect formation are not haphazard processes” (\citeyear[26]{Trudgill2004}) but a predictable development proceeding from stage 1, which he labels “rudimentary levelling and interdialectal development” to stage 2, labeled “variability and apparent levelling in new-dialect formation” and finally to stage 3 “determinism in new-dialect formation”. In the initial stage, adult speakers of different dialects come into contact in a new place. The communication between these speakers can lead to the leveling of minority and very localized variants because, as Trudgill suggests, speakers need to make themselves understood and the use of linguistic forms unknown to a majority of speakers can inhibit this aim. Additional reasons for this rudimentary leveling can also be that speakers accommodate to particularly salient forms or that they react to normative attitudes which speakers have brought with them from their home country (\citeyear[89-93]{Trudgill2004}). Trudgill regards this stage as the least important one because in his view, “adults are only capable of limited amounts of accommodation” and it is not them but children who are largely responsible for the formation of a new dialect (\citeyear[94]{Trudgill2004}). It is not accommodation but language acquisition which leads to the apparent leveling in stage 2. Children do not notice low frequency forms (below a threshold of roughly 10\%) and do not acquire them, thereby reducing the number of forms which are available to the next generation to a considerable extent. This second generation of children plays a crucial role in stage 3, because from an already reduced number of forms they now select the most common ones. The result is a “final, stable, relatively uniform outcome” in the form of “a stable, crystallized variety” (\citeyear[113]{Trudgill2004}). The final stage is completed by a process called \textit{focusing}, first described and labeled by \citet{LePage1985} and defined by Trudgill as “the process by which the new variety acquires norms and stability” (\citeyear[88]{Trudgill2004}). However, it is important that focusing only occurs when a stable set of forms has emerged through stages 2 and 3, which is why the process of new-dialect formation is essentially deterministic, the new dialect being “a \emph{statistical composite} of the dialect mixture” (\citeyear[123]{Trudgill2004}). Because the theory is mainly based on the case of New Zealand English, Trudgill does not describe or analyze the emergence of American English using his theoretical framework in any detail. He notes several times that it is harder to study the formation of American English than that of New Zealand English because the mixture processes underlying the formation of the variety took place such a long time ago \citep[2]{Trudgill2004}. 
In general, he argues that American English went through the same stages as all other colonial dialects, and he notes only one possible difference, namely that comprehensibility played perhaps a more important role in the rudimentary leveling of the first stage because the traditional dialects spoken by the settlers were more different from one another than those in colonies which were settled later. Although his study does not include American English in the analysis, it is therefore still relevant because his theoretical claims apply to American English as well and it is one of the goals of the present study to discuss their validity. The second model, \citegen{Schneider2007} Dynamic Model, also assumes that the emergence of new varieties of English is characterized by “a uniform underlying process [which] has been effective in all these [contact] situations and explains a wide range of parallel phenomena from one variety to another” \citep[4]{Schneider2007}. However, the phases which he identifies and the mechanisms operating in the process are in many respects different from Trudgill’s. \tabref{tab:2:1} summarizes the key parameters of the different phases and shows that Schneider does not view dialect formation as a deterministic process, but that linguistic effects constitute only one parameter in his model. They result from sociolinguistic conditions, which are a consequence of speakers’ identity constructions, which are in turn caused by the historical and political context.\footnote{\citet{Schneider2007} distinguishes between two speech communities in his model: the Settlers speech community (STL) and the Indigenous speech community (IDG).} Accordingly, \citet[30]{Schneider2007} speaks of a “monodirectional, causal relationship” operating between the parameters. While his model predicts that all post-colonial varieties go through all of these phases, he does not claim that he is able to predict the precise linguistic forms of the new repertoire. His model is not deterministic but explicitly dynamic. Consequently, Schneider does not use the term \textit{formation} to describe the emergence of new varieties but \textit{evolution}, and he aligns himself explicitly with theories of language evolution, particularly with \citegen{Mufwene2001} \textit{feature-pool model}. This model postulates that in a contact situation all linguistic features produced by the speakers are in competition (in a “pool”) and speakers select from this pool. Their choices are influenced by the “ecology” of the contact situation comprising linguistic as well as non-linguistic factors, such as the demographic and political situation and social factors, particularly identity constructions (which identity speakers want to express) and role alignments (which other speakers they want to align with). In Schneider’s Dynamic Model, “identity constructions and realignments, and their symbolic linguistic expression, are also at the heart of the process of the emergence of PCEs [Postcolonial Englishes]” \citep[28]{Schneider2007}. 
The mechanism that operates in this process is accommodation \citep{Giles1984}, but in contrast to Trudgill, who regards accommodation as an “automatic consequence of interaction” which is “not necessarily driven by social factors such as prestige or identity” \citep[28]{Trudgill2004}, Schneider emphasizes the social nature of the process: \begin{quote} Speakers who wish to signal a social bond between themselves will minimize existing linguistic differences as a direct reflection of social proximity: they will tend to pick up forms used by the communication partner to increase the set of shared features and to avoid forms which they realize are not used by their partner and might thus function as a linguistic separator. \citep[27]{Schneider2007} \end{quote} This difference is indicative of the fact that the role of social factors is a matter of considerable debate in theories of the emergence of new varieties and it will therefore be discussed in more detail in \sectref{bkm:Ref521576818}. \begin{sidewaystable} \scriptsize \begin{tabularx}{\textwidth}{L{2cm}L{3.5cm}L{3cm}L{4cm}L{4cm}} \lsptoprule Phase & History and politics & Identity construction & Sociolinguistics of contact/ use/ attitudes & Linguistic developments/ structural effects\\ \midrule 1: Foundation & STL: colonial expansion: trade, military outposts, missionary activities, emigration/ settlement IDG: occupation, loss/ sharing of territory, trade & STL: part of original nation IDG: indigenous & STL: cross-dialectal contact, limited exposure to local languages IDG: minority bilingualism (acquisition of English) & STL: koinéization; toponymic borrowing; incipient pidginization (in trade colonies)\\ 2: Exonormative stabilization & stable colonial status; English established as language of administration, law, (higher) education, … & STL: outpost of original nation, “British-plus-local” IDG: individually “local-plus British” & STL: acceptance of original norm; expanding contact IDG: spreading (elite) bilingualism & lexical borrowing (esp. 
fauna and flora, cultural terms); “-isms”; pidginization/creolization (in trade/plantation colonies)\\ 3: Nativization & weakening ties; often political independence but remaining cultural association & STL: permanent resident of British origin IDG: permanent resident of indigenous origin & widespread and regular contacts, accommodation IDG: common bilingualism, toward language shift, L1 speakers of local English STL: sociolinguistic cleavage between innovative speakers (adopting IDG forms) and conservative speakers (upholding external norm; “complaint tradition”) & heavy lexical borrowing; IDG: phonological innovations (“accent”, possibly due to transfer); structural nativization, spreading from IDG to STL: innovations at lexis – grammar interface (verb complementation, prepositional usage, constructions with certain words/word classes), lexical productivity (compounds, derivation, phrases, semantic shifts); code-mixing (as identity carrier)\\ 4: Endonormative stabilization & post-independence, self-dependence (possibly after “Event X”) & (member of) new nation, territory-based, increasingly pan-ethnic & acceptance of local norm (as identity carrier), positive attitude to it; (residual conservatism); literary creativity in new variety & stabilization of new variety, emphasis on homogeneity, codification: dictionary writing, grammatical description\\ 5: Differentiation & stable young nation, internal sociopolitical differentiation & group-specific (as part of overarching new national identity) & network construction (increasingly dense group-internal interactions) & dialect birth: group-specific (ethnic, regional social) varieties emerge (as L1 or L2)\\ \lspbottomrule \end{tabularx} \caption{ Developmental phases of Schneider’s Dynamic Model (from \citealt[56]{Schneider2007}) } \label{tab:2:1} \end{sidewaystable} With regard to the case of American English, Schneider considers it to be “an almost unique opportunity to observe the entire developmental cycle in hindsight” (\citeyear[251]{Schneider2007}) because it is the oldest and also the best researched variety of all postcolonial varieties. I provide a brief summary of his analysis here by focusing especially on those aspects which illustrate his central thesis that social factors, especially identity constructions, have linguistic effects. In the first phase (roughly from 1587 to 1670), evidence of identity constructions is scarce, but Schneider finds it very likely that the early settlers still perceived themselves as Englishmen \citep[258]{Schneider2007}. Consequently, it is the degree of mixture of speakers coming from different regions and speaking different dialects which has the most effect on linguistic developments. Regions in which the population mixture was highest (as in the case of the Quakers in Pennsylvania) exhibited the highest degree of koinéization, defined by \citet[35]{Schneider2007} as the “emergence of a relatively homogeneous “middle-of-the-road variety”” based on a process in which “speakers […] mutually adjust their pronunciation and lexical usage to facilitate understanding”. In regions with culturally and linguistically more homogeneous settler groups (particularly New England, the South and the Appalachian Mountains), less koinéization occurred. This explains the present-day situation, in which the most distinctive dialects are found in the South and in the East, while the mainstream American variety is located in the Midland, the West and the North \citep[261--262]{Schneider2007}. 
At the same time, \citet[262]{Schneider2007} argues that social similarities between the settler groups in New England and in tidewater Virginia, namely their middle-to-upper-class background and close ties to the home country, were responsible for the fact that southern and eastern dialects share a number of features, for example lack of rhoticity, yod-dropping and lexical forms like \emph{piazza} ‘veranda’. In the second phase (ca. 1670-1773), \citet[265]{Schneider2007} distinguishes two English-speaking groups with different identity constructions. One group was of higher social status and lived on the coast and the other group was of lower social status and lived in more inland regions. While the first group still firmly identified with England, the second one adopted an “English-plus” colonial identity, which was influenced by their more frequent contact with other cultural groups and their “frontier experience” \citep[265]{Schneider2007}. Additionally, Schneider argues that non-English speaking groups had a split identity because even though they wanted to adjust in America and leave problems in their home countries behind, they also wanted to retain their cultural and linguistic heritage (\citeyear[266]{Schneider2007}). And lastly, African groups were torn between forces to adjust and the desire to resist these forces and maintain their cultural identity. This combination of identity constructions resulted on the one hand in a stable exonormative orientation (as predicted by the Dynamic Model) among the high-status social group and on the other hand in a bilingualism or multilingualism among groups coming from non-English-speaking countries and in a variable sociolinguistic situation for African Americans who sometimes had extensive contacts with white speakers of English and sometimes primarily intra-ethnic contact without an opportunity to acquire English (\citeyear[266--269]{Schneider2007}). In terms of linguistic effects, Schneider (\citeyear[269--273]{Schneider2007}) finds a high degree of linguistic homogeneity and lexical borrowings as well as innovations (“Americanisms”). With regard to homogeneity, he notes that this is of course not to be seen in absolute terms, i.e. the complete absence of variability. It is rather the case that leveling processes took place as the result of mixing and koinéization, but “cultural and linguistic peculiarities” were retained as well, and English innovations were adopted as a result of the prevailing exonormative orientation. An important example is rhoticity: \begin{quote} The most interesting case in point is postvocalic /r/, a sound which was pronounced even in southern British English well into the eighteenth century and disappeared only then, as in modern RP. In other words, the r-lessness of New England and the South must have developed in America, modeling English linguistic fashion – a strong indication of the exonormative linguistic orientation of colonial America. (\citeyear[271]{Schneider2007}) \end{quote} The term \textit{Americanism}, coined by John Witherspoon in 1781 (see \citealt[272]{Schneider2007}), is another indicator of the exonormative orientation because the lexical items labeled as such were usually evaluated negatively because they did not conform to a British norm. 
With regard to African American English, Schneider finds that there is no evidence of it in the colonial period, although he also notes that “there was room for the development and retention of ethnic speech markers, for the development of linguistic means to signal a non-white, and possibly subliminally counter-European, ethnolinguistic identity” (\citeyear[268]{Schneider2007}). The crucial phase in the emergence of American English is the third phase, the nativization phase, which \citet[273]{Schneider2007} dates from ca. 1773 to 1828/1848. According to him, the “birth of American English as a concept and as a variety falls into that period” (\citeyear[276]{Schneider2007}) – as an effect of the political independence which was accompanied by changing identity constructions. An American nationalism replaced the orientation towards England, and Schneider makes it clear that the “close nexus between political events and linguistic developments (via identity rewritings), and the causal role of the former for the latter, are undisputed” (\citeyear[275]{Schneider2007}). He cites several authors who establish the claim for a language separate from Britain, including Noah Webster’s famous words: “as an independent nation, our honor requires us to have a system of our own, in language as well as in government” (\citealt[20]{Webster1789}, quoted in \citealt[277]{Schneider2007}). There were also voices resisting the call for linguistic independence by continuing to favor British norms (forming a “complaint tradition”), but Schneider argues that “[i]n quantitative terms […] nativization made the balance tip toward the former position” (\citeyear[277]{Schneider2007}). Schneider postulates a clear relationship between the positive evaluation of linguistic difference and actual structural developments: \begin{quote} [T]he period of structural nativization was the one during which effects inhibiting divergence disappeared and, in contrast, linguistic differences became actively promoted or at least positively evaluated. Westward expansion, then, brought an increasingly appreciative attitude toward down-to-earth speechways and hence strengthened another powerful factor promoting linguistic nativization. While of course British and American English still have a lot in common and linguistic continuity has also been important, differences between the two major varieties of English kept increasing. (\citeyear[278]{Schneider2007}) \end{quote} As evidence for these increasing differences he gives examples of lexical borrowings and innovations, stressing the creativity of word-formation processes characteristic of this phase, which found expression especially in many conversions (\citeyear[279]{Schneider2007}). On the level of grammar, he finds an “almost endless” number of innovations at the lexico-grammatical interface, e.g. \emph{different than} (vs. \emph{from, to}) (\citeyear[280]{Schneider2007}). Spelling is also important in this phase because several differences from British English did not emerge from use (like differences on other levels) but through deliberate language planning, and these spellings have acquired a symbolic significance in public discourse (e.g. <-or>/<-our>, <-ize>/<-ise>) (\citeyear[281]{Schneider2007}). The phonological level is the only level for which Schneider finds that hardly any evidence is available, and what evidence there is, is difficult to interpret. Schneider consequently does not give any examples of how structural nativization proceeds phonologically (\citeyear[279]{Schneider2007}).
He does, however, provide more information on how he conceptualizes the process by arguing that \begin{quote} differences between varieties of English, British and American in the present case, not only consist of the ones frequently observed, documented and listed, but they encompass an infinitely larger set of habits and constructions which are hardly ever explicitly noted, most of which are associated with particular lexical items. […] This suggests that structural nativization operates inconspicuously but highly effectively, affecting frequencies and co-occurrence tendencies of individual words and constructions more than anything else. (\citeyear[282]{Schneider2007}) \end{quote} This shows that he regards actual structural nativization as proceeding below the level of awareness, but it also raises the question of the exact nature of the relationship between consciously expressed attitudes and evaluations and unconsciously proceeding structural changes, a question that remains open in Schneider’s account. The fourth phase is dated from 1828/1848 to 1898 and, in line with the Dynamic Model, it is characterized by an endonormative orientation based on “a new type of national self-dependence and a national pride based on local, American, achievements” (\citeyear[283]{Schneider2007}). In this phase, the entire continent was settled and controlled by Americans, and this “achievement”, which came at the cost of Native Americans, who were killed and forced away from their lands, was not only a source of national pride but also led to “a second heightened phase of koinéization” (\citeyear[290]{Schneider2007}) resulting in a high degree of uniformity. The uniformity was strengthened by the codification of American English in Webster’s \textit{An American Dictionary of the English Language} (\citeyear{Webster1828}) and Bartlett’s \textit{Dictionary of Americanisms, a Glossary of Words and Phrases usually regarded as peculiar to the United States} (\citeyear{Bartlett1848}). At the same time, regional and social variability obviously continued. \citet[289]{Schneider2007} observes that many literary works which became part of a distinctive American literary canon employed representations of regional and social dialects, but he does not further explore the relationship between this literary interest in linguistic variation and the codification of a uniform American variety taking place at the same time. This is something that I will do in the present study, which will show the relevance of this observation to the emergence of American English. Another relevant point that Schneider makes is that the development does not always proceed continuously in one direction, but that there can be breaks or even returns to an older phase, as evidenced by “a purist, pro-English movement, which […] gained momentum after the Civil War and in the 1870s and 1880s, after endonormative stabilization” (\citeyear[288]{Schneider2007}). So exonormative orientations, based on different identity constructions, did not cease to exist, even in a phase of endonormative stabilization, and this study will also shed some light on the interplay between these two types of orientations in the nineteenth century, and on the ways in which they interact with regional and social variability as well as with national uniformity. \largerpage The starting point of the fifth phase, which continues until the present day, is 1898.
Schneider regards the Spanish-American War in 1898 as a turning point because it was the first war fought by the whole unified nation against another power, which strengthened feelings of national unity that were then a prerequisite for cultural fragmentation under the umbrella of the nation. “American society is being transformed into a multicultural mosaic, and this process is mirrored by the emergence of distinct varieties of English, each associated with different identities” (\citeyear[294]{Schneider2007}). A very important aspect is that Schneider does not postulate that dialect diversity emerged only in the twentieth century; rather, he finds that it has always existed. In contrast to earlier centuries, however, he finds that the twentieth century is marked by a “\emph{socially indicative} dialect diversity, an ethnic and regional fragmentation of the population along linguistic lines in perception and production” [emphasis mine] (\citeyear[296]{Schneider2007}). The crucial difference is therefore that diversity comes to index social identities: \begin{quote} [T]his diversification happened because the various regional, social, and ethnic groups recognized the importance of carving out and signaling their own distinct identities against other groups and also against an overarching nation which, while it is good to be part of, is too big and too distant to be comforting and to offer the proximity and solidarity which humans require. (\citeyear[296]{Schneider2007}) \end{quote} This had an impact on linguistic structure in that dialect differences became more pronounced and, in Schneider’s words, more “strictly compartmentalized” (\citeyear[296]{Schneider2007}). As with the nativization period, he stresses that the process operated subconsciously and that it was not the result of an intentional act. As evidence for the developments in the fifth phase he cites several sociolinguistic studies, e.g. Labov’s investigation of Martha’s Vineyard (\citealt{Labov1972} [1963]) and Wolfram \& Schilling-Estes's (\citeyear{Wolfram1996,Wolfram1997}) study of the island of Ocracoke, which illustrate how people on the islands used traditional regional variants to symbolize and demarcate their island identity against outsiders and to ensure that they were not absorbed into the mainland group identity. In addition to these endangered local varieties, he also describes the development of further varieties and their connection to identity constructions and realignments: Southern English, a Northern English marked by the Northern Cities Shift, ethnic speech forms in the European-American groups, Native American English(es), African American English, Chicano English and other Hispanic varieties, Cajun English, Hawaiian Creole and Asian Englishes. To give an example, he cites \citegen{Tillery2003} research, which finds that there were two periods of great social change in the South, the first after the Civil War and during the ensuing Reconstruction period (marked among other things by the immigration of northern Americans) and the second around World War II (marked especially by urbanization), and that in both cases these social developments were accompanied by significant linguistic changes. For example, after World War II “the linguistic expression of a new, modern Southern identity was shaped, [which] affected both “Traditional” and “New” Southern features” (\citeyear[299]{Schneider2007}).
Non-rhoticity and yod-retention, for example, were features associated with traditional, rural, antebellum culture, whereas rhoticity and yod-dropping have come to symbolize the “New South” (\citeyear[299]{Schneider2007}). All in all, \citegen{Schneider2007} Dynamic Model stands in stark contrast to \citegen{Trudgill2004} model of new-dialect formation. Trudgill’s stages are very much focused on linguistic developments, while Schneider’s developmental phases emphasize the close connection between the historical situation, social factors and linguistic developments. The type of evidence that both linguists draw on reflects this difference. Trudgill relies largely on data from the \emph{Origins of New Zealand English} (ONZE) project and on dialectological research on nineteenth-century British English dialects. While Trudgill therefore keeps a strong focus on linguistic data, Schneider cites an abundance of research not only on linguistic developments in seventeen postcolonial varieties of English but also on social developments in order to support his model. With regard to linguistic effects, his case studies provide many examples, but they do not offer as systematic and detailed an overview as Trudgill’s analysis of New Zealand English. Instead, Schneider emphasizes the common characteristics of sociolinguistic developments, a dimension that is almost completely neglected in Trudgill’s model. These contrary views illustrate the need for more research on the emergence of new varieties of English. A third theory has been developed by \citet{Kretzschmar2014} as part of his theory of speech as a complex system. In contrast to \citet{Trudgill2004} and \citet{Schneider2007}, he does not postulate the existence of separate phases that lead to the emergence of a new variety because this view is not compatible with his theory of language. A complex system is “a system in which large networks of components with no central control and simple rules of operation give rise to complex collective behavior, sophisticated information processing, and adaptation via learning or evolution” (\citealt[143]{Kretzschmar2014}, citing \citealt[13]{Mitchell2009}). Applied to speech, these components are linguistic forms (variants realizing a variable), and what is “truly stable and systematic about speech” \citep[151]{Kretzschmar2014} is that the token frequency distribution of these forms is nonlinear, leading to a typical A-curve when graphed on a chart \citep[147]{Kretzschmar2014}. This nonlinear distribution occurs at different levels of scale, for example at the level of an individual, a community, a larger region or a nation, and variants which occur at the top at one level of scale can occur in the tail of the curve at another level of scale. Consequently, linguists who identify a variety traditionally do so by identifying the top-ranked variants at a specific level of scale, so that American English, for example, is considered to consist of the variants which occur at the top frequency ranks at the national scale. \citet[151]{Kretzschmar2014}, however, considers it a mistake to focus only on these top-ranked variants in linguistic analysis because the low-frequency variants are in fact highly relevant in language change and in the emergence of new varieties as well. Change occurs because speakers interact, and these interactions can result in changes in the frequency distribution of variants.
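To make the notions of the A-curve and of scale-dependent rankings more concrete, the following minimal sketch simulates them in Python. It is purely illustrative: the variant labels, community names and frequency weights are invented for the purpose of the example and are not taken from Kretzschmar’s data.
\begin{verbatim}
# Illustrative sketch of Kretzschmar's A-curve: ranking variant token
# counts from most to least frequent yields a steeply falling,
# nonlinear curve at every level of scale, and the top-ranked
# variants can differ from scale to scale. All names and weights
# below are invented.
from collections import Counter
import random

random.seed(1)
VARIANTS = ["r-full", "r-less", "tote", "carry", "chunk", "throw"]

def community(weights, n=1000):
    """Draw n variant tokens with community-specific weights."""
    return Counter(random.choices(VARIANTS, weights=weights, k=n))

south = community([1, 8, 6, 2, 5, 2])   # one local scale
north = community([8, 1, 1, 7, 1, 6])   # another local scale
nation = south + north                  # aggregate (national) scale

for name, counts in [("south", south), ("north", north),
                     ("nation", nation)]:
    # most_common() returns the variants in rank order; plotting the
    # counts against the ranks would trace the A-curve.
    print(name, counts.most_common())
\end{verbatim}
Running the sketch shows that a variant ranked at the top of the curve in one community can sit near the tail in the other, while the aggregate scale produces yet another ranking, which is the scaling property described above.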
Such changes in the frequency distribution in turn lead to new variants being ranked at the top of the frequency curve, which can then be described as a new variety at some level of scale. \citet[151]{Kretzschmar2014} supports Schneider’s view that the emergence of varieties is \emph{not} a deterministic process: \begin{quote} Random interactions between speakers may eventually promote variants at lower frequencies to the top rank, and vice versa, so there is no fixed relationship between input frequencies (say, from settler, indigenous, or adstrate languages) and what will become most common in a new variety. \end{quote} He argues that interactions involve not only language but also human perceptions, and that perception is not restricted to what is most frequent but also encompasses reactions to the use of variants. In this context, Kretzschmar uses the concept of \textit{positive feedback}, which he regards as equivalent to \citegen{LePage1985} concept of \textit{focusing}: \begin{quote} The idea of feedback recognizes that the information content in speech is not just the functional message of some utterance or piece of writing, but also an evaluation of who says what when. \citep[151]{Kretzschmar2014} \end{quote} On the basis of other speakers’ reactions to their use of linguistic forms, speakers evaluate the success of that use and adapt accordingly. Kretzschmar considers this view superior to Schneider’s and Mufwene’s concept of a \textit{selection process} because, in his view, variants are not selected (and are rarely lost). Instead, they simply become more or less frequent through “massive numbers of random interactions between speakers” and “the perceptions of the human agents using language” (\citeyear[152]{Kretzschmar2014}). Unlike Trudgill, who uses \textit{focusing} to explain the stabilization of the forms of the new variety (basically the last step in the process), Kretzschmar assigns a much more central role to focusing because in his view it explains why the process is not deterministic (i.e. not the predictable result of input frequencies). It is noticeable, however, that he does not elaborate on how exactly focusing proceeds and which mechanisms are operative in the process of giving and receiving feedback and evaluating the success of linguistic forms. However, he points out that his model and Schneider’s Dynamic Model complement each other precisely because in his view Schneider describes “the evolution of the new society’s perceptions” and not “the internal linguistic history of a new variety” (\citeyear[157]{Kretzschmar2014}). By relegating Schneider’s “new varieties” to the level of perception of usage (to be distinguished from actual usage), he questions the traditional definition of \textit{variety}, that is, of the very object whose emergence is modeled so controversially, which is why in \sectref{bkm:Ref521000690} I will take a closer look at the understanding of the term in linguistics in general and in the models described in this section in particular. \citet{Kretzschmar2015} describes the implications of his model for an account of the emergence of American English.
For the early phases of settlement, he describes the linguistic situation as a “pool of linguistic features collected from a radically mixed settlement population” (\citeyear[251]{Kretzschmar2015}), a conceptualization that is in line with Mufwene’s feature pool but not with historical accounts of American English such as \citegen{Fischer1989} \emph{Albion’s Seed}, which describes the culture and language of the different regions of settlement as having been transplanted from the respective regions in Britain and remaining fairly homogeneous. In this situation, order emerged in the form of the nonlinear distributions characteristic of complex systems. And owing to the scaling property of complex systems, these distributions occurred at different levels of scale, so that Kretzschmar speaks of American English already in this early phase: \begin{quote} Right from the beginning somewhat different sets of variants emerged as top-ranked elements in different localities. Also right from the beginning, a particular set of variants emerged at the highest level of scale, American English. (\citeyear[257]{Kretzschmar2015}) \end{quote} So in contrast to \citet{Schneider2007}, who considers American English to nativize in the late eighteenth and early nineteenth century, Kretzschmar suggests that an American variety, conceptualized as a set of top-ranked elements at the highest level of scale, had already been present from the seventeenth century onwards. He views the nativization that Schneider describes as located on the level of perception (people started to notice and describe differences between American and British English) and argues along the same lines that eighteenth-century comments on uniformity are equally a result of perception and not to be taken as evidence for an actual colonial koiné \citep[258--259]{Kretzschmar2015}. While \citet[270]{Schneider2007} is also critical of strong versions of the koinéization hypothesis (he cites \citealt{Dillard1975} as the strongest one), he nevertheless assumes a “remarkable degree of linguistic homogeneity in the colonies” in phase 2, which is in contrast to the diversification of phase 5. Kretzschmar disagrees here as well, stating that diversification also occurred from the seventeenth century onwards, when “noticeable differences, both between American regions and between American and British English” (\citeyear[257]{Kretzschmar2015}) emerged. In order to support this view, he gives examples of linguistic forms which were present then and are still associated with specific regions, e.g. non-rhoticity, lexical items like \emph{chunks} and \emph{tote}, and grammatical forms like \emph{hadn’t ought}. The diversification just became \emph{more} noticeable as time progressed (\citeyear[259]{Kretzschmar2015}). Even though Kretzschmar does not postulate the existence of developmental phases in general, he notes a major difference between the eighteenth century and the nineteenth century in the case of American English. While the great population mixture in the early settlements and in the eighteenth century led to the creation of new and independent patterns (\citeyear[259]{Kretzschmar2015}), the nineteenth century was rather marked by an extension of these complex systems from the east to the west as a result of westward migration.
Groups of settlers who already lived in western parts of the country, especially Scotch-Irish settlers in the Appalachian mountain regions, could contribute to these complex systems, and Kretzschmar cites Montgomery's (\citeyear{Montgomery1989, Montgomery1991, Montgomery1997}) findings that indeed some Scotch-Irish variants have been retained in these areas. Overall, however, he finds that Atlas data shows that “major patterns created by historical east-west settlement largely persist” (\citeyear[261]{Kretzschmar2015}). Nevertheless, change always occurs in complex systems, but Kretzschmar regards it as located at lower levels of scale (neighborhoods and cities) rather than at larger regional levels of scale (\citeyear[261]{Kretzschmar2015}). This overview of three models of the emergence of new varieties illustrates that linguists are far from reaching a consensus on the common underlying operations and mechanisms behind this process. I argue here that some differences between the models are a result of different conceptualizations and definitions of the term \textit{variety}, and that it is important to clarify what the term refers to in order to be able to analyze the emergence of new varieties and to discuss and ultimately test the models proposed by \citet{Trudgill2004}, \citet{Schneider2007} and \citet{Kretzschmar2014, Kretzschmar2015}. The analysis of the meaning(s) of the term \textit{variety} is therefore going to be the subject of \sectref{bkm:Ref521000690}, before I pay detailed attention to the role of social factors in \sectref{bkm:Ref521576818} and conclude with a final discussion of the models in \sectref{bkm:Ref527389617}. \subsection{Definition of the term \textit{variety}} \label{bkm:Ref521000690}\hypertarget{Toc63021207}{} A first step in any discussion of how new varieties emerge must be to define what precisely the term \textit{variety} refers to and how a “new” variety can be distinguished from an “old” one. First, it needs to be noted that the different theories and models foreground different terms. \citet{Trudgill2004} speaks predominantly of \textit{dialects} and particularly of \textit{colonial dialects}, while \citet{Schneider2007} prefers the term \textit{variety} and refers to the varieties that emerge in the different places as \textit{postcolonial varieties} or \textit{Postcolonial Englishes}.\footnote{It is important to note that each author uses \emph{both} terms, \textit{dialect} and \textit{variety}, in their books but foregrounds one of them by choosing it as the main term for the newly formed or emerging entities.} It is often indicated that \textit{dialect} and \textit{variety} are used more or less synonymously, with \textit{variety} being a newer and more neutral term. \citet[32]{Meyerhoff2011}, for example, defines variety as a “[r]elatively neutral term used to refer to languages and dialects” which “[a]voids the problem of drawing a distinction between the two, and avoids negative attitudes often attached to the term \emph{dialect}”. \citet[5]{Chambers1998} make a slight distinction between the two terms. They write in their introduction to dialectology that they “shall use ‘variety’ as a neutral term to apply to any particular kind of language which we wish, \emph{for some purpose}, to consider as a single entity” [emphasis mine], while they regard \textit{dialect} as a more particular term which refers to “varieties which are grammatically (and perhaps lexically) as well as phonologically different from other varieties”.
It can be inferred from this distinction that \citet{Trudgill2004} foregrounds the term \textit{dialect} in his theory because the main criterion he uses to distinguish a new variety or dialect from an old one is the criterion of structural distinctiveness. He argues that in order to explain the formation of a new dialect, one has to “first decide what the distinctive characteristics of New Zealand English are” \citep[31]{Trudgill2004}. The distinctiveness is established in relation to British dialects, the starting point being a mixture of dialects spoken by the parents of the first New-Zealand-born Anglophones, and the end-point the dialect spoken by the second generation of Anglophone children in New Zealand. Differences which emerged afterwards (after 1890) are not included in the analysis because they are the result of changes occurring only after New Zealand English had been formed \citep[32]{Trudgill2004}. So in order to distinguish and define a “new” variety of New Zealand English against “old” varieties of British English, he looks for evidence of a new set of linguistic features, distinct from other older sets of features, based on two main sources: studies on the history of the English language, especially dialectological research on nineteenth- and twentieth-century English, and the ONZE project, comprising recordings of New Zealanders made by the National Broadcasting Service of New Zealand between 1946 and 1948, which provide insights into the second stage of the formation process because the informants represent the first generation of children born in New Zealand \citep[33]{Trudgill2004}. The linguistic features he analyzes are almost exclusively phonological and located on the segmental level, and the analysis rests heavily on frequencies of use. What distinguishes stage 3, the stage at which he postulates the existence of a distinct New Zealand variety, from stage 2 is that the variety is now characterized by uniformity and stability \citep[113]{Trudgill2004}. Uniformity is achieved because majority variants have survived and minority variants have disappeared, and stability is achieved through focusing. It appears therefore that while the primary criterion in defining the new variety is the structural distinctiveness of a uniform set of features, stability, achieved through focusing, is at least a secondary criterion, which is applicable only after a new set of features has formed. It is noticeable, however, that \citet{Trudgill2004} does not analyze the focusing process in his study on New Zealand English, which again highlights the relative unimportance of the stability criterion. \citet{Schneider2007} also focuses on the criterion of structural distinctiveness in his model. He speaks of “the birth and growth of structurally distinctive PCEs” \citep[45]{Schneider2007} and describes a PCE as “a new language variety which is recognizably distinct in certain respects from the language form that was transported originally, and which has stabilized linguistically to a considerable extent” \citep[51]{Schneider2007}. In the phase of structural nativization, the degree of difference from the former input varieties increases the most. He states that “this stage results in the heaviest effects on the restructuring of the English language itself; it is at the heart of the birth of a new formally distinct PCE” \citep[44]{Schneider2007}.
S-curves, which typically characterize linguistic changes, show a slow start, a phase of rapid increase in the use of a variant in the middle of the development, and a final leveling-off, and it is in this middle phase that Schneider locates structural nativization. In line with \citet{Trudgill2004}, \citet[51]{Schneider2007} regards stability as a second characteristic of a variety, and he argues that it is achieved during a phase which can be seen as corresponding to the later part of the S-curve. However, in stark contrast to \citet{Trudgill2004}, he also assigns a role to speakers’ perceptions in his conceptualization of a variety. He writes that “regional speech differences emerge, stabilize, and become recognizable in the public mind” \citep[9]{Schneider2007}, which shows that in addition to structural difference and stability, public recognition is also a factor to be dealt with in determining what a variety is and how it emerges. In the case study on American English, he finds that the “birth of American English as a concept and as a variety falls into that period [the period from ca. 1773 to 1828/1848]” \citep[276]{Schneider2007}. In this statement he explicitly distinguishes between the \emph{concept} of a variety and the variety itself, but he does not elaborate on this distinction any further, which is problematic because he assumes a direct relationship between the development of structural differences and people’s perceptions and attitudes: \begin{quote} [T]he period of structural nativization was the one during which effects inhibiting divergence disappeared and, in contrast, linguistic differences became actively promoted or at least positively evaluated. Westward expansion, then, brought an increasingly appreciative attitude toward down-to-earth speechways, and hence strengthened another powerful factor promoting linguistic nativization. \citep[278]{Schneider2007} \end{quote} So in \citegen{Schneider2007} model, speakers’ perceptions and attitudes, which can be seen as formative of the \emph{concept} of a new variety, not only played a role in the phase of endonormative stabilization, but they are also supposed to influence structural nativization, that is, the emergence of a \emph{structural} variety. \citet[94]{Schneider2007} argues that people usually focus on a few salient distinctive forms when they perceive and evaluate a variety, but that “those properties of a variety which seem specifically distinctive [are] quantitative tendencies of word co-occurrences, recurrent patterns, speech habits, prefabricated phraseology”. Based on this, a conceptual variety can be defined as consisting of a small set of salient linguistic features which are recognized by speakers as distinct and which attract some sort of evaluation. A structural variety can be defined as consisting of a large set of features which differs structurally from other such sets. Nevertheless, Schneider’s idea of what a variety is, and how the concept of a variety and the structure of a variety are related, remains rather vague. \citet{Kretzschmar2014, Kretzschmar2015, Kretzschmar2015b}, on the other hand, discusses the term \textit{variety} in more detail, and he is fairly critical of traditional understandings of the term.
As pointed out in \sectref{bkm:Ref525715631}, he relegates \citegen{Schneider2007} Dynamic Model to the level of perception only and argues that this is the case for most descriptions of varieties: \begin{quote} What actually makes “new varieties” of English or of any other language, in the sense that we usually mean in linguistics, is that linguists from time to time choose to record their perceptions of the usage of some population of speakers. \citep[157]{Kretzschmar2014} \end{quote} These perceptions include only the top-ranked variants of the complex system of speech at a specific level of scale, which makes the varieties described by linguists “idealized abstractions” \citep[156]{Kretzschmar2014}. Linguists and lay people alike are always interested in such abstractions, but they should not be confused with linguistic reality. According to \citet{Kretzschmar2014}, this reality is best understood as a complex system: \begin{quote} New varieties are not just something to be associated with former colonies; they are emerging all around us every day, as speakers of English form new groups in local neighborhoods, communities of practice, social settings, and new places around the globe in many places besides colonial settings. It is not a process that happens once and is done. The complex system of speech continues to operate, and new order emerges from it all the time. \citep[157]{Kretzschmar2014} \end{quote} He does not address \citegen{Schneider2007} distinction between structural variety and conceptual variety, but claims that Schneider views varieties only as “identity-driven discourse constructs” (\citealt[51]{Schneider2007}, cited in \citealt[157]{Kretzschmar2014}), a claim which is clearly not justified given that the nativization phase in the Dynamic Model rests crucially on structural differentiation.\footnote{In fact, \citet[51]{Schneider2007} writes that the homogeneity that is often emphasized in descriptions of new varieties is an “identity-driven discourse construct” and not the variety itself. Still, it makes sense to ask to what extent the conceptual variety in \citegen{Schneider2007} Dynamic Model can be defined as a discourse construct and which role identity plays in the process of its emergence. I will address these questions in more detail in \sectref{bkm:Ref522870698} and \sectref{bkm:Ref523897668}.} In an earlier article, \citet{Kretzschmar2012} make a distinction similar to the one between structure and concept. They argue that \emph{natural} language varieties need to be distinguished from \emph{ideational} language varieties. They discuss the case of Standard American English and argue that it is an \textit{idea} which does not have an empirical basis in language use. Instead, it is “just another manifestation of a particular culture” \citep[156]{Kretzschmar2012}. By calling it an “idea”, they relegate Standard American English to a level that corresponds to Schneider’s conceptual level, so that, in the end, we can derive a threefold distinction between a) the linguistic reality, which is a complex system, b) the perceptual variety, which is based on the linguists’ perceptions of the top-ranked variants on a specific level of scale (e.g. individual, local or national), and c) the conceptual/ideational variety, which is based on people’s ideas about language.
With regard to the ideational variety, it is important to note that even though it does not directly reflect language in use, it is “not a myth” but “a very real […] construct” for its speakers \citep[143]{Kretzschmar2012}. Kretzschmar is not the only one who critically discusses the notion of \textit{variety} in general and in the context of new variety formation in particular. \citet{Leimgruber2013b} also calls for “rethinking the concept of ‘geographical varieties’ of English”. He argues that the starting point for defining a variety, especially in the context of World Englishes, is usually a geographical and political but \emph{not} a linguistic concept: \begin{quote} It may seem impractical to completely do away with such a useful concept as the variety, which has for so long been the basic unit of analysis in many fields of linguistics, including World Englishes. It remains, however, that the concept is often under-defined in works setting out to describe such varieties – terms like ‘Singapore English’, ‘Malaysian English’, ‘Welsh English’, etc., are taken for granted because, after all, they contain a geographical component everyone can relate to. The actual linguistic form of the ‘variety’ is then described post hoc, with the analytical unit ‘variety’ conditioning the analysis. \citep[6]{Leimgruber2013b} \end{quote} This discussion shows that a clearer definition of what kinds of varieties there are, how they are related and how they can be identified and described is necessary. A recent contribution by \citet{Pickl2016} is helpful in this regard. He draws on the terms \textit{emic} and \textit{etic} to distinguish different types of varieties. Emic dialects are “cognitive concepts of the speakers whose speech is at the same time the object of linguistic investigation” \citep[78]{Pickl2016} and etic dialects are based on objective linguistic analyses of speech productions. Regarding the relationship between emic and etic dialects he states that \begin{quote} There is no reason why the fundamental linguistic concept of a dialect variety should differ from the folk linguistic concept. In other words, scholarly or etic ideas about geolinguistic entities can and should have the same principal structure as lay persons’ implicit ideas about dialects in space while being based on transparent – and, as far as possible, objective – criteria that are not derived from the speakers’ ideas, but from scientific reasoning. \citep[78]{Pickl2016} \end{quote} The shared “principal structure” is a prototypical one, with the important characteristic of fuzzy category boundaries and linguistic features which have different degrees of typicality. The distinction between emically and etically defined categories is analogous to \citegen{Schneider2007} distinction between a conceptual and a structural variety, but the similarities and differences are elaborated on in more detail. With regard to the emic category, \citet{Pickl2016} refers to perceptual dialectology as a research area, while his own study focuses on the etic category. He defines it more precisely by using a definition by \citet{Berruto2010}: \begin{quote} The tendential co-occurrence of variants gives rise to linguistic varieties. Therefore, a linguistic variety is conceivable as a set of co-occurring variants; it is identified simultaneously by both such a co-occurrence of variants, from the linguistic viewpoint, and the co-occurrence of these variants with extralinguistic, social features, from the external, societal viewpoint. 
(\citealt[229]{Berruto2010}, cited in \citealt[79]{Pickl2016}) \end{quote} In order to identify sets of co-occurring features, Pickl argues for using statistical methods, more specifically factor analysis, which he regards as superior to other statistical methods like cluster analysis, bipartite spectral graph partitioning and multidimensional scaling (\citeyear[80--83]{Pickl2016}). He supports this view by conducting an analysis of dialect areas in Bavarian Swabia based on data from the dialect atlas \emph{Sprachatlas von Bayerisch-Schwaben} (SBS, \citealt{Konig19962009}). The variability in the region is reduced to 16 factors which account for 62.21\% of the variance in the data. Each factor stands for a recurring pattern of linguistic variants which is particularly strong in a geographical region (and within the region, the locations exhibit different degrees of typicality for the pattern), so that in the end, a combined factor map can be constructed which shows the prototypically structured dialect areas in geographical space. The point is that he finds a way of identifying dialect areas based solely on a statistical analysis of the data, without recourse to either lay people’s or linguists’ subjective judgements. Attempts in this direction have been numerous (they belong to the field of dialectometry), but they either did not draw on prototype theory or were skeptical of the existence of dialect areas in general (e.g. \citealt{Kretzschmar1996}). Pickl’s approach therefore combines the insight that emically and etically defined varieties share a prototypical structure with a method for describing the structure of etic varieties by means of objective criteria, thereby minimizing the influence of preconceived ideas on the outcome of the analysis. \citegen{Pickl2016} approach counters \citegen{Kretzschmar2014} criticism that the varieties perceived and described by linguists are restricted to top-ranked variants, while all those variants which occur at lower frequencies are ignored and considered irrelevant, because he aims at identifying abstract varieties in a way that takes the complexity of speech into account more strongly than other methods do. For example, \citet[81]{Pickl2016} states that “it is […] impossible for a cluster analysis to come up with anything more subtle than global, exclusively dominant areas; subordinate, non-dominant areas that are determined by smaller numbers of features cannot be identified by cluster analysis”, which is why he proposes factor analysis as a statistical method instead. As his goal is to identify dialect layers overlapping in space, each layer consisting of “congruent distribution areas of co-occurring linguistic forms” \citep[79]{Pickl2016}, he tries to achieve an abstraction that is closer to the linguistic reality than traditional analyses which identified discrete dialect areas based on the presence or absence of distinctive linguistic features. \citegen{Pickl2016} focus on geographical varieties leaves open the question of how an etic variety can be described that is based not only on regional distributions of data but also on social ones. It is conceivable, however, that his approach can also identify social varieties if sufficient data are available.
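To illustrate the general logic of this kind of analysis, the following minimal sketch applies factor analysis to a small site-by-variant matrix in Python. It is a sketch only, assuming that the NumPy and scikit-learn libraries are available; the data matrix is randomly generated for the purpose of the example and does not reproduce SBS data, and Pickl’s actual analysis naturally involves more methodological decisions (e.g. weighting and rotation) than are shown here.
\begin{verbatim}
# Hypothetical sketch: rows are survey locations, columns are
# linguistic variants (1 = variant attested at that location).
# Factor analysis reduces the matrix to a few recurring patterns
# of co-occurring variants; the per-site factor scores can be read
# as graded degrees of typicality, not as discrete area membership.
import numpy as np
from sklearn.decomposition import FactorAnalysis

rng = np.random.default_rng(0)
n_sites = 60
# Two invented areal patterns, each favoring one bundle of variants.
area = rng.integers(0, 2, n_sites)
probs = np.where(area[:, None] == 0,
                 [0.9, 0.8, 0.7, 0.2, 0.1, 0.2],   # bundle A
                 [0.1, 0.2, 0.3, 0.8, 0.9, 0.7])   # bundle B
X = rng.binomial(1, probs).astype(float)           # site x variant

fa = FactorAnalysis(n_components=2, random_state=0)
scores = fa.fit_transform(X)       # graded site-level typicality
print("variant loadings per factor:")
print(fa.components_.round(2))     # which variants pattern together
print("first five site scores:")
print(scores[:5].round(2))
\end{verbatim}
Mapping the site scores geographically would yield overlapping dialect layers with fuzzy boundaries and graded typicality, rather than the discrete areas produced by a hard clustering of the same matrix, which is the methodological point of Pickl’s preference for factor analysis.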
\citet[6]{Leimgruber2013b} also points out the need for more data to carry out quantitative studies which complement qualitative analyses conducted by sociolinguists like \citet{Blommaert2010}, who no longer consider it important to identify and describe varieties at all but are primarily interested in describing and explaining how speakers draw on linguistic resources in their social interaction with others (see \sectref{bkm:Ref506884048} for details). In general, \citegen{Pickl2016} approach to defining and identifying structural varieties takes the criterion of structural distinctiveness more seriously than other approaches. To summarize, it seems that linguists distinguish several types of varieties. Based on the discussion above, I propose a distinction between the linguistic reality, conceptualized as a complex system, and three abstract types of varieties: structural varieties, perceptual varieties and discursive varieties, as illustrated in \figref{fig:2:1}. \begin{figure} \includegraphics[width=0.8\textwidth]{figures/Paulsen-img01.pdf} \caption{Types of varieties described by linguists} \label{fig:2:1} \end{figure} While the linguistic “reality”, i.e. the totality of forms used by speakers in particular regional, social and situational contexts, is a complex system as described by \citet{Kretzschmar2014, Kretzschmar2015, Kretzschmar2015b}, it is possible to identify and describe more abstract patterns of co-occurring forms. Structural varieties are tendential co-occurrences of variants in a particular place (and potentially also in a particular social situation or in relation to social factors). They are determined based on production data, collected and analyzed by the linguist using statistical measures; \citegen{Pickl2016} prototype approach is a very convincing one because it allows for fuzzy category boundaries and does not just identify one dominant pattern but also its overlaps with other non-dominant patterns. Perceptual varieties are tendential co-occurrences of forms in lay people’s perceptions of language variation. They are determined based on perception data, again collected and analyzed by the linguist. Here, perception in a cognitive sense (what is perceivable by the senses) and in a conceptual sense (what is perceivable because people have a cultural concept of the pattern in mind) are both included, and it is convincing to assume a prototypical structure for these perceptual categories as well. Discursive varieties take the ideational character of varieties into account, which is discussed, for example, in \citet{Kretzschmar2012}. They are tendential co-occurrences of forms in speakers’ discursive constructions of patterns of variation, and it is the aim of \sectref{bkm:Ref522870698} and \sectref{bkm:Ref523897668} to show how the concept of \textit{enregisterment} is useful in defining and investigating discursive varieties. It is important to note that the different types of varieties overlap.
In determining structural varieties, linguists might already be influenced by their perceptions in the process of collecting data, so it is questionable whether a purely data-driven abstraction of co-occurrence patterns is even possible.\footnote{\citet[6]{Leimgruber2013b}, for example, criticizes the fact that corpora compiled for studying World Englishes take a “conceptual linguistic system tied to a particular locale” as their starting point.} At the same time, perception is of course related to structure in that highly frequent variants could be more easily perceivable than low-frequency ones. It is, however, more than doubtful that frequency of use is the only factor influencing perception. If variants are often the focus of \emph{discourses} on language, for example, they are more likely to be perceived by speakers even if they are \emph{used} infrequently. And lastly, perception influences the construction of discursive varieties because speakers are more likely to engage discursively with variants that they perceive. However, it is equally possible that linguistic forms remain part of discursive varieties even though they are neither produced nor cognitively perceived anymore. Against the background of this distinction I compare and discuss the role of social factors, particularly of identity, in the emergence of new varieties of English in the next section. \subsection{The role of social factors in the emergence of new varieties} \label{bkm:Ref521576818}\hypertarget{Toc63021208}{} The roles attributed to social factors in the emergence of new varieties by \citet{Trudgill2004} and \citet{Schneider2007} could not be more different. The following quotations illustrate this well. \citet[95]{Schneider2007} writes that \begin{quote} The Dynamic Model, supported by accommodation and identity theory, predicts that via language attitudes a speaker’s social identity alignment will determine his or her language behavior in detail. Note that there is no implication made here that these developments have anything to do with consciousness: accommodation works irrespective of whether the feature selected and strengthened to signal one’s alignment is a salient marker of which a speaker is explicitly aware or an indicator which operates indirectly and subconsciously. \end{quote} \citet{Trudgill2004}, on the other hand, is skeptical of social factors influencing the formation of the new variety. Especially identity, which assumes a central role in the Dynamic Model, is ruled out as a relevant factor: \begin{quote} And it is clear that identity factors cannot lead to the development of new linguistic features. It would be ludicrous to suggest that New Zealand English speakers deliberately developed, say, closer front vowels in order to symbolise some kind of local or national New Zealand identity. This is, of course, not necessarily the same thing as saying that, once new linguistic features have developed, they cannot become emblematic, although it is as well to be sceptical about the extent to which this sort of phenomenon does actually occur also. For example, we can say that the twentieth-century innovation in New Zealand English, whereby the \textsc{kit} vowel became centralised might perhaps now constitute a symbol of New Zealand identity and that the vowel might for that reason in future even become more centralised. But I have to say that I would, personally, find even this unconvincing.
Why do New Zealanders need to symbolise their identity as New Zealanders when most of them spend most of their time, as is entirely normal, talking to other New Zealanders? But in any case, we most certainly cannot argue that New Zealanders deliberately centralized this vowel \emph{in order to} develop an identity marker. \citep[157]{Trudgill2004} \end{quote} A further argument which he puts forward against the role of social factors relates to the key role played by children in the process of new-dialect formation. In his view, children are not influenced by social factors like prestige or stigma, but “[t]hey simply selected, in most cases, the variants which were most common” \citep[115]{Trudgill2004}. Adults influenced the new variety only marginally through accommodation in the early stages of contact, but even this accommodation “is not necessarily driven by social factors such as prestige or identity, but is most often an automatic consequence of interaction” \citep[28]{Trudgill2004}. These two extreme positions have attracted the interest of a number of linguists, and consequently a discussion section in \emph{Language in Society} 37 (2008) was devoted to the issue. \citet{Schneider2008b} and \citet{Trudgill2008b, Trudgill2008c} defend their respective positions and support them with further arguments and evidence. \citet{Mufwene2008} and \citet{Tuten2008} partly agree with Trudgill’s position; \citet{Bauer2008}, \citet{Coupland2008}, and \citet{Holmes2008} disagree with Trudgill and argue in favor of \citegen{Schneider2007} view. One of the key issues of the discussion is the role of identity, and it is noticeable that it is not understood by all participants in the same way. Trudgill’s (\citeyear{Trudgill2004, Trudgill2008}) argument focuses on “national identity”, and all authors concede that if identity is reduced to this national perspective, Trudgill rightly doubts its relevance. However, apart from \citet{Mufwene2008}, all authors emphasize that the question of identity should \emph{not} be reduced to some sort of abstract national identity. \citet[273]{Bauer2008}, for example, argues that “complex kinds of identity are being expressed in the choice of a particular phonetic variant. It will not be as simple as feeling that one is ‘British’ or ‘New Zealand’; it will be much more local and much more specific”. \citet[259]{Tuten2008} also suggests that it is more likely that local or regional identities develop and influence the formation of the variety. \citet[269]{Coupland2008} goes beyond the local/national distinction by pointing out that identity in general is “often less coherent, less rationalised, more elusive, more negotiated, and more emergent […]. Identities are known to be often multiple and contingent”. This view is echoed by \citet[274]{Holmes2008}, who state that “[…] to imply, as Trudgill seems to be doing, that “national identity” can stand for all types of identity deflects our attention from the real sociolinguistic issues”. \citet[265]{Schneider2008b} also points out that identity plays a role in \emph{all} phases of the model, even before the stage of nation-building. Nevertheless, he still emphasizes the importance of national identity (alongside all other identity constructions): \begin{quote} Of course, there are linguistic forms considered diagnostic of individual postcolonial Englishes, and it is difficult to see how precisely these forms rather than any others should have been selected on a purely deterministic basis, excluding national identity as a factor.
The strongest argument for the impact of identity in these processes is the observation that the origin and/or recognition and spread and/or scholarly documentation of these forms typically fall into periods of heightened national or social awareness. \citep[266]{Schneider2008b} \end{quote} Related to the question of what kind of identity is supposed to play a role (national, local, individual) is the question of intentionality. In \citegen{Trudgill2004} view, arguing for a role of identity means arguing that people change their linguistic behavior \emph{intentionally}, and Mufwene picks up on that point by stating that \begin{quote} Trudgill is certainly correct in refuting the position that colonial identity drove the structural divergence of “new dialects” both from their metropolitan kin and from each other. This would be tantamount to claiming that evolution is goal-oriented and the colonists had really planned to be different linguistically. \citep[257]{Mufwene2008} \end{quote} While it is convincing that settlers in a colony did not come together and develop the goal of intentionally changing their speech in order to mark themselves as different from their country of origin, it is nevertheless unclear why \citet{Trudgill2004, Trudgill2008b} and \citet{Mufwene2008} reduce the question of identity to the abstract national level and interpret effects of identity construction as the result of intentional moves, because this is not at all what \citet{Schneider2007} or other sociolinguists claim. On the contrary, \citet[264]{Schneider2008b} clarifies his position again by emphasizing that he sees identity constructions and linguistic accommodation as closely related. He regards identity as an “individual stance with respect to the social structures of one’s environment” and accommodation as “a process [in which] individuals approach each other’s speech behavior by adopting select forms heard in their environment, thus increasing the set of shared features”, and concludes that accommodation is therefore “one of the mechanisms of expressing one’s identity choices” \citep[264]{Schneider2008b}. As a counterargument to Trudgill’s (\citeyear{Trudgill2004, Trudgill2008}) view that accommodation is automatic (and not social) because it is biologically given, he states that it is actually the social nature of the process that is biologically given, because human beings are by nature social beings who strive to create group cohesiveness to ensure their survival in a hostile world \citep[264]{Schneider2008b}. \citet[275]{Holmes2008} argue similarly for the importance of identity not only in influencing the direction of accommodation but also in determining the frequency of interactions between people: \begin{quote} [N]on-demographic social factors bear directly on frequencies of interaction. Because people bring to each encounter their personal and social identities, as well as knowledge and beliefs about intergroup relations and about the social marking of linguistic variants, interactions with certain social groups will be sought out or avoided. Thus, social factors influence \textit{both} the frequency of interactions \textit{and} the direction of accommodation.
\end{quote} This is reminiscent of Kretzschmar’s (\citeyear{Kretzschmar2014, Kretzschmar2015}) argument that complex systems evolve and change through massive numbers of interactions between speakers, interactions that are naturally constrained by the place speakers live in and by their social background, which also influences how much they travel and which communication channels they use to get in contact with people who live in other places. While \citet[262]{Kretzschmar2015} is very skeptical of simple correlations between linguistic features and regional and social features, he nevertheless argues that “positive feedback, or focussing, creates the A-curve for every feature” \citep[152]{Kretzschmar2014} and cites from \citegen{LePage1985} \emph{Acts of Identity}: \begin{quote} We see speech acts as acts of projection: the speaker is projecting his inner universe, implicitly with the invitation to others to share it. […] The feedback that he receives from those with whom he talks may reinforce him, or may cause him to modify his projections, both in their form and in their content. To the extent that he is reinforced, his behavior in that particular context may become more regular, more focused. (\citealt[181--192]{LePage1985}, cited in \citealt[152]{Kretzschmar2014}) \end{quote} This shows that \citet{Kretzschmar2014, Kretzschmar2015} argues for a role of social factors and identity, albeit not in an abstract national sense but in an individual sense, and that he regards people’s reactions to speech and its social consequences in actual communicative situations as crucial for the creation of A-curves in complex systems of speech. Even \citet{Mufwene2008}, who strongly argues against a role of identity, regards accommodation as a social process. He states that “[t]he speakers’ mutual accommodations are certainly the social aspect of the mechanisms by which selection from among the competing variants (and language varieties) proceeds” \citep[257]{Mufwene2008}. Why he sees accommodation as a social process influencing the evolution of varieties while at the same time ruling out identity as a factor is less clear; perhaps it is the restriction of identity to an abstract colonial one that makes him skeptical. He does, however, speculate that “[i]f identity has a role to play, it must be in resisting influence from outside one’s community” \citep[258]{Mufwene2008}. As he does not elaborate on this any further, his view on the role of social factors remains very general, and it is also not clear how they interact with linguistic factors like frequency and “simplicity, perceptual salience, semantic transparency, regularity, and more familiarity to particular speakers” \citep[257]{Mufwene2008} and with the role of children as “affective filters” \citep[258]{Mufwene2008}. \citet{Trudgill2008b} defends his position against the criticism by emphasizing again that children play the crucial role in new-dialect formation, and he draws on \citegen{Pickering2004} interactive alignment model to support his argument that young children \emph{automatically} accommodate to each other, without any influence of social factors, and that therefore the majority variant survives \citep[279]{Trudgill2008b}. However, \citegen{Tuten2008} discussion of the interactive alignment model emphasizes that its applicability seems rather restricted to young children, while “older children and adults could adopt a strategy of non-alignment when appropriate” \citep[261]{Tuten2008}.
He argues that when children grow older, they change their social orientation away from their parents towards the peer group, and that they become aware of similarities and differences between themselves and others. Furthermore, in the process of growing up, people “are heavily socialized to perform in certain ways” \citep[260]{Tuten2008} so that “it may be that accommodation (or automatic interactive alignment) and identity formation (among older children and adolescents) are closely linked” and that “community identity formation and koine formation are simultaneous and mutually dependent processes” \citep[261]{Tuten2008}. It can be concluded that the overwhelming majority of the arguments are in favor of a position that includes social factors, particularly identity constructions, in the process of new variety formation. This is not to say that identity is unequivocally seen as the main driving force. \citet[265]{Schneider2008b}, for example, concedes that other factors may be as strong as identity, and \citet[267--268]{Coupland2008} warns that “there are dangers in running too freely to causal explanations around identity”. \citegen[279]{Trudgill2008b} strongest point remains that there is no “feature-by-feature social-reasons account” which shows convincingly how social factors and identity influence the shape of a new variety. \citet[266]{Schneider2008b} similarly concludes that “[d]esigning a study that will test a straightforward connection between socio-psychological attitudes (including national identity) and the use of specific linguistic forms in these contexts will certainly be a worthwhile task”. \subsection{Conclusion} \label{bkm:Ref527366308}\hypertarget{Toc63021209}{}\label{bkm:Ref527389617} This section has shown that theories of the emergence of new varieties of English still differ to a considerable extent with regard to the underlying mechanisms and phases involved in the process. Even though \citet{Kretzschmar2014} claims that his account is complementary to \citegen{Schneider2007} Dynamic Model, the analysis has shown that this is only true to a limited degree. The only feature-by-feature account is provided by \citet{Trudgill2004} on the emergence of New Zealand English, but as he regards his model as applicable to all new dialects in tabula rasa situations, his claims can be tested in the American context as well. I have raised two important issues in \sectref{bkm:Ref521000690} and \sectref{bkm:Ref521576818}: The first issue was that the different theories and models conceptualize the emergent new variety in different ways, which has a considerable impact on their claims, and the second issue was that one of the key differences between the theories is the role attributed to social factors in the process. It is not hard to see that these issues overlap. \citet{Trudgill2004}, who regards a new variety as a new linguistic system that is structurally different from other linguistic systems (particularly from those that the settlers brought to the new country), views social factors as irrelevant in the process. They could only become important \emph{after} the formation process, in the sense that people single out new linguistic features as characteristic of the variety and make them emblematic of it. I interpret this as claiming that social factors could contribute to the emergence of a perceptual and/or discursive variety, but only as a \emph{consequence} of the formation of a new structural variety.
\citet{Schneider2007}, on the other hand, claims that not only a structural variety emerges but a perceptual and/or discursive variety as well, and that the emergence of the latter acts as a driving force in the emergence of the former. The perception, recognition, public documentation and discussion of a variety are therefore in his view \emph{not} just consequences of already present structural differences, but among the \emph{causes} of structural differentiation. The linguistic features which make the new variety distinct from other varieties are of course much more numerous than those perceived and discussed as distinct, and \citet{Schneider2007} claims that they are found on different linguistic levels as well: While the majority of forms of the perceptual and/or discursive variety are located on the phonological and lexical level (with a focus on individual phonological segments and particular words or different spellings of the same word), the majority of structural differences can rather be found at the lexico-grammatical interface. The link between the two levels is supposedly found in the social realm – in the phase of structural nativization, a positive evaluation of forms recognized as distinct leads people to increasingly use distinct forms and patterns because they can express a new (national) identity by aligning themselves with a new model of speech, a discursive variety which subsequently stabilizes in the endonormative phase. However, the description of this process, that is, the “micro-level of the relationship between attitudes and the evolution (i.e. selection or avoidance) of individual linguistic forms” \citep[95]{Schneider2007}, remains rather vague and is only supported by a few examples which do not amount to the “feature-by-feature social-reasons account” that \citet{Trudgill2008b} demands. The same is true for Kretzschmar’s (\citeyear{Kretzschmar2014, Kretzschmar2015}) claims that positive feedback in people’s interactions influences the non-linear frequency distributions which form the basis for the idealized abstract structural varieties described by linguists. Concrete evidence or detailed descriptions as to which forms receive feedback, how this feedback is expressed and perceived, and how people adapt their linguistic behavior based on the feedback are not provided. In my view, the question of how new varieties emerge needs to be approached by first distinguishing more systematically between the different kinds of varieties, because the investigation of each kind of variety requires a different methodology. Structural varieties should be identified based on bottom-up analyses of data, but it needs to be acknowledged as well that a completely data-driven, bottom-up analysis might be difficult to conduct in practice, and that this type of analysis is especially difficult in historical contexts because of the scarcity of data. Perceptual varieties should be described based on data obtained by methods used in perceptual dialectology, and discursive varieties should be investigated by means of discourse-linguistic methods (see \sectref{bkm:Ref506891065}). The systematic investigation of these different types of varieties should then form the basis for exploring the connection between them. The concept of \textit{enregisterment} is helpful in this regard because it involves a theory of how people construct \textit{registers} as models of linguistic (and social) behavior based on language use in everyday social situations.
Conceptualizing discursive varieties as registers which are based on structural varieties – because they are formed as a result of social interaction (both immediate and mediated) – helps to clarify the link between the different types of varieties. It is the aim of the present study to gain insights into the enregisterment of American English by showing how the construction of a discursive variety can be traced systematically in nineteenth-century America. In \sectref{bkm:Ref522870698}, I will therefore describe the concept of \textit{enregisterment} in detail and discuss its relation to several research areas of linguistics. \section{Enregisterment} \label{bkm:Ref522870698}\hypertarget{Toc63021210}{}\label{bkm:Ref522888605} \textit{Enregisterment} is a concept that has been developed by the anthropological linguists Michael \citet{Silverstein1979, Silverstein1993, Silverstein2003} and Asif \citet{Agha2003, Agha2007}. In \sectref{bkm:Ref506883801}, I compare and contrast their definitions of register and enregisterment and sketch the advantages that their models hold for theorizing the emergence of new varieties. In \sectref{bkm:Ref506884048}, I outline and discuss the integration of enregisterment and central concepts to which it is tied (e.g. indexicality and orders of indexicality) into sociolinguistic research in general and show what potential it holds for analyzing social factors in the emergence of new varieties in particular. Taking up \citegen{Kretzschmar2014} observation that Schneider’s Dynamic Model (\citeyear{Schneider2007}) is primarily concerned with the perception of new varieties (and not with actual linguistic usage), I sketch the theoretical assumptions and important findings of the field that is primarily interested in the perception of varieties by non-linguists, namely perceptual dialectology, and discuss its relation to enregisterment in \sectref{bkm:Ref10466352}. Finally, I suggest in \sectref{bkm:Ref506891065} that the newly emerging field of discourse linguistics, which is concerned with the social negotiation of knowledge through linguistic practice \citep[53]{Spitzmuller2011}, can contribute to a study of enregisterment, both from a theoretical and a methodological point of view. \subsection{The origins of the concept \textit{enregisterment} in linguistic anthropology} \label{bkm:Ref506883801}\hypertarget{Toc63021211}{}\label{bkm:Ref512260235} The term \textit{enregisterment} was originally introduced by the anthropologist and linguist Michael Silverstein in the mid-1980s (see \citealt{Silverstein2016} for details on the earlier uses of the term), but was elaborated on in most detail in his \citeyear{Silverstein2003} article on indexical order.\footnote{This article is both based on and an elaboration of ideas presented in Silverstein’s prior publications (\citeyear{Silverstein1979, Silverstein1993}).} He claims that an analysis of any sociolinguistic phenomenon requires an analysis of the indexicality of the linguistic forms used. Indexicality means that the linguistic forms possess the quality of pointing to aspects of the micro- as well as the macro-context in which they are used. For example, in a specific micro-context of wine tasting, evaluative phrases like \emph{beautifully complex, very pronounced yellow} and \emph{assertive backbone} index connoisseurship (even though they are not part of specialists’ vocabulary, which comprises words like \emph{bouquet} and phrases like \emph{slightly pasty/acidic texture}).
At the same time, this connoisseurship is “macro-sociologically locatable” \citep[226]{Silverstein2003} in that the use of these specific forms also indexes a social distinction between those who know how to describe wine and people who do not. Therefore, the use of the evaluative terms indexes more than connoisseurship: It also indexes social traits of the speaker, such as being well-bred or being at least upwardly mobile and having an interesting character. These two types of indexicality are located by Silverstein on different orders: The connoisseurship indexed by the phrases is on the \emph{n}{}-th order and the social traits on the \emph{n}+1st order of indexicality. This example illustrates that these orders are in a dialectical relationship. The existence of \emph{n}{}-th order indexicality makes it available for what Silverstein terms “ethno-metapragmatic evaluations” \citep[214]{Silverstein2003}, which are embedded in a larger cultural schema shaped by social and linguistic ideologies: Being an expert on wine is associated with being part of (or trying to be part of) an elite social group, and this in turn is connected to expectations about the character and the social and linguistic behavior of that group. Describing a wine as \emph{beautifully complex} therefore comes to index social traits of the speaker such as high social standing, education and cultivation. This is what Silverstein calls \textit{essentialization}: The social traits are ideologically constructed as essences of persons and thus become “predictable-as-true”. When people believe that the phrase \emph{beautifully complex} is uttered by members of an educated, cultivated and upper-class elite, the utterance of the phrase points to these qualities. As such, it can be used by speakers to signal this macro-social identity. The essentialization (and sometimes even naturalization) of the values indexed by the form makes it possible for \emph{n}+1st order indexicality to blend with \emph{n}{}-th order indexicality or even replace it: Speakers have an idea about “wine talk” and about people engaging in it even though they may never have been part of a wine tasting situation themselves. This in turn opens up the possibility for new \emph{n}+1st order indexical values: The use of the forms can, for example, be evaluated negatively within a cultural schema that is skeptical or even highly critical of elites and elitist behavior. The idea of indexical order is a prerequisite for Silverstein’s definition of enregisterment and register. Enregisterment is the process by which “\textit{n}-th- and \textit{n}+1st-order indexicalities are dialectically mediated” through “culturally construing and interpreting contextual formal variation as “different ways of saying ‘the same’ thing”” \citep[216]{Silverstein2003}. Enregisterment is therefore a process of cultural construction. A wine can be described with the phrase \emph{beautifully complex with an assertive backbone} or with the phrase \emph{very good and tasty}; even though the two phrases do not denote exactly the same thing, they are culturally constructed as expressing the same meaning. \citet[212]{Silverstein2003} calls this phenomenon “metapragmatically imputed denotational equivalence”. Through the cultural schema and metapragmatic evaluations described above, the first phrase, which indexes connoisseurship (\emph{n}-th order), is enregistered (\emph{n}+1st order) and becomes part of a lexical register which \citet{Silverstein2003} labels \textit{oinoglossia}.
Registers are defined by Silverstein as “alternate ways of “saying ‘the same’ thing” considered “appropriate to” particular contexts of usage” (\citeyear[212]{Silverstein2003}). This means that even though both phrases are constructed as having the same basic meaning, they are different in that only the phrase \emph{beautifully complex with an assertive backbone} is deemed appropriate in a wine tasting situation – not only because it indexes knowledge about wine but also because it indexes knowledge about appropriate linguistic behavior in social circles where wine tastings are common. Silverstein writes further that “the register’s forms being extractable from the sum total of all possible texts in such a context, a register will consist of particular register shibboleths, at whatever analytic plane of language structure (phonologico-phonetic, morpholexical, morphosyntactic, grammaticosemantic, etc.)” (\citeyear[212]{Silverstein2003}). Applied to the example of the oinoglossia register, this means that this register can be recognized by shibboleths which are found on the lexical level (I assume that this is why Silverstein calls it a lexical register). A phrase like \emph{beautifully complex} indexes the use of the oinoglossia register, but in order to produce a coherent text it must co-occur with other linguistic forms which are also part of the register. To illustrate this, I suggest the following example sentences: \ea\label{ex:sir} Sir, you must taste this beautifully complex wine! \ex\label{ex:dudebeautifully} Dude, you must taste this beautifully complex wine! \ex\label{ex:dudeawesome} Dude, you must taste this awesome wine! \z These sentences show that the address terms and the adjective phrases are in a paradigmatic relationship and that their syntagmatic co-occurrence is determined by the register to which they belong. In (\ref{ex:sir}) and (\ref{ex:dudeawesome}), the address term and the adjective phrase belong to the same register or registers which are compatible with each other, whereas in (\ref{ex:dudebeautifully}) they belong to different registers which are incompatible with each other. The oinoglossia register is created by a higher-order indexicality linking \emph{beautifully complex} to a high social status and the prestige activity of wine tasting – these indexical values are not compatible with the values of American masculinity and non-conformity indexed by the address term \emph{dude} \citep{Kiesling2004}. Sentence (\ref{ex:dudebeautifully}) is unlikely to be produced by speakers, unless they want to create irony and achieve a humorous effect. It is therefore possible that linguistic forms which belong to different registers co-occur syntagmatically, but these combinations are usually marked and open to interpretations involving an even higher-order indexicality ((\emph{n}+1)+1).
This definition implies that registers are recognized by means of salient linguistic forms (register shibboleths) co-occurring syntagmatically, but that they consist of other forms as well which are invariant or at least not register shibboleths of other incompatible registers. The aspect of \emph{recognition} is also at the heart of \citegen{Agha2007} concept of enregisterment. He defines it as “processes and practices whereby performable signs become recognized (and regrouped) as belonging to distinct, differentially valorized registers by a population” (\citeyear[81]{Agha2007}). He therefore regards registers as entities that come into existence through recognition by a group of people in a particular cultural and social context and during a particular time period. It is also noteworthy that registers are not restricted to linguistic forms but comprise all kinds of signs which are performable and therefore visible to others. \textit{Semiotic registers} in a wider sense are consequently distinguished from more specific \textit{registers of discourse}. In contrast to Silverstein, whose focus is on the development of a semiotic model of enregisterment (and ultimately a model of language as a union of registers), Agha is very much concerned with modeling the cultural and social processes by which registers are created and changed continually in a socio-historical context. The definitions of semiotic register and register of discourse reflect this \citep[81]{Agha2007}: \begin{quote} A \emph{register of discourse}: a cultural model of action \begin{itemize} \item[(a)] which links speech repertoires to stereotypic indexical values \item[(b)] is performable through utterances (yields enactable personae/relationships) \item[(c)] is recognized by a sociohistorical population \end{itemize} A \emph{semiotic register}: a register where language is not the only type of sign-behavior modeled, and utterances not the only modality of action. A register of discourse is a special case. \end{quote} While the indexicality of (linguistic) forms is as central for Agha as for Silverstein in the emergence of registers, it is crucial that Agha defines registers not as repertoires of signs per se, but as cultural models of action. A model of action is only an abstraction of the actual, observable action, but it is necessarily based on that action and serves as a point of reference for all participants in the action and therefore influences the action that it models. The emphasis on action means that only that which is performable and therefore observable can become part of the model. In all these observable instances of action which necessarily happen in a social context, indexical links are created between signs involved in the action and aspects of the context. Agha’s main contribution is to provide a theoretical model for how these links between (linguistic) forms and indexical values, created in every instance of observable action (“the micro-time of interaction”, \citeyear[103]{Agha2007}), become cultural models of action (“macro-social regularities of culture”, \citeyear[103]{Agha2007}), which then influence actual action again. I outline this process of enregisterment in the following paragraphs. The key activities in enregisterment are speakers’ \textit{reflexive activities}, “namely activities in which communicative signs are used to typify other perceivable signs” \citep[16]{Agha2007}.
This means that several signs are grouped together and assigned a metalinguistic predicate which relates to types of persons, types of interpersonal relationships or types of behaviors. Using the wine example again, the speaker’s use of \emph{beautifully complex with an assertive backbone}, the elegant and expensive jacket that he or she wears and the way he or she greets people with a smile and a handshake are evaluated and typified by participants in the same situation as ‘wine-connoisseur’, ‘superior social standing’ and ‘polite’. In this process, disparate cross-modal signs become icons of categories of personhood, behavior and relationships, and the use of these icons comes to index characteristics of their users. The icons are therefore classified by Agha as \textit{indexical icons}, which are \textit{emblematic signs}. It is important to note that while these typifications are perceivable by definition, this does not mean that they are always expressed explicitly and linguistically. On the contrary, they are often implicit and only mediated by overt signs, e.g. by the deferential behavior of others (\citeyear[103]{Agha2007}). Individual face-to-face encounters between people (as in this example) are part of larger “communicative chain processes” and “communicative networks” (\citeyear[69]{Agha2007}) through which typifications are transmitted within a population. \citet[151]{Agha2007} provides a list of common typifications of language use (\tabref{tab:2:2}) which reflects the difference between reflexive activities in direct interpersonal interactions (1.) and in larger, more indirect cultural forms of communication (3.). Typifications also occur as the result of interventions of experts (2.). \begin{table} \begin{tabularx}{\textwidth}{R{2.5cm}l} & Typifications of language use\\ \lsptoprule 1. & Everyday reflexive behaviors, such as\\ & ~~(a) use of register names\\ & ~~(b) accounts of usage/users\\ & ~~(c) descriptions of ‘appropriate’ use\\ & ~~(d) patterns of ‘next turn’ response behavior\\ & ~~(e) patterns of ratified vs. unratified use\\ 2. & Judgements elicited through\\ & ~~(f) interviews\\ & ~~(g) questionnaires\\ & ~~(h) ‘matched guise’ experiments\\ 3. & Metadiscursive genres such as\\ & ~~(i) traditions of lexicography\\ & ~~(j) grammatology\\ & ~~(k) canonical texts\\ & ~~(l) schooling\\ & ~~(m) popular print genres\\ & ~~(n) electronic media\\ & ~~(o) literary representations\\ & ~~(p) myth\\ & ~~(q) ritual\\ \lspbottomrule \end{tabularx} \caption{ Typifications of language use in Agha's framework of enregisterment (from \citealt[151]{Agha2007}) } \label{tab:2:2}\label{tab:key:2} \end{table} It is essential in enregisterment that one instance of metapragmatic activity cannot constitute a register, but that typifications have to be recurrent in the behavior of many speakers. It is only through the transmission and recurrence of evaluations of forms that they become recognized and distinguishable as a register. The frequently recurring typifications become “stereotypes of indexicality” or “metapragmatic stereotypes” (\citeyear[151--153]{Agha2007}), which means that they become social regularities. It is evident that especially the reflexive behaviors in (i)--(q) of \tabref{tab:2:2} contribute to this development because they involve mass communication of some sort where linguistic forms and their indexical values are brought to the attention of a large audience.
It is possible that these stereotypes become so widely known and accepted that they become “a routinely background reality for very large groups of people” and therefore “socially routinized metapragmatic constructs (such as beliefs, habits, norms, ideologies)” (\citeyear[29]{Agha2007}). In the transmission process, however, typifications are also negotiated and transformed. Here, Agha draws on Silverstein’s orders of indexicality: If an evaluation of a set of forms becomes so common that it becomes presupposable by many people, it can become subject to reanalysis, which could affect both the forms (regrouping) and the indexical values (revalorization). For example, if the indexical link between \emph{beautifully complex with an assertive backbone} (together with other signs) and the image of an ‘expert of wine’ and ‘polite’ behavior and social ‘superiority’ becomes transmitted to a large audience (possibly through advertisements), it may become so strong that it becomes subject to evaluation itself; for example, it could be revalorized as an index of snobbery. This does not imply that the former valorization disappears; it is rather the case that there may be competing valorizations which co-exist and depend on the evaluator. What is ‘polite’ and ‘sophisticated’ for a person who is part of higher social circles may be ‘snobbish’ for a person who is not part of these circles. This is why Agha defines registers as dependent on the people recognizing their forms and evaluating them as different from other forms. It also explains why Agha finds questions of boundaries of registers “fruitless and misplaced” (\citeyear[168]{Agha2007}) because any boundaries associated with registers are continually negotiated and reset in the processes of enregisterment. \tabref{tab:2:3} provides an overview of the dimensions of register organization and change that \citet[169]{Agha2007} postulates: \begin{table} \begin{tabularx}{\textwidth}{R{1cm}X} \lsptoprule A. & \textbf{Repertoire characteristics} \\ & \textit{Repertoire size}: number of forms \\ & \textit{Grammatical range}: number of form-classes in which forms occur \\ & \textit{Semiotic range}: variety of linguistic and non-linguistic signs associated with use \\ B. & \textbf{Social range} of enactable (pragmatic) values\\ & \textit{Indexical focus}: Stereotypes of speaker-actor, relation to interlocutor, occasion of use, etc. \\ & \textit{Images} (or icons) stereotypically attached to indexical sign-forms: for speaker-focused indexicals, persona types (male/female, upper/lower class, etc.); for interlocutor-focal indexicals, types of relationship (deference, intimacy, etc.) \\ & Positive or negative values associated with the registers \\ C. & \textbf{Social domain(s)}: Categories of persons acquainted with the register formation\\ & \textit{Domain of recognition}: persons who recognize the register’s forms \\ & \textit{Domain of fluency}: persons fully competent in the register’s use \\ \lspbottomrule \end{tabularx} \caption{Some dimensions of register organization and change (from \citealt[169]{Agha2007})} \label{tab:2:3} \end{table} \largerpage Registers can be characterized with regard to the three dimensions A--C, but “any such account is merely a sociohistorical snapshot of a phase of enregisterment for particular users” \citep[170]{Agha2007}. Even though Agha places particular emphasis on this processual perspective, he nevertheless also stresses that there are mechanisms which work towards a relative stability and persistence of registers.
Institutions of various kinds play an important role here; it is possible, for example, that the forms of a register are codified in dictionaries and grammars and used in educational institutions. This is obviously the case for standard registers, which are usually not only much larger than other registers, but also more resistant to change. Agha uses the example of Received Pronunciation to illustrate how phonological forms come to be enregistered as a national standard of pronunciation in England.\footnote{This article was first published in 2003 and was republished in 2007 as Chapter 4 of \emph{Language and social relations}.} Furthermore, processes of \textit{essentialization} and \textit{naturalization}, which also play an important role in Silverstein’s model of enregisterment, establish a natural motivation for the link between the register’s forms and values \citep[74]{Agha2007}. Metapragmatic activity which repeatedly presents the values indexed by the forms as natural qualities of their users causes people to disregard the fact that the indexical link has actually been socially constructed. This is, for example, the case when the use of standard forms is so commonly linked to the attribute ‘intelligent’ in metapragmatic activity that language users start considering intelligence as a \emph{natural} characteristic of the people using the forms. Using standard forms even comes to be seen as an essential quality of the group of intelligent people. It is easy to see how normative criteria are based on these essential qualities: In such a scenario, people must use standard forms in order to be seen as intelligent by others. Stabilizing mechanisms are reinforced when they are formulated and backed by authority, for example by institutions of expertise, or by people who have been assigned an expert role. Despite these mechanisms of stabilization, however, registers are always subject to reanalysis and transformation; the difference lies in the speed and the extent to which that happens. What, then, is the effect of registers? The circulation of metapragmatic stereotypes and images of personhood associated with them provides the ground for role alignments of speakers in interaction. This means that they can either signal their sameness and their co-membership in a social category (symmetric alignment) or their difference (asymmetric alignment) \citep[133]{Agha2007}. This happens in every instance of interaction, but the existence of culturally shared stereotypes also leads to social regularities in role alignment. Alignments are particularly motivated by so-called \textit{characterological figures} linked to registers. Such a figure is “any image of personhood that is performable through a semiotic display or enactment (such as an utterance). Once performed, the figure is potentially detachable from its current animator in subsequent moments of construal and re-circulation” \citep[177]{Agha2007}. This means that links between forms and their indexical values become less abstract and more embodied, and as such more readily inhabited by speakers to signal their social identity. A comparison of \citegen{Silverstein2003} and \citegen{Agha2007} definitions of register makes clear that they are very similar in many respects. They see the metapragmatic engagement with perceivable signs (linguistic and others) as the key process in the emergence of registers, as it is through this engagement that the signs come to index social values (on the \emph{n}+1st indexical order).
They both stress the interrelation between micro-contexts and macro-contexts (Silverstein) and between individual face-to-face encounters and large-scale cultural processes (Agha), because registers cannot come to exist on only one of these levels. However, there is also an important difference, which can be explained by the fact that they pursue slightly different aims with their theories. Silverstein’s aim is more (socio)linguistic in that he wants to show how sociolinguistic analysis needs to be complemented by studying not only the \emph{n}-th order of indexicality but also the \emph{n}+1st order. Agha’s theoretical orientation is more sociological, as he aims to describe and explain the role that language has in social life and its impact on social relations. For Silverstein, accordingly, a register comprises all linguistic forms needed to make a text which are judged to be appropriate in context, but while some forms are salient and point to the existence of registers, others are not salient or simply invariant. He elaborates on this view in a recent article, where he states that \begin{quote} Language users evaluate discourse with intuitive metrics of coherence of enregistered features of form co-occurring in text-in-context across segmentable stretches of discourse such as an individual’s contribution to discursive interaction, generally focusing on highly salient ‘register shibboleths’ that reveal a basic register setting around which cluster the untroubled compatibility or indexically marked lack of compatibility of other aspects of usage. \citep[59--60]{Silverstein2016} \end{quote} His statement that a language is a union of its registers is basically a theoretical claim about the nature of language. Agha’s definition of registers as models of conduct is not so much language-theoretical but rather social. By emphasizing that models are reference points for social (including linguistic) behavior, he distinguishes them at the same time from actual conduct and language use. Therefore, a register does not comprise all forms needed to produce text in an actual context, but only those forms which are recognized by people as part of the register. Registers can thus vary in size – some comprise only a few forms, while others, especially standard registers, comprise a very large number. It seems as if Silverstein locates registers more on the level of actual language use, while Agha locates them on a discursive level: It is language through which such models are formulated (the terms \textit{discourse} and \textit{discursive} will be discussed extensively in \sectref{bkm:Ref506891065}). Nevertheless, as both stress the interrelation between the level of language use and the discursive level, this difference is more a reflection of their different research aims than a difference between their theories: Silverstein’s registers, located on the level of language use, only exist because of the metapragmatic engagement of language users with some of their forms; Agha’s registers, located on the discursive level, have a bearing on actual language use, and in these instances of actual use the register’s forms of course co-occur with other forms. These forms can be congruent with the register or they may “by degrees, cancel the stereotypic values” indexed by the register’s forms, as in the example sentence (\ref{ex:dudebeautifully}) above.
This discussion of Silverstein’s and Agha’s conceptions of registers and enregisterment shows why enregisterment is a useful theoretical framework for studying the emergence of new varieties: First of all, it provides a way of linking the two levels identified in Schneider’s Dynamic Model, the level of the structure and the level of the concept of a variety, within one theoretical framework: the structural level corresponds to actual language use, and the conceptual level to the discursive level of metapragmatic activity and engagement with linguistic forms (and other signs). At the same time, the theoretical framework of enregisterment also provides a basis for systematically distinguishing between these levels in a study of the emergence of a new variety. This in turn is the basis for providing empirical support for the important role of social factors and people’s perceptions, beliefs and attitudes in the emergence of new varieties: such support is provided if changing statistical correlations between linguistic forms and social and geographic categories can be shown to coincide with changing metapragmatic activities surrounding these forms. Secondly, Agha and Silverstein elaborate on central ideas of the Dynamic Model in much more detail: the interrelation between identity construction and the construction of linguistic difference as well as the emergent \emph{recognition} of forms as differential by a population, which is important because it is part of Schneider’s definition of the term variety itself (see \sectref{bkm:Ref521000690}). The third advantage of the theoretical framework of enregisterment is that it provides a methodological basis for studying the discursive level. Because metapragmatic activity is by definition observable, it can be studied empirically. Agha’s and Silverstein’s case studies and examples are mostly qualitative and intended to underline their theoretical arguments; in this study, however, I develop a methodology for extensive case studies which combine both quantitative and qualitative methods to analyze enregisterment processes in nineteenth-century America and contribute to a theoretically informed account of the emergence of American English. \subsection{Indexing varieties: enregisterment in (historical) sociolinguistics} \label{bkm:Ref506884048}\hypertarget{Toc63021212}{} Given its emphasis on theorizing the relation between language and social behavior, it is not surprising that the concept of enregisterment has caught the interest of sociolinguists. Silverstein himself links his order of indexicality explicitly to sociolinguistic research by aligning it with \citegen{Labov1972} order of linguistic variables as indicators, markers, and stereotypes as well as with the difference between dialectal and superposed variability posited by \citet[383--384]{Gumperz1968}. But it was essentially a change of orientation in sociolinguistics, which \citet{Eckert2012} describes using the metaphor of three successive waves, that has sparked interest first of all in indexicality and increasingly also in enregisterment.
While the first wave of sociolinguistics was concerned with discovering the systematic relations between linguistic variation and macro-social categories like social class, age, gender, as well as race and ethnicity, using mainly quantitative methods, second wave studies looked instead at how language use correlated with social categories which were relevant to a specific group of speakers on a local level, adding qualitative and ethnographic methods to the research design. Third wave studies shifted the focus from correlations to agency: Instead of viewing linguistic variation as a reflection of social categories, they concentrate on the way that speakers make use of variation to construct and express social categories and identities. Consequently, the social meaning created through stylistic practice is at the center of interest of third wave studies, and the concept of indexical order is very helpful in this respect because it shows how linguistic forms come to index social attributes and how these indexical meanings co-exist but also change over time. \citet{Eckert2008} develops the concept of the \textit{indexical field}, which she defines as “a field of potential meanings” of variables, or “constellation of ideologically related meanings, any one of which can be activated in the situated use of the variable” (\citeyear[454]{Eckert2008}). It is evident here how the emphasis on activation reflects the importance of agency: By activating one or more indexical meanings of a variant, the language user contributes to the maintenance but also to the change of the indexical field, which is in constant flux. Eckert illustrates the concept by using released /t/ as an example. Drawing on several studies conducted in American contexts, Eckert constructs an indexical field (see \figref{fig:2:2}) consisting of social personae indexed by released /t/, qualities, which are seen as permanent, and stances, which are rather momentary and tied to specific situations but can become constructed as part of people’s identity if they are habitually expressed. \begin{figure} \includegraphics[width=0.8\textwidth]{figures/Paulsen-img02.pdf} \caption{Indexical field of released /t/: social personae (in circles), permanent qualities (in grey boxes) and stances (in italics), my own illustration based on \citet[469]{Eckert2008}} \label{fig:2:2} \end{figure} The indexical field created here emphasizes again the importance of social personae (similar to Agha’s characterological figures) which, according to \citet[470]{Eckert2008}, anchor the process of interpretation because they are less fluid than permanent qualities and situated stances. \citegen[454]{Eckert2008} proposal to study “variation as an indexical system, taking meaning as a point of departure rather than the sound changes or structural issues that have generally governed what variables we study and how we study them” is therefore a programmatic statement for third wave sociolinguistic studies. Using the field metaphor, she makes the point that Silverstein’s indexical order is not to be understood as linear (as is the case in the first sociolinguistic studies on enregisterment, which I will discuss below), but as a continual reconstrual of indexical value. At the same time, she also draws on Agha by claiming that “variables combine to constitute styles” (\citeyear[472]{Eckert2008}) and that styles are “the product of enregisterment” (\citeyear[456]{Eckert2008}).
However, she does not employ the term \textit{register} because she regards the common definition of register, “a static collocation of features associated with a specific setting or fixed social category” (\citeyear[456]{Eckert2008}), as too established in sociolinguistics. Yet, as her own definition of \textit{style} is also very different from the established definitions of the term in traditional variationist studies, her argument against the use of the term \textit{register} is not very convincing – it rather underlines the necessity to develop and redefine established terms and concepts.\footnote{For an extensive overview of the development and definitions of the concept of \textit{style} see e.g. \citet{Coupland2007}.} This is precisely what Agha does for the term \textit{register} by criticizing earlier views (\citeyear[167--170]{Agha2007}). Nevertheless, it becomes clear that Eckert places herself firmly in the field of sociolinguistics and aims at developing her own theoretical framework of style (instead of using a framework from linguistic anthropology) and testing its usefulness for the study of sociolinguistic variation. At the same time, she also stays within the variationist sociolinguistic tradition by focusing on the indexical potential of single variants and by studying actual usage and behavior, not metadiscursive activities. It is interesting that she does identify a “need to examine a far greater range of variables than is commonly done in the field” (\citeyear[472]{Eckert2008}) and a need to address questions of the structure of styles and to model the process of \emph{bricolage}, the process whereby individual (linguistic) resources are “interpreted and combined with other resources to construct a more complex meaningful entity” (\citeyear[456--457]{Eckert2008}), since it is precisely these issues that Agha and Silverstein address in their theory of enregisterment. In a recent article, she elaborates more on the relation between her conception of style and Agha’s register: She describes Agha’s register as “a style that is enduringly associated with some widely recognized character type such as Posh Brit or Surfer Dude” and therefore as “an outcome of stylistic practice” \citep[76]{Eckert2016} in which people do not \emph{use} registers, but refer to them and draw on them in their actual language use when they make smaller or larger interactive moves. By describing register as “a sign at a particularly high level of consensuality and metadiscursivity” \citep[76]{Eckert2016}, she locates it on a conceptual level and distinguishes it from actual linguistic and stylistic practice. Although Eckert captures a very important point in Agha’s theory here, namely that registers are \emph{models} of action and not action per se, her account of Agha’s theory is too reductionist. In Agha’s model, a differential metapragmatic treatment of sets of forms is already sufficient to indicate the existence of a register because it points to a differential evaluation of these forms: Users associate one set of forms with different indexical values than another set of forms. In Agha’s view, explicit metadiscourses on speech forms and the values that they index \emph{can} exist and they often do exist, especially in the form of characterological figures embodying these links, but they are not a prerequisite for the existence of registers.
Registers may acquire a very large social domain (especially with respect to the domain of recognition), but their domain can theoretically also be much smaller. Instead of viewing registers as specific types of style, Agha distinguishes them in a different way and adds the concept of \textit{enregistered style}. Styles are “patterns of co-occurrence among semiotic devices” \citep[186]{Agha2007}, and these devices include linguistic and non-linguistic tokens. Every utterance can therefore be described as a co-occurrence style because in an actual interaction tokens cannot occur in isolation, but they always co-occur and therefore create an observable formal pattern. Only when a formal co-occurrence pattern is differentially evaluated does Agha speak of \textit{enregistered styles}, because they have acquired a cultural intelligibility and significance. It is these enregistered styles that are “reflexively endogenized to a register model” \citep[186]{Agha2007}. \figref{fig:2:3} illustrates this integrative view of styles and registers. \begin{figure} \includegraphics[width=.8\textwidth]{figures/Paulsen-img03.eps} \caption{Style, enregistered style and register based on definitions by \citet{Agha2007}} \label{fig:2:3} \end{figure} Third wave sociolinguistic studies basically study the relation between style and enregistered styles because their aim is to identify the indexical meaning of (combinations of) linguistic variants, thereby paying attention to how they are linked to other signs in the social landscape. This indexical meaning is used by speakers to position themselves socially by making stylistic moves, thereby changing styles and creating the possibility for new indexical meanings.\footnote{In my view, Silverstein’s notion of register can therefore be equated with Agha’s notion of enregistered style.} Agha adds another dimension to these studies by explaining how this process works: Indexical meaning is created through reflexive activities, resulting in a register which is a model of action and therefore not to be equated with a pattern occurring in actual use. Nevertheless, it has an influence on language use, as the model is performable in actual stylistic practice. In addition to an empirical study of language use, Agha sees an empirical study of reflexive activity as crucial in identifying how people create and transform social identity and social relations through language.\footnote{In her well-known classification of sociolinguistic studies in three waves, Eckert also states that “every case of variation [discussed in her article] involves enregisterment” (\citeyear[96]{Eckert2012}), but she does not clearly distinguish or relate enregisterment and style in this article. This makes it difficult to follow her argumentation that enregisterment loses its analytic force at some point: nuances of sound, such as fortition or lenition, cannot fruitfully be regarded as components of registers because it is \textit{only} “in continual stylistic practice that nuances of sound take on sufficient meaning to participate in processes of enregisterment” (\citeyear[97]{Eckert2012}).} Another theoretical framework in which the notion of indexicality plays an important role and which is closely related to Eckert’s theory of style and the indexical field is \citegen{Bucholtz2005} framework for the analysis of identity.
They regard identity not as an internal psychological phenomenon, but as a social and cultural phenomenon produced through linguistic and other semiotic practices. Identity, defined as “the social positioning of self and other” (\citeyear[586]{Bucholtz2005}), is linguistically indexed in various ways: through labels, implicatures, stances, styles, or linguistic structures and systems. This incorporation of linguistic structures and systems into their \textit{indexicality principle} is particularly interesting because it is reminiscent of \citegen[18]{Agha2007} idea that “acts of value ascription to language can […] acquire much more generic discursive objects (e.g., entire speech varieties), and become habitual for large groups of evaluators”. \citet[597]{Bucholtz2005} do not elaborate on that point in more detail, nor do they employ the concept of enregisterment, but they do mention the works on language, nationalism and ideology that inform this view, especially the work of the linguistic anthropologists \citet{Gal1995}. These two researchers elaborate on the semiotic processes by which differences between languages and dialects are constructed through linguistic ideologies: “[I]deologies interpret linguistic structure, sometimes exaggerating or even creating linguistic differentiation” (\citeyear[993]{Gal1995}). They argue that this construction process is not only shaped by the speakers (“the immediate participants in a sociolinguistic field”, \citeyear[977]{Gal1995}), but also by scholars (“professional observers”, \citeyear[993]{Gal1995}) and criticize the fact that \begin{quote} Although it is now a commonplace that social categories—including nations, ethnic groups, races, genders, classes—are in part constructed and reproduced through symbolic devices and everyday practices that create boundaries between them, this analysis is only rarely extended to language. Despite a generation of sociolinguistic work that has persistently provided evidence to the contrary, linguistic differentiation—the formation of languages and dialects—is still often regarded as an a social [sic] process. \citep[969]{Gal1995} \end{quote} As a potential reason for this, Gal \& Irvine identify nineteenth-century ideologies which equated one language with one culture – an equation which was then used as a basis to claim nationhood and territory. They argue that it was precisely because of the idea that language is independent of social activities that it could be used for the identification of nations. As the unity of the nation was identified based on the unity of language, linguistic homogeneity became an important characteristic of nations, and ideologically driven processes of erasure rendered internal variation invisible to such an extent that people viewed language as free of variation and as a fixed system that should not change. Borders between languages and dialects are therefore socially constructed – a line of argumentation that is continued by Agha’s proposal that attention must be paid to these construction processes and that their analysis should complement (if not replace) traditional variationist analyses: \begin{quote} The terms dialect and sociolect describe forms of variation in the denotational system of a language community. […] Dialects may exist and be describable by linguists but groups speaking these dialects may be separated in various ways so that cross-dialect contact among persons does not occur and the existence of dialect differences is not even suspected by most (in principle, by any) speakers of the language.
Dialect differences are relevant to social life only insofar as they are experienced through communicative events. How such relevance is construed society internally is an empirical question that will have different answers in different sociohistorical locales. \citep[132]{Agha2007} \end{quote} What people view as languages and dialects are registers (in Agha’s sense of the term) which are constructed through an interpretation of linguistic variation by means of reflexive models – \citet[135]{Agha2007} calls this “the reflexive construal of such ‘-lects’ \emph{as} registers”. Bucholtz \& Hall’s statement that identity can be indexed through linguistic structures and systems can therefore be interpreted by using the framework of enregisterment: Linguistic forms have the potential to be grouped with others and evaluated as a distinct speech variety (a dialect, a sociolect, a language) by speakers in a process which links these forms to aspects of the social identity of speakers using these forms. And it is in fact this potential of forms to become indexes of speech varieties that has been the focus of sociolinguistic studies of enregisterment to date. In the following paragraphs I outline and discuss the most important studies on enregisterment in order to demonstrate that although these studies provide numerous important insights, some theoretical and methodological clarifications and adjustments are necessary to make enregisterment a fruitful theoretical framework for studying the emergence of new varieties of English. The first sociolinguistic study using Silverstein’s orders of indexicality and Agha’s concept of enregisterment as a theoretical framework is \citegen{Johnstone2006} study on the enregisterment of \textit{Pittsburghese}. They relate their study to \citegen{Agha2003} case study on the historical enregisterment of Received Pronunciation by emphasizing that enregisterment can also be used to explain how varieties which do not carry overt prestige can nevertheless become standardized through the development of vernacular norms (\citeyear[80]{Johnstone2006}). Furthermore, they take up Silverstein’s point that \citegen{Labov1972} trichotomy of indicators, markers and stereotypes can be captured by orders of indexicality, the latter being more abstract and providing the advantage of a more nuanced understanding of how relationships between linguistic forms are formed and stabilized (\citeyear[81]{Johnstone2006}). Their study is of great value because it links three fields of research: dialectology, sociolinguistics and linguistic anthropology. They use dialectological evidence to describe linguistic variants which are used in Pittsburgh or in the Pittsburgh metropolitan area, but which are at the same time not limited to this geographical area (the only variant with a small area of occurrence is monophthongal [aː] in the lexical set \textsc{mouth}), and which can therefore not be used to identify a set of linguistic forms distinctive of this area. They use sociolinguistic interviews with five speakers to determine an index score measuring the extent to which each speaker uses the local monophthongal variant of \textsc{mouth}. At the same time, the interviews are used to study their perception and their evaluation of this variant. They find that there are two groups of speakers with respect to usage: The two oldest speakers use the local monophthongal variant most of the time, while the middle-aged speakers and the youngest speaker use it hardly at all.
In terms of attitudes and perception, they find that one of the older speakers is not aware of using the variant and does not recognize it as local, while the other speaker has learnt to recognize this variant as local and, although knowing about negative evaluations of the variant, regards it as rather neutral (she states that sounding like a working-class Pittsburgher is not “a big deal”, \citeyear[89]{Johnstone2006}). In the second group, the two middle-aged speakers regard the variant as local, incorrect and as signaling working-class membership. One of them, however, also indicates a potential for signaling solidarity by using the variant. The youngest speaker in the second group regards the monophthongal variant as an indicator of local speech and local identity, but in contrast to the other speakers, he does not associate any negative values with it. These findings, which are summarized in \tabref{tab:2:4}, are interpreted by drawing on the anthropological concepts of orders of indexicality and enregisterment. John K.’s almost invariable use of [aː] and his complete lack of recognition of the form in his own speech are interpreted as the variant being a first-order index in his speech: He does not use it to express social identity, but because he is from the region. According to Johnstone et al., this situation is typical of the time until the 1960s, when the localness of the variant [aː] was observable to outsiders, but not to speakers themselves. Dottie X. confirms this experience but recognizes the variant now as local and working-class. Since she evaluates it neutrally (it is not a problem for her to sound working class), she also does not use the variant to do social work, and it is therefore also seen as a first-order index. Arlene C., on the other hand, evaluates the monophthongal variant negatively as incorrect and working-class and uses the diphthongal variant to the extent that even a pattern of hypercorrection can be observed (\citeyear[91]{Johnstone2006}). \begin{table} \begin{tabularx}{\textwidth}{p{2cm}>{\raggedright}p{1.7cm}Qp{2.1cm}@{}} \lsptoprule ~\newline Speaker & \mbox{Use of [aː]}\newline \mbox{in \textsc{mouth}} & ~\newline Perception and evaluation of [aː] & Order of\newline indexicality\\ \midrule Dr. John K.,\newline born 1928 & high & {\textbullet} No recognition of the variant in his own speech {\textbullet} Evaluation of the variant as not local, as working-class and as signaling a lack of education when asked to compare it to the diphthongal variant & First-order\\ \tablevspace Dottie X.,\newline born 1930 & high & {\textbullet} Recognition of the variant now, but not when she was younger {\textbullet} Neutral evaluation of the variant as local and working-class & \\ \tablevspace Arlene C.,\newline \mbox{born ca.
1940} & low & {\textbullet} Recognition of the variant, which is evaluated negatively as incorrect and working-class (and not as a marker of local identity) & Second-order\\ \tablevspace Barb E.,\newline born 1957 & low & {\textbullet} Recognition of the variant, which is evaluated negatively as incorrect and working-class, but also positively as a marker of social solidarity with fellow Pittsburghers & \\ \tablevspace Jessica H.,\newline born 1979 & low & {\textbullet} Recognition of the variant, which is evaluated positively as a marker of local Pittsburgh identity & Third-order\\ \lspbottomrule \end{tabularx} \caption{ Speakers’ use, perception and evaluation of [aː] in \citegen{Johnstone2006} study on the enregisterment of Pittsburghese and the interpretation in terms of orders of indexicality } \label{tab:2:4} \end{table} In Johnstone et al.’s view, this marks the second-order indexicality of the variant, as it is avoided by Arlene C. in order to escape stigmatization. Barb E.’s use of [aː] is very low, but her evaluation of [aː] is interpreted as an indicator of stylistic variability in her own speech because she regards the use of the variant negatively in some contexts (with its potential to index incorrect and working-class speech), but also positively in other contexts (with its potential to index solidarity among fellow Pittsburghers). This potential to index localness in a positive way is foregrounded in the evaluation of [aː] by Jessica H. As older, negative evaluations of the forms are now in the process of being replaced by a new, positive one, the form is interpreted as being a third-order index by \citet{Johnstone2006}. A further indication of its third-order status is that it is not stylistically variable in Jessica H.’s own speech and is not used in her everyday interactions, but \textit{only} in explicit performances of identity, which she describes in the interview. This performance of local Pittsburgh identity by drawing on a specific set of linguistic forms, including but not limited to [aː], is supported or even made possible by a growing number of metadiscursive activities. \citet{Johnstone2006} analyze twenty newspaper articles about local speech and observe that they have appeared more regularly since the 1950s and 1960s and that they evaluate the supposedly regional forms they describe in a disparaging way (\citeyear[95]{Johnstone2006}). In the 1960s and early 1970s, the attitude shifted, mostly due to experts like the University of Pittsburgh dialectologist Robert Parslow, who legitimized these forms and assigned them to a local dialect in several interviews. Other materials analyzed by \citet{Johnstone2006} also constitute evidence of an increasingly favorable evaluation of (supposedly) local forms, which are listed in a folk dictionary titled \emph{Sam McCool’s New Pittsburghese: How to Speak Like a Pittsburgher} (\citeyear{McCool1982}, cited by \citealt[96]{Johnstone2006}) and which became commodified by being written on T-shirts, mugs, postcards, shot glasses and similar products. It is in this context that younger speakers like Jessica H. now recognize forms like [aː] in \textsc{mouth}, associate them with Pittsburgh speech and use them in performances of local identity even though they do not use them in their everyday speech.
The reason for summarizing this study in such detail is that it has been very influential – it is cited in every sociolinguistic work drawing on enregisterment – and has therefore shaped the understanding of theoretical issues, such as the delimitation of the different orders of indexicality and the relationship between these orders and enregisterment, as well as methodological approaches to the study of enregisterment within a broader sociolinguistic framework. There are three theoretical points made by \citet{Johnstone2006} that need closer attention and which I will discuss in detail below: They relate to the role of awareness in delimiting different orders of indexicality, the relationship between orders of indexicality and enregisterment, and the relationship between production and recognition of language forms. Awareness (or consciousness) is an important factor for \citet{Johnstone2006} in delimiting the different orders of indexicality. While a correlation between a linguistic form and an extra-linguistic characteristic is not noticeable to speakers on the level of first-order indexicality, it is crucial for second-order indexicality that speakers start to notice the correlation and use it to do social work. When a linguistic form is explicitly discussed in metadiscursive activities and used consciously in performances of identity, it is a third-order index of a particular aspect of identity (here of \emph{local} identity). The difference here seems to be not only one of awareness but also of intentionality: Only when a linguistic form is intentionally and reflexively used to signal a social characteristic is it a third-order index. Their view is in line with Labov’s classification of indicators, markers and stereotypes, which also rests on differing levels of awareness, from not being aware of social correlations (indicators) to not necessarily being aware of them, but definitely reacting to them (markers), to being very aware and explicitly discussing them (stereotypes). It is not in line with \citet{Silverstein2003}, however, who explicitly argues against Labov’s views by stating that \begin{quote} Where the Labovian sociolinguistic marker differs from the mere indicator is the inherent interaction of whatever SEC-indexing rates of production of standard with what we might term \emph{register demand} (a species of task demands in the normal psychological sense, and having nothing inherently to do with “consciousness,” contra Labov’s speculation). \citep[218]{Silverstein2003} \end{quote} This shows that he also regards style shifts (i.e. intra-speaker variation in different situations) as evidence for the respective forms being second-order indexes because speakers react to the “demands” of an existing register by changing those forms which are linked to it. But the process of indicators becoming markers (or first-order indexes becoming second-order indexes) is independent of whether speakers do this consciously or not. This view on the role of awareness in style shifting can also be found in sociolinguistic theories belonging to the third wave. In a recent theoretical article, \citet{Eckert2016} argues against the traditional sociolinguistic emphasis on consciousness and awareness, which is also visible in distinctions like \textit{change from below} and \textit{change from above} as well as \textit{overt prestige} and \textit{covert prestige}.
In her view, “consciousness and awareness are not simple matters, and agency does not equal or require awareness” (\citeyear[78]{Eckert2016}). Rather than being external to cognition, “the social is embedded in the unconscious to the same extent, in the same way, and along the same timeline, as the linguistic” (\citeyear[78]{Eckert2016}). To support her argument she cites experimental studies which show that speech perception is influenced by social information about the speakers \citep{DOnofrio2015} and by nonlinguistic information like the presence of stuffed toy kangaroos and koalas or toy kiwis invoking associations with Australia and New Zealand respectively \citep{Hay2010}. Using eye-tracking, \citet{DOnofrio2015} shows that persona-based information affects early and automatic speech processing (which is not under conscious control by speakers) because it leads speakers to expect a particular vowel which they associate with the persona. Even though these studies focus on perception, they suggest that speakers’ sensitivity to social information is not necessarily conscious but can be automatic as well. Given these findings, it is doubtful whether consciousness and awareness are helpful constructs in delimiting different orders of indexicality. What is important instead is the metapragmatic engagement with language, which is empirically observable evidence of \emph{n}+1st-order indexicality and therefore of enregisterment. If we can observe speakers’ reflexive activities in which they typify perceivable signs, we have evidence of the construction of a register which then influences speakers’ conscious or unconscious perceptions and productions. What makes \emph{n}+1st-order indexicality different from the (\emph{n}+1)+1st-order of indexicality is in Silverstein’s view not primarily a matter of consciousness but of presupposition: \begin{quote} Labovian sociolinguistic ‘stereotypes’, of course, are markers that have tilted in the direction of ideological transparency, the stuff of conscious, value-laden, imitational inhabitance – consciously speaking “like” some social type or personified image […]. The values of stereotypes are presupposed in the social-structure-as-indexed according to an ideological model, pure and simple; \textit{n}+1st-order indexicality has become presupposing, in other words, in effect replacing an older n-th-order indexical presupposition. \citep[220]{Silverstein2003} \end{quote} When the values indexed on the \emph{n}+1st-order become presupposing, they become open for reanalysis and reinterpretation. The existence of metadiscursive activity involving explicit comments on language use and the use of the features in stylized performances cannot be regarded as evidence for (\emph{n}+1)+1st-order of indexicality per se, but if it occurs repeatedly, it shows that the form-value links are transmitted and become more and more stable and therefore potentially presupposing. Using the example from \sectref{bkm:Ref512260235} again, it is possible that the adjective phrase \emph{beautifully complex with an assertive backbone} is explicitly commented on in a magazine article giving readers suggestions as to how they can describe wine at wine tastings. This contributes to the transmission and stabilization of the oinoglossia register, but it is not an indicator of an (\emph{n}+1)+1st-order of indexicality as the values associated with the form are not subject to reinterpretation.
However, if an article makes fun of the use of such phrases and suggests that time is better spent drinking wine instead of describing it, the wide recognition of the phrase as belonging to the oinoglossia register is the basis for its negative revalorization. This is an instance of (\emph{n}+1)+1st-order of indexicality. To sum up this discussion, neither consciousness nor awareness is a convincing concept for separating the different orders of indexicality. Metapragmatic engagement with language, including (but not restricted to) all types of reflexive activities listed in \tabref{tab:2:2}, is best seen as evidence of second-order indexicality. Metapragmatic activities which build on second-order indexicality to reanalyze and reinterpret these indexical values are evidence of (\emph{n}+1)+1st-order of indexicality. The second issue is the relation between different orders of indexicality and enregisterment. \citegen{Johnstone2006} study seems to equate the enregisterment process with successive orders of indexicality, such that a form is not enregistered when it is a first-order index, somewhat enregistered when it is a second-order index, and fully enregistered when it is a third-order index. This conception is implicitly or explicitly found in most other works on enregisterment. \citet[137--138]{Beal2012} for example describes the process of enregisterment as follows: \begin{quote} Enregisterment comes about through the ‘indexing’ of linguistic features as associated with social characteristics of speakers. Applying a language-ideological approach first developed by \citet{Silverstein1976, Silverstein1998}, \citet{Milroy2000, Milroy2004} asserts that there are three orders of indexicality whereby linguistic forms are associated with social categories. The orders relate to ascending levels of awareness within and beyond the speech community: \begin{itemize} \item \emph{First-order indexicality}: the association of a particular linguistic form and some specific social category. At this stage the association may be noticed by, for example, linguists, but speakers themselves are unaware of it. \item \emph{Second-order indexicality}: speakers may rationalize and justify the link between the linguistic form and a particular social category. \item \emph{Third-order indexicality}: forms which have been linked with a certain social category become the subject of overt comment. \end{itemize} \end{quote} This characterization illustrates again the problematic distinction between the different orders of indexicality based on different levels of awareness and the close parallel to Labov’s indicators, markers and stereotypes. It is especially unclear why speakers need a lower level of awareness for rationalizing and justifying the link between the linguistic form and a particular social category than for overtly commenting on it. \citet[167]{Milroy2004} in fact states that “\emph{second-order indexicality} is a metapragmatic concept, describing the noticing, discussion, and rationalization of first-order indexicality”. Her definition therefore includes the \textit{discussion} of linguistic forms, which is by definition overt and explicit, on the level of \emph{second}{}-order indexicality, as well as other forms of metapragmatic engagement without recourse to notions of awareness, while third-order indexicality is not defined or discussed in this article at all.
In line with \citet{Silverstein2003}, this definition of second-order indexicality rather emphasizes the important role of language ideologies, which are defined by \citet[193]{Silverstein1979} as “sets of beliefs about language articulated by users as a rationalization or justification of perceived language structure or use” (cited by \citealt[166]{Milroy2004}), in the emergence of second-order indexicality. Even though she does not use the concept of enregisterment in her article either, it is telling that her main focus is on first- and second-order indexicality because, as shown in \sectref{bkm:Ref512260235}, this is where Silverstein locates enregisterment. On this view, forms already become enregistered by becoming second-order indexes. In fact, the data presented and discussed by \citet{Johnstone2006} and \citet{Beal2009, Beal2012} can be interpreted in a different way. In the Pittsburgh case, the analysis shows that the linguistic forms were first enregistered as non-standard, based on a language ideology which constructs local forms as deviating from a national prestige variety. A form like [aː] therefore became a second-order index of primarily social characteristics, and the resulting register is rather a more general register of \textit{non-standard speech}. The explicit metapragmatic judgments elicited through the interviews and the metadiscursive activities identified in newspaper articles are evidence of this enregisterment process. The results of the interviews (see \tabref{tab:2:4}) also show that the social domain of the register has expanded over time. While Dottie X. did not recognize the variant [aː] and did not associate it with local and incorrect speech when she was young, she recognized the variant and the values indexed by it at the time of the interview. John K. still does not recognize the variant as local. The social domain of the register has thus expanded to include Dottie X., but not yet John K. The expansion of the social domain is a potential factor influencing the transformation of the register and the revalorization of the register’s forms. While localness had played a role before in that it marked the forms as deviating from a national standard, it was the social repercussions of this deviation that were in the foreground, not the indexicality of place. But against the background of ideologies attributing a positive value to regional identity, the index of localness was foregrounded and positive social evaluations were added. These existed in parallel with the negative ones, which even disappeared for some people, especially young people like Jessica H., who regards it as positive to have a linguistic form which signals her regional identity. The register label \textit{Pittsburghese}, which first appeared in a newspaper article published in 1967 \citep[95]{Johnstone2006}, shows the transformation of the social range of values with its increasing focus on local identity and its positive evaluation. Pittsburghese is therefore a register which builds on a prior register whose form-value links ([aː] indexing local, non-standard, incorrect, uneducated speech) have become a social regularity and as such open to reanalysis in the given sociohistorical context marked by social and geographical mobility ([aː] indexing local identity). This change in social range and social domain is typical of enregisterment and emphasizes the processual character of the concept and its fluidity.
In fact, in \citegen{Johnstone2009} article on the commodification and enregisterment of Pittsburghese, she interprets the results in this way, stating that the local forms are already enregistered by becoming second-order indexes. In her book on Pittsburghese published in \citeyear{Johnstone2013}, Johnstone even uses the term \textit{re-enregisterment} to label the transition from second-order to third-order indexicality. Even though, in principle, the prefix \emph{re}- is not necessary here, as the term enregisterment already encompasses fluidity and change (as does the dialectic relationship between \emph{n}th-order and \emph{n}+1st-order of indexicality described by \citealt{Silverstein2003}), it is useful because it adds emphasis to the transition from one order of indexicality to the next. The second case is Beal’s analysis of enregisterment in northern English dialects, which she presents in several articles (e.g. \citeyear{Beal2009, Beal2012, Beal2017}). It has a strong historical focus and shows how linguistic forms come to index localness and are evaluated positively. One example is the creation of “symbolic working heroes” in dialect literature “with the characters of the weaver in Lancashire and Yorkshire, the pitman and keelman in the northeast, embodying the symbolic virtues of the ‘gradely’ or the ‘canny lad’” (\citeyear[136]{Beal2012}). While these are certainly instances of metadiscursive activity and therefore evidence of the enregisterment of northern dialects, I suggest considering them as third-order indexes not simply because they link form and values explicitly, but because they had already been enregistered as northern and non-standard before. \citet[137]{Beal2012} herself notes parallels between the historical development in Pittsburgh and in northern England: Increasing mobility led to contact between speakers of different dialects, and the growing exposure to different variants increased the potential for higher-order indexicality and enregisterment of forms as belonging to specific regional dialects as well. She describes the rising number of metadiscursive activities in northern England particularly in the second half of the nineteenth century, but in contrast to \citet{Johnstone2006}, she does not elaborate on prior negative evaluations of these local forms as non-standard, incorrect and uneducated. Her argument is that the enregisterment of northern dialects is a reaction against leveling processes, defined by \citet[98]{Trudgill1986} as the “reduction or attrition of \emph{marked} variants”. While Trudgill regards variants of relatively low frequency as marked, Beal also considers socially stigmatized variants to be marked. As an example she cites Watt’s observation that some phonological forms of Tyneside speech are “stereotyped as parochial, unsophisticated, old-fashioned (etc.)” (\citeyear[55]{Watt2002}, cited in \citealt[127]{Beal2012}). Interestingly, this implies the presence of prior negative evaluations of variants, which are indicative of second-order indexicality and therefore of enregisterment before the second half of the nineteenth century.
In fact, in a more recent article, \citet{Beal2017} argues that second-order indexical links between linguistic forms and the North of England can already be identified in the sixteenth century and that a \textit{new} order of indexicality started to emerge in the eighteenth century because “forms which have been indexed at the \textit{n}+1-order become associated with another ideological schema” (\citeyear[28]{Beal2017}). To sum up, this discussion shows that the traditional emphasis on the Labovian distinction between indicators, markers, and stereotypes and the accompanying different levels of awareness is not helpful for studying enregisterment processes. For this reason, scholars like Johnstone and Beal, who are most prominently engaged in promoting the use of enregisterment in sociolinguistics, have over the last decade changed their definitions and interpretations, at least to some extent, to follow Silverstein and Agha more closely. In my study, I will do so even more by disregarding the concept of awareness completely and by viewing enregisterment as the discursive construction of a cultural model of action which rests on an emerging second-order indexicality, empirically observable through instances of reflexive activity, with an inherent potential for third-order indexicality. Instead of theorizing enregisterment as proceeding on a cline from first- to second- to third-order indexicality (perhaps even implying some sort of completeness at the last order), I emphasize the dialectic relationship between the orders of indexicality, which is the basis for the processual character of enregisterment. The third issue is the relationship between perception and production. It is related to the question of the role of consciousness and awareness and also to the relationship between orders of indexicality and enregisterment discussed above. Johnstone et al. seem to imply that it is the contrast between using forms in “un-self-conscious speech” and using them in self-conscious, stylized performances of identity which marks the difference between forms which are enregistered and forms which are not (\citeyear[97--99]{Johnstone2006}). They distinguish “the variable, second-order use of regional variants in everyday interaction” from “third-order performances of a person’s knowledge of the sociolinguistic stereotypes that constitute ‘Pittsburghese’”, and they seem to suggest that only the latter case is tied to enregisterment. Their example case to illustrate this is the youngest speaker, Jessica H., who evaluates the variant positively as a marker of Pittsburgh speech, even though her frequency of use is as low as that of the speakers who evaluate the variant negatively (see \tabref{tab:2:4}). She has a middle-class background and little contact with people using local forms, but an “explicit awareness of ‘Pittsburghese’” (\citeyear[97]{Johnstone2006}), which makes it possible for her to use the forms of the register Pittsburghese to index her identity when it is called for (she describes a situation where a group of college students from different places compare and perform their accents). While this case undoubtedly provides evidence of enregisterment because Jessica H.’s account of her own linguistic behavior is an account of reflexive activity, it is only one potential way in which speakers can position themselves socially through language use. In fact, considering Agha’s definition of enregisterment, it is not the production of forms which is crucial for defining registers but the perception of forms.
A register is a register when its forms are recognized by a sociohistorical population – this is independent of whether speakers of the same population actually produce the forms. Nevertheless, Agha suggests analyzing the extent to which speakers are competent in the use of the register as well and therefore distinguishes two social domains: the domain of recognition and the domain of fluency (see \tabref{tab:2:3}). His analysis of the enregisterment of RP illustrates this necessity, as the asymmetry between receptive and productive competence is very high. The point is that Agha’s suggestion to view registers as cultural models of action implies that speakers \emph{can} align with them when they use language, but that they do not have to. \citet{Spitzmuller2013} has developed a model of social positioning which extends a sociolinguistic model of stancetaking \citep{DuBois2007} to include what he calls metapragmatic stancetaking as well. \begin{figure} \includegraphics[width=.8\textwidth]{figures/Paulsen-img04.pdf} \caption{A model of social positioning through language (my own illustration based on the German and English versions in \citealt{Spitzmuller2013, Spitzmuller2015, Spitzmuller10062016})} \label{fig:2:4}\label{fig:key:4} \end{figure} This model integrates the two dimensions which are visible in \figref{fig:2:3}: The triangle on the left captures the dimension of actual language use in a communicative situation in which an actor uses and evaluates linguistic forms in a specific way. The triangle on the right, by contrast, captures the dimension of the register as a cultural model of action which links linguistic forms to abstract types of persons (social personae) and practices. So by using a linguistic form, a speaker aligns himself or herself not only with other actors, but also with types of persons and behavior which are indexed by the linguistic forms that he or she uses. The model emphasizes the relation between the concrete linguistic interaction and the abstract register as a social regularity – a relation which is not fixed but highly dynamic. At the same time, it also allows for a separation of the two dimensions to study their interaction in specific cases. The implications for the role of production and perception are that while language use shapes registers, actors can position themselves with respect to registers through evaluation as well as through production. \citet{Spitzmuller2013} draws on Bucholtz \& Hall’s (\citeyear{Bucholtz2006, Bucholtz2005}) proposition regarding the different ways in which actors construct identity in positioning themselves in relation to other actors and to abstract register models. The first way is a process of \textit{adequation} or \textit{distinction}. Adequation means that an attempt is made to create the impression of similarity to the other actors by downplaying differences and foregrounding similarities. Distinction is complementary to adequation. In this process, differences are foregrounded and similarities are downplayed to create the effect of differentiation from other actors. The second possibility is a process of \textit{authentication} or \textit{denaturalization}. The key concept here is authenticity, but Bucholtz \& Hall emphasize that authenticity as an inherent essence needs to be distinguished from the social process of authentication through which the realness and genuineness of an identity are verified discursively (\citeyear[601]{Bucholtz2005}).
Denaturalization is the opposite process because it involves the deconstruction and subversion of claims to authenticity. Assumptions of naturalized and essentialized links between identity and social and other characteristics (e.g. biological characteristics like skin color) are called into question in this process. Actors can therefore establish a relation to other actors by claiming authenticity of their own language use or by denaturalizing it, e.g. through parody. The third way is a process of \textit{authorization} or \textit{illegitimation}, where identities are affirmed and possibly even imposed or, by contrast, dismissed or ignored. In this process, social institutions play a major role, as they give power to the ideologies which underlie both processes. All these processes show the relationality of identity, which requires the social positioning of actors in relation to other actors through linguistic and other means. It is possible, for example, that actors evaluate the linguistic forms other speakers use as authentic for the group of people they recognize as being indexed by these forms, but that they do not feel the need to align with this group and therefore do not use the features themselves. The decision as to whether to align with a group or not can also be a matter of situational context, as in the case of Jessica H. She might feel the need to align with Pittsburgh speakers when establishing her identity in relation to other college students, but obviously not in the interview situation. Coming back to the issue of consciousness and awareness, it needs to be stressed again that \citet{Bucholtz2005} argue in the same way as \citet{Eckert2016} that the construction of identity and social positioning is not by definition a conscious process: “identity may be in part intentional, in part habitual and less than fully conscious, in part an outcome of interactional negotiation, in part a construct of others’ perceptions and representations, and in part an outcome of larger ideological processes and structures” (\citeyear[585]{Bucholtz2005}). To sum up, this discussion shows that it is fruitful to integrate enregisterment with current sociolinguistic theories and models which belong to the third wave rather than with first-wave ideas and concepts like Labov’s indicators, markers and stereotypes. Since the classic studies discussed above, several sociolinguistic studies have described enregisterment processes in varying contexts.\footnote{The following overview is restricted to studies of enregisterment processes in English-speaking contexts and communities, but it should be noted that there are studies of enregisterment in other contexts as well: \citet{Cole2010} and \citet{Goebel2007, Goebel2008, Goebel2012} study enregisterment processes in Indonesia, \citet{Babel2011} and \citet{Romero2012} in South America, \citet{Dong2010} in China, \citet{Eggert2017} in France, \citet{Elmentaler2017} in Germany, \citet{Frekko2009} and \citet{Peter2020} in Spain, \citet{Madsen2013} in Denmark, \citet{Managan2011} in Guadeloupe, \citet{Newell2009} in Côte d’Ivoire, \citet{Park2016} in Korea, \citet{Slotta2012} in Papua New Guinea, \citet{Wilce2008} in Bangladesh.} In studies on enregisterment in English-speaking contexts, most attention has been given to the role of region and regional dialects in this process.
Analogous to \citegen{Johnstone2006} study on the enregisterment of Pittsburghese, the enregisterment of several other regional dialects has been investigated, with an almost exclusive focus on England and the United States: in England, Sheffieldish and Geordie \citep{Beal2009}, the Black Country dialect in the West Midlands \citep{Clark2013}, the Yorkshire dialect \citep{Cooper2013} and, more specifically, the Barnsley dialect \citep{Cooper2019} and other “distinct sub-‘Yorkshire’ repertoires” \citep[128]{Cooper2020} as well as the Lancashire dialect \citep{RuanoGarcia2020}; in the United States, Copper Country English or Yooperese in Michigan and Wisconsin \citep{Remlinger2009,Remlinger2009b,Remlinger2017}, the Cleveland accent or the northern accent in Ohio \citep{CampbellKibler2012,CampbellKibler2015} as well as Southern speech \citep{Cramer2013}. While social factors necessarily play a role in all the enregisterment processes described in these studies, the labels given to the registers clearly underline the prime importance of positive values associated with the regional culture and language. An interesting case is the “northern accent” in the United States because it demonstrates that a perceived \textit{lack} of social meaning is actually a social meaning in itself and therefore relevant for enregisterment processes. \citet{CampbellKibler2015} explicitly argue against \citegen{Johnstone2006} view that forms like \emph{yinz} and monophthongal [aː] lacked social meaning before they were noticed by speakers, suggesting instead that “they likely existed within a larger register, with meanings such as normative, unremarkable, or (in the right contexts) American, non-Southern, and native English” \citep[98]{CampbellKibler2015}. In their view, the prevailing language ideology in the United States leads to a positive evaluation of unmarked speech. Consequently, \begin{quote} the alternative to a model in which \emph{yinz} indexes Pittsburgh or raised \textsc{trap}/ \textsc{bath} indexes Cleveland is not one in which they index nothing. Rather, it is one in which they both, together with \emph{you}, lower \textsc{trap}/ \textsc{bath}, high front tokens of \textsc{fleece}, the lexical item \emph{cat}, and so on, index normative, unremarkable American English. \citep[98]{CampbellKibler2015} \end{quote} This observation is particularly relevant in that it shows enregisterment processes of regional varieties to be embedded in and linked to other enregisterment processes. In a country where a standard language ideology prevails and interacts with the ideology of nationalism, the enregisterment of that standard, unmarked speech proceeds in relation to the enregisterment of non-standard speech, which is marked as regionally or socially restricted and not adequate to be associated with the speech of the nation as a whole. It is essentially this assumption that the present study builds on: Investigating historical enregisterment processes of American English requires not only an analysis of which forms are associated with American speech, but also, and perhaps even more importantly, of which forms are excluded from an American register. I will argue that the construction of what is American proceeds against an existing British English register, which leads to forms being marked as American as opposed to British.
At the same time, however, it also proceeds through an internal differentiation process constructing some variants as regionally or socially marked and, consequently, the alternative variants as unmarked forms which are fit to represent the speech of the nation. The attention paid to region in studies on enregisterment is actually a bit surprising given that \citegen{Agha2003} seminal study on the enregisterment of \textit{Received Pronunciation} (RP) in England focuses mainly on the establishment, transmission and transformation of \emph{social} values associated with linguistic forms, which led to RP becoming “a status emblem in British society” \citep[231]{Agha2003} and not an emblem of regional belonging. For English, there are only two studies investigating the emergence of a register that is constructed primarily through the creation of indexical links between linguistic variants and \textit{social} characteristics of the speakers: \citet{SchintuMartinez2016} and \citet{But2017} describe the enregisterment of \textit{cant} in eighteenth-century England, highlighting the association of linguistic forms with low speaker status, involvement in criminal activities and negative character traits like maliciousness. A third study, \citegen{Pratt2017} analysis of parodic performances of Californian characters, covers a middle ground. The author’s main interest is in the social nature of enregisterment processes: the study describes the creation of social stereotypes (in this case the Californian Valley Girl and the Surfer Dude) and how these are linked to a repertoire of phonetic forms. They argue, however, that this creation process is part of a broader enregisterment process of Californian English, thus showing that social aspects and region are intertwined. The social complexity of the process is also underlined by \citet{Eberhardt2012}, whose study of the enregisterment of Pittsburghese in the African American community demonstrates the importance of \citegen{Agha2007} dimension of social domain: In contrast to the white community in Pittsburgh, local African Americans recognize only a limited set of linguistic forms as indexical of Pittsburgh speech, and they also recognize different social values: whiteness and, connected to that, negative associations with oppression and racism \citep[367]{Eberhardt2012}. It should be noted that most of the studies above also include a historical dimension of some kind, doing justice to the processual character of enregisterment. Most notable with respect to this dimension is \citegen{Cooper2013} study because of the methodological framework he developed for studying enregisterment in historical contexts. His use of quantitative and qualitative analyses of the representation of dialect forms in historical texts and their discussion in metapragmatic discourse serves as an inspiration for the methodology of the present study, which has a historical focus as well. Furthermore, he describes actual changes in the register’s repertoire of forms over time, using the term \textit{deregisterment} (a term that was first introduced by \citealt{Williams2012}). In general, most studies on enregisterment that have been conducted to date investigate these processes in England and the United States. In that regard, the present study is not different, as it also aims at tracing enregisterment processes in the United States in the nineteenth century.
However, the focus is not on a particular region in the United States, but on the emergence of American English as a new variety of English, thereby relating theories modeling this process to enregisterment. This has not been done before, even in the few studies that look at enregisterment processes in the context of World Englishes: Moll’s (\citeyear{Moll2017, Moll2014}) analysis of the enregisterment of what she calls “Cyber-Jamaican”, a very specific “digital ethnolinguistic repertoire” \citep[216]{Moll2014}, \citegen{Henry2010} investigation of the enregisterment of Chinglish in China and \citegen{Hodson2017b} analysis of the enregisterment of American English in British novels published between 1800 and 1836. For the present study, Hodson’s analysis is highly relevant. Although it is limited in scope to a rather short time period and only six novels, it provides important insights into the enregisterment of American English as a new variety of English in a British context, that is, \emph{outside} of the United States – in a social domain different from that of this study, but nevertheless connected to it in several ways. The key value of inferiority/superiority in particular, embodied prominently through the figure of the “vulgar American”, plays a crucial role in enregisterment processes in an American context as well, as the results of the present study will show. Despite the very limited number of studies on the enregisterment of new varieties of English, the importance of one of its essential concepts, indexicality, is recognized: In the recently published \emph{Cambridge Handbook of World Englishes}, \citet{Schleef2020} makes a strong case for using indexicality to study identity constructions and the role of these constructions in the development of New Englishes. An insightful application of indexicality is provided by \citet{Leimgruber2012, Leimgruber2013}, who uses it to explain the particular mix of linguistic features employed by Singapore English speakers (instead of one of the varieties which are assumed to exist in Singapore, i.e. Singlish, Hokkien or Standard English). The growing interest in enregisterment is accompanied by even more sociolinguistic studies investigating aspects that are highly relevant for studying enregisterment processes. Three of those deserve particular attention: commodification, authenticity and communication technology. First of all, studies of commodification are concerned with economic motivations for representations of language, thereby highlighting the possibility that language becomes commodified, i.e. “organized and conceptualized in terms of commodity production, distribution, and consumption” (\citealt[207]{Fairclough1992}, cited in \citealt[161]{Johnstone2009}). This is the case when linguistic forms are printed on T-shirts or mugs (for concrete examples see \citealt{Beal2009,Cooper2013,Johnstone2009,Remlinger2009}) or when dialect representations are used to increase the popularity of novels (see Picone’s \citeyear{Picone2014} analysis of literary dialect in nineteenth-century local color novels depicting the American South). This is in part a consequence of enregisterment, as the producers of these goods will only use forms which they expect to be recognized and whose indexical values they judge as attractive, but commodification is also part of the enregisterment process as it increases the domain of recognition, stabilizes the repertoire and can also contribute to changes in evaluation.
When linguistic forms are on display as part of economic goods, people who might not have recognized them before because of a different socio-economic or regional background can learn about them and are therefore enabled to position themselves socially in relation to the forms and the values linked to them. This shows the importance of analyzing economic interests and their effects on enregisterment processes, which will also be a relevant factor in the present study with its focus on newspapers. The second aspect, authenticity, is central to enregisterment processes as well. The traditional variationist view of authentic speech is that it is the kind of speech which is the least self-conscious and the least influenced by standard norms in the speech community (see \citealt{Eckert2014} for an overview). In this framework, speech in performance, defined as “verbal art” by \citet{Bauman1990}, cannot be authentic because consciously adapting speech to foreground the poetic function of language entails a high degree of attention to speech. Third-wave sociolinguists have challenged this view of authenticity, however, by arguing that authenticity is not an inherent quality but a claim that speakers make \citep{Eckert2014}. These claims involve attributes that speakers need to possess and on the basis of which they can construct themselves or others as authentic or inauthentic. In this framework, speech in performance can be constructed as authentic even though the performing speakers do not use the same speech forms in other natural contexts. \citegen{Beal2009b} analysis of the British indie band Arctic Monkeys is a good example of this. The speech of the lead singer during performances of their songs is claimed to be authentic even though he is known to speak differently in contexts which are not part of the performance (interviews, other parts of radio broadcasts). Linking her study to Spitzmüller’s model makes clear how the Arctic Monkeys position themselves not just in relation to one register but to several registers in developing their unique singing style. Crucially, in this view, authenticity is not equated with non-reflexive language use; rather, it is the reflexive nature of language use which is essential in constructing authenticity – by engaging in a particular set of cultural practices and using a particular set of linguistic forms, a speaker can make claims about being authentic. This view of authenticity therefore highlights the role of performance as “a highly reflexive mode of communication” which “puts the act of speaking on display – objectifies it, lifts it to a degree from its interactional setting and opens it to scrutiny by an audience” \citep[73]{Bauman1990}. As reflexive activities constitute evidence of enregisterment processes, performances are a highly relevant source of data (see \citealt{Johnstone2011} for a detailed analysis of such a performance). Performances also allow for the creation and transmission of embodied stereotypes or what Agha calls \textit{characterological figures}. Such a figure is defined as “any image of personhood that is performable through semiotic display or enactment (such as an utterance). Once performed, the figure is potentially detachable from its current animator in subsequent moments of construal and re-circulation” \citep[177]{Agha2007}.
Characterological figures render social personae visible in performance and therefore have an important consequence for the interaction between register models and actual language use – they “motivate patterns of role alignment in interaction” \citep[177]{Agha2007}.\footnote{It is important to point out here that the appearance of characterological figures is of course not restricted to performances – they can also be found in static images like cartoons, which is obviously important for a historical study for which recordings of performances are not available (see e.g. Clark's \citeyear{Clark2020} detailed study of the role of cartoons in enregisterment processes in the West Midlands).} It is plausible that language users are more likely to align their speech with register models linked to characterological figures than with models consisting only of indexical links between speech and abstract social values like ‘correct’ and ‘proper’. Several studies have already identified such figures: the Lanky (Lancashire), the Tyke (Yorkshire) and the Geordie (northeast England) (see \citealt{Beal2017}), the Yooper in Michigan \citep{Remlinger2009}, the Yinzer in Pittsburgh \citep{Johnstone2017} and the Valley Girl and the Surfer Dude in California \citep{Pratt2017}.\footnote{The Yooper is an excellent example of such a characterological figure. It is described by Remlinger as “stereotypically male: a backwoods, independent do-it-yourselfer who hunts and fishes, rides a snowmobile, drinks beer, spends time at deer camp, and is suspicious of outsiders” (\citeyear[119]{Remlinger2009}). It is noticeable that this description encompasses mostly activities and a general stance towards life; the appearance of this figure seems to be less important. This is supported by a bumper sticker described by Remlinger which reads “Yooper it’s not just a word, it’s a lifestyle” (\citeyear[119]{Remlinger2009}). Remlinger argues for the importance of such a figure in enregisterment by stating that the Yooper and the way of speaking attributed to this figure “reinforces the notion that a distinct and unified dialect exists in the Copper Country, despite the variability of English throughout the area and despite the widespread use of many of these features throughout the Upper Midwest and places as distant as Alaska” (\citeyear[119]{Remlinger2009}).} However, it needs to be noted that enregisterment can also proceed without recourse to a characterological figure embodying imagined speakers of that variety. \citet{CampbellKibler2015} find that when northern Ohioans were asked to describe the northern accent, they offered only diffuse descriptions and did not mention any characterological figures. This can be explained by their argument that “lack of accent [is] a valuable social meaning in and of itself” (\citeyear[115]{CampbellKibler2015}) – even though enregisterment proceeds through the indexical link between linguistic forms and the values of ‘accent-free’, ‘unmarked’ and ‘normal’ speech, these values do not invite an association with a particular figure because such a restriction to a particular speaker would stand in contrast to the generality and unmarkedness of the variants in question. However, characterological figures are still important because they are linked to those varieties which \emph{are} evaluated as accented.
\citet[301]{CampbellKibler2012} here cites \citet{Preston1997}, who found that widely circulating figures of the hillbilly and the gangbanger are associated with a “Southern accent” and “Ebonics”, two varieties which she regards as “highly enregistered in the U.S. context” even though a systematic investigation of the enregisterment of these varieties has not been conducted so far. All in all, it needs to be noted that performances and characterological figures are crucial for the development of registers as cultural models of action and of their links to language use, by inviting speakers’ role alignment. Every one of these reflexive activities is then part of a larger process of enregisterment, and because claims to authenticity are processes (referred to as \textit{authentication} by \citealt{Bucholtz2005}) which are not global but usually selective (not all practices and all forms are used to claim authenticity), they possess the potential for change. Speakers not only select those attributes and qualities which are important to them, but they also bring in new ones which have the potential of being interpreted by others as authentic as well and of becoming enregistered in the process. This leads \citet[44]{Eckert2014} to conclude that “[t]o the extent that a linguistic variable is deployed in an authenticity claim, the process of adequation will contribute to its ever-changing indexical field”. Finding an answer to the question of which qualities and linguistic forms marked an American as authentic in the nineteenth century will consequently be an important part of the present study. The last aspect to be discussed in this section is the role of communication technology in enregisterment. \citet{Johnstone2011b} provides an important study, again in the context of Pittsburghese, which shows that the medium in which instances of reflexive activity appear needs to be taken into account. Even though the medium does not necessarily \emph{determine} who gets access and what kind of content is selected, it nevertheless has an influence on both. At the same time, it has to be considered to what extent the media are controlled by individuals, families, companies or institutions. Last but not least, the study indirectly demonstrates that it is not primarily the medium that shapes people’s perception and evaluation of the content, but that other factors play a more important role. Johnstone emphasizes this point in her discussion of the website \textit{Pittsburghese} (http://www.pittsburghese.com) by pointing out that even though the website was intended to be entertaining rather than informative, it turned out that at least some people were willing to take the information on the site to be technical expertise (\citeyear[10]{Johnstone2011b}). Johnstone cites one case of a person who searched for information about Pittsburgh speech and preferred this website over a website provided by linguists (Johnstone herself and Scott F. Kiesling) because he considered the latter to be hard to understand at times and not entertaining enough (\citeyear[6]{Johnstone2011b}). In the context of my study, this has important implications: Even though I analyze only one medium, printed newspaper articles, it is to be expected that other factors, especially the text type (whether the article is for example a news story, a humorous anecdote or an advertisement), have an influence on what people expect from the content, how they interpret it and how their beliefs and views are shaped by it.
Overall, three important conclusions can be drawn from the discussion presented in this section: First of all, sociolinguistics has benefited from the concepts of indexicality and enregisterment, but at the same time it has also contributed greatly to their development. Indexicality proves to be a useful concept in third-wave sociolinguistic studies because it helps sociolinguists to theorize and analyze how social meaning is created through stylistic practice in specific instances of language use, and enregisterment can be used to explain how many instances of stylistic practice lead to enregistered styles and registers. At the same time, the development of the concept of the indexical field to identify and describe indexical meanings has advanced the study of indexicality to a great extent, and several studies have underlined the importance of indexicality in the construction and negotiation of social meaning and identity. All the studies which have used the framework of enregisterment illustrate the usefulness of the concept and provide empirical support for and a theoretical elaboration of important factors in enregisterment processes: commodification, performance, characterological figures, authenticity and communication technology. Taken together, the studies show that various research objectives, research methodologies and kinds of data can be integrated when studying enregisterment. Secondly, it has become clear that the integration of enregisterment into sociolinguistic theory in particular can still be advanced further. The distinction between a register as a model of action located on the discursive level and an enregistered style located on the level of actual language use is helpful for differentiating the concepts of \textit{style} and \textit{register} (which are still often used synonymously) and for theorizing and analyzing the process of social positioning modeled by \citet{Spitzmuller2013}. According to this model, speakers’ stylistic choices in communicative situations are not only influenced by the other actors present in the situation but by registers as well. At the same time, these stylistic choices and therefore instances of language use in particular situations also shape registers. Furthermore, newer sociolinguistic findings suggest that while agency is important in this process, the heavy reliance on levels of awareness, which goes back to Labov and which is an attribute of first-wave studies, is not fruitful in analyzing orders of indexicality and stages in the enregisterment process, not least because it is difficult to operationalize. It is, however, not only necessary to integrate enregisterment further into sociolinguistics, but enregisterment also provides an excellent chance for a further integration of sociolinguistics, perceptual dialectology and the field of research labeled discourse linguistics (e.g. by \citealt{Warnke2008b} and \citealt{Spitzmuller2011}). The model for a discourse-linguistic multi-layer analysis (DIMLAN)\footnote{In the German original, the model is abbreviated as DIMEAN (\emph{diskurslinguistische Mehr-Ebenen-Analyse}). It was first described in \citet{Warnke2008b} and presented in more detail and with minor modifications in \citet{Spitzmuller2011}.
An English version was presented by Spitzmüller in Gothenburg in \citeyear{Spitzmuller2014}.} in particular, developed by the authors just mentioned and outlined in \sectref{bkm:Ref506891065}, is an excellent means of systematizing different ways of accessing and analyzing enregisterment and of providing guidance and orientation. Consequently, I will develop and justify my research questions and methodology with reference to this model (see Chapter \ref{bkm:Ref513018040}). The third conclusion concerns the relevance of enregisterment for the study of the emergence of new varieties of English. In \sectref{bkm:Ref521576818}, I have outlined the debate on the role of social factors, particularly of identity, in the emergence of new varieties, and the results of the sociolinguistic studies discussed in this section strongly suggest that these factors play an important role not only in everyday interaction but also in large-scale linguistic change and differentiation processes. In her recent theoretical article, \citet[68]{Eckert2016} argues that “social meaning in variation is an integral part of language and that macrosocial patterns of variation are at once the product of, and a constraint on, a complex system of meaning”, leading her to conclude that while the role of identity in language change still requires further research, it is unlikely that it does not play a role at all. Consequently, she points out that it is crucial to investigate the construction of social meaning(s) at the micro-level and hypothesizes that “[a]ccommodation in colonial situations may have more to do with emerging local social types or stances in the colonial situation than with some abstract colonial identification” \citep[82]{Eckert2016}. I would add that rather than foregrounding one or the other, it seems fruitful to take a closer look at how exactly local social types and stances might actually be connected to a more abstract colonial identification. The following section will shed light on the intersection between enregisterment and a branch of linguistics which has already surfaced in this section because it also has a considerable overlap with sociolinguistics: perceptual dialectology. \subsection{Perceiving varieties: enregisterment and perceptual dialectology} \label{bkm:Ref10466352}\hypertarget{Toc63021213}{} One of the leading scholars in the field of perceptual dialectology, Dennis Preston, makes an important point in a recent handbook article, namely that the term \textit{perception} refers to two different things: On the one hand, it refers to ideas that people have about linguistic facts around them; on the other hand, it refers to perceptual abilities, that is, the ability to detect even subtle differences in speech and to identify and differentiate varieties based on these perceptions. Perceptual dialectology is interested in both kinds of perception \citep[199]{Preston2018}. He notes that while earlier studies focused more on the ideas in people’s minds, later studies were increasingly interested in finding out how the acoustic reality is perceived by speakers and how these perceptions relate to production and to people’s ideas. This development was accompanied by a shift from considering more global properties of speech to analyzing the perception of fine linguistic details, for example by testing whether people could perceive slight differences in the degree of diphthongization of a vowel and match these differences to different places.
A general finding is that the relationship between ideas, psychological perception and production is complex. A second important point that Preston makes relates to the relevance of social factors in perceptual dialectology. As suggested by the term \textit{dialectology}, the field is concerned with the identification of dialects, which have traditionally been understood mainly in regional terms. In contrast to traditional dialectology, which bases the identification of dialects on speakers’ linguistic productions, the aim of the early studies was to add speakers’ perceptions to the picture and consequently, to identify perceptual dialect areas, either based on respondents’ ratings of the degree of difference between their own dialect and other dialects or based on maps drawn by the respondents themselves (see \citealt{Preston2018} for details). Despite this early focus on regional dialects, social considerations have also played an important role in perceptual dialectology. In the beginning, perceptual dialectologists mainly studied attitudes towards the perceived dialects, and evaluative judgements were elicited through a variety of methods, for example by asking respondents to label and provide comments on the maps they had drawn or by asking them to rate perceived dialect regions or samples of actual speech either based on descriptors provided by the linguist (often involving the two important dimensions of pleasantness and correctness) or based on descriptors provided by the speakers themselves (see \citealt{Preston2018} and \citealt{Cramer2016} for details). More recently, elaborate experimental methods have been developed to test the interaction between attitudes and perception, and studies have revealed that speakers’ ideas about and attitudes towards speech can lead to a mismatch between the actual acoustic reality and the perception of this reality. I have already cited some of these studies in \sectref{bkm:Ref506884048}, but I add here two older studies summarized by \citet[197--199]{Preston2018}. The first one was conducted by \citet{Niedzielski1999}, who found that people from Michigan listening to the recorded speech sample of a fellow Michigan speaker did not recognize that this speaker used a higher realization of the vowel /æ/, which is characteristic of the so-called Northern Cities Shift, but identified a lower and more central variant as the form actually produced by the speaker. In her view, the idea that Michigan speakers use \textit{standard} American English and not regional forms influenced the informants’ perception of actual data. Other studies showed that speech perception can also be very accurate and sensitive to fine details. The second study, by \citet{Plichta2005}, shows that American respondents could distinguish different degrees of diphthongization of /ɑɪ/ and also place the slightly varying vowel realizations on a north-south accent continuum. This demonstrates that they not only recognized the monophthongal [ɑː] as a stereotypical southern variant, but that they could also accurately perceive the variants. Nevertheless, the authors could also show an influence of social factors on speakers’ perceptions because when they separated the results by sex of speaker, it became clear that even though the degree of monophthongization was the same (through resynthesis), women’s vowel realizations were rated as more northern and men’s realizations as more southern. 
As there was no acoustic basis for these differences in rating, the authors concluded that ideas about women’s speech being more correct than men’s speech and northern speech being more standard than southern speech influenced the informants’ perceptions. Although this overview of the field of perceptual dialectology is not very detailed, it nevertheless illustrates an important point, namely that studies conducted in this field share many theoretical assumptions and aims with studies using enregisterment as a theoretical framework. First and foremost, they assume that nonlinguists’ ideas about and perceptions of language are worth studying, and by focusing on lay people’s point of view, both frameworks can be considered part of \textit{folk linguistics}. Secondly, studies on perceptual dialect areas as well as on enregisterment focus on the perception or recognition of linguistic forms, either as part of a perceived regional dialect or as part of a register. And thirdly, they share an interest in the evaluation of (sets of) linguistic forms, which is an indispensable part of enregisterment studies because the recognition of difference is assumed to emerge through a differential evaluation of linguistic forms, but which is also central to most studies in perceptual dialectology because of the insight that attitudinal factors can trigger and influence speakers’ perceptions. One difference between perceptual dialectology and enregisterment is their slightly different focus, the first paying more attention to regional dialects and the second to social models of action, which is also a result of their evolution from different fields of study (dialectology on the one hand and linguistic anthropology on the other). Perceptual dialectologists tend to emphasize the relevance of their results for explaining language variation and change, while scholars using enregisterment as a theoretical framework stress that their studies are important for explaining how people are connected in social relationships through language. Nevertheless, as shown in \sectref{bkm:Ref506884048}, many studies on enregisterment also focus on the evaluation of specific linguistic forms as indexical of region. Another major difference concerns the methodological approach. For the most part, perceptual dialectologists analyze data which were obtained through their own intervention, while enregisterment studies are more often than not based on data which are not the result of intervention by the researcher. This means that with regard to Agha’s overview of different kinds of typifications of language use (see \tabref{tab:2:2}), perceptual dialectology can provide the best insights into the typifications in category 2, while enregisterment studies contribute most by analyzing typifications in categories 1 and 3. This is accompanied by a much heavier reliance on quantitative methods in perceptual dialectology and on qualitative methods in studies on enregisterment. However, as shown in \sectref{bkm:Ref506884048}, some enregisterment studies combine quantitative and qualitative methods, and \citet[194]{Preston2018} explicitly discusses discourse as a source of evidence for perceptual dialectology, illustrating qualitative discourse-analytic methods by extracting the presuppositions underlying a short exchange between two friends. In general, it follows from this discussion that perceptual dialectology and enregisterment are two frameworks which are highly compatible and have considerable potential to complement each other. 
In the conclusion of his article, \citet[200]{Preston2018} notes that \begin{quote} Respondents delineate areas as distinct or different on the basis of their likes and dislikes with respect to speakers and the stereotypes that respondents hold of them, giving concrete expression to Silverstein’s notion of higher-order \emph{indexicality}, in which the attributes of people (slow, smart, fun-loving, etc.) are assigned to their language variety and, in fact, become intrinsic parts of that variety’s description. \end{quote} Indexicality and enregisterment can therefore be used by perceptual dialectologists to explain the processes that lead to the recognition of perceptual dialect areas and the specific evaluations of speech forms connected to these regions, while perceptual dialectology can add a cognitive perspective to enregisterment studies and, together with sociolinguistics, provide the link between abstract register models and actual linguistic production, as modeled by \citet{Spitzmuller10062016, Spitzmuller2013}. It is therefore not surprising that some studies on enregisterment in an American context have been conducted by researchers with a background in perceptual dialectology (see the articles by \citealt{CampbellKibler2012,CampbellKibler2015,Remlinger2009,Remlinger2009b} discussed in \sectref{bkm:Ref506884048}), but there is certainly more potential for a fruitful integration of these frameworks. To conclude, perceptual dialectology contributes in important ways to the definition of a variety that relies not on patterns of production but on patterns of perception, including cognitive as well as ideational aspects, and can be integrated with enregisterment, which offers a primarily social and cultural perspective. Before describing a model which brings together different perspectives on the term \textit{variety} in \sectref{bkm:Ref523897668}, I address the relationship between enregisterment and discourse linguistics in \sectref{bkm:Ref506891065}. As the time frame that this study focuses on is the nineteenth century, the majority of methods used by perceptual dialectologists cannot be applied because there are no informants from whom data could be elicited. Discourse linguistics, however, provides a theoretical framework for integrating a vast array of methods to study typifications of language in historical texts and other material and is therefore an indispensable resource for giving systematicity and coherence to a study of (particularly historical) enregisterment.

\subsection{Constructing varieties: enregisterment and discourse linguistics}
\label{bkm:Ref506891065}\hypertarget{Toc63021214}{}

The concept \textit{discourse} is used in many different fields of research and it is consequently defined, interpreted and analyzed in a variety of ways. \citet{Angermuller2014} provides a comprehensive overview of research on discourse and divides the approaches into two inseparable fields: discourse theory and discourse analysis (visualized in \figref{fig:2:5}). \begin{figure} \includegraphics[width=0.8\textwidth]{figures/Paulsen-img05.pdf} \caption{ Discourse research as discourse analysis and discourse theory (my own illustration based on \citealt[26]{Angermuller2014}) } \label{fig:2:5} \end{figure} The former comprises approaches which are primarily concerned with the connection between language and knowledge, power and subjectivity and which contribute to the development of theories in the social and political sciences as well as cultural studies. 
The latter focuses on methodology and comprises works which are concerned with empirical approaches to studying discourse as linguistic practice in social contexts. This separation between discourse theory and analysis is reflected in different (yet crucially connected) conceptions of discourse which are described by \citet{Johnstone2018}. The first one is discourse as “actual instances of communicative action in the medium of language” (\citeyear[2]{Johnstone2018}) and it is these instances (or to be more precise, \emph{records} of these instances) which are studied by discourse analysts. The second conception regards discourses as enumerable entities, as “conventional ways of talking that both create and are created by conventional ways of thinking. These linked ways of talking and thinking constitute ideologies (sets of interrelated ideas) and serve to circulate power in society” (\citeyear[2--3]{Johnstone2018}). An example of an approach which combines the two perspectives is \citet{Blommaert2005}, whose central goal is to explain what he calls “language-in-society” (\citeyear[16]{Blommaert2005}), which manifests itself in the shape of discourse which he defines as \begin{quote} all forms of meaningful semiotic human activity seen in connection with social, cultural, and historical patterns and developments of use. Discourse is one of the possible names we can give to it, and I follow Michel Foucault in doing so. What is traditionally understood by language is but one manifestation of it; all kinds of semiotic ‘flagging’ performed by means of objects, attributes, or activities can and should also be included for they usually constitute the ‘action’ part of language-in-action. (\citeyear[3]{Blommaert2005})\footnote{This definition of discourse even goes beyond a purely linguistic conception of discourse by including other kinds of semiotic activity – even though this view is not generally shared, the importance of including other ways of meaning-making in the analysis is also pointed out by others. \citet[2]{Johnstone2018} for example notes that “discourse analysts often need to think about the connections between language and other such modes of semiosis, or meaning-making”.} \end{quote} This analysis of language-in-society should be “critical” in that it “needs to focus on power effects, and in particular on how inequality is produced in, through, and around discourse” (\citeyear[233]{Blommaert2005}). In contrast to earlier approaches belonging to the school of Critical Discourse Analysis (CDA), he argues for putting more emphasis on the study of “context”, which should not only include “linguistic and textual explicit forms” but also “modes of production and circulation of discourse” (\citeyear[233]{Blommaert2005}). Indexicality plays a crucial role in his approach to discourse analysis because indexical meanings connect linguistic signs and contexts and ultimately link language to larger social and cultural patterns. \citet{Blommaert2005} draws on Silverstein’s orders of indexicality to emphasize that indexical meanings are not randomly created but “systematically reproduced, stratified meanings often called ‘norms’ or ‘rules’ of language, and always typically associated with a particular shape of language” (\citeyear[73]{Blommaert2005}). 
Although Blommaert does not draw on the concepts of \textit{register} or \textit{enregisterment}, it is not difficult to see how they can be integrated into his approach: The “particular shape of language” associated with indexical meanings is what Agha calls a register. Furthermore, two of the fundamental theoretical principles of discourse analysis developed by Blommaert can be extended by using Agha’s concepts of register and enregistered style. \begin{quote} [Principle 3] Our unit of analysis is not an abstract ‘language’ but \emph{the actual and densely contextualised forms in which language occurs in society}. We need to focus on varieties in language, for such variation is at the core of what makes language and meaning social. Whenever the term ‘language’ is used in this book, it will be used in this sociolinguistic sense. One uneasy by-effect of this sociolinguistic use is that we shall often be at pains to find a name for the particular forms of co-occurrence of language. The comfort offered by words such as ‘English’, ‘Zulu’, or ‘Japanese’ is something we shall have to miss. We shall have to address rather complex, equivocal, messy forms of language. [Principle 4] Language users have \emph{repertoires}, containing different sets of varieties, and these repertoires are the material with which they can engage in communication; they will determine what people can do with language. […] [W]hat people actually produce as discourse will be conditioned by their sociolinguistic background. (\citeyear[15]{Blommaert2005}) \end{quote} The notions \textit{repertoire} and \textit{variety} could be described and analyzed more adequately by using the terms \textit{style}, \textit{enregistered style} and \textit{register} as discussed in \sectref{bkm:Ref512260235} and \sectref{bkm:Ref506884048} because they reflect precisely Blommaert’s first principle, namely that “[i]n analysing language-in-society, the focus should be on what language means to its users” (\citeyear[14]{Blommaert2005}). This means that by identifying which repertoires of forms people recognize based on their connection to a specific set of indexical meanings, discourse analysts do not have to give up using words like \textit{English}, but they have to clarify what this word means to language users, i.e. describe the respective register. Registers as recognizable repertoires not only constitute constraints on discourse, as Blommaert points out in his fourth principle, but they are also constructed through discourse, and this is in my view the essential link between enregisterment and discourse theory and analysis. Because of this theoretical compatibility, discourse analysis can offer a methodological framework for studies on enregisterment, and the results of these studies can in turn be used by discourse analysts in their study of discourses on issues other than language. In what follows, I outline \citegen[185]{Spitzmuller2011} theory and methodology of \textit{discourse linguistics}, which is an excellent way of systematizing the variety of approaches to studying enregisterment discussed in \sectref{bkm:Ref506884048}. 
In contrast to \citet{Johnstone2018}, who explicitly considers discourse analysis a research method and not a discipline or subdiscipline of linguistics, \citet[2]{Spitzmuller2011} claim that it is justified to establish a discipline which they call \emph{Diskurslinguistik} (discourse linguistics) because it can contribute to studying the phenomenon of discourse in the same way as other disciplines dealing with discourse (e.g. philosophy, sociology, history, literary studies) and because it can also contribute to gaining insights into language which cannot be generated by other subdisciplines of linguistics (e.g. lexicology, semantics, text linguistics, sociolinguistics). While emphasizing the close relation between discourse linguistics and the Anglo-American tradition of discourse analysis, they distinguish the two by claiming that the focus of discourse linguistics is much more on the function of language to constitute society and knowledge (\textit{gesellschafts- und wissenskonstituierende Funktion}). The center of discourse linguistics is therefore the conception of discourse which rests on Foucault, consequently putting more emphasis on the goal of discovering how language shapes the world (as \citealt{Johnstone2018} puts it), whereas the focus of discourse analysis is more on the conception of discourse as instances of communicative action and on an inquiry into how language is shaped by the world. Consequently, \citegen{Spitzmuller2011} theoretical framework is very much concerned with a definition of the term \textit{knowledge}, which they describe as “the result of a continuous negotiation, recognition and rejection of insights in discursive practice” (\citeyear[42]{Spitzmuller2011}, my translation). Knowledge is seen as discursively formed and this is only possible through language with the \textit{statement} as the smallest unit (Foucault: \emph{énoncé}, \citeauthor{Spitzmuller2011}: \emph{Äußerung}). Knowledge, then, is constituted in discourse through statements, and it is for this reason that \citet{Spitzmuller2011} suggest an adaptation of Jakobson’s model of factors of verbal communication: \begin{figure} \includegraphics[width=.9\textwidth]{figures/Paulsen-img06.eps} \caption{Factors in the discursive constitution of knowledge (from \citealt[54]{Spitzmuller2011}, my translation) } \label{fig:2:6} \end{figure} In the center of the model is Foucault’s \textit{statement}, a terminological decision which is meant to reflect that in this model the message is conceived of as an instance of communicative action (and not as an abstract sentence). The speaker or writer is the actor in this process, but it is important to note that actors do not have to be persons, but can also be institutions, specific groups or individuals representing specific social functions (e.g. politicians). The statement links the actor and the hearer or reader. Another important change concerns Jakobson’s \textit{context}. \citet{Spitzmuller2011} speak of \textit{projections} instead, to emphasize that statements do not refer to any ontological reality, but that they refer to and at the same time evoke projections of this reality. Instead of \textit{contact}, \citet{Spitzmuller2011} use the term \textit{medium} to emphasize that every message is transmitted by means of a medium and that the precise nature of the medium has a bearing on how the statement is perceived by the hearers or readers. 
\citet{Spitzmuller2011} urge discourse linguists to identify precisely which media are used and to include visual elements and interactions between the visual and the linguistic in their analyses. Lastly, perhaps the most important adaptation of Jakobson’s model concerns the \textit{code}. They replace this term with \textit{knowledge} because in order to understand and interpret statements, not only a shared code but also shared knowledge is needed. They summarize the factors in their model by stating that \begin{quote} the discursive constitution of knowledge is a result of statements which are produced by actors in a medial form, which are perceivable by other actors and which refer to mental content on the basis of shared knowledge which is relevant for the understanding of the statement. (\citealt[57]{Spitzmuller2011}, my translation) \end{quote} Based on these six factors they derive six functions of the discursive constitution of knowledge and, again with explicit reference to Foucault, six \textit{regulatives} of this process, which illustrates that statements do not exist separately from power structures in society but are embedded in them. The factors, functions and regulatives are summarized in what Spitzmüller \& Warnke call a \textit{field model} in \tabref{tab:2:5}. \begin{table} \begin{tabularx}{\textwidth}{XXX} \lsptoprule \textbf{Factors} & \textbf{Functions} & \textbf{Regulatives}\\ \midrule speaker/writer & argumentative & hearability\\ hearer/reader & distributive & control of access\\ statement & rhetorical & norms of expression\\ projection & evocative & linguistic conditioning\\ medium & performative & rules of sayability\\ knowledge & metadiscursive & orders of discourse\\ \lspbottomrule \end{tabularx} \caption{ Factors, functions and regulatives of the discursive constitution of knowledge (from \citealt[63]{Spitzmuller2011}, my translation) } \label{tab:2:5} \end{table} This model shows how language, knowledge and power are intrinsically connected. As statements are linked to speakers or writers, they have an argumentative function: These actors argue for or question knowledge and in doing so create knowledge or change it. The distributive function is equally important because in a model which assumes that language has the function of evoking projections (and not of referring to an ontological reality), statements need to be distributed and shared. Both of these factors are regulated by power structures because in order to participate in the discursive constitution of knowledge, speakers have to be able to make themselves heard and hearers have to gain access to the statements made by speakers. In this context, norms of expression regulating the statement and its rhetorical function also come into play because the form of the statement crucially influences whether hearers are not only able but also willing to hear it. It is here that Spitzmüller \& Warnke explicitly mention enregisterment, but they do not explore the connection between enregisterment and norms of expression in any detail (\citeyear[61]{Spitzmuller2011}). However, they stress that norms of expression are crucial because the forms of language and the values attributed to the forms in a community have a decisive influence on access to and participation in discourse. 
Following my argumentation above, it is therefore important to take norms of expression into consideration when studying the discursive constitution of knowledge, but it is helpful to use enregisterment and the concept of register in addition to norms of expression to investigate this regulative.\footnote{Register is a wider concept than that of a \textit{norm}. \citet[126]{Agha2007} considers a \textit{norm} to be an “externally observable pattern of behavior”, a “statistical norm or frequency distribution in some order of behavior”. If this pattern is reflexively recognized as normal by a population, it becomes a “normalized model of behavior” which constitutes a norm for the group of people who recognizes it. The normalized model can then become a normative model, which is “linked to standards whose breach results in sanctions”; it becomes “a norm codified as a standard”. In this line of argumentation, ‘normal’ or ‘standard’ are therefore social values that can be linked to linguistic and other forms of social behavior in enregisterment processes.} Furthermore, it is important to note again that registers do not only regulate discourse, but that they are themselves discursively constructed. The parallels between Agha’s model of enregisterment and Spitzmüller \& Warnke’s model of the discursive constitution of knowledge are evident: Language itself can become the topic of discourse through reflexive activities and a reflexive model of speech is constructed by actors typifying linguistic forms and making these typifications heard by other actors. The typifications have a specific form, are bound to a specific medium and evoke a projection which is bound to shared knowledge. As pointed out in \sectref{bkm:Ref506883801}, the circulation and transmission of typifications is crucial for the formation of a register. One typification does not form a register and this statement is also true for discourse: One statement does not constitute a discourse, but, as \citet[187]{Spitzmuller2011} point out, “[t]he discourse is […] only discourse where intratextual phenomena, actors and transtextual structures interact” [my translation]. The methodological framework that they develop for analyzing discourses is structured based on these three dimensions and presented in \figref{fig:2:6a}. \begin{figure} \includegraphics[width=.95\textwidth]{figures/Paulsen-img06a.pdf} \caption{ \label{bkm:Ref522891320}Layout of the discourse-linguistic multi-layer analysis (DIMLAN) (my own illustration based on \citealt[201]{Spitzmuller2011} and \citealt{Spitzmuller2014}) } \label{fig:2:6a} \end{figure} As registers are discursively constructed, this methodological framework is also useful for studying enregisterment. At the center of discourse analysis as conducted by discourse linguists are statements in concrete temporal-spatial contexts \citep[123]{Spitzmuller2011}, which means that when studying enregisterment the focus has to be on statements which typify other linguistic signs. Note here that Agha defines reflexive activities more widely, as the use of “communicative signs” to “typify other perceivable signs” (\citeyear[16]{Agha2007}), but this integration of linguistic and other signs is also important in discourse analysis where other signs occurring in combination with statements are also seen as part of the process of constituting knowledge. 
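To make the parallels between the two models more tangible, the six factors could in principle serve as an annotation scheme when collecting typifying statements from historical material. The following minimal sketch (entirely my own illustration, written in Python; the field names and the example are invented and are not taken from \citealt{Spitzmuller2011} or \citealt{Agha2007}) shows how a single typifying statement might be recorded along these dimensions:

\begin{verbatim}
from dataclasses import dataclass, field
from typing import List

# Purely illustrative record type: one metapragmatic (typifying)
# statement, annotated along the six factors of the model.
# Field names are my own shorthand, not terminology from the cited works.
@dataclass
class TypifyingStatement:
    actor: str        # speaker/writer (person, institution, or social role)
    audience: str     # hearer/reader addressed by the statement
    statement: str    # the metapragmatic utterance itself
    projection: str   # the model of reality the statement evokes
    medium: str       # e.g. newspaper column, dialect sketch, letter
    knowledge: List[str] = field(default_factory=list)  # presupposed shared knowledge

# A hypothetical (invented) nineteenth-century example:
example = TypifyingStatement(
    actor="anonymous magazine columnist",
    audience="middle-class magazine readership",
    statement="No educated American says 'ain't'.",
    projection="'ain't' indexes a lack of education",
    medium="literary magazine",
    knowledge=["an American standard exists",
               "education is socially valued"],
)
\end{verbatim}

Such a record captures in schematic form what was said above: a typification has a specific form, is bound to a medium, and evokes a projection that rests on shared knowledge.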
Statements are the smallest units in discourse analysis and they typically occur in texts, a unit which is defined by Spitzmüller \& Warnke as “a multiplicity of statements with syntactic-semantic relations and one/several thematic center/centers in a formal or situational frame” (\citeyear[137]{Spitzmuller2011}). The intratextual layer therefore comprises an analysis of a single text, where the focus can be on words, propositions or the structure of the text. If the focus is on textual structures, the visual structure of the text should also be taken into account, comprising non-linguistic elements like images (and their relation to the rest of the text), typography and the material to which the text is bound. Discourse analysis as understood by Spitzmüller \& Warnke does not stop at the intratextual layer, but regards the transtextual layer as equally important. On this layer, a multiplicity of texts produced by different actors and occurring in different media is analyzed; the relation between these texts is established through communicative/discursive action which is why a third layer is included in the framework, namely the agent layer. They see this layer as mediating between the intra- and the transtextual layer because at this layer statements are “filtered” (\citeyear[173--174]{Spitzmuller2011}). This means on the one hand that discursive actions determine which statements occur as part of the discourse and which position and importance they have in the discourse (this filter is referred to as “discourse rules”). On the other hand, every text produced by an actor is shaped by one or several discourses (this filter is referred to as “discourse shape”). Their intention behind the DIMLAN model is not to provide a set of instructions that needs to be followed consecutively and comprehensively, but they consider it a framework which helps the discourse linguist to decide and argue for a specific method or set of methods. They are convinced that “fixed procedures cannot do justice to the multimodality and linguistic-systematic heterogeneity of discourses” (\citealt[135]{Spitzmuller2011}, my translation). Instead, they argue for the principle of triangulation and a mixture of multiple methods and approaches to studying discourses. In addition to the DIMLAN model summarizing the different \emph{layers} of discourse-linguistic analysis, they also distinguish several objects of study as well as methods and procedures for studying it. These are summarized in \tabref{tab:2:7} and intended to support discourse linguists planning a study. In this section, I will not elaborate on all these objects, methods and procedures in detail, but I will come back to them in Chapter \ref{bkm:Ref513018040} when I develop the aims and the methodology of the present study. \begin{table} \begin{tabularx}{.55\textwidth}{XX} \lsptoprule \multicolumn{2}{c}{ objects}\\ \midrule statement & discourse\\ execution of action & product of action\\ event & series\\ \midrule \multicolumn{2}{c}{ methods}\\ \midrule thematic & systematic\\ synchronic & diachronic\\ corpus-based & corpus-driven\\ \midrule \multicolumn{2}{c}{ procedures}\\ \midrule heuristic & focused\\ individual & collaborative\\ one step & several steps\\ \lspbottomrule \end{tabularx} \caption{ Objects, methods and procedures of discourse-linguistic studies (based on \citealt[124--135]{Spitzmuller2011}, my translation). 
} \label{tab:2:7}\label{tab:key:7} \end{table} In conclusion, this section shows that discourse linguistics (as developed by \citealt{Spitzmuller2011}, drawing essentially on Foucault’s work but also on many other theoretical and methodological approaches to discourse analysis, e.g. by \citealt{Blommaert2005}) is especially beneficial for studies on enregisterment because an understanding of a register as something that is discursively constructed allows researchers to use the methodological framework developed by \citet{Spitzmuller2011} to describe precisely how they approach the analysis of this construction process. This framework has a solid theoretical foundation and, while remaining open to a variety of approaches and methods, provides helpful systematicity at the same time. A discourse-linguistic approach to enregisterment is particularly important for studying historical enregisterment processes because methods which rely on intervention by the researcher to elicit reflexive activities on language, for example by interviewing speakers (as used in perceptual dialectology), are obviously not an option in historical contexts. A further conclusion which can be drawn from \sectref{bkm:Ref522888605} as a whole is that enregisterment can be located at the intersection between several overlapping areas of linguistic research. It is this overlap (visualized in \figref{fig:2:7}) which is responsible for the wide variety of methods and approaches to enregisterment and also for the different nuances in how enregisterment is conceptualized. \begin{figure} \includegraphics[width=.8\textwidth]{figures/Paulsen-img07.eps} \caption{ Areas of linguistic research and enregisterment } \label{fig:2:7} \end{figure} In this study, the conception of enregisterment remains very close to \citet{Agha2003, Agha2007}, but important additions and refinements of concepts and methods provided by sociolinguistic studies (e.g. characterological figures, social indexicality, identity construction and social positioning through language, the role of performances and authenticity) will be taken into account in the development of the methodology, the analysis and the interpretation of the data. As the focus of this study is on the nineteenth century, methods used by perceptual dialectologists play no role here, but I will rely heavily on the discourse-linguistic framework developed by \citet{Spitzmuller2011}. In combination with the theory of enregisterment and indexicality, this framework is the basis for developing the concept of the \textit{discursive variety}, in contrast to the \textit{structural variety}, and for modeling the relationship between them.

\section{The construction of discursive varieties through enregisterment: a model}
\label{bkm:Ref523897668}\hypertarget{Toc63021215}{}\label{bkm:Ref523899116}

Structural and discursive varieties are both fuzzy rather than discrete entities. For purposes of description and investigation, however, they are idealized and abstracted as separate entities. On a structural level this is done by linguists who base their descriptions on frequency distributions of variants they observe in actual linguistic behavior. On a discursive level, the speakers themselves construct a variety through reflexive activities – their metapragmatic and metadiscursive engagement with language. Frequency plays a role here as well. 
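As a purely illustrative aside, the structural notion of frequency just mentioned, i.e. the frequency distributions on which linguists base their abstractions, can be sketched in a few lines of code (again my own sketch in Python; the token counts are invented and the postvocalic /r/ labels merely exemplary, not data from this study):

\begin{verbatim}
from collections import Counter

# Invented observations: realizations of one variable (postvocalic /r/)
# in two hypothetical communities. Structural varieties are abstracted
# from frequency distributions of this kind.
observations = {
    "community_A": ["r-full", "r-full", "r-less", "r-full", "r-full"],
    "community_B": ["r-less", "r-less", "r-full", "r-less", "r-less"],
}

for community, tokens in observations.items():
    counts = Counter(tokens)
    total = sum(counts.values())
    shares = {variant: round(n / total, 2) for variant, n in counts.items()}
    print(community, shares)
# community_A {'r-full': 0.8, 'r-less': 0.2}
# community_B {'r-less': 0.8, 'r-full': 0.2}
\end{verbatim}

The discursive notion of frequency, to which I return immediately below, is of a different kind: what matters there is not how often forms are used but how often they are typified.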
The more frequently typifications of linguistic forms occur, the more they turn into metapragmatic stereotypes and the more stable and visible a register appears. \begin{figure} \includegraphics[width=.8\textwidth]{figures/Paulsen-img08.pdf} \caption{ A model of the construction of discursive varieties through enregisterment (from \citet{Paulsenforthcoming}, reprinted with kind permission from John Benjamins Publishing Company, Amsterdam/Philadelphia) } \label{fig:2:8} \end{figure} \figref{fig:2:8} illustrates the process of how a register, or discursive variety, is constructed and it therefore synthesizes the discussion in \sectref{bkm:Ref522870698}.\footnote{I have developed this model in collaboration with my colleagues Benjamin Peter and Johanna Gerwin. It is also presented in \citet{Paulsenforthcoming} and in Peter’s study of the discursive construction of Andalusian (\citeyear[116]{Peter2020}). Peter also created a second model that brings particularly the processes of revalorization of variants and thus of re-enregisterment into focus (\citeyear[157]{Peter2020}).} First of all, the model proceeds from the assumption that there is a level of structured variation underlying all abstractions. This level consists of all the forms which are realized in social and communicative contexts, and linguists can abstract structural varieties from it by using statistical methods. If people engage reflexively with linguistic forms, they elevate the forms to a discursive level by making them subject to metapragmatic and metadiscursive activities. In these activities, forms are indexically linked to social and pragmatic values which are linked to larger ideologies, linguistic and non-linguistic, within a cultural and sociohistorical context. In terms of orders of indexicality, the forms therefore move from \emph{n}th-order indexicality to \emph{n}+1st-order indexicality in this process. It is usually the case that metapragmatic and metadiscursive activities focus on a few salient forms and mark one set of salient forms as different from another set of forms, thus creating different registers consisting of all forms recognized as co-occurring with the set of salient forms, the so-called \textit{register shibboleths}. As pointed out above, if forms are frequently the subject of reflexive activities, this contributes to their salience, but other factors can increase their salience as well, e.g. if a form is foregrounded against other forms or connected to a conspicuous characterological figure. The key idea behind this model is that there is a potential relationship between the structural and the discursive level not only because forms found on the structural level can become part of a register but also because the register potentially influences speakers’ choice of forms in interaction when they position themselves socially (see Spitzmüller’s (\citeyear{Spitzmuller2013, Spitzmuller2015}) model of social positioning, described in \sectref{bkm:Ref506884048}). Overall, the model captures \citegen[234]{Agha2003} idea that “phonetic varieties have now become objects – or, object discourses – in relation to a metadiscourse linking speech to social classifications”.\footnote{In this case, \citet{Agha2003} refers to the case of Received Pronunciation in England, which is why he restricts the statement to \emph{phonetic} varieties. 
In principle, this statement can of course be extended to varieties comprising all structural levels.} By linking sets of co-occurring forms to social values, people discursively construct knowledge about the forms, about social characteristics of speakers using the forms as well as about situations and contexts in which the use of the forms is appropriate. It is important to emphasize again that even though the model suggests that there are boundaries, these boundaries are only constructed, either by the linguist, based on objective criteria, or by the speakers themselves, based on the need to build social relations with other speakers. While discrete boundaries seem to be useful or even necessary, they are in fact fuzzy and subject to constant change. As pointed out above, the stability of both structural and discursive varieties depends on several factors, e.g. the number of interactions between people using different sets of linguistic forms, the frequency of typifications of speech, the salience of characterological figures, and the degree of institutionalization and codification. The model forms the basis for my case studies, which focus on the discursive level by investigating when and how American English was enregistered in the United States in the course of the nineteenth century. Before describing my methodology in detail in \sectref{bkm:Ref513018040}, I will end this chapter by coming back to prior research on the historical development of American English in \sectref{bkm:Ref517077661}. The aim of the section is to analyze how historical linguists have determined the beginning(s) of the variety and what importance they attribute to the structural and the discursive level in describing the major developments of the variety.

\section{Structure and discourse in descriptions of the history of American English}
\label{bkm:Ref517077661}\hypertarget{Toc63021216}{}

Historians describing the development of American English face the problem of determining when American English emerged as a new variety of English, that is, as a variety that is distinct from an old variety. This problem is illustrated in a recent article by Richard W. Bailey, which was published as part of a collection of articles on the history of varieties of English edited by \citet{Bergs2017}. On the one hand, he finds that “[h]istorically considered, American English begins to emerge in the 16th century, even before any English speakers reached the shores of the North American continent” (\citeyear[9]{Bailey2017}). On the other hand, he notes that “America was slow to develop a distinctive linguistic identity” (\citeyear[10]{Bailey2017}) and that it was only “by the beginning of the 19th century [that] American English had become a recognized variety of the language” (\citeyear[12]{Bailey2017}). Finally, he argues that “[w]hat is less recognized [by scholars] is the emergence of the meaning of “American English” as a distinct language with certain distinctive properties” (\citeyear[13]{Bailey2017}). These statements show that Bailey’s concept of American English is rather vague: The reason for postulating such an early beginning, which seems paradoxical at first, probably lies in lexical items which had been used by Native Americans before the arrival of English settlers (he cites for example \emph{canoe} and \emph{maize}) and which were later borrowed by the English settlers, constituting a difference between their speech and the language forms used in England. 
In contrast to that, the American English with a “distinctive linguistic identity” seems to be based on the \emph{recognition} of linguistic differences, which according to Bailey occurred on lower levels of the social scale: While “[h]igh status Americans spoke just like high-status Britons” (\citeyear[12]{Bailey2017}), slaves, servants as well as Scots and Irish immigrants spoke differently. American English as a distinct \textit{language} is defined based on the “certain distinctive properties” accorded to it by contemporary commentators. One of the most important commentators was Noah Webster, whose views, proposed and argued for in his dictionaries, influenced the perceived properties of American English to a great extent. \citet[16]{Bailey2017} summarizes these properties as follows: \begin{quote} Early in the 19th century, the reputation of American English had been settled – at least for some. The language was free of regional variation, at least in comparison to Great Britain. And it was remarkable for its purity which had been achieved through the preservation of the good old ways of Shakespeare and Addison and through the efforts to regularize it by analogy (so \emph{deaf} was like \emph{leaf}), preservation (continuing to employ \emph{air} and \emph{heir} as homophones), or transparency (in a preference for \emph{meeting house} rather than \emph{auditorium}). \end{quote} The example of Bailey’s overview article thus illustrates how vaguely American English is defined – structural distinctiveness is not the primary criterion for postulating the existence of a new variety, but only the basis for a discursive construction of the \textit{variety} or the \textit{language} that relies on recognition and valorization. It is not clear, however, how exactly the recognition of different forms of speech in America (used by slaves, servants, Scots and Irish) leads to \emph{one} American variety and it is equally unclear which set of forms is evaluated as constituting a ‘pure’ and ‘homogeneous’ American language. The issue of defining American English is also addressed by \citet{Algeo2001b} in his preface to the sixth volume of the \emph{Cambridge History of the English Language}, which is concerned with the history of English in North America. He states that \begin{quote} [a]ll languages have internal variation ranging in scope from idiolects (the particular ways different persons use the language) to national varieties (standardized forms of the language used in a particular independent political unit). [...] Between the idiolect and the national variety are dialects, regional and social, on various dimensions. (\citeyear[xviii]{Algeo2001b}) \end{quote} In this vein, he states with regard to the “beginning” of American English that while the “process of differentiation between the English of Britain and that of America began with the first settlement in America” (\citeyear[xvi]{Algeo2001b}), “with the American Revolution, the variations that had developed in the colonies became a new national variety, contrasting with what from this point can be called the British national variety” (\citeyear[xviii]{Algeo2001b}). The existence of independent political units thus seems to be taken as a basis for postulating the existence of two separate national varieties. 
On the other hand, he problematizes the terms \textit{dialect}, \textit{language variety} and \textit{language} as being abstractions that are usually used metaphorically and thus not to be taken literally: “To talk about language in such metaphors is useful and not to be avoided. But it is wise to remember that such talk is metaphorical, not literal”. That he finds it difficult to define a variety based on the criterion of structural distinctiveness becomes clear when he says that “[b]ecause of the complexities of linguistic systems, it is impossible to speak with confidence about how much alike or how different two speechways are or to compare two dialects with respect to their overall rate or degree of change” (\citeyear[xix]{Algeo2001b}). Consequently, much like \citet{Bailey2017}, \citet{Algeo2001b} also bases the emergence of a distinct American variety mainly on discursive factors and identifies the period from 1776 to 1898 as crucial in that regard because in this “National Period” “the sense of a distinct variety arose, which was standardized especially in dictionaries and spelling books and spread over the continent during the westward expansion” (\citeyear[xx]{Algeo2001b}). A focus on recognition and thus on discursive factors can also be found in \citeapo{Davis2003} introduction to his collection of texts relating to American English between 1781 and 1921. He attributes a “pivotal position” to American English because “it is the first form of English to be recognized as a distinct new variety” (\citeyear[xi]{Davis2003}). He describes this recognition process further as “the process by which a distinct form of English was picked out of the mass of variation existing on opposite sides of the North Atlantic and on the Atlantic itself, and the various social and political values assigned to that form once its identity had been instituted” (\citeyear[xi]{Davis2003}). In some ways, this description of the process is reminiscent of the definition of enregisterment because it is essentially about a set of forms being recognized as distinct; however, Davis argues here that social values are assigned to the set of forms \emph{after} they have been recognized, whereas Silverstein and Agha emphasize that the differentiation process is essentially based on a differential evaluation of forms, so that the emerging registers are a \emph{consequence} of valorization processes: Forms are recognized \emph{because} they are assigned social values in the process of enregisterment. Like \citet{Algeo2001b}, Davis dates the starting point of this process of recognition to the eighteenth century, following not only the American independence from Great Britain but also the standardization of English in England. The texts edited and published in this series largely constitute expert discourses on the language spoken in America, starting with selected papers in the first volume, which span the period from the publication of John Witherspoon’s “The Druid” in 1781 (in which he coined the term \textit{Americanism}) to Charles Whibley’s article “The American language”, published in Blackwood’s Magazine in 1908. The second volume contains glossaries of Americanisms by \citet{Elwyn1859}, \citet{Fallows1883} and \citet{Norton1890}, the third and the fourth volumes present the works of the verbal critic Richard Grant White (\citeyear{White1870,White1880}), followed by \citegen{ScheledeVere1872} re-interpretation of \citegen{Bartlett1848} \emph{Dictionary of Americanisms}. 
The sixth volume is concerned with works by language planners, including \citet{Molee1888} and \citet{Williams1890}. The seventh and eighth volumes focus on articles and books by academics: Brander Matthews (\citeyear{Brander1892b,Brander1901, Brander1909}), George P. Krapp (\citeyear{Krapp1919}) and Gilbert M. Tucker (\citeyear{Tucker1921}). Based on these texts, Davis identifies five “loosely overlapping periods, often corresponding to Americans’ changing political awareness” (\citeyear[xii]{Davis2003}). The first period is marked by an emphasis on linguistic innovation and opposition to British English, which was connected to the new political independence. During the second period, however, many of these presumably innovative forms were identified as older British forms, reflecting a change of focus from innovation to conservatism. Davis links this to “a cultural backlash against the perceived coarseness of frontier-dominated political life in the 1820’s and 1830’s” as well as to “the concerns of the new philology” (\citeyear[xii]{Davis2003}). For the third period, Davis notes an increase in the awareness of American English as a distinct variety, which is reflected in authors trying to define standard American English and establish their authority over other competing positions (a notable example being Richard Grant White). Davis characterizes this period as being marked by great changes, the most important ones being “industrialization, urbanization, intensive immigration, and the expansion of the middle class and its educational institutions”, and these changes were accompanied by great “cultural insecurity” (\citeyear[xii]{Davis2003}). This not only increased the pressure to define a standard but also the difficulty of the task. The fourth period was different from the previous one because the emphasis shifted to the status of American English as a standard and a model in the world, a development which “corresponded to the awakening and growth of the United States as an imperial power from 1890–1914, and its involvement in Latin America and the Pacific” (\citeyear[xiii]{Davis2003}). Davis dates the last period as starting after the First World War because of a “renewed focus” on the distinctiveness of American English, which was especially noticeable in \citegen{Mencken19371919} work on the \textit{American language} \citep[xii]{Davis2003}. Regarding expert discourses on language, his interpretation thus suggests that the crucial period for the emergence of an American standard variety is the third one, which loosely spans from the middle of the nineteenth century to World War I. Another attempt to define the beginning of a distinct American English variety can be found in \citegen{Simpson1986} \emph{The politics of American English, 1776--1850}. He claims that \begin{quote} American English as we recognize it today had been essentially established by 1850. That is, its major deviations from British English had by that time both been proposed in theory (mostly by Noah Webster) and adopted into relatively common (though not uncontested) practice. \citep[11]{Simpson1986} \end{quote} This statement shows that on the one hand, Simpson distinguishes between theoretical ideas about language (a discursive dimension) and practice (a structural dimension). 
On the other hand, he implies a connection between the two – not only temporally, by saying that American English was established by 1850 on both levels, discourse and practice, but also by using the term \textit{adopted}, which suggests that the theoretical ideas have shaped actual practice. The importance he attributes to discourse is highlighted further in his statement that “[t]hanks to the efforts of two generations of linguistic pioneers, Noah Webster foremost among them, and to the spectacular rise in national self-confidence, America had, by about 1850, a version of English that was recognizably its own” \citep[3]{Simpson1986}. Furthermore, he also argues for the date 1850 primarily on the discursive level: First of all, he notes a change in discourse from a focus on social and political differences and tensions to a focus on unity and homogeneity, which is related to democracy having become “the dominant ideology or self-image, so that, in the continuing development of a self-declared pluralistic culture, a struggle of languages has been the harder to perceive where it does exist” \citep[7]{Simpson1986}. Secondly, and this is his central argument, he finds a difference between two traditions of American literature and their representations of language. The first tradition, before 1850, has James Fenimore Cooper as a particularly representative writer, while the second tradition is marked by Transcendentalist authors: \begin{quote} In Cooper’s world, language and society are presented as mechanical; the parts remain parts, without combining into any grand whole. Language is always made up of different languages in conflict, and they do not resolve themselves into any democratically representative common language. These are precisely the implications that the Transcendentalists avoid or cover over. \citep[252]{Simpson1986} \end{quote} Simpson’s study is thus essentially a study to be located on the level of discourse, with a focus on literary works published between 1776 and 1850. The importance he attributes to the late eighteenth and first half of the nineteenth century in the creation of a distinctly recognized American English is acknowledged by \citet{Jones1999} in his study \emph{Strange talk: The politics of dialect literature in Gilded Age America}; however, as the title suggests, Jones argues that the late-nineteenth century is even more important for the formation of American English: “I agree with Simpson that the real political clash of dialects that we find in Cooper disappears – by about 1850 – with the idealism of the transcendentalists [...]; it reappears, however, in an even more various and extreme way in post-Civil War literature” \citep[215]{Jones1999}. Regarding the cultural background in which such dialect literature was published, he notes that after the Civil War, two changes could be observed with regard to the development of ideas about language: First, “the idea of an American English came into its own” and second, “this recognition of a new national language was accompanied by a new acceptance of dialect” \citep[16--17]{Jones1999}. This acceptance and (scientific) interest in dialect was motivated by ideas that regarded language as crucial “to understand the mind and culture of its speakers”, but, on the other hand, there was also a fear of dialect diversity connected to a fear of “contamination and fragmentation” \citep[17]{Jones1999}. 
This new literary movement attracted the interest of a wide range of readers, reflecting a “cult of the vernacular” \citep[39]{Jones1999}. Representations of dialect were not only popular in mainstream literature but they were also part of a large number of dialect sketches included in “highbrow literary magazines”, which had a refined readership, and they were used by minority writers (such as Abraham Cahan and Paul Laurence Dunbar) in their works, which were read not only by white but also by Black Americans \citep[7]{Jones1999}. His main conclusion is that “[d]ialect literature rose to prominence in the Gilded Age because it was integral to developing a cultural debate over the state of American English” \citep[210]{Jones1999}. This shows that, like \citet{Simpson1986}, Jones focuses on the discursive level by investigating the ideas surrounding American English, which led to the creation and transformation of linguistic ideologies, and by taking a closer look at the role that literature played in this process. Unlike Simpson, however, he regards the late nineteenth century as the crucial period for the formation of American English. David Simpson and Gavin Jones are both literary scholars, and even though \citet[11]{Simpson1986} makes claims about the influence of ideas about language on actual linguistic practice, their interest lies mainly in the discourses on language and not in language use. The importance of the discursive level for investigating the historical development and especially the emergence of an American standard has also been explored by several linguists in more detail. \citet{Cooley1992} provides an important contribution by asking the question: “[U]nder what circumstances are some variant systems recognized as dialects while others are not? In other words, when and how is variation accorded perceptual status and named a language variety itself, a dialect?” \citep[167]{Cooley1992}. She thus distinguishes between the presence of variation and its perception by speakers and uses this distinction to shed new light on the debate about whether early American English is marked by uniformity or diversity (on a structural level) – a debate that also plays an important role in accounts of the emergence of American English that are based on different theories of the emergence of new varieties (see \sectref{bkm:Ref524246106}). \citet{Cooley1992} argues that there was “a co-existence of both diversity and uniformity in early American English” and that “[t]his co-existence may be reconciled by psycholinguistic or sociolinguistic principles rather than denied by arguments for a single state of uniformity or diversity or for sequences of one followed by the other” (\citeyear[168]{Cooley1992}). An important point to consider here is the type of evidence – she finds that for the early American period most sources are descriptions and comments by grammarians, orthoepists and journalists as well as literary dialect, all of which constitute secondary sources which are not independent of the beliefs and perceptions of those who produced them. A change in these beliefs therefore necessarily had an effect on their descriptions, comments and representations of language, and such a change was caused by the Revolutionary War and the War of 1812, which led to a “change of social and political allegiance, through which the colonists began to consider themselves Americans rather than Englishmen” \citep[180]{Cooley1992}. 
In her view, a consequence of this development was a “psycholinguistic, perceptual, change of the standard variety” during the latter part of the eighteenth century and the early part of the nineteenth century, which is similar to Schneider’s postulated change from an exonormative to an endonormative orientation. Her main argument is that the recognition and acknowledgement of language diversity requires the existence of a standard variety, marked by uniformity, against which the diverse sub-varieties can be delimited \citep[180]{Cooley1992}. She regards the appearance of literary dialect in the late 1780s as another indicator of an increasing recognition of an American standard because only the existence of a standard would guarantee the recognition of representations of forms differing from the standard. Overall, her point is thus that historical evidence pointing to the uniformity as well as to the diversity of language in the United States needs to be interpreted by taking into account the beliefs of the commentators and writers, which are in turn shaped by social and political changes. It is important to note, however, that while \citet[183]{Cooley1992} suspects that the growing recognition of a standard also influenced language use (even if unconsciously), she does not explore these connections in more detail. The question of the relation between literary dialect and the emergence of an American standard is also addressed by \citet{Minnick2010}, who summarizes her main claim as follows: \begin{quote} An American standard for English, then, emerges through the contrast to it that the literary representations of vernacular speech provided, since they are replete with information about what is not standard by their very markedness. This contrast contributes to an image of invisibility for ‘standard American’, a prestige variety defined by no identifying characteristics of its own but rather only by what it is not: regionally or socially or racially marked. The ‘otherness’ of the vernacular-speaking characters of the local color tradition, then, is part of an increasingly vivid background against which an image of a ‘standard’ American English that otherwise has no appreciable identity of its own is rendered visible. \citep[181]{Minnick2010} \end{quote} Minnick bases her argument on the one hand on the works by Anne Newport Royall, which were published early in the nineteenth century and in which Royall represented the speech of provincial characters from rural New England and the south, and on the other hand on writers in the Old Southwestern tradition, who created the figure of the rugged uneducated frontiersman or backwoodsman. Interestingly, \citet[178--179]{Minnick2010} finds a strong ambivalence here because this figure and his speech are not only constructed as the “other” against which the narrator’s speech is presented as standard, but the independence and the individualism of the figure is also celebrated in these texts, so that it becomes “a popular if stereotypical symbol for American national identity and particularly for American masculinity”. This leads her to conclude that a positive evaluation of a standard did not automatically coincide with a depreciation of vernacular speech because these “varieties” could “index culturally popular values like masculinity, independence, bravery, and physical strength” \citep[180--181]{Minnick2010} as well. 
The situation is different for the third group of speakers whose speech is constructed as the “other” in literary works: African Americans. Representations of their speech functioned as implicit evidence of Black inferiority, and there were no positive social values connected to it, at least not in works written by white authors. In general, Minnick’s analysis thus locates the emergence of an American standard in the nineteenth century, and the emphasis on its construction through contrast with “other” speech varieties in literary works clearly places her work on a discursive level. Minnick’s study also provides an important contribution to the analysis of the discursive construction of American English from a methodological point of view because she advocates the use of quantitative corpus methods for analyzing literary dialect to avoid interpretations and conclusions that are limited by being based only on an impressionistic analysis of the data. A very detailed study on the standardization of American English which analyzes discourses on language in America from a sociolinguistic point of view is \citegen{Bonfiglio2002} \emph{Race and the rise of standard American}. His focus is on pronunciation, and thus on the emergence of a standard American accent rather than a standard variety comprising all linguistic levels, and he aims to show “why and how [the] mid(western) accent rose to be perceived as the standard” \citep[1]{Bonfiglio2002}. The linguistic form that is central to his analysis and discussion is post-vocalic /r/: The two cultural centers of the country, New York and Boston, were marked by the absence of post-vocalic /r/, and yet its presence became a defining feature of the national standard, for which he uses the label \textit{American network standard} because it came to be used by broadcasting media. To uncover reasons for this rather unusual development (in comparison to England and other European countries, where the speech of the cultural centers became the standard), Bonfiglio analyzes on the one hand a multitude of texts to identify the linguistic ideologies of “influential figures in the United States” and how they relate to ideologies of race and ethnicity. On the other hand, he also investigates how the views expressed in these texts were perceived and received by speakers who were heard by many people: popular actors, announcers and politicians. One important result of his study relates to the time period during which the standard became recognized: He regards the early twentieth century as the crucial period and thus explicitly argues against \citegen[225]{Labov2006} speculation that the shift in the prestige of /r/ occurred in the 1940s in connection with changes resulting from World War II. He rules out an earlier starting point for the emerging pronunciation standard because he regards the radio as crucial for the transmission of knowledge of pronunciation: “In 1920, radio had not yet begun its programmed broadcasting. Thus knowledge of pronunciation remained largely local, and impressions of the speech of other regions was not gained directly but spread largely by word of mouth” \citep[47]{Bonfiglio2002}. This also affected the discourse on /r/: \begin{quote} […] both the description and prescription of pronunciation of /r/ remained largely local and tended to generalize based upon the regional custom until the advent of regular radio broadcasts. 
Non-rhotic /r/ is observed and recommended on the east coast, while rhotic /r/ is preferred in the central states. \citep[54--55]{Bonfiglio2002} \end{quote} This view must be challenged in light of the studies noting the enormous interest in representations of dialect in the nineteenth century. Even though it is clear that the impression of pronunciation gained through these representations is not as direct as that gained by listening to actual speech, it nevertheless provides readers with an idea of what people in other regions sounded like. In the following analysis, I will also show that representations of non-rhoticity (and, implicitly, also rhoticity) circulated in nationwide newspaper discourses especially in the last two decades of the nineteenth century, so that discourses on /r/ do not seem to be as local as Bonfiglio suggests. In addition to the advent of radio broadcasting, Bonfiglio identifies two other reasons why (mid)western speech patterns, particularly rhoticity, became the standard accent in the early twentieth century. The first consists of the “xenophobic and anti-Semitic movements” (\citeyear[4]{Bonfiglio2002}) of that time. They built on an increasing consciousness of race and ethnicity that Bonfiglio sees emerging in the postbellum period and ultimately leading to a shift in prestige of eastern cities, especially of New York City, from being positively viewed as cultural centers to being negatively viewed as “contaminated” by poor immigrants. This shift in prestige also affected the prestige of the eastern speech patterns, including non-rhoticity, which came to be seen as racially different and impure. This emphasis on race, which is underlined by the title of his study, is explicitly directed against the view that the emergence of an American pronunciation standard, or more specifically the shift in prestige of post-vocalic /r/, is a result of differentiation from British English speech – a view that he labels a “myth” \citep[2]{Bonfiglio2002}. Connected to this is the second reason, namely the construction of the (mid)western region and its speech patterns as an ideal: \begin{quote} The (mid)western accent was constructed and desired by forces external to the area itself that projected a preferred ethnicity upon that region and defined it within a power dynamic of difference, i.e. it was precisely not the speech of the ethnically contaminated areas of the northeast metropolis and the south. \citep[8]{Bonfiglio2002} \end{quote} His argument is thus that it was not the (mid)west per se that was attractive, but it was the negative image of the south and the northeast that led to its positive valorization – he speaks of “antigravitational forces” (\citeyear[72]{Bonfiglio2002}) here. Similarly, this valorization was not pushed by the (mid)westerners themselves, but was the result of the cultural power exerted by the northeastern population (\citeyear[72]{Bonfiglio2002}). This is visible, for example, in “the decision of Harvard and other Ivy League Universities to seek the sustenance of their proper ethnicity and culture in rural western regions”, which indicates “the onset of a shift in the prestige discourse of the educated man” \citep[230]{Bonfiglio2002}. An important figure in this valorization process is the “western hero as an instantiation of the proper American male” whose “speech patterns came to function as metonymies of the condition of nostalgia, sentimentality, and tradition” \citep[231]{Bonfiglio2002}. 
There are interesting parallels to \citegen{Minnick2010} finding that there are positive evaluations of a similar type of masculinity, characterized by the physical strength and toughness needed for a life at the frontier, in the dialect representations in the literature of the Old Southwestern tradition. She takes this as evidence that while the speech of these figures was marked as non-standard, it was nevertheless not condemned but indexed positive characteristics as well. This suggests that the positively evaluated masculinity noted by \citet{Bonfiglio2002} has its roots in the early nineteenth century. It is obvious that Bonfiglio’s analysis must also be located primarily on the discursive level. His basic argument is “that folk linguistic beliefs determined the national standard”, which is why he aims to “illuminate the larger cultural factors that informed the folkish linguistic beliefs in question” \citep[73]{Bonfiglio2002}. However, this argument shows that he also makes claims about changes in language use – the standard is not only an idea, but a model that influences people’s choice of linguistic forms. This is visible, for example, in the statement that “Americans gravitated toward the pronunciation associated with a ‘purer’ region of the country, and they did so in a largely non-conscious manner” \citep[4]{Bonfiglio2002}. This implies that speakers aligned with a model of speech that they evaluated positively, and that this alignment process was not the result of a conscious effort. Similarly, the following statement illustrates his view that a regional diffusion of a linguistic form resulting in a linguistic change in a region is caused by non-linguistic factors: “The migration of the American continuant postvocalic /r/ from the western states eastward, its supplanting of the dropped postvocalic /r/ of the east coast, and its rise to standardization began in the twenties and was precipitated by the axis of radio, anti-immigration, and westward nostalgia” \citep[60]{Bonfiglio2002}. Bonfiglio’s claims and arguments are thus reminiscent of the claims underlying the theoretical framework of enregisterment outlined in \sectref{bkm:Ref512260235} and the model of social positioning developed by \citet{Spitzmuller2015, Spitzmuller10062016} (see \figref{fig:2:4} in \sectref{bkm:Ref506884048}). This supports my position that an analysis of the discursive construction of American English is an indispensable part of studying the emergence of American English. His focus on a linguistic form, /r/, and its “culturally constructed value” \citep[7]{Bonfiglio2002} is also interesting from the point of view of enregisterment. However, the strong focus on one form, even if it has a “high cultural visibility” \citep[6]{Bonfiglio2002}, is also problematic as it does not reveal anything about its relation to other forms – if American English is investigated as a process of enregisterment, it is necessary to study how \emph{sets of} perceivable signs, linguistic forms and other non-linguistic signs, become linked to social and cultural values and thus differentiable registers. Bonfiglio’s major claims, namely that race played an important role in the standardization of American English and that language ideology, that is, beliefs about language, is central to the standardization process, are also shared by \citet{Milroy2012}. 
In their third edition of \emph{Authority in language}, an often-cited work on processes of language standardization and the role played by prescriptivism in these processes, they conclude that \begin{modquote} In the US, bitter divisions created by slavery and the Civil War shaped a language ideology focused on racial discrimination rather than on the class distinctions characteristic of an older monarchical society like Britain which continue to shape language attitudes. Also salient in the US was perceived pressure from large numbers of non-English speakers, from both long-es\-tab\-lished communities (such as Spanish speakers in the South-West) and successive waves of immigrants. This gave rise in the nineteenth and twentieth centuries to policies and attitudes which promoted Anglo-conformity. \citep[160]{Milroy2012} \end{modquote} Even though they do not cite \citegen{Bonfiglio2002} study, their review of the literature thus confirms his emphasis on race in contrast to the emphasis on class in Great Britain. However, concerning the periodization of the process of standardization in the United States, they regard a different period as particularly important: “Heavy immigration to northern cities between 1880 and 1920 gave rise to conflicts of dominance between immigrant groups and older élites, and to labour conflicts which had the effect of crowding out democratic ideals of equal rights in both north and south” \citep[160]{Milroy2012}. Radio broadcasting, especially, is thus not regarded as an important factor in their analysis, which constitutes a major difference from Bonfiglio’s account. However, it needs to be noted that Milroy \& Milroy do not focus on pronunciation alone, which makes radio broadcasting perhaps less important as a factor. On the other hand, they do not analyze the standardization process by focusing on the linguistic forms themselves – it is not clear which forms become standard and which do not, so that the reasoning about the causes of standardization remains on a rather general and abstract level. With regard to the importance of language ideologies, \citet[162]{Milroy2012} not only agree with \citet{Bonfiglio2002}, but they also link their findings on standardization processes to the understanding of language ideologies proposed by the linguistic anthropologists Woolard, Silverstein, and Irvine and Gal. In particular, they point out that these beliefs have central social significance, recognizing their essential function of helping language users “to make sense of the socially structured language variability which they observe every day” (\citeyear[162]{Milroy2012}) and also, beyond the individual level, their role in “delimiting and defining salient social groups and indeed whole nations” (\citeyear[163]{Milroy2012}). It is precisely this delimitation process that is at the heart of the theory of enregisterment, and this study seeks to identify not only which linguistic forms were constructed as ‘American’ but also which underlying ideologies made this construction possible. The overview thus far shows that there is much interest in the history of American English that focuses on the discursive level, but the views with regard to the crucial periods for the recognition of American English as a distinct or standard variety vary and depend, at least to a great extent, on the material studied: publications by “experts” on language or other influential figures on the one hand, and literary works, especially dialect literature, on the other hand. 
This underlines the need for further studies investigating different materials – a need which is met by this study, as it focuses on newspaper articles. With regard to the structural level, i.e. the structural differentiation between American English and British English as well as between distinct American varieties, it needs to be noted that systematic and detailed studies on the historical development of linguistic forms in America are actually rather scarce. \citet[152]{Montgomery2001} notes for example that “[t]he field has many simplistic statements and assumptions about what must have occurred in new-dialect formation in the American colonies, rather than documentation of input varieties and the extent to which these were maintained”. His article is mainly concerned with summarizing what is known about British and Irish antecedents, but in doing so he also offers theoretical and methodological insights. He criticizes, for example, the fact that some hypotheses which are not supported by enough linguistic evidence have “achieved a life of their own” as “part of constructed American memory” \citep[111]{Montgomery2001}. An example is the postulation of linguistic ties linking Massachusetts, Virginia, and Appalachia with southeastern England, southern or southwestern England, and Ulster, respectively \citep[109]{Montgomery2001}. In his view, linguistic studies are required in which several types of evidence are carefully analyzed and interpreted: This evidence should comprise not only secondary sources like popular observations by outsiders, commentary by grammarians and lexicographers, and literary attestations, but also primary sources like poetic rhymes and original texts and manuscripts, because the latter have a higher value in reconstructing structural changes and differences than the former \citep[97]{Montgomery2001}. An example of such a collection of original texts providing insights into the variation present during the earliest phase of settlement is the Early American Corpus, containing texts from New England from the 1620s to the 1720s, among them records of the Salem witchcraft trials of 1692 \citep{Kyto2004}. In a pilot study, \citet{Kyto2004} finds that there is hardly any evidence of grammatical and phonological forms that can be classified as \textit{dialect input}, that is, as forms found in local vernacular British dialects. The few dialect forms that do occur are found in speech-related records. The other forms instead reflect an “early prestige language” that also has its origin in Britain and that was taken by the settlers to America. The predominance of these prestige forms in the corpus is explained by \citet[151]{Kyto2004} by the fact that most of the authors of the documents included in the corpus were educated. In general, Kytö does not expect “major differences from the language of the mother country”, and her analysis and interpretation rather support the continuity between the (socio-)linguistic variation present in the areas that the settlers originated from and the variation present in the New England settlements. The difficulty of determining the beginning of structural differentiation based on primary sources can be illustrated, for example, by the discussion revolving around the beginning of White Southern American English. 
\citegen{Bailey1997} article addresses the question “When did Southern American English begin?” by analyzing several types of primary data, among them phonetic records of southern speakers born in the nineteenth century from the Linguistic Atlas of the Middle and South Atlantic States (LAMSAS) and the Linguistic Atlas of the Gulf States (LAGS), as well as orthographic evidence from the Tennessee Civil War Veterans Questionnaires (TCWVQ), a collection of documents written by white male Tennessee speakers, most of whom were educated \citep[260]{Bailey1997}. He focuses on twelve phonological features and seven grammatical features, which he regards as distinctively southern (he calls them “long-established stereotypes of SAE”, \citeyear[258]{Bailey1997}) and finds that only six phonological and four grammatical features were clearly part of Southern American English by the middle of the nineteenth century. The other six phonological and two grammatical features appeared or were increasingly used in the nineteenth century, particularly in the last quarter (in the period after 1875). He thus concludes that Southern American English begins in the last quarter of the nineteenth century and hypothesizes that drastic social changes in the south during that time created a situation which was particularly conducive to the diffusion of linguistic changes \citep[271]{Bailey1997}. The most important changes were the increasing number of villages and towns, which became connected through railroad tracks, and the increasing geographical mobility. Bailey’s hypothesis was tested seventeen years later by \citet{Montgomery2014}, who published their results in an article entitled “When did Southern American English really begin?” In the years between the two studies, new evidence had been found, on the basis of which the Corpus of American Civil War letters was created; this corpus provided new primary evidence to shed light on the beginning of Southern American English. Based on an analysis of the grammatical features investigated by \citet{Bailey1997}, the authors find that “the crucial period for the developing distinctiveness of Southern American English must be pushed back at least one generation”. They regard Bailey’s argument that the social changes in the last quarter of the nineteenth century correlate with the beginning of Southern American English as not very convincing because, based on research on the history of the American South, they assume that “other periods probably witnessed substantial innovation and diffusion, too” \citep[334]{Montgomery2014}. In support of their own argument, they stress instead that historians “have argued that by 1830 the South had become a self-conscious region increasingly at odds with the nation at large”, which is why they ask the question of whether “regional consciousness [could] have played a role in the formation of regional standards of speech” \citep[345]{Montgomery2014}. These two articles discussing the “beginnings” of White Southern American English thus show that it is especially the scarcity or even absence of primary evidence that makes it difficult to reconstruct linguistic changes, particularly on the phonological level. Furthermore, differentiation may have proceeded differently on different levels of structure (phonology, grammar, lexicon), which makes the question of when a variety becomes distinct difficult to answer. 
\citet{Montgomery2014} test \citegen{Bailey1997} hypothesis only on the grammatical level, so that it is not clear whether the earlier beginning that they postulate would also apply to the phonological level. Quantitative statistical measurements, as proposed by \citet{Pickl2016}, are difficult or impossible to apply given the amount of data that is available for such analyses, but this only strengthens \citegen{Montgomery2001} call for finding and analyzing more primary evidence. Lastly, their discussion of extra-linguistic factors influencing structural differentiation shows how a case can be made for both the late nineteenth century and the first half of the nineteenth century, which underlines the need for investigating this relationship in more detail. This need has been acknowledged and addressed by \citet{Montgomery2015}. He provides further support for locating the beginning of “distinct (type)s of English” (\citeyear[99]{Montgomery2015}) in the South in the period between 1750 and 1850 by investigating not only more primary evidence (letters and a testimony written by semiliterate commoners), but also secondary evidence: 51 primary-level Confederate schoolbooks. So despite his emphasis on investigating primary sources, Montgomery suggests here (albeit tentatively) that “the development of distinctive Southern English may have involved ideology leading to \emph{perception} of the South as a distinct region perhaps as much as the reality of one” (\citeyear[99]{Montgomery2015}). His argument is that those forms that were subject to comment in the textbooks must have been salient in some way, and they must have existed since at least the 1840s. He finds that most comments pertained to forms of pronunciation, and he values this evidence not only as providing “glimpses of many features, some of which (such as the drawl) come from a period earlier than previously documented” but also as evidence of southerners’ awareness “of linguistic contact and competition [...] and [of] the new-dialect formation that was the result” \citep[114]{Montgomery2015}. This study is valuable because it draws attention to the discourses surrounding particular linguistic forms in the south, especially their evaluation as correct or incorrect. It can thus be determined which linguistic forms played a role in the discursive construction of Southern American English. However, the material can only function as a starting point for further investigations, and his article shows that there are still many open questions, for example relating to the prestige (and distribution) of non-rhoticity (see \sectref{bkm:Ref530736302} for a detailed discussion of research on this form) and the relation between this early awareness of southern forms and the “new, modern Southern identity” identified by \citet[299]{Schneider2007} which led to the recognition and increasing use of innovative features which mark the present-day American south as a distinct dialect region.\footnote{\citet{Schneider2007} distinguishes between “traditional” and “new” southern features – the former are associated with the rural pre-Civil War culture and the latter with the modern, urban culture resulting from the social changes following the defeat in the Civil War. For southern (linguistic) identity, the middle of the nineteenth century therefore marks a turning point.} The difficulties of investigating the emergence of new varieties on a purely structural level also become visible in the case of African American English. 
In a recent overview and discussion of research on “the origins and history of African American Language”, \citet{Lanehart2017} not only reviews several research positions but also criticizes the fact that research focuses too much on a set of salient linguistic features and that several perspectives on the development of African American Language “use these salient features for various purposes and sometimes in contradictory ways to support their argument” (\citeyear[86]{Lanehart2017}).\footnote{Lanehart prefers the label \textit{African American Language} over \textit{African American English} because \textit{language} is “less limiting” than \textit{English} (\citeyear[86]{Lanehart2017}).} She ultimately argues that with regard to structural developments, “we simply do not have the artifacts and hard evidence (recordings of nascent AAL) to make a definitive assessment about the origins and history of AAL” (\citeyear[91]{Lanehart2017}). In her discussion, she also deals implicitly with the discursive construction of African American Language by focusing on the contribution of linguistic research to this construction. She describes the Deficit position in the nineteenth century, according to which Blacks are biologically inferior to whites and thus not able to acquire English in the way that whites do – this position continued to be supported throughout the twentieth century. Lanehart argues that not only this position but also the following ones (the two most prevalent ones being the Anglicist position, which claims that African American English is based on British English varieties, and the Creolist position, which holds that the language spoken by African Americans developed from an earlier creole) are influenced by the “ideological and epistemological perspectives of their originators and supporters”, which shows that research is influenced by social, political and cultural circumstances. Research discourse also has an influence on the recognition and evaluation of African American English, but this influence also has its limits, as Lanehart points out: “[W]hen I tell people outside of linguistics about AAL, they seem dumbfounded that anyone would believe that AAL is not historically rooted to Africa since the people who speak it are, hence the African Diaspora” (\citeyear[92]{Lanehart2017}). Her overview thus strengthens the view developed in \sectref{bkm:Ref521000690} that investigations of structure are very often influenced by discourse, and while it might not be possible to completely disentangle structure and discourse in the investigation of language in general and of the historical emergence of new varieties in particular, it is important to reflect critically on the ways in which one’s own investigation of structural developments is shaped by the discursive construction of the variety in question. Lanehart also addresses the issue of defining African American Language – an issue that is also discussed by \citet{Mufwene2001b} in much detail. Mufwene draws the following conclusion: \begin{quote} So far, we have done poor jobs either in not reconciling some of our definitions of AAE [African American English] with our analyses, in overemphasizing extreme differences and disregarding similarities with other English vernaculars, or in proposing definitions that ignore the sentiments of native speakers. We might even be better off not even trying to define AAE and just speaking of peculiarities observable among African Americans. 
There is probably no way of defining AAE – if a language variety can be defined at all – that does not reflect a particular bias, and this problem is true of any language variety in the world. (\citeyear[37]{Mufwene2001b}) \end{quote} He suggests instead a vague characterization of African American English as an ethnolect – as “English as it is spoken by or among African Americans” (\citeyear[37]{Mufwene2001b}). This places emphasis on the speakers as a basis for defining the object of investigation – a perspective that is also adopted by \citet{Lanehart2017}. This reflects her research position, namely the importance of viewing language as more than a set of linguistic forms: “[L]anguage is more than the sum of its parts or the handy grammar that we all like to turn to [...]. If language could be learned from reading a grammar book, we could all be multilingual” \citep[91]{Lanehart2017}. In addition to linguistic forms, aspects of perception and recognition are thus crucial, as pointed out by \citet[37]{Mufwene2001b}: \begin{quote} I doubt that African Americans utilize just one rigid battery of structural features to identify a person as speaking English in a manner that corresponds to their own. For the purposes of group identity, I think that being able to recognize speech as African American on the family resemblance model, based on a disjunction of kinds of peculiarities, is more realistic than doing so on the basis of whether its speaker has more or fewer specific non-standard features. \end{quote} To conclude this overview of the role of structure and discourse in prior research on the emergence of American English and, relatedly, of other varieties in North America, it can be stated that, even though it was not possible to give a complete overview here because of the large amount of literature on their historical development, the question of when American English became a distinct variety has been answered in different ways. As studies on the distribution and change of linguistic features, especially those based on primary evidence like original texts and manuscripts, are rather scarce, the “beginning” of American English has usually been determined primarily on the discursive level, by identifying the point in time at which American English came to be \emph{recognized} as distinct from British English. While the Revolutionary War and the following independence from Britain are usually seen as important events because they mark the starting point of the American nation and are thus a prerequisite for the recognition of a \emph{national} variety, some have stressed that this recognition process was completed by 1850, whereas others found the late nineteenth century or the early twentieth century to be crucial periods for the definition of an American (standard) variety. Despite those differences, there is general agreement that the recognition process proceeds not only through delimitation from British English, but also through the recognition of American sub-varieties against which a uniform, national American variety is constructed. The linguistic differences related to race and ethnicity have been identified as particularly important and also as constituting a striking difference from standardization processes occurring in England. 
Another difference between England and America that has motivated investigations is that in America it was not the speech of the political, cultural, and economic centers in the northeast that became the standard, but the speech of the rural (mid)west, while in England the London speech patterns came to be recognized as the standard. Despite a focus on discourse, claims have been made about its influence on language use – on the other hand, studies focusing on language use also make claims about the correlation between the changes they observe and social and cultural developments happening at the same time. \citegen{Bonfiglio2002} study is special in this regard because he links discourses surrounding a specific linguistic form, post-vocalic /r/, to what is known about changes in the use of that form by American speakers. As post-vocalic /r/ is also one of the forms investigated in this study, I will summarize the research findings on this particular form in more detail in \sectref{bkm:Ref530736302}. Finally, it is not only contemporary observers who are influenced by ideologies (resulting, for example, in different assessments of the uniformity and diversity of American English, as shown by \citet{Cooley1992}), but also linguists contributing to the current research debate, particularly (though by no means exclusively) those investigating African American English. In this study, I will use enregisterment as a theoretical framework to contribute to the question of how American English was constructed as a discursive variety – but in contrast to prior studies, my analysis focuses on particular linguistic forms and how the social and cultural meanings they acquired led to the recognition of a set of forms as distinctly American. Rather than following the majority of studies investigating discourses on language shaped by language experts, influential figures or authors of literary works, I will focus on newspaper articles because they had a wide and varied readership and because newspapers contained several different text types, for example editorials, news reports and advertisements as well as anecdotes and humorous paragraphs intended to entertain the readers (see \sectref{bkm:Ref524018691} for more details). \section{Conclusion} \hypertarget{Toc63021217}{} The theoretical framework developed in this chapter is intended to provide a basis for gaining deeper insights into the role of social factors and identity in the emergence of new varieties of English. I have demonstrated in \sectref{bkm:Ref522870687} that this point is one of the most contested issues in discussions about models that have been proposed so far: Trudgill’s model of new-dialect formation, Schneider’s Dynamic Model and Kretzschmar’s model of the emergence of varieties in speech as a complex system. I have shown that the arguments they present interact in crucial ways with their definition of the emerging construct, the \textit{variety}, and that an investigation that aims to shed light on the role of social factors needs to distinguish carefully between different types of varieties: structural varieties, perceptual varieties and discursive varieties. This postulation of different types of varieties does not imply that they are to be understood as existing independently of each other – on the contrary, they are crucially connected. 
However, it is the main argument of this study that in order to explore these connections, they have to be investigated in their own right, and this requires a sound theoretical and methodological framework that does justice to the different types of varieties. As this study is primarily interested in the role of social factors, I proposed to focus on the emergence of discursive varieties, and I demonstrated in \sectref{bkm:Ref522870698} that the theory of enregisterment provides a useful framework for this task because it describes how speakers construct registers through engaging reflexively with linguistic forms (and other perceivable signs) and evaluating these linguistic forms in different ways. Linguistic forms thus become indexically linked to different social values and social personae, and the more frequently reflexive activities occur, the more salient these indexical links become and the more likely speakers are to recognize these links and contribute to their persistence or to their change through their own reflexive activities. It is through this process that registers are constructed – discursive varieties which can be understood as cultural models of action consisting of a set of linguistic and non-linguistic forms that are recognized as distinct from other sets of forms by a group of speakers. As shown in \sectref{bkm:Ref506884048}, the theory of enregisterment and indexicality has already been fruitfully applied in sociolinguistic research, but the distinction between the concepts of register and style, as well as the role of awareness in the creation and recognition of indexical links and registers, deserves closer attention. I have argued here that it makes sense to distinguish style, enregistered style and register: Whereas the first two concepts are located on the level of language use, the third concept operates on a more abstract level because it is essentially a \emph{model} of speech (and action in general) that cannot be identified by investigating speakers’ use of linguistic variants but only by investigating their reflexive activities, in other words their typifications of linguistic forms in metadiscursive practice. In line with Agha’s theory of enregisterment, sociolinguists and linguistic anthropologists have suggested understanding identity as being produced through social practices – practices in which speakers position themselves and others socially. How registers affect this process of social positioning is explained and modeled by \citet{Spitzmuller2013, Spitzmuller2015, Spitzmuller10062016}. With regard to the perceptual variety and its relation to enregisterment, I demonstrated in \sectref{bkm:Ref10466352} that studies in perceptual dialectology have also drawn on the concept of enregisterment. I have suggested that theories and methods of perceptual dialectology can mainly be used to add a cognitive perspective to the question of how linguistic forms become enregistered. They cannot be used, however, to study enregisterment in a historical context, which is why they do not play a role in the present study. In contrast, the new field of discourse linguistics can contribute to the study of enregisterment and thus to the study of the construction of discursive varieties in crucial ways. This can be done first of all by understanding this construction process as the result of discursive action, that is, of linguistic action that constitutes knowledge. 
Secondly, the methodological framework DIMLAN, which takes into account the intratextual layer, the transtextual layer and the agent layer – the latter functioning as a filter between the intra- and the transtextual layers – provides a reference point for developing a methodological approach for the systematic study of discursive activities that bring about socially shared knowledge about language in the form of cultural models of action and thus of registers. The model presented in \sectref{bkm:Ref523897668} visualizes the enregisterment process and thus the relation between the structural level and the discursive level that have been distinguished in the Dynamic Model. As it is the central aim of the next chapters to apply this model to the investigation of the enregisterment of American English, I have presented an overview of previous research that has described (aspects of) the development of American English and addressed the question of the “beginning” of this variety. I demonstrated that this question has been answered in very different ways and, more often than not, on the basis of data that belongs to the discursive level. The \emph{recognition} of American English as a new variety was foregrounded, whereas the structural differentiation was much less studied. But even in studies that can be located on the discursive level, not much attention has been given to the specific linguistic forms that the recognition of the new variety was based on. Furthermore, there has been insufficient investigation of which speakers actually recognized linguistic forms as distinct, and of when and where these processes of recognition could be observed. This underlines the need for a study that aims to investigate the emergence of a discursive \textit{American} variety systematically and empirically. However, I have also outlined important suggestions by \citet{Cooley1992} and \citet{Minnick2010} concerning the manner in which the recognition of a standard American variety proceeded. The remaining chapters of this book will show that these processes can also be identified in the study of enregisterment in nineteenth-century America that follows.
[GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) x : (i : ι) → β i ⊢ hammingDist x x = 0 [PROOFSTEP] rw [hammingDist, card_eq_zero, filter_eq_empty_iff] [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) x : (i : ι) → β i ⊢ ∀ (x_1 : ι), x_1 ∈ univ → ¬x x_1 ≠ x x_1 [PROOFSTEP] exact fun _ _ H => H rfl [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) x y : (i : ι) → β i ⊢ hammingDist x y = hammingDist y x [PROOFSTEP] simp_rw [hammingDist, ne_comm] [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) x y z : (i : ι) → β i ⊢ hammingDist x z ≤ hammingDist x y + hammingDist y z [PROOFSTEP] classical unfold hammingDist refine le_trans (card_mono ?_) (card_union_le _ _) rw [← filter_or] exact monotone_filter_right _ fun i h ↦ (h.ne_or_ne _).imp_right Ne.symm [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) x y z : (i : ι) → β i ⊢ hammingDist x z ≤ hammingDist x y + hammingDist y z [PROOFSTEP] unfold hammingDist [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) x y z : (i : ι) → β i ⊢ card (filter (fun i => x i ≠ z i) univ) ≤ card (filter (fun i => x i ≠ y i) univ) + card (filter (fun i => y i ≠ z i) univ) [PROOFSTEP] refine le_trans (card_mono ?_) (card_union_le _ _) [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) x y z : (i : ι) → β i ⊢ filter (fun i => x i ≠ z i) univ ≤ filter (fun i => x i ≠ y i) univ ∪ filter (fun i => y i ≠ z i) univ [PROOFSTEP] rw [← filter_or] [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) x y z : (i : ι) → β i ⊢ filter (fun i => x i ≠ z i) univ ≤ filter (fun a => x a ≠ y a ∨ y a ≠ z a) univ [PROOFSTEP] exact monotone_filter_right _ fun i h ↦ (h.ne_or_ne _).imp_right Ne.symm [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) x y z : (i : ι) → β i ⊢ hammingDist x y ≤ hammingDist z x + hammingDist z y [PROOFSTEP] rw [hammingDist_comm z] [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) x y z : (i : ι) → β i ⊢ hammingDist x y ≤ hammingDist x z + hammingDist z y [PROOFSTEP] exact hammingDist_triangle _ _ _ [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) x y z : (i : ι) → β i ⊢ hammingDist x y ≤ hammingDist x z + hammingDist y z [PROOFSTEP] rw [hammingDist_comm y] [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) x y z : (i : ι) → β i ⊢ hammingDist x y ≤ hammingDist x z + hammingDist z y 
[PROOFSTEP] exact hammingDist_triangle _ _ _ [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) ⊢ swap hammingDist = hammingDist [PROOFSTEP] funext x y [GOAL] case h.h α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) x y : (i : ι) → β i ⊢ swap hammingDist x y = hammingDist x y [PROOFSTEP] exact hammingDist_comm _ _ [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) x y : (i : ι) → β i ⊢ hammingDist x y = 0 → x = y [PROOFSTEP] simp_rw [hammingDist, card_eq_zero, filter_eq_empty_iff, Classical.not_not, funext_iff, mem_univ, forall_true_left, imp_self] [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) x y : (i : ι) → β i H : x = y ⊢ hammingDist x y = 0 [PROOFSTEP] rw [H] [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) x y : (i : ι) → β i H : x = y ⊢ hammingDist y y = 0 [PROOFSTEP] exact hammingDist_self _ [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) x y : (i : ι) → β i ⊢ 0 = hammingDist x y ↔ x = y [PROOFSTEP] rw [eq_comm, hammingDist_eq_zero] [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) x y : (i : ι) → β i ⊢ 0 < hammingDist x y ↔ x ≠ y [PROOFSTEP] rw [← hammingDist_ne_zero, iff_not_comm, not_lt, le_zero_iff] [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝ : (i : ι) → DecidableEq (γ i) x y : (i : ι) → β i ⊢ hammingDist x y < 1 ↔ x = y [PROOFSTEP] rw [Nat.lt_one_iff, hammingDist_eq_zero] [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝⁴ : Fintype ι inst✝³ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝² : (i : ι) → DecidableEq (γ i) inst✝¹ : (i : ι) → Zero (β i) inst✝ : (i : ι) → Zero (γ i) x : (i : ι) → β i ⊢ hammingDist 0 x = hammingNorm x [PROOFSTEP] rw [hammingDist_comm, hammingDist_zero_right] [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝⁴ : Fintype ι inst✝³ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝² : (i : ι) → DecidableEq (γ i) inst✝¹ : (i : ι) → Zero (β i) inst✝ : (i : ι) → Zero (γ i) f : (i : ι) → γ i → β i x : (i : ι) → γ i hf : ∀ (i : ι), f i 0 = 0 ⊢ (hammingNorm fun i => f i (x i)) ≤ hammingNorm x [PROOFSTEP] simpa only [← hammingDist_zero_right, hf] using hammingDist_comp_le_hammingDist f (y := fun _ ↦ 0) [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝⁴ : Fintype ι inst✝³ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝² : (i : ι) → DecidableEq (γ i) inst✝¹ : (i : ι) → Zero (β i) inst✝ : (i : ι) → Zero (γ i) f : (i : ι) → γ i → β i x : (i : ι) → γ i hf₁ : ∀ (i : ι), Injective (f i) hf₂ : ∀ (i : ι), f i 0 = 0 ⊢ (hammingNorm fun i => f i (x i)) = hammingNorm x [PROOFSTEP] simpa only [← hammingDist_zero_right, hf₂] using hammingDist_comp f hf₁ (y := fun _ ↦ 0) [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝⁶ : Fintype ι inst✝⁵ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝⁴ : (i : ι) → 
DecidableEq (γ i) inst✝³ : (i : ι) → Zero (β i) inst✝² : (i : ι) → Zero (γ i) inst✝¹ : Zero α inst✝ : (i : ι) → SMulWithZero α (β i) k : α x : (i : ι) → β i i : ι ⊢ (fun i c => k • c) i 0 = 0 [PROOFSTEP] simp_rw [smul_zero] [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝⁶ : Fintype ι inst✝⁵ : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝⁴ : (i : ι) → DecidableEq (γ i) inst✝³ : (i : ι) → Zero (β i) inst✝² : (i : ι) → Zero (γ i) inst✝¹ : Zero α inst✝ : (i : ι) → SMulWithZero α (β i) k : α hk : ∀ (i : ι), IsSMulRegular (β i) k x : (i : ι) → β i i : ι ⊢ (fun i c => k • c) i 0 = 0 [PROOFSTEP] simp_rw [smul_zero] [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝³ : Fintype ι inst✝² : (i : ι) → DecidableEq (β i) γ : ι → Type u_4 inst✝¹ : (i : ι) → DecidableEq (γ i) inst✝ : (i : ι) → AddGroup (β i) x y : (i : ι) → β i ⊢ hammingDist x y = hammingNorm (x - y) [PROOFSTEP] simp_rw [hammingNorm, hammingDist, Pi.sub_apply, sub_ne_zero] [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) ⊢ ∀ (x : Hamming β), dist x x = 0 [PROOFSTEP] push_cast [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) ⊢ ∀ (x : Hamming β), ↑(hammingDist (↑ofHamming x) (↑ofHamming x)) = 0 [PROOFSTEP] exact_mod_cast hammingDist_self [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) ⊢ ∀ (x y : Hamming β), dist x y = dist y x [PROOFSTEP] push_cast [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) ⊢ ∀ (x y : Hamming β), ↑(hammingDist (↑ofHamming x) (↑ofHamming y)) = ↑(hammingDist (↑ofHamming y) (↑ofHamming x)) [PROOFSTEP] exact_mod_cast hammingDist_comm [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) ⊢ ∀ (x y z : Hamming β), dist x z ≤ dist x y + dist y z [PROOFSTEP] push_cast [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) ⊢ ∀ (x y z : Hamming β), ↑(hammingDist (↑ofHamming x) (↑ofHamming z)) ≤ ↑(hammingDist (↑ofHamming x) (↑ofHamming y)) + ↑(hammingDist (↑ofHamming y) (↑ofHamming z)) [PROOFSTEP] exact_mod_cast hammingDist_triangle [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) x✝¹ x✝ : Hamming β ⊢ (fun x y => ↑{ val := dist x y, property := (_ : 0 ≤ dist x y) }) x✝¹ x✝ = ENNReal.ofReal (dist x✝¹ x✝) [PROOFSTEP] exact ENNReal.coe_nnreal_eq _ [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) s : Set (Hamming β × Hamming β) ⊢ s ∈ uniformity (Hamming β) ↔ ∃ ε, ε > 0 ∧ ∀ {a b : Hamming β}, dist a b < ε → (a, b) ∈ s [PROOFSTEP] push_cast [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) s : Set (Hamming β × Hamming β) ⊢ s ∈ uniformity (Hamming β) ↔ ∃ ε, ε > 0 ∧ ∀ {a b : Hamming β}, ↑(hammingDist (↑ofHamming a) (↑ofHamming b)) < ε → (a, b) ∈ s [PROOFSTEP] constructor [GOAL] case mp α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) s : Set (Hamming β × Hamming β) ⊢ s ∈ uniformity (Hamming β) → ∃ ε, ε > 0 ∧ ∀ {a b : Hamming β}, ↑(hammingDist (↑ofHamming a) (↑ofHamming b)) < ε → (a, b) ∈ s [PROOFSTEP] refine' fun hs => ⟨1, zero_lt_one, fun hab => _⟩ [GOAL] case mp α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) s : Set (Hamming β × Hamming 
β) hs : s ∈ uniformity (Hamming β) a✝ b✝ : Hamming β hab : ↑(hammingDist (↑ofHamming a✝) (↑ofHamming b✝)) < 1 ⊢ (a✝, b✝) ∈ s [PROOFSTEP] rw_mod_cast [hammingDist_lt_one] at hab [GOAL] case mp α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) s : Set (Hamming β × Hamming β) hs : s ∈ uniformity (Hamming β) a✝ b✝ : Hamming β hab : ↑ofHamming a✝ = ↑ofHamming b✝ ⊢ (a✝, b✝) ∈ s [PROOFSTEP] rw [ofHamming_inj, ← mem_idRel] at hab [GOAL] case mp α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) s : Set (Hamming β × Hamming β) hs : s ∈ uniformity (Hamming β) a✝ b✝ : Hamming β hab : (a✝, b✝) ∈ idRel ⊢ (a✝, b✝) ∈ s [PROOFSTEP] exact hs hab [GOAL] case mpr α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) s : Set (Hamming β × Hamming β) ⊢ (∃ ε, ε > 0 ∧ ∀ {a b : Hamming β}, ↑(hammingDist (↑ofHamming a) (↑ofHamming b)) < ε → (a, b) ∈ s) → s ∈ uniformity (Hamming β) [PROOFSTEP] rintro ⟨_, hε, hs⟩ ⟨_, _⟩ hab [GOAL] case mpr.intro.intro.mk α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) s : Set (Hamming β × Hamming β) w✝ : ℝ hε : w✝ > 0 hs : ∀ {a b : Hamming β}, ↑(hammingDist (↑ofHamming a) (↑ofHamming b)) < w✝ → (a, b) ∈ s fst✝ snd✝ : Hamming β hab : (fst✝, snd✝) ∈ idRel ⊢ (fst✝, snd✝) ∈ s [PROOFSTEP] rw [mem_idRel] at hab [GOAL] case mpr.intro.intro.mk α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) s : Set (Hamming β × Hamming β) w✝ : ℝ hε : w✝ > 0 hs : ∀ {a b : Hamming β}, ↑(hammingDist (↑ofHamming a) (↑ofHamming b)) < w✝ → (a, b) ∈ s fst✝ snd✝ : Hamming β hab : fst✝ = snd✝ ⊢ (fst✝, snd✝) ∈ s [PROOFSTEP] rw [hab] [GOAL] case mpr.intro.intro.mk α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) s : Set (Hamming β × Hamming β) w✝ : ℝ hε : w✝ > 0 hs : ∀ {a b : Hamming β}, ↑(hammingDist (↑ofHamming a) (↑ofHamming b)) < w✝ → (a, b) ∈ s fst✝ snd✝ : Hamming β hab : fst✝ = snd✝ ⊢ (snd✝, snd✝) ∈ s [PROOFSTEP] refine' hs (lt_of_eq_of_lt _ hε) [GOAL] case mpr.intro.intro.mk α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) s : Set (Hamming β × Hamming β) w✝ : ℝ hε : w✝ > 0 hs : ∀ {a b : Hamming β}, ↑(hammingDist (↑ofHamming a) (↑ofHamming b)) < w✝ → (a, b) ∈ s fst✝ snd✝ : Hamming β hab : fst✝ = snd✝ ⊢ ↑(hammingDist (↑ofHamming snd✝) (↑ofHamming snd✝)) = 0 [PROOFSTEP] exact_mod_cast hammingDist_self _ [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) ⊢ (Bornology.cobounded (Hamming β)).sets = {s | ∃ C, ∀ (x : Hamming β), x ∈ sᶜ → ∀ (y : Hamming β), y ∈ sᶜ → dist x y ≤ C} [PROOFSTEP] ext [GOAL] case h α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) x✝ : Set (Hamming β) ⊢ x✝ ∈ (Bornology.cobounded (Hamming β)).sets ↔ x✝ ∈ {s | ∃ C, ∀ (x : Hamming β), x ∈ sᶜ → ∀ (y : Hamming β), y ∈ sᶜ → dist x y ≤ C} [PROOFSTEP] push_cast [GOAL] case h α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) x✝ : Set (Hamming β) ⊢ x✝ ∈ (Bornology.cobounded (Hamming β)).sets ↔ x✝ ∈ {s | ∃ C, ∀ (x : Hamming β), x ∈ sᶜ → ∀ (y : Hamming β), y ∈ sᶜ → ↑(hammingDist (↑ofHamming x) (↑ofHamming y)) ≤ C} [PROOFSTEP] refine' iff_of_true (Filter.mem_sets.mpr Filter.mem_bot) ⟨Fintype.card ι, fun _ _ _ _ => _⟩ [GOAL] case h α : Type u_1 ι : Type u_2 β : ι → Type u_3 
inst✝¹ : Fintype ι inst✝ : (i : ι) → DecidableEq (β i) x✝⁴ : Set (Hamming β) x✝³ : Hamming β x✝² : x✝³ ∈ x✝⁴ᶜ x✝¹ : Hamming β x✝ : x✝¹ ∈ x✝⁴ᶜ ⊢ ↑(hammingDist (↑ofHamming x✝³) (↑ofHamming x✝¹)) ≤ ↑(Fintype.card ι) [PROOFSTEP] exact_mod_cast hammingDist_le_card_fintype [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) inst✝ : (i : ι) → AddCommGroup (β i) ⊢ ∀ (x y : Hamming β), dist x y = ‖x - y‖ [PROOFSTEP] push_cast [GOAL] α : Type u_1 ι : Type u_2 β : ι → Type u_3 inst✝² : Fintype ι inst✝¹ : (i : ι) → DecidableEq (β i) inst✝ : (i : ι) → AddCommGroup (β i) ⊢ ∀ (x y : Hamming β), ↑(hammingDist (↑ofHamming x) (↑ofHamming y)) = ↑(hammingNorm (↑ofHamming (x - y))) [PROOFSTEP] exact_mod_cast hammingDist_eq_hammingNorm
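In informal terms (a summary of the goals above, not part of the original trace), these steps establish that the Hamming distance
\[ d_H(x, y) = |\{\, i : x_i \neq y_i \,\}| \]
is a metric: $d_H(x,x) = 0$, $d_H(x,y) = d_H(y,x)$, and $d_H(x,z) \le d_H(x,y) + d_H(y,z)$; that $d_H(x,y) < 1$ iff $x = y$; that $d_H$ is bounded by the cardinality of the index type; and that over a family of additive groups $d_H(x,y) = \|x - y\|_H$, where $\|z\|_H = |\{\, i : z_i \neq 0 \,\}|$ is the Hamming norm.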
open import Type module Graph.Properties.Proofs where open import Data.Either.Proofs open import Functional open import Function.Equals open import Lang.Instance open import Logic open import Logic.Propositional open import Logic.Propositional.Theorems import Lvl open import Graph open import Graph.Properties open import Relator.Equals.Proofs.Equiv open import Structure.Setoid.Uniqueness open import Structure.Relator.Properties open import Type.Properties.Singleton module _ {ℓ₁ ℓ₂} {V : Type{ℓ₁}} (_⟶_ : Graph{ℓ₁}{ℓ₂}(V)) where instance undirect-undirected : Undirected(undirect(_⟶_)) Undirected.reversable undirect-undirected = intro [∨]-symmetry Undirected.reverse-involution undirect-undirected = intro (_⊜_.proof swap-involution) -- [++]-visits : ∀{ae be a₁ b₁ a₂ b₂}{e : ae ⟶ be}{w₁ : Walk(_⟶_) a₁ b₁}{w₂ : Walk(_⟶_) a₂ b₂} → (Visits(_⟶_) e w₁) ∨ (Visits(_⟶_) e w₂) → Visits(_⟶_) e (w₁ ++ w₂) complete-singular-is-undirected : ⦃ CompleteWithLoops(_⟶_) ⦄ → ⦃ Singular(_⟶_) ⦄ → Undirected(_⟶_) Undirected.reversable complete-singular-is-undirected = intro(const (completeWithLoops(_⟶_))) Undirected.reverse-involution complete-singular-is-undirected = intro(singular(_⟶_)) -- traceable-is-connected : ⦃ Traceable(_⟶_) ⦄ → Connected(_⟶_)
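A brief gloss in ordinary notation (not part of the module): the symmetrised graph is
\[ \mathrm{undirect}(\longrightarrow)\;a\;b \;:\Leftrightarrow\; (a \longrightarrow b) \lor (b \longrightarrow a), \]
so reversibility is exactly the symmetry of $\lor$ and reverse-involution is $\mathrm{swap} \circ \mathrm{swap} = \mathrm{id}$, matching how the two `Undirected` fields are discharged above.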
import GroundZero.Theorems.Pullback open GroundZero GroundZero.Types open GroundZero.Types.Equiv open GroundZero.Proto open GroundZero.Structures (prop contr) universe u v u' v' w w' k k' -- exercise 2.1 section variable {A : Type u} {a b c : A} hott def trans₁ (p : a = b) (q : b = c) : a = c := @Id.casesOn A a (λ x _, x = c → a = c) b p (@Id.casesOn A a (λ x _, a = x) c · (idp a)) q infixl:99 " ⬝₁ " => trans₁ hott def trans₂ (p : a = b) (q : b = c) : a = c := @Id.casesOn A a (λ x _, x = c → a = c) b p idfun q infixl:99 " ⬝₂ " => trans₂ hott def trans₃ (p : a = b) (q : b = c) : a = c := @Id.casesOn A b (λ x _, a = b → a = x) c q idfun p infixl:99 " ⬝₃ " => trans₃ hott def eq₁₂ (p : a = b) (q : b = c) : p ⬝₁ q = p ⬝₂ q := begin induction p; induction q; reflexivity end hott def eq₂₃ (p : a = b) (q : b = c) : p ⬝₂ q = p ⬝₃ q := begin induction p; induction q; reflexivity end hott def eq₁₃ (p : a = b) (q : b = c) : p ⬝₁ q = p ⬝₃ q := begin induction p; induction q; reflexivity end end -- exercise 2.2 section variable {A : Type u} {a b c : A} (p : a = b) (q : b = c) example : eq₁₂ p q ⬝ eq₂₃ p q = eq₁₃ p q := begin induction p; induction q; reflexivity end end -- exercise 2.3 section variable {A : Type u} {a b c : A} hott def trans₄ (p : a = b) (q : b = c) : a = c := @Id.casesOn A b (λ x _, a = b → a = x) c q (@Id.casesOn A a (λ x _, a = x) b · (idp a)) p infixl:99 " ⬝₄ " => trans₄ /- example (p : a = b) (q : b = c) : p ⬝₁ q = p ⬝₄ q := idp _ example (p : a = b) (q : b = c) : p ⬝₂ q = p ⬝₄ q := idp _ example (p : a = b) (q : b = c) : p ⬝₃ q = p ⬝₄ q := idp _ -/ example (p : a = b) (q : b = c) : p ⬝₁ q = p ⬝₄ q := begin induction p; induction q; reflexivity end end -- exercise 2.4 hott def nPath (A : Type u) : ℕ → Type u | Nat.zero => A | Nat.succ n => Σ (a b : nPath A n), a = b hott def boundary {A : Type u} {n : ℕ} : nPath A (n + 1) → (nPath A n) × (nPath A n) := λ ⟨a, b, _⟩, (a, b) -- exercise 2.5 namespace «2.5» variable {A : Type u} {B : Type v} {x y : A} (p : x = y) hott def transconst (b : B) : transport (λ _, B) p b = b := begin induction p; reflexivity end hott def f (φ : A → B) : φ x = φ y → transport (λ _, B) p (φ x) = φ y := λ q, transconst p (φ x) ⬝ q hott def g (φ : A → B) : transport (λ _, B) p (φ x) = φ y → φ x = φ y := λ q, (transconst p (φ x))⁻¹ ⬝ q example (φ : A → B) : f p φ ∘ g p φ ~ id := begin induction p; reflexivity end example (φ : A → B) : g p φ ∘ f p φ ~ id := begin induction p; reflexivity end end «2.5» -- exercise 2.6 example {A : Type u} {x y z : A} (p : x = y) : biinv (@Id.trans A x y z p) := begin apply Prod.mk <;> existsi Id.trans p⁻¹ <;> intro q <;> induction p <;> induction q <;> reflexivity end -- exercise 2.7 namespace «2.7» variable {A : Type u} {A' : Type u'} {B : A → Type v} {B' : A' → Type v'} (g : A → A') (h : Π a, B a → B' (g a)) def φ (x : Σ a, B a) : Σ a', B' a' := ⟨g x.1, h x.1 x.2⟩ hott def prodMap : Π (x y : Σ a, B a) (p : x.1 = y.1) (q : x.2 =[p] y.2), Id.map (φ g h) (Sigma.prod p q) = @Sigma.prod A' B' (φ g h x) (φ g h y) (@Id.map A A' x.1 y.1 g p) (depPathMap' g h q) := begin intro ⟨x, H⟩ ⟨y, G⟩ (p : x = y); induction p; intro (q : H = G); induction q; reflexivity end end «2.7» -- exercise 2.8 namespace «2.8» variable {A A' B B' : Type u} (g : A → A') (h : B → B') def φ : A + B → A' + B' := Coproduct.elim (Coproduct.inl ∘ g) (Coproduct.inr ∘ h) hott def ρ : Π {x y : A + B}, Coproduct.code x y → Coproduct.code (φ g h x) (φ g h y) | Sum.inl _, Sum.inl _, p => Id.map _ p | Sum.inr _, Sum.inl _, p => Empty.elim p | Sum.inl _, Sum.inr _, p => 
Empty.elim p | Sum.inr _, Sum.inr _, p => Id.map _ p hott def mapPathSum (x y : A + B) : Π p, Id.map (φ g h) (Coproduct.pathSum x y p) = Coproduct.pathSum (φ g h x) (φ g h y) (ρ g h p) := begin match x, y with | Sum.inl x, Sum.inl y => _ | Sum.inr _, Sum.inl _ => _ | Sum.inl _, Sum.inr _ => _ | Sum.inr x, Sum.inr y => _; { intro (p : x = y); induction p; reflexivity }; { intro; apply Empty.elim; assumption }; { intro; apply Empty.elim; assumption }; { intro (p : x = y); induction p; reflexivity } end end «2.8» -- exercise 2.9 hott def Coproduct.depUnivProperty (A : Type u) (B : Type v) (X : A + B → Type w) : (Π x, X x) ≃ (Π a, X (Coproduct.inl a)) × (Π b, X (Coproduct.inr b)) := begin fapply Sigma.mk; { intro φ; exact (λ a, φ (Coproduct.inl a), λ b, φ (Coproduct.inr b)) }; apply Qinv.toBiinv; fapply Sigma.mk; { intros φ x; induction x using Sum.casesOn; apply φ.1; apply φ.2 }; apply Prod.mk; { intro (φ, ψ); reflexivity }; { intro f; apply Theorems.funext; intro z; induction z using Sum.casesOn <;> reflexivity } end hott def Coproduct.univProperty (A : Type u) (B : Type v) (X : Type w) : (A + B → X) ≃ (A → X) × (B → X) := Coproduct.depUnivProperty A B (λ _, X) -- exercise 2.10 hott def sigma.assoc (A : Type u) (B : A → Type v) (C : (Σ x, B x) → Type w) : (Σ x, Σ y, C ⟨x, y⟩) ≃ (Σ p, C p) := begin fapply Sigma.mk; { intro w; existsi ⟨w.1, w.2.1⟩; exact w.2.2 }; apply Qinv.toBiinv; fapply Sigma.mk; { intro w; existsi w.1.1; existsi w.1.2; apply transport C; symmetry; exact Sigma.uniq w.1; exact w.2 }; apply Prod.mk; { intro ⟨⟨a, b⟩, c⟩; reflexivity }; { intro ⟨a, ⟨b, c⟩⟩; reflexivity } end -- exercise 2.11 namespace «2.11» variable {P : Type k} {A : Type u} {B : Type v} {C : Type w} (η : pullbackSquare P A B C) example : P ≃ pullback C η.1.right η.1.bot := Theorems.pullbackCorner η end «2.11» -- exercise 2.12 namespace «2.12» variable {A B C D E F : Type u} {f : A → C} {g : C → E} {i : A → B} {j : C → D} {k : E → F} {h : B → D} {s : D → F} (α : j ∘ f = h ∘ i) (β : k ∘ g = s ∘ j) def left : hcommSquare A C B D := ⟨j, h, f, i, α⟩ def right : hcommSquare C E D F := ⟨k, s, g, j, β⟩ def outer : hcommSquare A E B F := ⟨k, s ∘ h, g ∘ f, i, @Id.map (C → F) (A → F) _ _ (· ∘ f) β ⬝ @Id.map _ (A → F) _ _ (s ∘ ·) α⟩ hott def pullbackLemma (H : (right β).isPullback) : (left α).isPullback ↔ (outer α β).isPullback := sorry end «2.12» -- exercise 2.13 example : (𝟐 ≃ 𝟐) ≃ 𝟐 := Theorems.Equiv.boolEquivEqvBool -- exercise 2.14 -- Assume Γ, p : x = y ⊢ x ≡ y, let Γ = A : U, a : A. Then Γ, b : A, p : a = b ⊢ p = idp a : U, -- because in this context we have p : a = b, so a ≡ b, so p : a = a. -- “@Id.rec A a (λ b, p = idp a) (λ x, idp a) a” is then well-typed. -- This means that we have a proof of “Π (p : a = a), p = idp a” leading to contradiction. -- exercise 2.15 hott def transportMap {A : Type u} {B : A → Type v} {x y : A} (p : x = y) : transport B p = idtoeqv (Id.map B p) := begin induction p; reflexivity end -- exercise 2.18 hott def transportSquare {A : Type u} {B : A → Type v} {f g : Π x, B x} (H : f ~ g) {x y : A} (p : x = y) : Id.map (transport B p) (H x) ⬝ apd g p = apd f p ⬝ H y := begin induction p; transitivity; apply Id.reflRight; apply Equiv.idmap end
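A side note on exercise 2.1 (informal, standard HoTT-book material rather than part of the file): the three compositions agree propositionally but compute differently. `trans₂` inducts only on `p`, so `idp a ⬝₂ q ≡ q` holds definitionally; `trans₃` inducts only on `q`, so `p ⬝₃ idp b ≡ p` holds definitionally; `trans₁` inducts on both, so only `idp a ⬝₁ idp a ≡ idp a` computes. This is why the pairwise equalities `eq₁₂`, `eq₂₃`, `eq₁₃` each need path induction on both arguments.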
using LDUFacts using LinearAlgebra using Test @testset "LDUFacts.jl" begin include("ldufact.jl") include("lduupdate.jl") end
module Polytopes using Gridap using Gridap.Helpers using StaticArrays using Base.Cartesian using LinearAlgebra using Combinatorics export Polytope export NFace export HEX_AXIS, TET_AXIS export num_nfaces export face_normals export space_dim export dim export anchor export extrusion export nfaces export nfaces_dim export nface_connections export nf_nfs export nf_dim export num_nfaces export nface_ref_polytopes export generate_admissible_permutations export equidistant_interior_nodes_coordinates export vertices_coordinates export face_normals export edge_tangents export ref_nface_polytope export nfaces_vertices # Module constants const HEX_AXIS = 1 const TET_AXIS = 2 # Concrete implementations """ n-face of the polytope, i.e., any polytope of lower dimension `N` representing its boundary and the polytope itself (for `N` equal to the space dimension `D`) """ struct NFace{D} anchor::Point{D,Int} extrusion::Point{D,Int} end """ Aggregation of all n-faces that compose the polytope boundary and the polytope itself, the classification of n-faces with respect to their dimension and type """ struct Polytope{D} extrusion::Point{D,Int} nfaces::Vector{NFace} nf_nfs::Vector{Vector{Int64}} nf_dim::Vector{Vector{UnitRange{Int64}}} end """ Constructs a `Polytope` given the type of extrusion, i.e., HEX_AXIS (=1) for "hex" extrusion and TET_AXIS (=2) for "tet" extrusion """ function Polytope(extrusion::Vararg{Int,N}) where N return Polytope(Point{N,Int}(extrusion)) end function Polytope(extrusion::NTuple{N,Int}) where N return Polytope(extrusion...) end function Polytope(extrusion::Point{D,Int}) where D zerop = Point{D,Int}(zeros(Int64, D)) pol_nfs_dim = _polytopenfaces(zerop, extrusion) pol_nfs = pol_nfs_dim[1] pol_dim = pol_nfs_dim[2] nfs_id = Dict(nf => i for (i, nf) in enumerate(pol_nfs)) nf_nfs_dim = _polytopemesh(pol_nfs, nfs_id) nf_nfs = nf_nfs_dim[1] nf_dim = nf_nfs_dim[2] Polytope{D}(extrusion, pol_nfs, nf_nfs, nf_dim) end """ Provides the dimension of the environment space in which the n-face is defined """ space_dim(::NFace{D}) where {D} = D anchor(nf::NFace) = nf.anchor """ Provides the dimension of the n-face, i.e., n """ dim(nf::NFace) = _nfdim(nf.extrusion) extrusion(nf::NFace) = nf.extrusion dim(::Polytope{D}) where {D} = D extrusion(p::Polytope) = p.extrusion nfaces(p::Polytope) = p.nfaces nfaces_dim(p::Polytope, d) = p.nf_dim[end][d+1] """ # It provides for every df-face in the polytope all its dt-faces # We use dim-wise numbering, i.e., we start numbering from 1 at every dim """ nface_connections(p::Polytope, dfrom, dto) = _dimfrom_fs_dimto_fs(p, dfrom, dto) # @santiagobadia : I would prefer not to make public the following ones """ Provides the label of the n-faces that are a subset (on the boundary) or equal to a given n-face. We use a global numbering for n-faces in the returned array. n-faces are sorted by increasing dimension. """ nf_nfs(p::Polytope) = p.nf_nfs """ Provides for the result of the `nf_nfs` method, i.e., n-faces in an n-face, the range of n-faces for every dimension lower or equal than the n-face dimension. 
""" nf_dim(p::Polytope) = p.nf_dim """ Returns number of nfaces of dimension dim """ function num_nfaces(polytope::Polytope, dim::Integer) n = length(polytope.nf_dim) k = 0 for nface = 1:n d = length(polytope.nf_dim[nface]) - 1 if d == dim k += 1 end end k end """ Returns number of nfaces """ num_nfaces(polytope::Polytope) = length(polytope.nfaces) """ # Returns an array with the reference polytopes for all n-faces (undef for vertices) """ function nface_ref_polytopes(p::Polytope) function _eliminate_zeros(a) b = Int[] for m in a if (m != 0) push!(b, m) end end return Tuple(b) end nf_ref_p = Vector{Polytope}(undef, length(p.nfaces)) ref_nf_ps = Polytope[] v = _vertex() for (i_nf, nf) in enumerate(p.nfaces) r_ext = _eliminate_zeros(nf.extrusion) if r_ext != () k = 0 for (i_p, ref_p) in enumerate(ref_nf_ps) if r_ext == ref_p.extrusion k = i_p nf_ref_p[i_nf] = ref_p end end if k == 0 ref_p = Polytope(r_ext) push!(ref_nf_ps, ref_p) k = length(ref_nf_ps) + 1 nf_ref_p[i_nf] = ref_p end else nf_ref_p[i_nf] = v end end return nf_ref_p end """ It generates all the admissible permutations of nodes that lead to an admissible polytope """ function generate_admissible_permutations(p::Polytope) p_dims = length(p.extrusion) p_vs = Gridap.Polytopes._dimfrom_fs_dimto_fs(p, p_dims, 0) vs = p.nfaces[p_vs...] num_vs = length(vs) ext = p.extrusion l = [i for i = 1:num_vs] # @santiagobadia : Here we have to decide how we want this info stored permuted_polytopes = Vector{Int}[] for c in Combinatorics.permutations(l, p_dims + 1) admissible_polytope = true c1 = vs[c[1]].anchor for j = 2:p_dims+1 c2 = vs[c[j]].anchor if (!_are_nodes_connected(c1, c2, ext)) admissible_polytope = false end end if (admissible_polytope) push!(permuted_polytopes, c) end end return permuted_polytopes end """ It generates the set of nodes (its coordinates) in the interior of an n-face, for a given order. The node coordinates are the ones for a equispace case. """ function equidistant_interior_nodes_coordinates(p::Polytope{D}, order::Int) where D _order = order*ones(Int64,dim(p)) equidistant_interior_nodes_coordinates(p, _order) end function equidistant_interior_nodes_coordinates(p::Polytope{D}, order::Vector{Int}) where D if (all(extrusion(p).array .== HEX_AXIS) || all(order .== order[1])) ns = _interior_nodes_int_coords(p, order) return ns_float = _interior_nodes_int_to_real_coords(ns, order) else error("One can consider anisotropic orders on n-cubes only") end end """ It generates the list of coordinates of all vertices in the polytope. It is assumed that the polytope has the bounding box [0,1]**dim """ function vertices_coordinates(p::Polytope{D}) where D vs = _dimfrom_fs_dimto_fs(p, D, 0)[1] vcs = Point{D,Float64}[] for i = 1:length(vs) cs = convert(Vector{Float64}, [p.nfaces[vs[i]].anchor...]) push!(vcs, cs) end return vcs end """ It generates the outwards normals of the facets of a polytope. It returns two arrays, the first one being the outward normal and the second one the orientation. """ function face_normals(p::Polytope{D}) where D nf_vs = _dimfrom_fs_dimto_fs(p, D - 1, 0) vs = vertices_coordinates(p) f_ns = Point{D,Float64}[] f_os = Int[] for i_f = 1:length(p.nf_dim[end][end-1]) n, f_o = _facet_normal(p, nf_vs, vs, i_f) push!(f_ns, Point{D,Float64}(n)) push!(f_os, f_o) end return f_ns, f_os end """ It generates the tangent vectors for polytope edges. 
""" function edge_tangents(p::Polytope{D}) where D ed_vs = nfaces_vertices(p,1) return ts = [(t = vs[2]-vs[1])/norm(t) for vs in ed_vs ] end """ # Returns the reference polytope for n-faces of a given dimension. """ function ref_nface_polytope(p,nf_dim) nfs = nfaces_dim(p,nf_dim) fps = nface_ref_polytopes(p)[nfs] @assert(all(extrusion.(fps) .== extrusion(fps[1])), "All n-faces must be of the same type") return fps[1] end """ # Return the n-faces vertices coordinates array for a given n-face dimension """ function nfaces_vertices(p,d) nc = num_nfaces(p,d) verts = vertices_coordinates(p) faces_vs = nface_connections(p,d,0) fvs = Gridap.CellValuesGallery.CellValueFromArray(faces_vs) vs = Gridap.CellValuesGallery.CellValueFromArray(verts) cfvs = Gridap.CellValuesGallery.CellVectorFromLocalToGlobal(fvs,vs) end # Helpers _nfdim(a::Point{D,Int}) where D = sum([a[i] > 0 ? 1 : 0 for i = 1:D]) # Generates the array of n-faces of a polytope function _polytopenfaces(anchor::Point{D,Int}, extrusion::Point{D,Int}) where D dnf = _nfdim(extrusion) zerop = Point{D,Int}(zeros(Int64, D)) nf_nfs = [] nf_nfs = _nfaceboundary!(anchor, zerop, extrusion, true, nf_nfs) [sort!(nf_nfs, by = x -> x.anchor[i]) for i = 1:length(extrusion)] [sort!(nf_nfs, by = x -> x.extrusion[i]) for i = 1:length(extrusion)] [sort!(nf_nfs, by = x -> sum(x.extrusion))] numnfs = length(nf_nfs) nfsdim = [_nfdim(nf_nfs[i].extrusion) for i = 1:numnfs] dimnfs = Array{UnitRange{Int64},1}(undef, dnf + 1) dim = 0 i = 1 for iface = 1:numnfs if (nfsdim[iface] > dim) # global dim; # global i dim += 1 dimnfs[dim] = i:iface-1 i = iface end end dimnfs[dnf+1] = numnfs:numnfs return [nf_nfs, dimnfs] end # Provides for all n-faces of a polytope the d-faces for 0 <= d <n on its # boundary (e.g., given a face, it provides the set of edges and corners on its # boundary) using the global n-face numbering of the base polytope function _polytopemesh(nfaces::Vector{NFace{D}}, nfaceid::Dict) where D num_nfs = length(nfaces) nfnfs = Vector{Vector{Int64}}(undef, num_nfs) nfnfs_dim = Vector{Vector{UnitRange{Int64}}}(undef, num_nfs) for (inf, nf) in enumerate(nfaces) nfs_dim_nf = _polytopenfaces(nf.anchor, nf.extrusion) nf_nfs = nfs_dim_nf[1] dimnfs = nfs_dim_nf[2] nfnfs[inf] = [get(nfaceid, nf, nf) for nf in nf_nfs] nfnfs_dim[inf] = dimnfs end return [nfnfs, nfnfs_dim] end # Generates the list of n-face of a polytope the d-faces for 0 <= d <n on its # boundary function _nfaceboundary!( anchor::Point{D,Int}, extrusion::Point{D,Int}, extend::Point{D,Int}, isanchor::Bool, list ) where D newext = extend list = [list..., NFace{D}(anchor, extrusion)] for i = 1:D curex = newext[i] if (curex > 0) # Perform extension func1 = (j -> j == i ? 0 : newext[j]) newext = Point{D,Int}([func1(i) for i = 1:D]) func2 = (j -> j == i ? 1 : 0) edim = Point{D,Int}([func2(i) for i = 1:D]) func3 = (j -> j >= i ? 
anchor[j] : 0) tetp = Point{D,Int}([func3(i) for i = 1:D]) + edim if (curex == 1) # Quad extension list = _nfaceboundary!(anchor + edim, extrusion, newext, false, list) elseif (isanchor) list = _nfaceboundary!(tetp, extrusion, newext, false, list) end list = _nfaceboundary!( anchor, extrusion + edim * curex, newext, false, list ) end end return list end function _dimfrom_fs_dimto_fs(p::Polytope, dim_from::Int, dim_to::Int) @assert dim_to <= dim_from dim_from += 1 dim_to += 1 dffs_r = p.nf_dim[end][dim_from] dffs_dtfs = Vector{Vector{Int}}(undef, dffs_r[end] - dffs_r[1] + 1) offs = p.nf_dim[end][dim_to][1] - 1 for (i_dff, dff) in enumerate(dffs_r) dff_nfs = p.nf_nfs[dff] dff_dtfs_r = p.nf_dim[dff][dim_to] dff_dtfs = dff_nfs[dff_dtfs_r] dffs_dtfs[i_dff] = dff_dtfs .- offs # @santiagobadia : With or without offset ? end return dffs_dtfs end # Auxiliary function that determines whether two nodes are connected function _are_nodes_connected(c1, c2, ext) sp_dims = length(c1) d = zeros(length(c1)) for i = 1:length(d) d[i] = c2[i] - c1[i] end dn = sum(d .* d) connected = false if (dn == 1) connected = true else k = 0 for j = 1:sp_dims if (ext[j] == 2) if (c1[j] == 1 || c2[j] == 1) k = j end end end for l = 1:k d[l] = 0 end dn = sum(d .* d) if (dn == 0) connected = true end end return connected end # It generates the set of nodes (its coordinates) in the interior of an n-face, # for a given order. The node coordinates are `Int` and from 0 to `order` per # direction function _interior_nodes_int_coords(p::Polytope{D}, order) where D ext = p.extrusion _ord = [order...] verts = Point{D,Int}[] coor = zeros(Int, D) _generate_nodes!(D, p.extrusion, _ord, coor, verts) return verts end # Auxiliary private recursive function to implement _interior_nodes_int_coords function _generate_nodes!(dim, ext, order, coor, verts) ncoo = copy(coor) nord = copy(order) for i = 1:order[dim]-1 ncoo[dim] = i if dim > 1 if (ext[dim] == TET_AXIS) nord .-= 1 end _generate_nodes!(dim - 1, ext, nord, ncoo, verts) else push!(verts, Point(ncoo...)) end end end # Transforms the int coordinates to float coordinates function _interior_nodes_int_to_real_coords(nodes, order) if length(nodes) > 0 dim = length(nodes[1]) cs_float = Point{dim,Float64}[] cs = zeros(Float64, dim) for cs_int in nodes for i = 1:dim cs[i] = cs_int[i] / order[i] end push!(cs_float, Point{dim,Float64}(cs)) end else cs_float = Point{0,Float64}[] end return cs_float end function _facet_normal(p::Polytope{D}, nf_vs, vs, i_f) where D if (length(p.extrusion) > 1) v = Float64[] for i = 2:length(nf_vs[i_f]) vi = vs[nf_vs[i_f][i]] - vs[nf_vs[i_f][1]] push!(v, vi...) end n = nullspace(transpose(reshape(v, D, length(nf_vs[i_f]) - 1))) n = n .* 1 / sqrt(dot(n, n)) ext_v = _vertex_not_in_facet(p, i_f, nf_vs) v3 = vs[nf_vs[i_f][1]] - vs[ext_v] f_or = 1 if dot(v3, n) < 0.0 n *= -1 f_or = -1 end elseif (length(p.extrusion) == 1) ext_v = _vertex_not_in_facet(p, i_f, nf_vs) n = vs[nf_vs[i_f][1]] - vs[ext_v] n = n .* 1 / dot(n, n) f_or = 1 else error("O-dim polytopes do not have properly define outward facet normals") end return n, f_or end function _vertex_not_in_facet(p, i_f, nf_vs) for i in p.nf_dim[end][1] is_in_f = false for j in nf_vs[i_f] if i == j is_in_f = true break end end if !is_in_f return i break end end end # Generates a zero-dim polytope (vertex) function _vertex() ext = () nfdim = [[1:1]] nfnfs = [[1]] nfanc = Point{0,Int}() nf = NFace{0}(nfanc,nfanc) nfs = [nf] return Polytope{0}(ext, nfs, nfnfs, nfdim) end end # module Polytopes
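A minimal usage sketch of the API above (hypothetical driver; assumes the module has been loaded together with Gridap, and the comments show the expected results for a unit square):

using .Polytopes   # assumed load path for the module above

quad = Polytope(HEX_AXIS, HEX_AXIS)  # 2D polytope from two "hex" extrusions: the unit square
num_nfaces(quad, 0)                  # -> 4 vertices
num_nfaces(quad, 1)                  # -> 4 edges
vertices_coordinates(quad)           # -> corners of [0,1]^2 as Point{2,Float64}
ns, os = face_normals(quad)          # -> outward facet normals and orientations
edge_tangents(quad)                  # -> unit tangent of each edge
nface_connections(quad, 1, 0)        # -> vertex ids on each edge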
Formal statement is: lemma enum_less: "a \<in> s \<Longrightarrow> i < n \<Longrightarrow> enum i < a \<longleftrightarrow> enum (Suc i) \<le> a" Informal statement is: If $a$ is an element of a set $s$ and $i$ is a natural number less than $n$, then the $i$th element of the enumeration of $s$ is less than $a$ if and only if the $(i+1)$st element of the enumeration of $s$ is less than or equal to $a$.
[STATEMENT] lemma fresh_fun_simp_AndL1: assumes a: "z'\<sharp>P" "z'\<sharp>M" "z'\<sharp>x" shows "fresh_fun (\<lambda>z'. Cut <c>.P (z').AndL1 (x).M z') = Cut <c>.P (z').AndL1 (x).M z'" [PROOF STATE] proof (prove) goal (1 subgoal): 1. fresh_fun (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z') = Cut <c>.P z'.AndL1 x.M z' [PROOF STEP] using a [PROOF STATE] proof (prove) using this: z' \<sharp> P z' \<sharp> M z' \<sharp> x goal (1 subgoal): 1. fresh_fun (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z') = Cut <c>.P z'.AndL1 x.M z' [PROOF STEP] apply - [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> fresh_fun (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z') = Cut <c>.P z'.AndL1 x.M z' [PROOF STEP] apply(rule fresh_fun_app) [PROOF STATE] proof (prove) goal (5 subgoals): 1. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> pt TYPE(trm) TYPE(name) 2. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> at TYPE(name) 3. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> finite (supp (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z')) 4. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> \<exists>a. a \<sharp> (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z', Cut <c>.P a.AndL1 x.M a) 5. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> z' \<sharp> (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z') [PROOF STEP] apply(rule pt_name_inst) [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> at TYPE(name) 2. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> finite (supp (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z')) 3. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> \<exists>a. a \<sharp> (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z', Cut <c>.P a.AndL1 x.M a) 4. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> z' \<sharp> (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z') [PROOF STEP] apply(rule at_name_inst) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> finite (supp (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z')) 2. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> \<exists>a. a \<sharp> (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z', Cut <c>.P a.AndL1 x.M a) 3. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> z' \<sharp> (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z') [PROOF STEP] apply(finite_guess) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> \<exists>a. a \<sharp> (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z', Cut <c>.P a.AndL1 x.M a) 2. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> z' \<sharp> (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z') [PROOF STEP] apply(subgoal_tac "\<exists>n::name. n\<sharp>(c,P,x,M)") [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x; \<exists>n. n \<sharp> (c, P, x, M)\<rbrakk> \<Longrightarrow> \<exists>a. a \<sharp> (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z', Cut <c>.P a.AndL1 x.M a) 2. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> \<exists>n. n \<sharp> (c, P, x, M) 3. 
\<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> z' \<sharp> (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z') [PROOF STEP] apply(erule exE) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<And>n. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x; n \<sharp> (c, P, x, M)\<rbrakk> \<Longrightarrow> \<exists>a. a \<sharp> (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z', Cut <c>.P a.AndL1 x.M a) 2. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> \<exists>n. n \<sharp> (c, P, x, M) 3. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> z' \<sharp> (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z') [PROOF STEP] apply(rule_tac x="n" in exI) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<And>n. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x; n \<sharp> (c, P, x, M)\<rbrakk> \<Longrightarrow> n \<sharp> (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z', Cut <c>.P n.AndL1 x.M n) 2. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> \<exists>n. n \<sharp> (c, P, x, M) 3. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> z' \<sharp> (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z') [PROOF STEP] apply(simp add: fresh_prod abs_fresh) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<And>n. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x; n \<sharp> c \<and> n \<sharp> P \<and> n \<sharp> x \<and> n \<sharp> M\<rbrakk> \<Longrightarrow> n \<sharp> (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z') 2. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> \<exists>n. n \<sharp> (c, P, x, M) 3. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> z' \<sharp> (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z') [PROOF STEP] apply(fresh_guess) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> \<exists>n. n \<sharp> (c, P, x, M) 2. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> z' \<sharp> (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z') [PROOF STEP] apply(rule exists_fresh') [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> finite (supp (c, P, x, M)) 2. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> z' \<sharp> (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z') [PROOF STEP] apply(simp add: fin_supp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>z' \<sharp> P; z' \<sharp> M; z' \<sharp> x\<rbrakk> \<Longrightarrow> z' \<sharp> (\<lambda>z'. Cut <c>.P z'.AndL1 x.M z') [PROOF STEP] apply(fresh_guess) [PROOF STATE] proof (prove) goal: No subgoals! [PROOF STEP] done
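An informal reading (not part of the proof script): `fresh_fun f` applies `f` to some fresh name, and the lemma states that this choice is immaterial here, i.e. for any `z'` fresh for `P`, `M` and `x` the function may simply be evaluated at `z'`. The script discharges the side conditions of `fresh_fun_app`: finite support of the abstraction (`finite_guess`), existence of a name fresh for `(c, P, x, M)` (`exists_fresh'`), and freshness of `z'` for the abstraction itself (`fresh_guess`).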
/** * @project identt * @file include/query/HelpQuery.hpp * @author S Roychowdhury <sroycode AT gmail DOT com> * @version 1.0.0 * * @section LICENSE * * Copyright (c) 2017 S Roychowdhury. * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * @section DESCRIPTION * * HelpQuery.hpp : Help Handler * */ #ifndef _IDENTT_QUERY_HELPQUERY_HPP_ #define _IDENTT_QUERY_HELPQUERY_HPP_ #include <memory> #include <string> #include <vector> #include <algorithm> #include <boost/thread/locks.hpp> #include <boost/thread/shared_mutex.hpp> namespace identt { namespace query { class HelpQuery { public: struct HelpT { const unsigned int scope; const std::string route; const std::vector<std::string> desc; }; using pointer=std::shared_ptr<HelpQuery>; using LockT = boost::shared_mutex; using WriteLockT = boost::unique_lock< LockT >; using ReadLockT = boost::shared_lock< LockT >; using HelpListT = std::vector<HelpT>; /** * constructor * */ HelpQuery() = default; /** * make noncopyable */ HelpQuery(const HelpQuery&) = delete; HelpQuery& operator=(const HelpQuery&) = delete; /** * destructor */ virtual ~HelpQuery () {} /** * add : add one HelpT item * * @param h * HelpT help item * * @return * none */ void add(HelpT h) { WriteLockT lock (shared_lock); helplist.emplace_back(h); } /** * get : get a list of items matching scope * * @param scope * HelpT help item * * @return * HelpListT list of matching */ HelpListT get(const unsigned int scope) { HelpListT hl; ReadLockT lock (shared_lock); std::copy_if( helplist.begin() , helplist.end(), std::back_inserter(hl), [scope](const HelpT& h) { return (h.scope & scope); }); return hl; } private: HelpListT helplist; LockT shared_lock; }; } // namespace query } // namespace identt #endif /* _IDENTT_QUERY_HELPQUERY_HPP_ */
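A hypothetical usage sketch of the HelpQuery class above (not part of the header); the routes and scope masks are invented for illustration.

// hypothetical driver, compiled against the header above
#include "query/HelpQuery.hpp"
#include <iostream>

int main() {
  identt::query::HelpQuery hq;
  hq.add({0x1, "/identt/lookup",      {"look up a single identity"}});
  hq.add({0x2, "/identt/bulk_lookup", {"look up many identities at once"}});
  // get(scope) keeps exactly the entries whose mask overlaps the requested scope
  for (const auto& h : hq.get(0x1))
    std::cout << h.route << '\n';   // prints only /identt/lookup
  return 0;
}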
[STATEMENT] lemma [simp]: "start(q,d,f) = q" [PROOF STATE] proof (prove) goal (1 subgoal): 1. start (q, d, f) = q [PROOF STEP] by(simp add:start_def)
[STATEMENT] lemma [code]: \<open>flip_bit n w = w XOR push_bit n 1\<close> for w :: \<open>'a::len word\<close> [PROOF STATE] proof (prove) goal (1 subgoal): 1. flip_bit n w = w XOR push_bit n 1 [PROOF STEP] by (fact flip_bit_eq_xor)
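A worked instance (illustrative numbers, not from the source): for `n = 1` and `w = 5 = 0b101`, we have `push_bit 1 1 = 2 = 0b010`, hence `flip_bit 1 5 = 0b101 XOR 0b010 = 0b111 = 7` — bit 1 of `w` has been flipped.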
\section{Rationale}

\subsection{Why Nix-based?}

There are many solutions in the area of software deployment. Besides Nix, we
know all the traditional package managers, Docker, AppImage, VirtualBox and so
on. One property of Nix we want to highlight is its low system requirements.
Basically, the Nix core needs only a basic file system API in order to work.
Here we follow the same trend of keeping the number of dependencies low, while
still providing a competitive set of features.

\subsection{Why functional-style API?}

There are several reasons:

\begin{itemize}
\item We think that this way we can track API changes more easily. We try to
avoid changes to functions which are already published. We tend to import
functions by name to let Python notify us whenever the API is changed.
\item Class-based APIs in Python often mislead users into thinking that they
could extend them by subclassing. We don't support extension by subclassing,
so we don't need classes.
\item Class-based API wrappers may be created as standalone modules. One
example of such a wrapper is the \textbf{Lens} module.
\end{itemize}

\subsection{What is the idea behind the promises?}

Promises play the role of a 'type-checker' which notices erroneous
realizations before they appear in the Storage, just like regular
type-checkers make sure that no erroneous programs appear as binaries.
{-# OPTIONS --without-K --safe #-} open import Categories.Category module Categories.Category.Instance.Simplex where open import Level open import Data.Product open import Data.Fin.Base using (Fin; _≤_) open import Data.Nat.Base using (ℕ; z≤n; s≤s) open import Function renaming (id to idF; _∘_ to _∙_) open import Relation.Binary using (_=[_]⇒_) open import Relation.Binary.PropositionalEquality Δ : Category 0ℓ 0ℓ 0ℓ Δ = record { Obj = ℕ ; _⇒_ = λ m n → Σ (Fin m → Fin n) (λ f → _≤_ =[ f ]⇒ _≤_) ; _≈_ = λ { (f , mf) (g , mg) → ∀ x → f x ≡ g x } ; id = idF , idF ; _∘_ = λ { (f , mf) (g , mg) → f ∙ g , mf ∙ mg } ; assoc = λ _ → refl ; sym-assoc = λ _ → refl ; identityˡ = λ _ → refl ; identityʳ = λ _ → refl ; identity² = λ _ → refl ; equiv = record { refl = λ _ → refl ; sym = λ eq x → sym (eq x) ; trans = λ eq₁ eq₂ x → trans (eq₁ x) (eq₂ x) } ; ∘-resp-≈ = λ {_ _ _ f g h i} eq₁ eq₂ x → trans (cong (λ t → proj₁ f t) (eq₂ x)) (eq₁ (proj₁ i x)) } open Category Δ -------------------------------------------------------------------------------- -- Face + Degeneracy Maps face-map : ∀ {n} → Fin (ℕ.suc n) → Fin n → Fin (ℕ.suc n) face-map Fin.zero k = Fin.suc k face-map (Fin.suc i) Fin.zero = Fin.zero face-map (Fin.suc i) (Fin.suc k) = Fin.suc (face-map i k) face-mono : ∀ {n} → (i : Fin (ℕ.suc n)) → _≤_ =[ face-map i ]⇒ _≤_ face-mono Fin.zero {_} {_} le = s≤s le face-mono (Fin.suc i) {Fin.zero} {_} _ = z≤n face-mono (Fin.suc i) {Fin.suc _} {Fin.suc _} (s≤s le) = s≤s (face-mono i le) face : ∀ {n} → Fin (ℕ.suc n) → n ⇒ ℕ.suc n face i = face-map i , face-mono i degeneracy-map : ∀ {n} → Fin n → Fin (ℕ.suc n) → Fin n degeneracy-map Fin.zero Fin.zero = Fin.zero degeneracy-map Fin.zero (Fin.suc k) = k degeneracy-map (Fin.suc i) Fin.zero = Fin.zero degeneracy-map (Fin.suc i) (Fin.suc k) = Fin.suc (degeneracy-map i k) degeneracy-mono : ∀ {n} → (i : Fin n) → _≤_ =[ degeneracy-map i ]⇒ _≤_ degeneracy-mono Fin.zero {Fin.zero} {_} _ = z≤n degeneracy-mono Fin.zero {Fin.suc _} {Fin.suc _} (s≤s le) = le degeneracy-mono (Fin.suc i) {Fin.zero} {_} _ = z≤n degeneracy-mono (Fin.suc i) {Fin.suc _} {Fin.suc _} (s≤s le) = s≤s (degeneracy-mono i le) degeneracy : ∀ {n} → Fin n → ℕ.suc n ⇒ n degeneracy i = degeneracy-map i , degeneracy-mono i
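In classical notation (an aside, not part of the module): `face i` is the coface map $\delta_i \colon [n-1] \to [n]$, the monotone injection whose image omits $i$, and `degeneracy i` is the codegeneracy $\sigma_i \colon [n+1] \to [n]$, the monotone surjection hitting $i$ twice. These generate $\Delta$ subject to the simplicial identities, e.g. $\delta_j \circ \delta_i = \delta_i \circ \delta_{j-1}$ for $i < j$ and $\sigma_j \circ \delta_i = \mathrm{id}$ for $i \in \{j, j+1\}$.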
Formal statement is: proposition Morera_triangle: "\<lbrakk>continuous_on S f; open S; \<And>a b c. convex hull {a,b,c} \<subseteq> S \<longrightarrow> contour_integral (linepath a b) f + contour_integral (linepath b c) f + contour_integral (linepath c a) f = 0\<rbrakk> \<Longrightarrow> f analytic_on S" Informal statement is: If $f$ is continuous on an open set $S$ and if the integral of $f$ around every triangle in $S$ is zero, then $f$ is analytic on $S$.
import numpy as np

def fitness_eval(population, problem, dims):
    """Evaluate a binary population on a named benchmark problem."""
    if problem == 'onemax':
        fitness = np.sum(population, 1)
    elif problem == 'onemin':
        fitness = dims - np.sum(population, 1)
    elif problem == 'trap5':
        pop = population.shape[0]
        fitness = np.zeros(pop)
        index = np.arange(dims).reshape(-1, 5)  # non-overlapping blocks of 5 bits
        rows = index.shape[0]
        for i in range(pop):
            fitsum = 0
            for j in range(rows):
                contri = sum(population[i, index[j, :]])
                if contri == 5:
                    fitsum = fitsum + 5           # all-ones block scores 5
                else:
                    fitsum = fitsum + (4 - contri)  # deceptive slope towards all zeros
            fitness[i] = fitsum
    else:
        raise Exception('Function not implemented.')
    return fitness

def knapsack_fitness_eval(population, problem, dims, pop):
    """Knapsack profit of each individual, with greedy repair of overweight solutions."""
    fitness = np.zeros(pop)
    Weights = problem['w']
    Profits = problem['p']
    Ratios = Profits / Weights
    for i in range(pop):
        BV = population[i, :] == 1
        TotalWeight = np.sum(Weights[BV])
        TotalProfit = np.sum(Profits[BV])
        if TotalWeight > problem['cap']:
            # Repair solution: drop selected items with the worst
            # profit/weight ratio until the capacity is respected
            selections = np.sum(BV)
            List = np.zeros((selections, 2))
            counter = 0
            for j in range(dims):
                if BV[j] == 1:
                    List[counter, 0] = Ratios[j]
                    List[counter, 1] = int(j)
                    counter = counter + 1
                    if counter >= selections:
                        break
            List = List[List[:, 0].argsort()[::-1]]  # sort by ratio, descending
            counter = selections - 1
            while TotalWeight > problem['cap']:
                l = int(List[counter, 1])
                BV[l] = 0
                TotalWeight = TotalWeight - Weights[l]
                TotalProfit = TotalProfit - Profits[l]
                counter = counter - 1
        fitness[i] = TotalProfit
    return fitness
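A minimal driver sketch (hypothetical; the problem-dictionary keys follow the accesses made above):

rng = np.random.default_rng(0)
pop_size, dims = 8, 20
population = rng.integers(0, 2, size=(pop_size, dims))  # binary individuals

print(fitness_eval(population, 'onemax', dims))   # ones per row
print(fitness_eval(population, 'trap5', dims))    # dims must be a multiple of 5

knap = {'w': rng.integers(1, 10, dims),           # item weights
        'p': rng.integers(1, 10, dims),           # item profits
        'cap': 40}                                # knapsack capacity
print(knapsack_fitness_eval(population, knap, dims, pop_size))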
module PiFrac.Everything where open import PiFrac.Syntax -- Syntax of PiFrac open import PiFrac.Opsem -- Abstract machine semantics of PiFrac open import PiFrac.AuxLemmas -- Some auxiliary lemmas about opsem for forward/backward deterministic proof open import PiFrac.NoRepeat -- Forward/backward deterministic lemmas and Non-repeating lemma open import PiFrac.Invariants -- Some invariants about abstract machine semantics open import PiFrac.Eval -- Evaluator for PiFrac open import PiFrac.Interp -- Big-step intepreter for PiFrac using Maybe monad open import PiFrac.Properties -- Properties of PiFrac open import PiFrac.Examples -- Examples open import PiFrac.Category -- PiFrac pointed category
//-*-C++-*- /*************************************************************************** * * Copyright (C) 2010 by Paul Demorest * Licensed under the Academic Free License version 2.1 * ***************************************************************************/ #ifndef __Pulsar_SplineFit_h #define __Pulsar_SplineFit_h #include "Estimate.h" #include "Reference.h" #include <gsl/gsl_bspline.h> #include <gsl/gsl_matrix.h> //! Spline fitting for smoothing and/or interpolation, using //! GSL's bspline routines. class SplineFit : public Reference::Able { public: //! Default constructor SplineFit (); //! Destructor virtual ~SplineFit (); //! Clear all current data, results void reset(); //! Set the degree of the fit void set_order(int n) { order=n; calculated=false; } //! Get the current degree int get_order() { return order; } //! Set uniform breakpoints to span the data void set_uniform_breaks(int nint); //! Add a data point void add_data(double x, Estimate<double> y); //! Compute the fit using current data void compute(); //! Evaluate the fit solution at the given x double evaluate(double x); //! Evaluate the fit solution's derivative at the given x double evaluate_deriv(double x); //! Get the reduced chi2 of the fit double get_rchi2(); protected: //! The x values for the fit std::vector<double> x; //! The y values/errors for the fit std::vector< Estimate<double> > y; //! The spline breakpoints std::vector<double> bp; //! Spline order (0=const, 3=cubic, etc) int order; //! The fit chi2 double chi2; //! Fit NDOF int ndof; //! Has the fit been calculated? bool calculated; //! The fitted coeffs gsl_vector *coeffs; //! The fit cov matrix gsl_matrix *cov; //! Check if a requested x val is in the fit range bool check_range(double x); //! Check if spline intervals and data make sense void interval_check(bool fix=false); //! Free spline workspaces void free_workspaces(); //! bspline temp space gsl_bspline_workspace *bwork; private: }; #endif
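A hypothetical usage sketch for SplineFit (not part of the header; the header's file name and the Estimate<double>(value, variance) constructor are assumptions made for illustration):

// hypothetical driver, compiled against the header above
#include "SplineFit.h"   // assumed file name for the class above
#include <cmath>
#include <cstdio>

int main() {
  SplineFit fit;
  fit.set_order(3);                       // cubic B-spline basis
  for (int i = 0; i < 50; i++) {
    double x = i / 49.0;
    // Estimate<double>(value, variance) is assumed from Estimate.h
    fit.add_data(x, Estimate<double>(sin(2.0 * M_PI * x), 1e-4));
  }
  fit.set_uniform_breaks(8);              // 8 uniform intervals spanning the data
  fit.compute();
  printf("f(0.5)=%g f'(0.5)=%g rchi2=%g\n",
         fit.evaluate(0.5), fit.evaluate_deriv(0.5), fit.get_rchi2());
  return 0;
}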
State Before: α : Type u_1 β : Type ?u.308183 γ : Type ?u.308186 δ : α → Sort u_2 s : Finset α f g : (i : α) → δ i inst✝¹ : (j : α) → Decidable (j ∈ s) inst✝ : DecidableEq α i : α hi : i ∈ s v : δ i ⊢ update (piecewise s f g) i v = piecewise s (update f i v) g State After: α : Type u_1 β : Type ?u.308183 γ : Type ?u.308186 δ : α → Sort u_2 s : Finset α f g : (i : α) → δ i inst✝¹ : (j : α) → Decidable (j ∈ s) inst✝ : DecidableEq α i : α hi : i ∈ s v : δ i ⊢ piecewise s (update f i v) (update g i v) = piecewise s (update f i v) g Tactic: rw [update_piecewise] State Before: α : Type u_1 β : Type ?u.308183 γ : Type ?u.308186 δ : α → Sort u_2 s : Finset α f g : (i : α) → δ i inst✝¹ : (j : α) → Decidable (j ∈ s) inst✝ : DecidableEq α i : α hi : i ∈ s v : δ i ⊢ piecewise s (update f i v) (update g i v) = piecewise s (update f i v) g State After: α : Type u_1 β : Type ?u.308183 γ : Type ?u.308186 δ : α → Sort u_2 s : Finset α f g : (i : α) → δ i inst✝¹ : (j : α) → Decidable (j ∈ s) inst✝ : DecidableEq α i : α hi : i ∈ s v : δ i j : α hj : ¬j ∈ s ⊢ j ≠ i Tactic: refine' s.piecewise_congr (fun _ _ => rfl) fun j hj => update_noteq _ _ _ State Before: α : Type u_1 β : Type ?u.308183 γ : Type ?u.308186 δ : α → Sort u_2 s : Finset α f g : (i : α) → δ i inst✝¹ : (j : α) → Decidable (j ∈ s) inst✝ : DecidableEq α i : α hi : i ∈ s v : δ i j : α hj : ¬j ∈ s ⊢ j ≠ i State After: no goals Tactic: exact fun h => hj (h.symm ▸ hi)
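The idea in words (an informal gloss of the tactic states above): rewriting with `update_piecewise` distributes the update into both branches, leaving the goal `piecewise s (update f i v) (update g i v) = piecewise s (update f i v) g`; `piecewise_congr` then reduces this to showing the `g`-branch update is vacuous, i.e. `update g i v j = g j` for `j ∉ s`, which holds by `update_noteq` since `i ∈ s` forces `j ≠ i`.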
\chapter{Design and Construction of X-ray Digital Radiography System} \label{chap:DR_design_construct}
section \<open>Rewriting\<close> theory Rewriting imports Terms_Positions begin subsection \<open>Basic rewrite definitions\<close> subsubsection \<open>Rewrite steps with implicit signature declaration (encoded in the type)\<close> inductive_set rrstep :: "('f, 'v) term rel \<Rightarrow> ('f, 'v) term rel" for \<R> where [intro]: "(l, r) \<in> \<R> \<Longrightarrow> (l \<cdot> \<sigma>, r \<cdot> \<sigma>) \<in> rrstep \<R>" inductive_set rstep :: "('f, 'v) term rel \<Rightarrow> ('f, 'v) term rel" for \<R> where "(s, t) \<in> rrstep \<R> \<Longrightarrow> (C\<langle>s\<rangle>, C\<langle>t\<rangle>) \<in> rstep \<R>" subsubsection \<open>Restrict relations to terms induced by a given signature\<close> definition "sig_step \<F> \<R> = Restr \<R> (Collect (\<lambda> s. funas_term s \<subseteq> \<F>))" subsubsection \<open>Rewriting under a given signature/restricted to ground terms\<close> abbreviation "srrstep \<F> \<R> \<equiv> sig_step \<F> (rrstep \<R>)" abbreviation "srstep \<F> \<R> \<equiv> sig_step \<F> (rstep \<R>)" abbreviation "gsrstep \<F> \<R> \<equiv> Restr (sig_step \<F> (rstep \<R>)) (Collect ground)" subsubsection \<open>Rewriting sequences involving a root step\<close> abbreviation (input) relto :: "'a rel \<Rightarrow> 'a rel \<Rightarrow> 'a rel" where "relto R S \<equiv> S^* O R O S^*" definition "srsteps_with_root_step \<F> \<R> \<equiv> relto (sig_step \<F> (rrstep \<R>)) (srstep \<F> \<R>)" subsection \<open>Monotonicity laws\<close> lemma Restr_mono: "Restr r A \<subseteq> r" by auto lemma Restr_trancl_mono_set: "(Restr r A)\<^sup>+ \<subseteq> A \<times> A" by (simp add: trancl_subset_Sigma) lemma rrstep_rstep_mono: "rrstep \<R> \<subseteq> rstep \<R>" by (auto intro: rstep.intros[where ?C = \<box>, simplified]) lemma sig_step_mono: "\<F> \<subseteq> \<G> \<Longrightarrow> sig_step \<F> \<R> \<subseteq> sig_step \<G> \<R>" by (auto simp: sig_step_def) lemma sig_step_mono2: "\<R> \<subseteq> \<L> \<Longrightarrow> sig_step \<F> \<R> \<subseteq> sig_step \<F> \<L>" by (auto simp: sig_step_def) lemma srrstep_monp: "\<F> \<subseteq> \<G> \<Longrightarrow> srrstep \<F> \<R> \<subseteq> srrstep \<G> \<R>" by (simp add: sig_step_mono) lemma srstep_monp: "\<F> \<subseteq> \<G> \<Longrightarrow> srstep \<F> \<R> \<subseteq> srstep \<G> \<R>" by (simp add: sig_step_mono) lemma srsteps_monp: "\<F> \<subseteq> \<G> \<Longrightarrow> (srstep \<F> \<R>)\<^sup>+ \<subseteq> (srstep \<G> \<R>)\<^sup>+" by (simp add: sig_step_mono trancl_mono_set) lemma srsteps_eq_monp: "\<F> \<subseteq> \<G> \<Longrightarrow> (srstep \<F> \<R>)\<^sup>* \<subseteq> (srstep \<G> \<R>)\<^sup>*" by (meson rtrancl_mono sig_step_mono subrelI subsetD trancl_into_rtrancl) lemma srsteps_with_root_step_sig_mono: "\<F> \<subseteq> \<G> \<Longrightarrow> srsteps_with_root_step \<F> \<R> \<subseteq> srsteps_with_root_step \<G> \<R>" unfolding srsteps_with_root_step_def by (simp add: relcomp_mono srrstep_monp srsteps_eq_monp) subsection \<open>Introduction, elimination, and destruction rules for @{const sig_step}, @{const rstep}, @{const rrstep}, @{const srrstep}, and @{const srstep}\<close> lemma sig_stepE [elim, consumes 1]: "(s, t) \<in> sig_step \<F> \<R> \<Longrightarrow> \<lbrakk>(s, t) \<in> \<R> \<Longrightarrow> funas_term s \<subseteq> \<F> \<Longrightarrow> funas_term t \<subseteq> \<F> \<Longrightarrow> P\<rbrakk> \<Longrightarrow> P" by (auto simp: sig_step_def) lemma sig_stepI [intro]: "funas_term s \<subseteq> \<F> \<Longrightarrow> funas_term t \<subseteq> \<F> \<Longrightarrow> (s, t) \<in> 
\<R> \<Longrightarrow> (s, t) \<in> sig_step \<F> \<R>" by (auto simp: sig_step_def) lemma rrstep_subst [elim, consumes 1]: assumes "(s, t) \<in> rrstep \<R>" obtains l r \<sigma> where "(l, r) \<in> \<R>" "s = l \<cdot> \<sigma>" "t = r \<cdot> \<sigma>" using assms by (meson rrstep.simps) lemma rstep_imp_C_s_r: assumes "(s, t) \<in> rstep \<R>" shows "\<exists>C \<sigma> l r. (l,r) \<in> \<R> \<and> s = C\<langle>l\<cdot>\<sigma>\<rangle> \<and> t = C\<langle>r\<cdot>\<sigma>\<rangle>"using assms by (metis rrstep.cases rstep.simps) lemma rstep_imp_C_s_r' [elim, consumes 1]: assumes "(s, t) \<in> rstep \<R>" obtains C l r \<sigma> where "(l,r) \<in> \<R>" "s = C\<langle>l\<cdot>\<sigma>\<rangle>" "t = C\<langle>r\<cdot>\<sigma>\<rangle>" using assms using rstep_imp_C_s_r by blast lemma rrstep_basicI [intro]: "(l, r) \<in> \<R> \<Longrightarrow> (l, r) \<in> rrstep \<R>" by (metis rrstepp.intros rrstepp_rrstep_eq subst_apply_term_empty) lemma rstep_ruleI [intro]: "(l, r) \<in> \<R> \<Longrightarrow> (l, r) \<in> rstep \<R>" using rrstep_rstep_mono by blast lemma rstepI [intro]: "(l, r) \<in> \<R> \<Longrightarrow> s = C\<langle>l \<cdot> \<sigma>\<rangle> \<Longrightarrow> t = C\<langle>r \<cdot> \<sigma>\<rangle> \<Longrightarrow> (s, t) \<in> rstep \<R>" by (simp add: rrstep.intros rstep.intros) lemma rstep_substI [intro]: "(s, t) \<in> rstep \<R> \<Longrightarrow> (s \<cdot> \<sigma>, t \<cdot> \<sigma>) \<in> rstep \<R>" by (auto elim!: rstep_imp_C_s_r' simp flip: subst_subst_compose) lemma rstep_ctxtI [intro]: "(s, t) \<in> rstep \<R> \<Longrightarrow> (C\<langle>s\<rangle>, C\<langle>t\<rangle>) \<in> rstep \<R>" by (auto elim!: rstep_imp_C_s_r' simp flip: ctxt_ctxt_compose) lemma srrstepD: "(s, t) \<in> srrstep \<F> \<R> \<Longrightarrow> (s, t) \<in> rrstep \<R> \<and> funas_term s \<subseteq> \<F> \<and> funas_term t \<subseteq> \<F>" by (auto simp: sig_step_def) lemma srstepD: "(s, t) \<in> (srstep \<F> \<R>) \<Longrightarrow> (s, t) \<in> rstep \<R> \<and> funas_term s \<subseteq> \<F> \<and> funas_term t \<subseteq> \<F>" by (auto simp: sig_step_def) lemma srstepsD: "(s, t) \<in> (srstep \<F> \<R>)\<^sup>+ \<Longrightarrow> (s, t) \<in> (rstep \<R>)\<^sup>+ \<and> funas_term s \<subseteq> \<F> \<and> funas_term t \<subseteq> \<F>" unfolding sig_step_def using trancl_mono_set[OF Restr_mono] by (auto simp: sig_step_def dest: subsetD[OF Restr_trancl_mono_set]) subsubsection \<open>Transitive and relfexive closure distribution over @{const sig_step}\<close> lemma funas_rel_converse: "funas_rel \<R> \<subseteq> \<F> \<Longrightarrow> funas_rel (\<R>\<inverse>) \<subseteq> \<F>" unfolding funas_rel_def by auto lemma rstep_term_to_sig_r: assumes "(s, t) \<in> rstep \<R>" and "funas_rel \<R> \<subseteq> \<F>" and "funas_term s \<subseteq> \<F>" shows "(s, term_to_sig \<F> v t) \<in> rstep \<R>" proof - from assms(1) obtain C l r \<sigma> where *: "s = C\<langle>l \<cdot> \<sigma>\<rangle>" "t = C\<langle>r \<cdot> \<sigma>\<rangle>" "(l, r) \<in> \<R>" by auto from assms(2, 3) *(3) have "funas_ctxt C \<subseteq> \<F>" "funas_term l \<subseteq> \<F>" "funas_term r \<subseteq> \<F>" by (auto simp: *(1) funas_rel_def funas_term_subst subset_eq) then have "(term_to_sig \<F> v s, term_to_sig \<F> v t) \<in> rstep \<R>" using *(3) by (auto simp: *(1, 2) funas_ctxt_ctxt_well_def_hole_path) then show ?thesis using assms(3) by auto qed lemma rstep_term_to_sig_l: assumes "(s, t) \<in> rstep \<R>" and "funas_rel \<R> \<subseteq> \<F>" and "funas_term t \<subseteq> \<F>" shows "(term_to_sig \<F> v 
s, t) \<in> rstep \<R>" proof - from assms(1) obtain C l r \<sigma> where *: "s = C\<langle>l \<cdot> \<sigma>\<rangle>" "t = C\<langle>r \<cdot> \<sigma>\<rangle>" "(l, r) \<in> \<R>" by auto from assms(2, 3) *(3) have "funas_ctxt C \<subseteq> \<F>" "funas_term l \<subseteq> \<F>" "funas_term r \<subseteq> \<F>" by (auto simp: *(2) funas_rel_def funas_term_subst subset_eq) then have "(term_to_sig \<F> v s, term_to_sig \<F> v t) \<in> rstep \<R>" using *(3) by (auto simp: *(1, 2) funas_ctxt_ctxt_well_def_hole_path) then show ?thesis using assms(3) by auto qed lemma rstep_trancl_sig_step_r: assumes "(s, t) \<in> (rstep \<R>)\<^sup>+" and "funas_rel \<R> \<subseteq> \<F>" and "funas_term s \<subseteq> \<F>" shows "(s, term_to_sig \<F> v t) \<in> (srstep \<F> \<R>)\<^sup>+" using assms proof (induct) case (base t) then show ?case using subsetD[OF fuans_term_term_to_sig, of _ \<F> v] by (auto simp: rstep_term_to_sig_r sig_step_def intro!: r_into_trancl) next case (step t u) then have st: "(s, term_to_sig \<F> v t) \<in> (srstep \<F> \<R>)\<^sup>+" by auto from step(2) obtain C l r \<sigma> where *: "t = C\<langle>l \<cdot> \<sigma>\<rangle>" "u = C\<langle>r \<cdot> \<sigma>\<rangle>" "(l, r) \<in> \<R>" by auto show ?case proof (cases "ctxt_well_def_hole_path \<F> C") case True from *(3) step(4) have "funas_term l \<subseteq> \<F>" "funas_term r \<subseteq> \<F>" by (auto simp: funas_rel_def) then have "(term_to_sig \<F> v t, term_to_sig \<F> v u) \<in> rstep \<R>" using True step(2) *(3) unfolding * by auto then have "(term_to_sig \<F> v t, term_to_sig \<F> v u) \<in> srstep \<F> \<R>" by (auto simp:_ sig_step_def) then show ?thesis using st by auto next case False then have "term_to_sig \<F> v t = term_to_sig \<F> v u" unfolding * by auto then show ?thesis using st by auto qed qed lemma rstep_trancl_sig_step_l: assumes "(s, t) \<in> (rstep \<R>)\<^sup>+" and "funas_rel \<R> \<subseteq> \<F>" and "funas_term t \<subseteq> \<F>" shows "(term_to_sig \<F> v s, t) \<in> (srstep \<F> \<R>)\<^sup>+" using assms proof (induct rule: converse_trancl_induct) case (base t) then show ?case using subsetD[OF fuans_term_term_to_sig, of _ \<F> v] by (auto simp: rstep_term_to_sig_l sig_step_def intro!: r_into_trancl) next case (step s u) then have st: "(term_to_sig \<F> v u, t) \<in> (srstep \<F> \<R>)\<^sup>+" by auto from step(1) obtain C l r \<sigma> where *: "s = C\<langle>l \<cdot> \<sigma>\<rangle>" "u = C\<langle>r \<cdot> \<sigma>\<rangle>" "(l, r) \<in> \<R>" by auto show ?case proof (cases "ctxt_well_def_hole_path \<F> C") case True from *(3) step(4) have "funas_term l \<subseteq> \<F>" "funas_term r \<subseteq> \<F>" by (auto simp: funas_rel_def) then have "(term_to_sig \<F> v s, term_to_sig \<F> v u) \<in> rstep \<R>" using True step(2) *(3) unfolding * by auto then have "(term_to_sig \<F> v s, term_to_sig \<F> v u) \<in> srstep \<F> \<R>" by (auto simp:_ sig_step_def) then show ?thesis using st by auto next case False then have "term_to_sig \<F> v s = term_to_sig \<F> v u" unfolding * by auto then show ?thesis using st by auto qed qed lemma rstep_srstepI [intro]: "funas_rel \<R> \<subseteq> \<F> \<Longrightarrow> funas_term s \<subseteq> \<F> \<Longrightarrow> funas_term t \<subseteq> \<F> \<Longrightarrow> (s, t) \<in> rstep \<R> \<Longrightarrow> (s, t) \<in> srstep \<F> \<R>" by blast lemma rsteps_srstepsI [intro]: "funas_rel \<R> \<subseteq> \<F> \<Longrightarrow> funas_term s \<subseteq> \<F> \<Longrightarrow> funas_term t \<subseteq> \<F> \<Longrightarrow> (s, t) \<in> (rstep 
\<R>)\<^sup>+ \<Longrightarrow> (s, t) \<in> (srstep \<F> \<R>)\<^sup>+" using rstep_trancl_sig_step_r[of s t \<R> \<F>] by auto lemma rsteps_eq_srsteps_eqI [intro]: "funas_rel \<R> \<subseteq> \<F> \<Longrightarrow> funas_term s \<subseteq> \<F> \<Longrightarrow> funas_term t \<subseteq> \<F> \<Longrightarrow> (s, t) \<in> (rstep \<R>)\<^sup>* \<Longrightarrow> (s, t) \<in> (srstep \<F> \<R>)\<^sup>*" by (auto simp add: rtrancl_eq_or_trancl) lemma rsteps_eq_relcomp_srsteps_eq_relcompI [intro]: assumes "funas_rel \<R> \<subseteq> \<F>" "funas_rel \<S> \<subseteq> \<F>" and funas: "funas_term s \<subseteq> \<F>" "funas_term t \<subseteq> \<F>" and steps: "(s, t) \<in> (rstep \<R>)\<^sup>* O (rstep \<S>)\<^sup>*" shows "(s, t) \<in> (srstep \<F> \<R>)\<^sup>* O (srstep \<F> \<S>)\<^sup>*" proof - from steps obtain u where "(s, u) \<in> (rstep \<R>)\<^sup>*" "(u, t) \<in> (rstep \<S>)\<^sup>*" by auto then have "(s, term_to_sig \<F> v u) \<in> (srstep \<F> \<R>)\<^sup>*" "(term_to_sig \<F> v u, t) \<in> (srstep \<F> \<S>)\<^sup>*" using rstep_trancl_sig_step_l[OF _ assms(2) funas(2), of u v] using rstep_trancl_sig_step_r[OF _ assms(1) funas(1), of u v] funas by (auto simp: rtrancl_eq_or_trancl) then show ?thesis by auto qed subsubsection \<open>Distributivity laws\<close> lemma rstep_smycl_dist: "(rstep \<R>)\<^sup>\<leftrightarrow> = rstep (\<R>\<^sup>\<leftrightarrow>)" by (auto simp: sig_step_def) lemma sig_step_symcl_dist: "(sig_step \<F> \<R>)\<^sup>\<leftrightarrow> = sig_step \<F> (\<R>\<^sup>\<leftrightarrow>)" by (auto simp: sig_step_def) lemma srstep_symcl_dist: "(srstep \<F> \<R>)\<^sup>\<leftrightarrow> = srstep \<F> (\<R>\<^sup>\<leftrightarrow>)" by (auto simp: sig_step_def) lemma Restr_smycl_dist: "(Restr \<R> \<A>)\<^sup>\<leftrightarrow> = Restr (\<R>\<^sup>\<leftrightarrow>) \<A>" by auto lemmas rew_symcl_inwards = rstep_smycl_dist sig_step_symcl_dist srstep_symcl_dist Restr_smycl_dist lemmas rew_symcl_outwards = rew_symcl_inwards[symmetric] lemma rstep_converse_dist: "(rstep \<R>)\<inverse> = rstep (\<R>\<inverse>)" by auto lemma srrstep_converse_dist: "(srrstep \<F> \<R>)\<inverse> = srrstep \<F> (\<R>\<inverse>)" by (fastforce simp: sig_step_def) lemma sig_step_converse_rstep: "(srstep \<F> \<R>)\<inverse> = sig_step \<F> ((rstep \<R>)\<inverse>)" by (meson converse.simps set_eq_subset sig_stepE(1) sig_stepE sig_stepI subrelI) lemma srstep_converse_dist: "(srstep \<F> \<R>)\<inverse> = srstep \<F> (\<R>\<inverse>)" by (auto simp: sig_step_def) lemma Restr_converse: "(Restr \<R> A)\<inverse> = Restr (\<R>\<inverse>) A" by auto lemmas rew_converse_inwards = rstep_converse_dist srrstep_converse_dist sig_step_converse_rstep srstep_converse_dist Restr_converse trancl_converse[symmetric] rtrancl_converse[symmetric] lemmas rew_converse_outwards = rew_converse_inwards[symmetric] lemma sig_step_rsteps_dist: "funas_rel \<R> \<subseteq> \<F> \<Longrightarrow> sig_step \<F> ((rstep \<R>)\<^sup>+) = (srstep \<F> \<R>)\<^sup>+" by (auto elim!: sig_stepE dest: srstepsD) lemma sig_step_rsteps_eq_dist: "funas_rel \<R> \<subseteq> \<F> \<Longrightarrow> sig_step \<F> ((rstep \<R>)\<^sup>+) \<union> Id = (srstep \<F> \<R>)\<^sup>*" by (auto simp: rtrancl_eq_or_trancl sig_step_rsteps_dist) lemma sig_step_conversion_dist: "(srstep \<F> \<R>)\<^sup>\<leftrightarrow>\<^sup>* = (srstep \<F> (\<R>\<^sup>\<leftrightarrow>))\<^sup>*" by (auto simp: rtrancl_eq_or_trancl sig_step_rsteps_dist conversion_def srstep_symcl_dist) lemma gsrstep_conversion_dist: "(gsrstep \<F> 
\<R>)\<^sup>\<leftrightarrow>\<^sup>* = (gsrstep \<F> (\<R>\<^sup>\<leftrightarrow>))\<^sup>*" by (auto simp: conversion_def rew_symcl_inwards) lemma sig_step_grstep_dist: "gsrstep \<F> \<R> = sig_step \<F> (Restr (rstep \<R>) (Collect ground))" by (auto simp: sig_step_def) subsection \<open>Substitution closure of @{const srstep}\<close> lemma srstep_subst_closed: assumes "(s, t) \<in> srstep \<F> \<R>" "\<And> x. funas_term (\<sigma> x) \<subseteq> \<F>" shows "(s \<cdot> \<sigma>, t \<cdot> \<sigma>) \<in> srstep \<F> \<R>" using assms by (auto simp: sig_step_def funas_term_subst) lemma srsteps_subst_closed: assumes "(s, t) \<in> (srstep \<F> \<R>)\<^sup>+" "\<And> x. funas_term (\<sigma> x) \<subseteq> \<F>" shows "(s \<cdot> \<sigma>, t \<cdot> \<sigma>) \<in> (srstep \<F> \<R>)\<^sup>+" using assms(1) proof (induct rule: trancl.induct) case (r_into_trancl s t) show ?case using srstep_subst_closed[OF r_into_trancl assms(2)] by auto next case (trancl_into_trancl s t u) from trancl_into_trancl(2) show ?case using srstep_subst_closed[OF trancl_into_trancl(3) assms(2)] by (meson rtrancl_into_trancl1 trancl_into_rtrancl) qed lemma srsteps_eq_subst_closed: assumes "(s, t) \<in> (srstep \<F> \<R>)\<^sup>*" "\<And> x. funas_term (\<sigma> x) \<subseteq> \<F>" shows "(s \<cdot> \<sigma>, t \<cdot> \<sigma>) \<in> (srstep \<F> \<R>)\<^sup>*" using assms srsteps_subst_closed by (metis rtrancl_eq_or_trancl) lemma srsteps_eq_subst_relcomp_closed: assumes "(s, t) \<in> (srstep \<F> \<R>)\<^sup>* O (srstep \<F> \<S>)\<^sup>*" "\<And> x. funas_term (\<sigma> x) \<subseteq> \<F>" shows "(s \<cdot> \<sigma>, t \<cdot> \<sigma>) \<in> (srstep \<F> \<R>)\<^sup>* O (srstep \<F> \<S>)\<^sup>*" proof - from assms(1) obtain u where "(s, u) \<in> (srstep \<F> \<R>)\<^sup>*" "(u, t) \<in> (srstep \<F> \<S>)\<^sup>*" by auto then have "(s \<cdot> \<sigma>, u \<cdot> \<sigma>) \<in> (srstep \<F> \<R>)\<^sup>*" "(u \<cdot> \<sigma>, t \<cdot> \<sigma>) \<in> (srstep \<F> \<S>)\<^sup>*" using assms srsteps_eq_subst_closed by metis+ then show ?thesis by auto qed subsection \<open>Context closure of @{const srstep}\<close> lemma srstep_ctxt_closed: assumes "funas_ctxt C \<subseteq> \<F>" and "(s, t) \<in> srstep \<F> \<R>" shows "(C\<langle>s\<rangle>, C\<langle>t\<rangle>) \<in> srstep \<F> \<R>" using assms by (intro sig_stepI) (auto dest: srstepD) lemma srsteps_ctxt_closed: assumes "funas_ctxt C \<subseteq> \<F>" and "(s, t) \<in> (srstep \<F> \<R>)\<^sup>+" shows "(C\<langle>s\<rangle>, C\<langle>t\<rangle>) \<in> (srstep \<F> \<R>)\<^sup>+" using assms(2) srstep_ctxt_closed[OF assms(1)] by (induct) force+ lemma srsteps_eq_ctxt_closed: assumes "funas_ctxt C \<subseteq> \<F>" and "(s, t) \<in> (srstep \<F> \<R>)\<^sup>*" shows "(C\<langle>s\<rangle>, C\<langle>t\<rangle>) \<in> (srstep \<F> \<R>)\<^sup>*" using srsteps_ctxt_closed[OF assms(1)] assms(2) by (metis rtrancl_eq_or_trancl) lemma sig_steps_join_ctxt_closed: assumes "funas_ctxt C \<subseteq> \<F>" and "(s, t) \<in> (srstep \<F> \<R>)\<^sup>\<down>" shows "(C\<langle>s\<rangle>, C\<langle>t\<rangle>) \<in> (srstep \<F> \<R>)\<^sup>\<down>" using srsteps_eq_ctxt_closed[OF assms(1)] assms(2) unfolding join_def rew_converse_inwards by auto text \<open>The following lemma shows that every rewrite sequence either contains a root step or is root stable\<close> lemma nsrsteps_with_root_step_step_on_args: assumes "(s, t) \<in> (srstep \<F> \<R>)\<^sup>+" "(s, t) \<notin> srsteps_with_root_step \<F> \<R>" shows "\<exists> f ss ts. 
s = Fun f ss \<and> t = Fun f ts \<and> length ss = length ts \<and> (\<forall> i < length ts. (ss ! i, ts ! i) \<in> (srstep \<F> \<R>)\<^sup>*)" using assms proof (induct) case (base t) obtain C l r \<sigma> where [simp]: "s = C\<langle>l \<cdot> \<sigma>\<rangle>" "t = C\<langle>r \<cdot> \<sigma>\<rangle>" and r: "(l, r) \<in> \<R>" using base(1) unfolding sig_step_def by blast then have funas: "funas_ctxt C \<subseteq> \<F>" "funas_term (l \<cdot> \<sigma>) \<subseteq> \<F>" "funas_term (r \<cdot> \<sigma>) \<subseteq> \<F>" using base(1) by (auto simp: sig_step_def) from funas(2-) r have "(l \<cdot> \<sigma>, r \<cdot> \<sigma>) \<in> srrstep \<F> \<R>" by (auto simp: sig_step_def) then have "C = Hole \<Longrightarrow> False" using base(2) r by (auto simp: srsteps_with_root_step_def) then obtain f ss D ts where [simp]: "C = More f ss D ts" by (cases C) auto have "(D\<langle>l \<cdot> \<sigma>\<rangle>, D\<langle>r \<cdot> \<sigma>\<rangle>) \<in> (srstep \<F> \<R>)" using base(1) r funas by (auto simp: sig_step_def) then show ?case using funas by (auto simp: nth_append_Cons) next case (step t u) show ?case proof (cases "(s, t) \<in> srsteps_with_root_step \<F> \<R> \<or> (t, u) \<in> sig_step \<F> (rrstep \<R>)") case True then show ?thesis using step(1, 2, 4) by (auto simp add: relcomp3_I rtrancl.rtrancl_into_rtrancl srsteps_with_root_step_def) next case False obtain C l r \<sigma> where *[simp]: "t = C\<langle>l \<cdot> \<sigma>\<rangle>" "u = C\<langle>r \<cdot> \<sigma>\<rangle>" and r: "(l, r) \<in> \<R>" using step(2) unfolding sig_step_def by blast then have funas: "funas_ctxt C \<subseteq> \<F>" "funas_term (l \<cdot> \<sigma>) \<subseteq> \<F>" "funas_term (r \<cdot> \<sigma>) \<subseteq> \<F>" using step(2) by (auto simp: sig_step_def) from False have "C \<noteq> Hole" using funas r by (force simp: sig_step_def) then obtain f ss D ts where c[simp]: "C = More f ss D ts" by (cases C) auto from step(3, 1) False obtain g sss tss where **[simp]: "s = Fun g sss" "t = Fun g tss" and l: "length sss = length tss" and inv: "\<forall> i < length tss. (sss ! i, tss ! i) \<in> (srstep \<F> \<R>)\<^sup>*" by auto have [simp]: "g = f" and lc: "Suc (length ss + length ts) = length sss" using l *(1) unfolding c using **(2) by auto then have "\<forall> i < Suc (length ss + length ts). ((ss @ D\<langle>l \<cdot> \<sigma>\<rangle> # ts) ! i, (ss @ D\<langle>r \<cdot> \<sigma>\<rangle> # ts) ! i) \<in> (srstep \<F> \<R>)\<^sup>*" using * funas r by (auto simp: nth_append_Cons r_into_rtrancl rstep.intros rstepI sig_stepI) then have "i < length tss \<Longrightarrow> (sss ! i, (ss @ D\<langle>r \<cdot> \<sigma>\<rangle> # ts) ! i) \<in> (srstep \<F> \<R>)\<^sup>*" for i using inv * l lc funas ** by (auto simp: nth_append_Cons simp del: ** * split!: if_splits) then show ?thesis using inv l lc * unfolding c by auto qed qed lemma rstep_to_pos_replace: assumes "(s, t) \<in> rstep \<R>" shows "\<exists> p l r \<sigma>. 
p \<in> poss s \<and> (l, r) \<in> \<R> \<and> s |_ p = l \<cdot> \<sigma> \<and> t = s[p \<leftarrow> r \<cdot> \<sigma>]" proof - from assms obtain C l r \<sigma> where st: "(l, r) \<in> \<R>" "s = C\<langle>l \<cdot> \<sigma>\<rangle>" "t = C\<langle>r \<cdot> \<sigma>\<rangle>" using rstep_imp_C_s_r by fastforce from st(2, 3) have *: "t = s[hole_pos C \<leftarrow> r \<cdot> \<sigma>]" by simp from this st show ?thesis unfolding * by (intro exI[of _ "hole_pos C"]) auto qed lemma pos_replace_to_rstep: assumes "p \<in> poss s" "(l, r) \<in> \<R>" and "s |_ p = l \<cdot> \<sigma>" "t = s[p \<leftarrow> r \<cdot> \<sigma>]" shows "(s, t) \<in> rstep \<R>" using assms(1, 3-) replace_term_at_subt_at_id [of s p] by (intro rstepI[OF assms(2), of s "ctxt_at_pos s p" \<sigma>]) (auto simp add: ctxt_of_pos_term_apply_replace_at_ident) end
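The two lemmas above (rstep_to_pos_replace and pos_replace_to_rstep) together give a positional characterisation of rewrite steps. A minimal worked instance, stated in LaTeX; the rule set \(\mathcal{R} = \{f(x) \to g(x)\}\) and the terms used below are hypothetical illustrations, not part of the theory itself:

\[
  (s,t) \in \mathit{rstep}\ \mathcal{R}
  \;\longleftrightarrow\;
  \exists\, p \in \mathit{poss}(s),\ (l,r) \in \mathcal{R},\ \sigma.\;\;
  s|_p = l\sigma \;\wedge\; t = s[p \leftarrow r\sigma]
\]
% Worked instance for the hypothetical rule f(x) -> g(x):
%   take s = h(f(a)), p = 1 (the argument position of h), sigma = {x |-> a};
%   then s|_1 = f(a) = f(x)sigma and t = s[1 <- g(a)] = h(g(a)),
%   so (h(f(a)), h(g(a))) is a single rstep, as pos_replace_to_rstep asserts.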
[GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₁ l₂ : List α p : l₁ ~ l₂ x✝³ : α x✝² x✝¹ : List α x✝ : x✝² ~ x✝¹ hs : a ∈ x✝² ↔ a ∈ x✝¹ ⊢ a ∈ x✝³ :: x✝² ↔ a ∈ x✝³ :: x✝¹ [PROOFSTEP] simp only [mem_cons, hs] [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₁ l₂ : List α p : l₁ ~ l₂ x✝² x✝¹ : α x✝ : List α ⊢ a ∈ x✝¹ :: x✝² :: x✝ ↔ a ∈ x✝² :: x✝¹ :: x✝ [PROOFSTEP] simp only [mem_cons, or_left_comm] [GOAL] α : Type uu β : Type vv l₁ l₂ : List α a : α l : List α ⊢ a :: (l ++ []) ~ a :: l [PROOFSTEP] rw [append_nil] [GOAL] α : Type uu β : Type vv l₁ l₂✝ l₂ : List α ⊢ [] ++ l₂ ~ l₂ ++ [] [PROOFSTEP] simp [GOAL] α : Type uu β : Type vv l₁ l₂ l : List α a : α ⊢ concat l a ~ a :: l [PROOFSTEP] simp [GOAL] α : Type uu β : Type vv l₁✝¹ l₂✝¹ l₁✝ l₂✝ : List α p : l₁✝ ~ l₂✝ _x : α l₁ l₂ : List α _p : l₁ ~ l₂ r : length l₁ = length l₂ ⊢ length (_x :: l₁) = length (_x :: l₂) [PROOFSTEP] simp [r] [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ l₁ l₂ : List α p : l₁ ~ l₂ _x _y : α l : List α ⊢ length (_y :: _x :: l) = length (_x :: _y :: l) [PROOFSTEP] simp [GOAL] α : Type uu β : Type vv l₁ l₂ : List α x : α l : List α x✝ : [] ~ x :: l p : [] ~ x :: l := x✝ ⊢ False [PROOFSTEP] injection p.symm.eq_nil [GOAL] α : Type uu β : Type vv l₁ l₂ : List α a : α l : List α ⊢ reverse (a :: l) ~ a :: l [PROOFSTEP] rw [reverse_cons] [GOAL] α : Type uu β : Type vv l₁ l₂ : List α a : α l : List α ⊢ reverse l ++ [a] ~ a :: l [PROOFSTEP] exact (perm_append_singleton _ _).trans ((reverse_perm l).cons a) [GOAL] α : Type uu β : Type vv l₁ l₂ : List α a b : α ⊢ [a] ~ [b] ↔ a = b [PROOFSTEP] simp [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Option β l₁ l₂ : List α p : l₁ ~ l₂ ⊢ List.filterMap f l₁ ~ List.filterMap f l₂ [PROOFSTEP] induction p with | nil => simp | cons x _p IH => cases h : f x <;> simp [h, filterMap, IH, Perm.cons] | swap x y l₂ => cases hx : f x <;> cases hy : f y <;> simp [hx, hy, filterMap, swap] | trans _p₁ _p₂ IH₁ IH₂ => exact IH₁.trans IH₂ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Option β l₁ l₂ : List α p : l₁ ~ l₂ ⊢ List.filterMap f l₁ ~ List.filterMap f l₂ [PROOFSTEP] induction p with | nil => simp | cons x _p IH => cases h : f x <;> simp [h, filterMap, IH, Perm.cons] | swap x y l₂ => cases hx : f x <;> cases hy : f y <;> simp [hx, hy, filterMap, swap] | trans _p₁ _p₂ IH₁ IH₂ => exact IH₁.trans IH₂ [GOAL] case nil α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Option β l₁ l₂ : List α ⊢ List.filterMap f [] ~ List.filterMap f [] [PROOFSTEP] | nil => simp [GOAL] case nil α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Option β l₁ l₂ : List α ⊢ List.filterMap f [] ~ List.filterMap f [] [PROOFSTEP] simp [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α f : α → Option β l₁ l₂ : List α x : α l₁✝ l₂✝ : List α _p : l₁✝ ~ l₂✝ IH : List.filterMap f l₁✝ ~ List.filterMap f l₂✝ ⊢ List.filterMap f (x :: l₁✝) ~ List.filterMap f (x :: l₂✝) [PROOFSTEP] | cons x _p IH => cases h : f x <;> simp [h, filterMap, IH, Perm.cons] [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α f : α → Option β l₁ l₂ : List α x : α l₁✝ l₂✝ : List α _p : l₁✝ ~ l₂✝ IH : List.filterMap f l₁✝ ~ List.filterMap f l₂✝ ⊢ List.filterMap f (x :: l₁✝) ~ List.filterMap f (x :: l₂✝) [PROOFSTEP] cases h : f x [GOAL] case cons.none α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α f : α → Option β l₁ l₂ : List α x : α l₁✝ l₂✝ : List α _p : l₁✝ ~ l₂✝ IH : List.filterMap f l₁✝ ~ List.filterMap f l₂✝ h : f x = none ⊢ List.filterMap f (x :: l₁✝) ~ List.filterMap f (x :: l₂✝) [PROOFSTEP] simp [h, filterMap, IH, Perm.cons] 
[GOAL] case cons.some α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α f : α → Option β l₁ l₂ : List α x : α l₁✝ l₂✝ : List α _p : l₁✝ ~ l₂✝ IH : List.filterMap f l₁✝ ~ List.filterMap f l₂✝ val✝ : β h : f x = some val✝ ⊢ List.filterMap f (x :: l₁✝) ~ List.filterMap f (x :: l₂✝) [PROOFSTEP] simp [h, filterMap, IH, Perm.cons] [GOAL] case swap α : Type uu β : Type vv l₁✝ l₂✝¹ : List α f : α → Option β l₁ l₂✝ : List α x y : α l₂ : List α ⊢ List.filterMap f (y :: x :: l₂) ~ List.filterMap f (x :: y :: l₂) [PROOFSTEP] | swap x y l₂ => cases hx : f x <;> cases hy : f y <;> simp [hx, hy, filterMap, swap] [GOAL] case swap α : Type uu β : Type vv l₁✝ l₂✝¹ : List α f : α → Option β l₁ l₂✝ : List α x y : α l₂ : List α ⊢ List.filterMap f (y :: x :: l₂) ~ List.filterMap f (x :: y :: l₂) [PROOFSTEP] cases hx : f x [GOAL] case swap.none α : Type uu β : Type vv l₁✝ l₂✝¹ : List α f : α → Option β l₁ l₂✝ : List α x y : α l₂ : List α hx : f x = none ⊢ List.filterMap f (y :: x :: l₂) ~ List.filterMap f (x :: y :: l₂) [PROOFSTEP] cases hy : f y [GOAL] case swap.some α : Type uu β : Type vv l₁✝ l₂✝¹ : List α f : α → Option β l₁ l₂✝ : List α x y : α l₂ : List α val✝ : β hx : f x = some val✝ ⊢ List.filterMap f (y :: x :: l₂) ~ List.filterMap f (x :: y :: l₂) [PROOFSTEP] cases hy : f y [GOAL] case swap.none.none α : Type uu β : Type vv l₁✝ l₂✝¹ : List α f : α → Option β l₁ l₂✝ : List α x y : α l₂ : List α hx : f x = none hy : f y = none ⊢ List.filterMap f (y :: x :: l₂) ~ List.filterMap f (x :: y :: l₂) [PROOFSTEP] simp [hx, hy, filterMap, swap] [GOAL] case swap.none.some α : Type uu β : Type vv l₁✝ l₂✝¹ : List α f : α → Option β l₁ l₂✝ : List α x y : α l₂ : List α hx : f x = none val✝ : β hy : f y = some val✝ ⊢ List.filterMap f (y :: x :: l₂) ~ List.filterMap f (x :: y :: l₂) [PROOFSTEP] simp [hx, hy, filterMap, swap] [GOAL] case swap.some.none α : Type uu β : Type vv l₁✝ l₂✝¹ : List α f : α → Option β l₁ l₂✝ : List α x y : α l₂ : List α val✝ : β hx : f x = some val✝ hy : f y = none ⊢ List.filterMap f (y :: x :: l₂) ~ List.filterMap f (x :: y :: l₂) [PROOFSTEP] simp [hx, hy, filterMap, swap] [GOAL] case swap.some.some α : Type uu β : Type vv l₁✝ l₂✝¹ : List α f : α → Option β l₁ l₂✝ : List α x y : α l₂ : List α val✝¹ : β hx : f x = some val✝¹ val✝ : β hy : f y = some val✝ ⊢ List.filterMap f (y :: x :: l₂) ~ List.filterMap f (x :: y :: l₂) [PROOFSTEP] simp [hx, hy, filterMap, swap] [GOAL] case trans α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α f : α → Option β l₁ l₂ l₁✝ l₂✝ l₃✝ : List α _p₁ : l₁✝ ~ l₂✝ _p₂ : l₂✝ ~ l₃✝ IH₁ : List.filterMap f l₁✝ ~ List.filterMap f l₂✝ IH₂ : List.filterMap f l₂✝ ~ List.filterMap f l₃✝ ⊢ List.filterMap f l₁✝ ~ List.filterMap f l₃✝ [PROOFSTEP] | trans _p₁ _p₂ IH₁ IH₂ => exact IH₁.trans IH₂ [GOAL] case trans α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α f : α → Option β l₁ l₂ l₁✝ l₂✝ l₃✝ : List α _p₁ : l₁✝ ~ l₂✝ _p₂ : l₂✝ ~ l₃✝ IH₁ : List.filterMap f l₁✝ ~ List.filterMap f l₂✝ IH₂ : List.filterMap f l₂✝ ~ List.filterMap f l₃✝ ⊢ List.filterMap f l₁✝ ~ List.filterMap f l₃✝ [PROOFSTEP] exact IH₁.trans IH₂ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α p✝ : α → Prop f : (a : α) → p✝ a → β l₁ l₂ : List α p : l₁ ~ l₂ H₁ : ∀ (a : α), a ∈ l₁ → p✝ a H₂ : ∀ (a : α), a ∈ l₂ → p✝ a ⊢ List.pmap f l₁ H₁ ~ List.pmap f l₂ H₂ [PROOFSTEP] induction p with | nil => simp | cons x _p IH => simp [IH, Perm.cons] | swap x y => simp [swap] | trans _p₁ p₂ IH₁ IH₂ => refine' IH₁.trans IH₂ exact fun a m => H₂ a (p₂.subset m) [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α p✝ : α → Prop f : (a : α) → p✝ a → β l₁ l₂ : List α 
p : l₁ ~ l₂ H₁ : ∀ (a : α), a ∈ l₁ → p✝ a H₂ : ∀ (a : α), a ∈ l₂ → p✝ a ⊢ List.pmap f l₁ H₁ ~ List.pmap f l₂ H₂ [PROOFSTEP] induction p with | nil => simp | cons x _p IH => simp [IH, Perm.cons] | swap x y => simp [swap] | trans _p₁ p₂ IH₁ IH₂ => refine' IH₁.trans IH₂ exact fun a m => H₂ a (p₂.subset m) [GOAL] case nil α : Type uu β : Type vv l₁✝ l₂✝ : List α p : α → Prop f : (a : α) → p a → β l₁ l₂ : List α H₁ H₂ : ∀ (a : α), a ∈ [] → p a ⊢ List.pmap f [] H₁ ~ List.pmap f [] H₂ [PROOFSTEP] | nil => simp [GOAL] case nil α : Type uu β : Type vv l₁✝ l₂✝ : List α p : α → Prop f : (a : α) → p a → β l₁ l₂ : List α H₁ H₂ : ∀ (a : α), a ∈ [] → p a ⊢ List.pmap f [] H₁ ~ List.pmap f [] H₂ [PROOFSTEP] simp [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α p : α → Prop f : (a : α) → p a → β l₁ l₂ : List α x : α l₁✝ l₂✝ : List α _p : l₁✝ ~ l₂✝ IH : ∀ {H₁ : ∀ (a : α), a ∈ l₁✝ → p a} {H₂ : ∀ (a : α), a ∈ l₂✝ → p a}, List.pmap f l₁✝ H₁ ~ List.pmap f l₂✝ H₂ H₁ : ∀ (a : α), a ∈ x :: l₁✝ → p a H₂ : ∀ (a : α), a ∈ x :: l₂✝ → p a ⊢ List.pmap f (x :: l₁✝) H₁ ~ List.pmap f (x :: l₂✝) H₂ [PROOFSTEP] | cons x _p IH => simp [IH, Perm.cons] [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α p : α → Prop f : (a : α) → p a → β l₁ l₂ : List α x : α l₁✝ l₂✝ : List α _p : l₁✝ ~ l₂✝ IH : ∀ {H₁ : ∀ (a : α), a ∈ l₁✝ → p a} {H₂ : ∀ (a : α), a ∈ l₂✝ → p a}, List.pmap f l₁✝ H₁ ~ List.pmap f l₂✝ H₂ H₁ : ∀ (a : α), a ∈ x :: l₁✝ → p a H₂ : ∀ (a : α), a ∈ x :: l₂✝ → p a ⊢ List.pmap f (x :: l₁✝) H₁ ~ List.pmap f (x :: l₂✝) H₂ [PROOFSTEP] simp [IH, Perm.cons] [GOAL] case swap α : Type uu β : Type vv l₁✝ l₂✝ : List α p : α → Prop f : (a : α) → p a → β l₁ l₂ : List α x y : α l✝ : List α H₁ : ∀ (a : α), a ∈ y :: x :: l✝ → p a H₂ : ∀ (a : α), a ∈ x :: y :: l✝ → p a ⊢ List.pmap f (y :: x :: l✝) H₁ ~ List.pmap f (x :: y :: l✝) H₂ [PROOFSTEP] | swap x y => simp [swap] [GOAL] case swap α : Type uu β : Type vv l₁✝ l₂✝ : List α p : α → Prop f : (a : α) → p a → β l₁ l₂ : List α x y : α l✝ : List α H₁ : ∀ (a : α), a ∈ y :: x :: l✝ → p a H₂ : ∀ (a : α), a ∈ x :: y :: l✝ → p a ⊢ List.pmap f (y :: x :: l✝) H₁ ~ List.pmap f (x :: y :: l✝) H₂ [PROOFSTEP] simp [swap] [GOAL] case trans α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α p : α → Prop f : (a : α) → p a → β l₁ l₂ l₁✝ l₂✝ l₃✝ : List α _p₁ : l₁✝ ~ l₂✝ p₂ : l₂✝ ~ l₃✝ IH₁ : ∀ {H₁ : ∀ (a : α), a ∈ l₁✝ → p a} {H₂ : ∀ (a : α), a ∈ l₂✝ → p a}, List.pmap f l₁✝ H₁ ~ List.pmap f l₂✝ H₂ IH₂ : ∀ {H₁ : ∀ (a : α), a ∈ l₂✝ → p a} {H₂ : ∀ (a : α), a ∈ l₃✝ → p a}, List.pmap f l₂✝ H₁ ~ List.pmap f l₃✝ H₂ H₁ : ∀ (a : α), a ∈ l₁✝ → p a H₂ : ∀ (a : α), a ∈ l₃✝ → p a ⊢ List.pmap f l₁✝ H₁ ~ List.pmap f l₃✝ H₂ [PROOFSTEP] | trans _p₁ p₂ IH₁ IH₂ => refine' IH₁.trans IH₂ exact fun a m => H₂ a (p₂.subset m) [GOAL] case trans α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α p : α → Prop f : (a : α) → p a → β l₁ l₂ l₁✝ l₂✝ l₃✝ : List α _p₁ : l₁✝ ~ l₂✝ p₂ : l₂✝ ~ l₃✝ IH₁ : ∀ {H₁ : ∀ (a : α), a ∈ l₁✝ → p a} {H₂ : ∀ (a : α), a ∈ l₂✝ → p a}, List.pmap f l₁✝ H₁ ~ List.pmap f l₂✝ H₂ IH₂ : ∀ {H₁ : ∀ (a : α), a ∈ l₂✝ → p a} {H₂ : ∀ (a : α), a ∈ l₃✝ → p a}, List.pmap f l₂✝ H₁ ~ List.pmap f l₃✝ H₂ H₁ : ∀ (a : α), a ∈ l₁✝ → p a H₂ : ∀ (a : α), a ∈ l₃✝ → p a ⊢ List.pmap f l₁✝ H₁ ~ List.pmap f l₃✝ H₂ [PROOFSTEP] refine' IH₁.trans IH₂ [GOAL] case trans α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α p : α → Prop f : (a : α) → p a → β l₁ l₂ l₁✝ l₂✝ l₃✝ : List α _p₁ : l₁✝ ~ l₂✝ p₂ : l₂✝ ~ l₃✝ IH₁ : ∀ {H₁ : ∀ (a : α), a ∈ l₁✝ → p a} {H₂ : ∀ (a : α), a ∈ l₂✝ → p a}, List.pmap f l₁✝ H₁ ~ List.pmap f l₂✝ H₂ IH₂ : ∀ {H₁ : ∀ (a : α), a ∈ 
l₂✝ → p a} {H₂ : ∀ (a : α), a ∈ l₃✝ → p a}, List.pmap f l₂✝ H₁ ~ List.pmap f l₃✝ H₂ H₁ : ∀ (a : α), a ∈ l₁✝ → p a H₂ : ∀ (a : α), a ∈ l₃✝ → p a ⊢ ∀ (a : α), a ∈ l₂✝ → p a [PROOFSTEP] exact fun a m => H₂ a (p₂.subset m) [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α p : α → Bool l₁ l₂ : List α s : l₁ ~ l₂ ⊢ List.filter p l₁ ~ List.filter p l₂ [PROOFSTEP] rw [← filterMap_eq_filter] [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α p : α → Bool l₁ l₂ : List α s : l₁ ~ l₂ ⊢ List.filterMap (Option.guard fun x => p x = true) l₁ ~ List.filterMap (Option.guard fun x => p x = true) l₂ [PROOFSTEP] apply s.filterMap _ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α p : α → Bool l : List α ⊢ filter p l ++ filter (fun x => decide ¬p x = true) l ~ l [PROOFSTEP] induction' l with x l ih [GOAL] case nil α : Type uu β : Type vv l₁ l₂ : List α p : α → Bool ⊢ filter p [] ++ filter (fun x => decide ¬p x = true) [] ~ [] [PROOFSTEP] rfl [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α p : α → Bool x : α l : List α ih : filter p l ++ filter (fun x => decide ¬p x = true) l ~ l ⊢ filter p (x :: l) ++ filter (fun x => decide ¬p x = true) (x :: l) ~ x :: l [PROOFSTEP] by_cases h : p x [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α p : α → Bool x : α l : List α ih : filter p l ++ filter (fun x => decide ¬p x = true) l ~ l h : p x = true ⊢ filter p (x :: l) ++ filter (fun x => decide ¬p x = true) (x :: l) ~ x :: l [PROOFSTEP] simp only [h, filter_cons_of_pos, filter_cons_of_neg, not_true, not_false_iff, cons_append] [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α p : α → Bool x : α l : List α ih : filter p l ++ filter (fun x => decide ¬p x = true) l ~ l h : p x = true ⊢ x :: (filter p l ++ filter (fun x => decide ¬p x = true) l) ~ x :: l [PROOFSTEP] exact ih.cons x [GOAL] case neg α : Type uu β : Type vv l₁ l₂ : List α p : α → Bool x : α l : List α ih : filter p l ++ filter (fun x => decide ¬p x = true) l ~ l h : ¬p x = true ⊢ filter p (x :: l) ++ filter (fun x => decide ¬p x = true) (x :: l) ~ x :: l [PROOFSTEP] simp only [h, filter_cons_of_neg, not_false_iff, filter_cons_of_pos] [GOAL] case neg α : Type uu β : Type vv l₁ l₂ : List α p : α → Bool x : α l : List α ih : filter p l ++ filter (fun x => decide ¬p x = true) l ~ l h : ¬p x = true ⊢ filter p l ++ x :: filter (fun x => decide ¬p x = true) l ~ x :: l [PROOFSTEP] refine' Perm.trans _ (ih.cons x) [GOAL] case neg α : Type uu β : Type vv l₁ l₂ : List α p : α → Bool x : α l : List α ih : filter p l ++ filter (fun x => decide ¬p x = true) l ~ l h : ¬p x = true ⊢ filter p l ++ x :: filter (fun x => decide ¬p x = true) l ~ x :: (filter p l ++ filter (fun x => decide ¬p x = true) l) [PROOFSTEP] exact perm_append_comm.trans (perm_append_comm.cons _) [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ l₁ l₂ l₂' : List α s : l₁ <+ l₂ p : l₂ ~ l₂' ⊢ ∃ l₁' x, l₁' <+ l₂' [PROOFSTEP] induction p generalizing l₁ with | nil => exact ⟨[], eq_nil_of_sublist_nil s ▸ Perm.refl _, nil_sublist _⟩ | cons x _ IH => cases' s with _ _ _ s l₁ _ _ s · exact let ⟨l₁', p', s'⟩ := IH s ⟨l₁', p', s'.cons _⟩ · exact let ⟨l₁', p', s'⟩ := IH s ⟨x :: l₁', p'.cons x, s'.cons₂ _⟩ | swap x y _ => cases' s with _ _ _ s l₁ _ _ s <;> cases' s with _ _ _ s l₁ _ _ s · exact ⟨l₁, Perm.refl _, (s.cons _).cons _⟩ · exact ⟨x :: l₁, Perm.refl _, (s.cons _).cons₂ _⟩ · exact ⟨y :: l₁, Perm.refl _, (s.cons₂ _).cons _⟩ · exact ⟨x :: y :: l₁, Perm.swap _ _ _, (s.cons₂ _).cons₂ _⟩ | trans _ _ IH₁ IH₂ => exact let ⟨m₁, pm, sm⟩ := IH₁ s let ⟨r₁, pr, sr⟩ := IH₂ sm ⟨r₁, pr.trans pm, sr⟩ [GOAL] α : Type uu β : Type 
vv l₁✝ l₂✝ l₁ l₂ l₂' : List α s : l₁ <+ l₂ p : l₂ ~ l₂' ⊢ ∃ l₁' x, l₁' <+ l₂' [PROOFSTEP] induction p generalizing l₁ with | nil => exact ⟨[], eq_nil_of_sublist_nil s ▸ Perm.refl _, nil_sublist _⟩ | cons x _ IH => cases' s with _ _ _ s l₁ _ _ s · exact let ⟨l₁', p', s'⟩ := IH s ⟨l₁', p', s'.cons _⟩ · exact let ⟨l₁', p', s'⟩ := IH s ⟨x :: l₁', p'.cons x, s'.cons₂ _⟩ | swap x y _ => cases' s with _ _ _ s l₁ _ _ s <;> cases' s with _ _ _ s l₁ _ _ s · exact ⟨l₁, Perm.refl _, (s.cons _).cons _⟩ · exact ⟨x :: l₁, Perm.refl _, (s.cons _).cons₂ _⟩ · exact ⟨y :: l₁, Perm.refl _, (s.cons₂ _).cons _⟩ · exact ⟨x :: y :: l₁, Perm.swap _ _ _, (s.cons₂ _).cons₂ _⟩ | trans _ _ IH₁ IH₂ => exact let ⟨m₁, pm, sm⟩ := IH₁ s let ⟨r₁, pr, sr⟩ := IH₂ sm ⟨r₁, pr.trans pm, sr⟩ [GOAL] case nil α : Type uu β : Type vv l₁✝ l₂✝ l₂ l₂' l₁ : List α s : l₁ <+ [] ⊢ ∃ l₁' x, l₁' <+ [] [PROOFSTEP] | nil => exact ⟨[], eq_nil_of_sublist_nil s ▸ Perm.refl _, nil_sublist _⟩ [GOAL] case nil α : Type uu β : Type vv l₁✝ l₂✝ l₂ l₂' l₁ : List α s : l₁ <+ [] ⊢ ∃ l₁' x, l₁' <+ [] [PROOFSTEP] exact ⟨[], eq_nil_of_sublist_nil s ▸ Perm.refl _, nil_sublist _⟩ [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ l₂ l₂' : List α x : α l₁✝ l₂✝ : List α a✝ : l₁✝ ~ l₂✝ IH : ∀ {l₁ : List α}, l₁ <+ l₁✝ → ∃ l₁' x, l₁' <+ l₂✝ l₁ : List α s : l₁ <+ x :: l₁✝ ⊢ ∃ l₁' x_1, l₁' <+ x :: l₂✝ [PROOFSTEP] | cons x _ IH => cases' s with _ _ _ s l₁ _ _ s · exact let ⟨l₁', p', s'⟩ := IH s ⟨l₁', p', s'.cons _⟩ · exact let ⟨l₁', p', s'⟩ := IH s ⟨x :: l₁', p'.cons x, s'.cons₂ _⟩ [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ l₂ l₂' : List α x : α l₁✝ l₂✝ : List α a✝ : l₁✝ ~ l₂✝ IH : ∀ {l₁ : List α}, l₁ <+ l₁✝ → ∃ l₁' x, l₁' <+ l₂✝ l₁ : List α s : l₁ <+ x :: l₁✝ ⊢ ∃ l₁' x_1, l₁' <+ x :: l₂✝ [PROOFSTEP] cases' s with _ _ _ s l₁ _ _ s [GOAL] case cons.cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ l₂ l₂' : List α x : α l₁✝ l₂✝ : List α a✝ : l₁✝ ~ l₂✝ IH : ∀ {l₁ : List α}, l₁ <+ l₁✝ → ∃ l₁' x, l₁' <+ l₂✝ l₁ : List α s : l₁ <+ l₁✝ ⊢ ∃ l₁' x_1, l₁' <+ x :: l₂✝ [PROOFSTEP] exact let ⟨l₁', p', s'⟩ := IH s ⟨l₁', p', s'.cons _⟩ [GOAL] case cons.cons₂ α : Type uu β : Type vv l₁✝¹ l₂✝¹ l₂ l₂' : List α x : α l₁✝ l₂✝ : List α a✝ : l₁✝ ~ l₂✝ IH : ∀ {l₁ : List α}, l₁ <+ l₁✝ → ∃ l₁' x, l₁' <+ l₂✝ l₁ : List α s : l₁ <+ l₁✝ ⊢ ∃ l₁' x_1, l₁' <+ x :: l₂✝ [PROOFSTEP] exact let ⟨l₁', p', s'⟩ := IH s ⟨x :: l₁', p'.cons x, s'.cons₂ _⟩ [GOAL] case swap α : Type uu β : Type vv l₁✝ l₂✝ l₂ l₂' : List α x y : α l✝ l₁ : List α s : l₁ <+ y :: x :: l✝ ⊢ ∃ l₁' x_1, l₁' <+ x :: y :: l✝ [PROOFSTEP] | swap x y _ => cases' s with _ _ _ s l₁ _ _ s <;> cases' s with _ _ _ s l₁ _ _ s · exact ⟨l₁, Perm.refl _, (s.cons _).cons _⟩ · exact ⟨x :: l₁, Perm.refl _, (s.cons _).cons₂ _⟩ · exact ⟨y :: l₁, Perm.refl _, (s.cons₂ _).cons _⟩ · exact ⟨x :: y :: l₁, Perm.swap _ _ _, (s.cons₂ _).cons₂ _⟩ [GOAL] case swap α : Type uu β : Type vv l₁✝ l₂✝ l₂ l₂' : List α x y : α l✝ l₁ : List α s : l₁ <+ y :: x :: l✝ ⊢ ∃ l₁' x_1, l₁' <+ x :: y :: l✝ [PROOFSTEP] cases' s with _ _ _ s l₁ _ _ s [GOAL] case swap.cons α : Type uu β : Type vv l₁✝ l₂✝ l₂ l₂' : List α x y : α l✝ l₁ : List α s : l₁ <+ x :: l✝ ⊢ ∃ l₁' x_1, l₁' <+ x :: y :: l✝ [PROOFSTEP] cases' s with _ _ _ s l₁ _ _ s [GOAL] case swap.cons₂ α : Type uu β : Type vv l₁✝ l₂✝ l₂ l₂' : List α x y : α l✝ l₁ : List α s : l₁ <+ x :: l✝ ⊢ ∃ l₁' x_1, l₁' <+ x :: y :: l✝ [PROOFSTEP] cases' s with _ _ _ s l₁ _ _ s [GOAL] case swap.cons.cons α : Type uu β : Type vv l₁✝ l₂✝ l₂ l₂' : List α x y : α l✝ l₁ : List α s : l₁ <+ l✝ ⊢ ∃ l₁' x_1, l₁' <+ x :: y :: l✝ [PROOFSTEP] exact ⟨l₁, 
Perm.refl _, (s.cons _).cons _⟩ [GOAL] case swap.cons.cons₂ α : Type uu β : Type vv l₁✝ l₂✝ l₂ l₂' : List α x y : α l✝ l₁ : List α s : l₁ <+ l✝ ⊢ ∃ l₁' x_1, l₁' <+ x :: y :: l✝ [PROOFSTEP] exact ⟨x :: l₁, Perm.refl _, (s.cons _).cons₂ _⟩ [GOAL] case swap.cons₂.cons α : Type uu β : Type vv l₁✝ l₂✝ l₂ l₂' : List α x y : α l✝ l₁ : List α s : l₁ <+ l✝ ⊢ ∃ l₁' x_1, l₁' <+ x :: y :: l✝ [PROOFSTEP] exact ⟨y :: l₁, Perm.refl _, (s.cons₂ _).cons _⟩ [GOAL] case swap.cons₂.cons₂ α : Type uu β : Type vv l₁✝ l₂✝ l₂ l₂' : List α x y : α l✝ l₁ : List α s : l₁ <+ l✝ ⊢ ∃ l₁' x_1, l₁' <+ x :: y :: l✝ [PROOFSTEP] exact ⟨x :: y :: l₁, Perm.swap _ _ _, (s.cons₂ _).cons₂ _⟩ [GOAL] case trans α : Type uu β : Type vv l₁✝¹ l₂✝¹ l₂ l₂' l₁✝ l₂✝ l₃✝ : List α a✝¹ : l₁✝ ~ l₂✝ a✝ : l₂✝ ~ l₃✝ IH₁ : ∀ {l₁ : List α}, l₁ <+ l₁✝ → ∃ l₁' x, l₁' <+ l₂✝ IH₂ : ∀ {l₁ : List α}, l₁ <+ l₂✝ → ∃ l₁' x, l₁' <+ l₃✝ l₁ : List α s : l₁ <+ l₁✝ ⊢ ∃ l₁' x, l₁' <+ l₃✝ [PROOFSTEP] | trans _ _ IH₁ IH₂ => exact let ⟨m₁, pm, sm⟩ := IH₁ s let ⟨r₁, pr, sr⟩ := IH₂ sm ⟨r₁, pr.trans pm, sr⟩ [GOAL] case trans α : Type uu β : Type vv l₁✝¹ l₂✝¹ l₂ l₂' l₁✝ l₂✝ l₃✝ : List α a✝¹ : l₁✝ ~ l₂✝ a✝ : l₂✝ ~ l₃✝ IH₁ : ∀ {l₁ : List α}, l₁ <+ l₁✝ → ∃ l₁' x, l₁' <+ l₂✝ IH₂ : ∀ {l₁ : List α}, l₁ <+ l₂✝ → ∃ l₁' x, l₁' <+ l₃✝ l₁ : List α s : l₁ <+ l₁✝ ⊢ ∃ l₁' x, l₁' <+ l₃✝ [PROOFSTEP] exact let ⟨m₁, pm, sm⟩ := IH₁ s let ⟨r₁, pr, sr⟩ := IH₂ sm ⟨r₁, pr.trans pm, sr⟩ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : SizeOf α l₁ l₂ : List α h : l₁ ~ l₂ ⊢ sizeOf l₁ = sizeOf l₂ [PROOFSTEP] induction h with -- hd l₁ l₂ h₁₂ h_sz₁₂ a b l l₁ l₂ l₃ h₁₂ h₂₃ h_sz₁₂ h_sz₂₃ | nil => rfl | cons _ _ h_sz₁₂ => simp [h_sz₁₂] | swap => simp [add_left_comm] | trans _ _ h_sz₁₂ h_sz₂₃ => simp [h_sz₁₂, h_sz₂₃] [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : SizeOf α l₁ l₂ : List α h : l₁ ~ l₂ ⊢ sizeOf l₁ = sizeOf l₂ [PROOFSTEP] induction h with -- hd l₁ l₂ h₁₂ h_sz₁₂ a b l l₁ l₂ l₃ h₁₂ h₂₃ h_sz₁₂ h_sz₂₃ | nil => rfl | cons _ _ h_sz₁₂ => simp [h_sz₁₂] | swap => simp [add_left_comm] | trans _ _ h_sz₁₂ h_sz₂₃ => simp [h_sz₁₂, h_sz₂₃] [GOAL] case nil α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : SizeOf α l₁ l₂ : List α ⊢ sizeOf [] = sizeOf [] [PROOFSTEP] | nil => rfl [GOAL] case nil α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : SizeOf α l₁ l₂ : List α ⊢ sizeOf [] = sizeOf [] [PROOFSTEP] rfl [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : SizeOf α l₁ l₂ : List α x✝ : α l₁✝ l₂✝ : List α a✝ : l₁✝ ~ l₂✝ h_sz₁₂ : sizeOf l₁✝ = sizeOf l₂✝ ⊢ sizeOf (x✝ :: l₁✝) = sizeOf (x✝ :: l₂✝) [PROOFSTEP] | cons _ _ h_sz₁₂ => simp [h_sz₁₂] [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : SizeOf α l₁ l₂ : List α x✝ : α l₁✝ l₂✝ : List α a✝ : l₁✝ ~ l₂✝ h_sz₁₂ : sizeOf l₁✝ = sizeOf l₂✝ ⊢ sizeOf (x✝ :: l₁✝) = sizeOf (x✝ :: l₂✝) [PROOFSTEP] simp [h_sz₁₂] [GOAL] case swap α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : SizeOf α l₁ l₂ : List α x✝ y✝ : α l✝ : List α ⊢ sizeOf (y✝ :: x✝ :: l✝) = sizeOf (x✝ :: y✝ :: l✝) [PROOFSTEP] | swap => simp [add_left_comm] [GOAL] case swap α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : SizeOf α l₁ l₂ : List α x✝ y✝ : α l✝ : List α ⊢ sizeOf (y✝ :: x✝ :: l✝) = sizeOf (x✝ :: y✝ :: l✝) [PROOFSTEP] simp [add_left_comm] [GOAL] case trans α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : SizeOf α l₁ l₂ l₁✝ l₂✝ l₃✝ : List α a✝¹ : l₁✝ ~ l₂✝ a✝ : l₂✝ ~ l₃✝ h_sz₁₂ : sizeOf l₁✝ = sizeOf l₂✝ h_sz₂₃ : sizeOf l₂✝ = sizeOf l₃✝ ⊢ sizeOf l₁✝ = sizeOf l₃✝ [PROOFSTEP] | trans _ _ h_sz₁₂ h_sz₂₃ => simp [h_sz₁₂, h_sz₂₃] [GOAL] case trans α : Type uu 
β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : SizeOf α l₁ l₂ l₁✝ l₂✝ l₃✝ : List α a✝¹ : l₁✝ ~ l₂✝ a✝ : l₂✝ ~ l₃✝ h_sz₁₂ : sizeOf l₁✝ = sizeOf l₂✝ h_sz₂₃ : sizeOf l₂✝ = sizeOf l₃✝ ⊢ sizeOf l₁✝ = sizeOf l₃✝ [PROOFSTEP] simp [h_sz₁₂, h_sz₂₃] [GOAL] α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop ⊢ Perm ∘r Perm = Perm [PROOFSTEP] funext a c [GOAL] case h.h α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop a c : List α ⊢ (Perm ∘r Perm) a c = (a ~ c) [PROOFSTEP] apply propext [GOAL] case h.h.a α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop a c : List α ⊢ (Perm ∘r Perm) a c ↔ a ~ c [PROOFSTEP] constructor [GOAL] case h.h.a.mp α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop a c : List α ⊢ (Perm ∘r Perm) a c → a ~ c [PROOFSTEP] exact fun ⟨b, hab, hba⟩ => Perm.trans hab hba [GOAL] case h.h.a.mpr α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop a c : List α ⊢ a ~ c → (Perm ∘r Perm) a c [PROOFSTEP] exact fun h => ⟨a, Perm.refl a, h⟩ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u : List α v : List β hlu : l ~ u huv : Forall₂ r u v ⊢ (Forall₂ r ∘r Perm) l v [PROOFSTEP] induction hlu generalizing v [GOAL] case nil α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u : List α v : List β huv : Forall₂ r [] v ⊢ (Forall₂ r ∘r Perm) [] v case cons α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u : List α x✝ : α l₁✝ l₂✝ : List α a✝ : l₁✝ ~ l₂✝ a_ih✝ : ∀ {v : List β}, Forall₂ r l₂✝ v → (Forall₂ r ∘r Perm) l₁✝ v v : List β huv : Forall₂ r (x✝ :: l₂✝) v ⊢ (Forall₂ r ∘r Perm) (x✝ :: l₁✝) v case swap α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u : List α x✝ y✝ : α l✝ : List α v : List β huv : Forall₂ r (x✝ :: y✝ :: l✝) v ⊢ (Forall₂ r ∘r Perm) (y✝ :: x✝ :: l✝) v case trans α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u l₁✝ l₂✝ l₃✝ : List α a✝¹ : l₁✝ ~ l₂✝ a✝ : l₂✝ ~ l₃✝ a_ih✝¹ : ∀ {v : List β}, Forall₂ r l₂✝ v → (Forall₂ r ∘r Perm) l₁✝ v a_ih✝ : ∀ {v : List β}, Forall₂ r l₃✝ v → (Forall₂ r ∘r Perm) l₂✝ v v : List β huv : Forall₂ r l₃✝ v ⊢ (Forall₂ r ∘r Perm) l₁✝ v [PROOFSTEP] case nil => cases huv; exact ⟨[], Forall₂.nil, Perm.nil⟩ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u : List α v : List β huv : Forall₂ r [] v ⊢ (Forall₂ r ∘r Perm) [] v [PROOFSTEP] case nil => cases huv; exact ⟨[], Forall₂.nil, Perm.nil⟩ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u : List α v : List β huv : Forall₂ r [] v ⊢ (Forall₂ r ∘r Perm) [] v [PROOFSTEP] cases huv [GOAL] case nil α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u : List α ⊢ (Forall₂ r ∘r Perm) [] [] [PROOFSTEP] exact ⟨[], Forall₂.nil, Perm.nil⟩ [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u : List α x✝ : α l₁✝ l₂✝ : List α a✝ : l₁✝ ~ l₂✝ a_ih✝ : ∀ {v : List β}, Forall₂ r l₂✝ v → (Forall₂ r ∘r Perm) l₁✝ v v : List β huv : Forall₂ r (x✝ :: l₂✝) v ⊢ (Forall₂ r ∘r Perm) (x✝ :: l₁✝) v case swap α : Type uu β : 
Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u : List α x✝ y✝ : α l✝ : List α v : List β huv : Forall₂ r (x✝ :: y✝ :: l✝) v ⊢ (Forall₂ r ∘r Perm) (y✝ :: x✝ :: l✝) v case trans α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u l₁✝ l₂✝ l₃✝ : List α a✝¹ : l₁✝ ~ l₂✝ a✝ : l₂✝ ~ l₃✝ a_ih✝¹ : ∀ {v : List β}, Forall₂ r l₂✝ v → (Forall₂ r ∘r Perm) l₁✝ v a_ih✝ : ∀ {v : List β}, Forall₂ r l₃✝ v → (Forall₂ r ∘r Perm) l₂✝ v v : List β huv : Forall₂ r l₃✝ v ⊢ (Forall₂ r ∘r Perm) l₁✝ v [PROOFSTEP] case cons a l u _hlu ih => cases' huv with _ b _ v hab huv' rcases ih huv' with ⟨l₂, h₁₂, h₂₃⟩ exact ⟨b :: l₂, Forall₂.cons hab h₁₂, h₂₃.cons _⟩ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l✝ u✝ : List α a : α l u : List α _hlu : l ~ u ih : ∀ {v : List β}, Forall₂ r u v → (Forall₂ r ∘r Perm) l v v : List β huv : Forall₂ r (a :: u) v ⊢ (Forall₂ r ∘r Perm) (a :: l) v [PROOFSTEP] case cons a l u _hlu ih => cases' huv with _ b _ v hab huv' rcases ih huv' with ⟨l₂, h₁₂, h₂₃⟩ exact ⟨b :: l₂, Forall₂.cons hab h₁₂, h₂₃.cons _⟩ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l✝ u✝ : List α a : α l u : List α _hlu : l ~ u ih : ∀ {v : List β}, Forall₂ r u v → (Forall₂ r ∘r Perm) l v v : List β huv : Forall₂ r (a :: u) v ⊢ (Forall₂ r ∘r Perm) (a :: l) v [PROOFSTEP] cases' huv with _ b _ v hab huv' [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l✝ u✝ : List α a : α l u : List α _hlu : l ~ u ih : ∀ {v : List β}, Forall₂ r u v → (Forall₂ r ∘r Perm) l v b : β v : List β hab : r a b huv' : Forall₂ r u v ⊢ (Forall₂ r ∘r Perm) (a :: l) (b :: v) [PROOFSTEP] rcases ih huv' with ⟨l₂, h₁₂, h₂₃⟩ [GOAL] case cons.intro.intro α : Type uu β : Type vv l₁ l₂✝ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l✝ u✝ : List α a : α l u : List α _hlu : l ~ u ih : ∀ {v : List β}, Forall₂ r u v → (Forall₂ r ∘r Perm) l v b : β v : List β hab : r a b huv' : Forall₂ r u v l₂ : List β h₁₂ : Forall₂ r l l₂ h₂₃ : l₂ ~ v ⊢ (Forall₂ r ∘r Perm) (a :: l) (b :: v) [PROOFSTEP] exact ⟨b :: l₂, Forall₂.cons hab h₁₂, h₂₃.cons _⟩ [GOAL] case swap α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u : List α x✝ y✝ : α l✝ : List α v : List β huv : Forall₂ r (x✝ :: y✝ :: l✝) v ⊢ (Forall₂ r ∘r Perm) (y✝ :: x✝ :: l✝) v case trans α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u l₁✝ l₂✝ l₃✝ : List α a✝¹ : l₁✝ ~ l₂✝ a✝ : l₂✝ ~ l₃✝ a_ih✝¹ : ∀ {v : List β}, Forall₂ r l₂✝ v → (Forall₂ r ∘r Perm) l₁✝ v a_ih✝ : ∀ {v : List β}, Forall₂ r l₃✝ v → (Forall₂ r ∘r Perm) l₂✝ v v : List β huv : Forall₂ r l₃✝ v ⊢ (Forall₂ r ∘r Perm) l₁✝ v [PROOFSTEP] case swap a₁ a₂ h₂₃ => cases' huv with _ b₁ _ l₂ h₁ hr₂₃ cases' hr₂₃ with _ b₂ _ l₂ h₂ h₁₂ exact ⟨b₂ :: b₁ :: l₂, Forall₂.cons h₂ (Forall₂.cons h₁ h₁₂), Perm.swap _ _ _⟩ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u : List α a₁ a₂ : α h₂₃ : List α v : List β huv : Forall₂ r (a₁ :: a₂ :: h₂₃) v ⊢ (Forall₂ r ∘r Perm) (a₂ :: a₁ :: h₂₃) v [PROOFSTEP] case swap a₁ a₂ h₂₃ => cases' huv with _ b₁ _ l₂ h₁ hr₂₃ cases' hr₂₃ with _ b₂ _ l₂ h₂ h₁₂ exact ⟨b₂ :: b₁ :: l₂, Forall₂.cons h₂ (Forall₂.cons h₁ h₁₂), Perm.swap _ _ _⟩ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : 
Type u_2 r : α → β → Prop p : γ → δ → Prop l u : List α a₁ a₂ : α h₂₃ : List α v : List β huv : Forall₂ r (a₁ :: a₂ :: h₂₃) v ⊢ (Forall₂ r ∘r Perm) (a₂ :: a₁ :: h₂₃) v [PROOFSTEP] cases' huv with _ b₁ _ l₂ h₁ hr₂₃ [GOAL] case cons α : Type uu β : Type vv l₁ l₂✝ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u : List α a₁ a₂ : α h₂₃ : List α b₁ : β l₂ : List β h₁ : r a₁ b₁ hr₂₃ : Forall₂ r (a₂ :: h₂₃) l₂ ⊢ (Forall₂ r ∘r Perm) (a₂ :: a₁ :: h₂₃) (b₁ :: l₂) [PROOFSTEP] cases' hr₂₃ with _ b₂ _ l₂ h₂ h₁₂ [GOAL] case cons.cons α : Type uu β : Type vv l₁ l₂✝ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u : List α a₁ a₂ : α h₂₃ : List α b₁ : β h₁ : r a₁ b₁ b₂ : β l₂ : List β h₂ : r a₂ b₂ h₁₂ : Forall₂ r h₂₃ l₂ ⊢ (Forall₂ r ∘r Perm) (a₂ :: a₁ :: h₂₃) (b₁ :: b₂ :: l₂) [PROOFSTEP] exact ⟨b₂ :: b₁ :: l₂, Forall₂.cons h₂ (Forall₂.cons h₁ h₁₂), Perm.swap _ _ _⟩ [GOAL] case trans α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u l₁✝ l₂✝ l₃✝ : List α a✝¹ : l₁✝ ~ l₂✝ a✝ : l₂✝ ~ l₃✝ a_ih✝¹ : ∀ {v : List β}, Forall₂ r l₂✝ v → (Forall₂ r ∘r Perm) l₁✝ v a_ih✝ : ∀ {v : List β}, Forall₂ r l₃✝ v → (Forall₂ r ∘r Perm) l₂✝ v v : List β huv : Forall₂ r l₃✝ v ⊢ (Forall₂ r ∘r Perm) l₁✝ v [PROOFSTEP] case trans la₁ la₂ la₃ _ _ ih₁ ih₂ => rcases ih₂ huv with ⟨lb₂, hab₂, h₂₃⟩ rcases ih₁ hab₂ with ⟨lb₁, hab₁, h₁₂⟩ exact ⟨lb₁, hab₁, Perm.trans h₁₂ h₂₃⟩ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u la₁ la₂ la₃ : List α a✝¹ : la₁ ~ la₂ a✝ : la₂ ~ la₃ ih₁ : ∀ {v : List β}, Forall₂ r la₂ v → (Forall₂ r ∘r Perm) la₁ v ih₂ : ∀ {v : List β}, Forall₂ r la₃ v → (Forall₂ r ∘r Perm) la₂ v v : List β huv : Forall₂ r la₃ v ⊢ (Forall₂ r ∘r Perm) la₁ v [PROOFSTEP] case trans la₁ la₂ la₃ _ _ ih₁ ih₂ => rcases ih₂ huv with ⟨lb₂, hab₂, h₂₃⟩ rcases ih₁ hab₂ with ⟨lb₁, hab₁, h₁₂⟩ exact ⟨lb₁, hab₁, Perm.trans h₁₂ h₂₃⟩ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u la₁ la₂ la₃ : List α a✝¹ : la₁ ~ la₂ a✝ : la₂ ~ la₃ ih₁ : ∀ {v : List β}, Forall₂ r la₂ v → (Forall₂ r ∘r Perm) la₁ v ih₂ : ∀ {v : List β}, Forall₂ r la₃ v → (Forall₂ r ∘r Perm) la₂ v v : List β huv : Forall₂ r la₃ v ⊢ (Forall₂ r ∘r Perm) la₁ v [PROOFSTEP] rcases ih₂ huv with ⟨lb₂, hab₂, h₂₃⟩ [GOAL] case intro.intro α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u la₁ la₂ la₃ : List α a✝¹ : la₁ ~ la₂ a✝ : la₂ ~ la₃ ih₁ : ∀ {v : List β}, Forall₂ r la₂ v → (Forall₂ r ∘r Perm) la₁ v ih₂ : ∀ {v : List β}, Forall₂ r la₃ v → (Forall₂ r ∘r Perm) la₂ v v : List β huv : Forall₂ r la₃ v lb₂ : List β hab₂ : Forall₂ r la₂ lb₂ h₂₃ : lb₂ ~ v ⊢ (Forall₂ r ∘r Perm) la₁ v [PROOFSTEP] rcases ih₁ hab₂ with ⟨lb₁, hab₁, h₁₂⟩ [GOAL] case intro.intro.intro.intro α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l u la₁ la₂ la₃ : List α a✝¹ : la₁ ~ la₂ a✝ : la₂ ~ la₃ ih₁ : ∀ {v : List β}, Forall₂ r la₂ v → (Forall₂ r ∘r Perm) la₁ v ih₂ : ∀ {v : List β}, Forall₂ r la₃ v → (Forall₂ r ∘r Perm) la₂ v v : List β huv : Forall₂ r la₃ v lb₂ : List β hab₂ : Forall₂ r la₂ lb₂ h₂₃ : lb₂ ~ v lb₁ : List β hab₁ : Forall₂ r la₁ lb₁ h₁₂ : lb₁ ~ lb₂ ⊢ (Forall₂ r ∘r Perm) la₁ v [PROOFSTEP] exact ⟨lb₁, hab₁, Perm.trans h₁₂ h₂₃⟩ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop ⊢ Forall₂ r ∘r Perm = Perm ∘r Forall₂ r [PROOFSTEP] funext l₁ l₃ 
[GOAL] case h.h α : Type uu β : Type vv l₁✝ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l₁ : List α l₃ : List β ⊢ (Forall₂ r ∘r Perm) l₁ l₃ = (Perm ∘r Forall₂ r) l₁ l₃ [PROOFSTEP] apply propext [GOAL] case h.h.a α : Type uu β : Type vv l₁✝ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l₁ : List α l₃ : List β ⊢ (Forall₂ r ∘r Perm) l₁ l₃ ↔ (Perm ∘r Forall₂ r) l₁ l₃ [PROOFSTEP] constructor [GOAL] case h.h.a.mp α : Type uu β : Type vv l₁✝ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l₁ : List α l₃ : List β ⊢ (Forall₂ r ∘r Perm) l₁ l₃ → (Perm ∘r Forall₂ r) l₁ l₃ [PROOFSTEP] intro h [GOAL] case h.h.a.mp α : Type uu β : Type vv l₁✝ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l₁ : List α l₃ : List β h : (Forall₂ r ∘r Perm) l₁ l₃ ⊢ (Perm ∘r Forall₂ r) l₁ l₃ [PROOFSTEP] rcases h with ⟨l₂, h₁₂, h₂₃⟩ [GOAL] case h.h.a.mp.intro.intro α : Type uu β : Type vv l₁✝ l₂✝ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l₁ : List α l₃ l₂ : List β h₁₂ : Forall₂ r l₁ l₂ h₂₃ : l₂ ~ l₃ ⊢ (Perm ∘r Forall₂ r) l₁ l₃ [PROOFSTEP] have : Forall₂ (flip r) l₂ l₁ := h₁₂.flip [GOAL] case h.h.a.mp.intro.intro α : Type uu β : Type vv l₁✝ l₂✝ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l₁ : List α l₃ l₂ : List β h₁₂ : Forall₂ r l₁ l₂ h₂₃ : l₂ ~ l₃ this : Forall₂ (flip r) l₂ l₁ ⊢ (Perm ∘r Forall₂ r) l₁ l₃ [PROOFSTEP] rcases perm_comp_forall₂ h₂₃.symm this with ⟨l', h₁, h₂⟩ [GOAL] case h.h.a.mp.intro.intro.intro.intro α : Type uu β : Type vv l₁✝ l₂✝ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l₁ : List α l₃ l₂ : List β h₁₂ : Forall₂ r l₁ l₂ h₂₃ : l₂ ~ l₃ this : Forall₂ (flip r) l₂ l₁ l' : List α h₁ : Forall₂ (flip r) l₃ l' h₂ : l' ~ l₁ ⊢ (Perm ∘r Forall₂ r) l₁ l₃ [PROOFSTEP] exact ⟨l', h₂.symm, h₁.flip⟩ [GOAL] case h.h.a.mpr α : Type uu β : Type vv l₁✝ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop l₁ : List α l₃ : List β ⊢ (Perm ∘r Forall₂ r) l₁ l₃ → (Forall₂ r ∘r Perm) l₁ l₃ [PROOFSTEP] exact fun ⟨l₂, h₁₂, h₂₃⟩ => perm_comp_forall₂ h₁₂ h₂₃ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α γ : Type u_1 δ : Type u_2 r : α → β → Prop p : γ → δ → Prop hr : RightUnique r a : List α b : List β h₁ : Forall₂ r a b c : List α d : List β h₂ : Forall₂ r c d h : a ~ c this : (flip (Forall₂ r) ∘r Perm ∘r Forall₂ r) b d ⊢ ((flip (Forall₂ r) ∘r Forall₂ r) ∘r Perm) b d [PROOFSTEP] rwa [← forall₂_comp_perm_eq_perm_comp_forall₂, ← Relation.comp_assoc] at this [GOAL] α : Type uu β : Type vv l₁ l₂ l : List α ⊢ [] <+ l [PROOFSTEP] simp [GOAL] α : Type uu β : Type vv l₁ l₂ : List α p : α → Bool l l' : List α h : l <+~ l' ⊢ List.filter p l <+~ List.filter p l' [PROOFSTEP] obtain ⟨xs, hp, h⟩ := h [GOAL] case intro.intro α : Type uu β : Type vv l₁ l₂ : List α p : α → Bool l l' xs : List α hp : xs ~ l h : xs <+ l' ⊢ List.filter p l <+~ List.filter p l' [PROOFSTEP] exact ⟨_, hp.filter p, h.filter p⟩ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α p : α → Bool l₁ l₂ : List α s : l₁ ~ l₂ ⊢ countp p l₁ = countp p l₂ [PROOFSTEP] rw [countp_eq_length_filter, countp_eq_length_filter] [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α p : α → Bool l₁ l₂ : List α s : l₁ ~ l₂ ⊢ length (List.filter p l₁) = length (List.filter p l₂) [PROOFSTEP] exact (s.filter _).length_eq [GOAL] α : Type uu β : Type vv l₁ l₂ : List α s : l₁ ~ l₂ p p' : α → Bool hp : ∀ (x : α), x ∈ l₁ → p x = p' x ⊢ countp p l₁ = countp p' l₂ [PROOFSTEP] rw [← s.countp_eq p'] [GOAL] α : Type uu β : Type 
vv l₁ l₂ : List α s : l₁ ~ l₂ p p' : α → Bool hp : ∀ (x : α), x ∈ l₁ → p x = p' x ⊢ countp p l₁ = countp p' l₁ [PROOFSTEP] clear s [GOAL] α : Type uu β : Type vv l₁ l₂ : List α p p' : α → Bool hp : ∀ (x : α), x ∈ l₁ → p x = p' x ⊢ countp p l₁ = countp p' l₁ [PROOFSTEP] induction' l₁ with y s hs [GOAL] case nil α : Type uu β : Type vv l₁ l₂ : List α p p' : α → Bool hp✝ : ∀ (x : α), x ∈ l₁ → p x = p' x hp : ∀ (x : α), x ∈ [] → p x = p' x ⊢ countp p [] = countp p' [] [PROOFSTEP] rfl [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α p p' : α → Bool hp✝ : ∀ (x : α), x ∈ l₁ → p x = p' x y : α s : List α hs : (∀ (x : α), x ∈ s → p x = p' x) → countp p s = countp p' s hp : ∀ (x : α), x ∈ y :: s → p x = p' x ⊢ countp p (y :: s) = countp p' (y :: s) [PROOFSTEP] simp only [mem_cons, forall_eq_or_imp] at hp [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α p p' : α → Bool hp✝ : ∀ (x : α), x ∈ l₁ → p x = p' x y : α s : List α hs : (∀ (x : α), x ∈ s → p x = p' x) → countp p s = countp p' s hp : p y = p' y ∧ ∀ (a : α), a ∈ s → p a = p' a ⊢ countp p (y :: s) = countp p' (y :: s) [PROOFSTEP] simp only [countp_cons, hs hp.2, hp.1] [GOAL] α : Type uu β : Type vv l₁ l₂ l : List α p q : α → Bool ⊢ countp p l = countp p (filter q l) + countp p (filter (fun a => decide ¬q a = true) l) [PROOFSTEP] rw [← countp_append] [GOAL] α : Type uu β : Type vv l₁ l₂ l : List α p q : α → Bool ⊢ countp p l = countp p (filter q l ++ filter (fun a => decide ¬q a = true) l) [PROOFSTEP] exact Perm.countp_eq _ (filter_append_perm _ _).symm [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α f : β → α → β l₁ l₂ : List α p : l₁ ~ l₂ x y : α t₁ t₂ : List α _p : t₁ ~ t₂ r : (∀ (x : α), x ∈ t₁ → ∀ (y : α), y ∈ t₁ → ∀ (z : β), f (f z x) y = f (f z y) x) → ∀ (b : β), foldl f b t₁ = foldl f b t₂ H : ∀ (x_1 : α), x_1 ∈ y :: x :: t₁ → ∀ (y_1 : α), y_1 ∈ y :: x :: t₁ → ∀ (z : β), f (f z x_1) y_1 = f (f z y_1) x_1 b : β ⊢ foldl f b (y :: x :: t₁) = foldl f b (x :: y :: t₂) [PROOFSTEP] simp only [foldl] [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α f : β → α → β l₁ l₂ : List α p : l₁ ~ l₂ x y : α t₁ t₂ : List α _p : t₁ ~ t₂ r : (∀ (x : α), x ∈ t₁ → ∀ (y : α), y ∈ t₁ → ∀ (z : β), f (f z x) y = f (f z y) x) → ∀ (b : β), foldl f b t₁ = foldl f b t₂ H : ∀ (x_1 : α), x_1 ∈ y :: x :: t₁ → ∀ (y_1 : α), y_1 ∈ y :: x :: t₁ → ∀ (z : β), f (f z x_1) y_1 = f (f z y_1) x_1 b : β ⊢ foldl f (f (f b y) x) t₁ = foldl f (f (f b x) y) t₂ [PROOFSTEP] rw [H x (.tail _ <| .head _) y (.head _)] [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α f : β → α → β l₁ l₂ : List α p : l₁ ~ l₂ x y : α t₁ t₂ : List α _p : t₁ ~ t₂ r : (∀ (x : α), x ∈ t₁ → ∀ (y : α), y ∈ t₁ → ∀ (z : β), f (f z x) y = f (f z y) x) → ∀ (b : β), foldl f b t₁ = foldl f b t₂ H : ∀ (x_1 : α), x_1 ∈ y :: x :: t₁ → ∀ (y_1 : α), y_1 ∈ y :: x :: t₁ → ∀ (z : β), f (f z x_1) y_1 = f (f z y_1) x_1 b : β ⊢ foldl f (f (f b y) x) t₁ = foldl f (f (f b y) x) t₂ [PROOFSTEP] exact r (fun x hx y hy => H _ (.tail _ <| .tail _ hx) _ (.tail _ <| .tail _ hy)) _ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → β → β l₁ l₂ : List α lcomm : LeftCommutative f p : l₁ ~ l₂ x : α t₁ t₂ : List α _p : t₁ ~ t₂ r : ∀ (b : β), foldr f b t₁ = foldr f b t₂ b : β ⊢ foldr f b (x :: t₁) = foldr f b (x :: t₂) [PROOFSTEP] simp [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → β → β l₁ l₂ : List α lcomm : LeftCommutative f p : l₁ ~ l₂ x : α t₁ t₂ : List α _p : t₁ ~ t₂ r : ∀ (b : β), foldr f b t₁ = foldr f b t₂ b : β ⊢ f x (foldr f b t₁) = f x (foldr f b t₂) [PROOFSTEP] rw [r b] [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : 
List α f : α → β → β l₁ l₂ : List α lcomm : LeftCommutative f p : l₁ ~ l₂ x y : α t₁ t₂ : List α _p : t₁ ~ t₂ r : ∀ (b : β), foldr f b t₁ = foldr f b t₂ b : β ⊢ foldr f b (y :: x :: t₁) = foldr f b (x :: y :: t₂) [PROOFSTEP] simp [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → β → β l₁ l₂ : List α lcomm : LeftCommutative f p : l₁ ~ l₂ x y : α t₁ t₂ : List α _p : t₁ ~ t₂ r : ∀ (b : β), foldr f b t₁ = foldr f b t₂ b : β ⊢ f y (f x (foldr f b t₁)) = f x (f y (foldr f b t₂)) [PROOFSTEP] rw [lcomm, r b] [GOAL] α : Type uu β✝ : Type vv l₁ l₂ : List α β : List α → Sort u_1 f : (a : α) → (l : List α) → β l → β (a :: l) b : β [] l l' : List α hl : l ~ l' f_congr : ∀ {a : α} {l l' : List α} {b : β l} {b' : β l'}, l ~ l' → HEq b b' → HEq (f a l b) (f a l' b') f_swap : ∀ {a a' : α} {l : List α} {b : β l}, HEq (f a (a' :: l) (f a' l b)) (f a' (a :: l) (f a l b)) ⊢ HEq (List.rec b f l) (List.rec b f l') [PROOFSTEP] induction hl [GOAL] case nil α : Type uu β✝ : Type vv l₁ l₂ : List α β : List α → Sort u_1 f : (a : α) → (l : List α) → β l → β (a :: l) b : β [] l l' : List α f_congr : ∀ {a : α} {l l' : List α} {b : β l} {b' : β l'}, l ~ l' → HEq b b' → HEq (f a l b) (f a l' b') f_swap : ∀ {a a' : α} {l : List α} {b : β l}, HEq (f a (a' :: l) (f a' l b)) (f a' (a :: l) (f a l b)) ⊢ HEq (List.rec b f []) (List.rec b f []) case cons α : Type uu β✝ : Type vv l₁ l₂ : List α β : List α → Sort u_1 f : (a : α) → (l : List α) → β l → β (a :: l) b : β [] l l' : List α f_congr : ∀ {a : α} {l l' : List α} {b : β l} {b' : β l'}, l ~ l' → HEq b b' → HEq (f a l b) (f a l' b') f_swap : ∀ {a a' : α} {l : List α} {b : β l}, HEq (f a (a' :: l) (f a' l b)) (f a' (a :: l) (f a l b)) x✝ : α l₁✝ l₂✝ : List α a✝ : l₁✝ ~ l₂✝ a_ih✝ : HEq (List.rec b f l₁✝) (List.rec b f l₂✝) ⊢ HEq (List.rec b f (x✝ :: l₁✝)) (List.rec b f (x✝ :: l₂✝)) case swap α : Type uu β✝ : Type vv l₁ l₂ : List α β : List α → Sort u_1 f : (a : α) → (l : List α) → β l → β (a :: l) b : β [] l l' : List α f_congr : ∀ {a : α} {l l' : List α} {b : β l} {b' : β l'}, l ~ l' → HEq b b' → HEq (f a l b) (f a l' b') f_swap : ∀ {a a' : α} {l : List α} {b : β l}, HEq (f a (a' :: l) (f a' l b)) (f a' (a :: l) (f a l b)) x✝ y✝ : α l✝ : List α ⊢ HEq (List.rec b f (y✝ :: x✝ :: l✝)) (List.rec b f (x✝ :: y✝ :: l✝)) case trans α : Type uu β✝ : Type vv l₁ l₂ : List α β : List α → Sort u_1 f : (a : α) → (l : List α) → β l → β (a :: l) b : β [] l l' : List α f_congr : ∀ {a : α} {l l' : List α} {b : β l} {b' : β l'}, l ~ l' → HEq b b' → HEq (f a l b) (f a l' b') f_swap : ∀ {a a' : α} {l : List α} {b : β l}, HEq (f a (a' :: l) (f a' l b)) (f a' (a :: l) (f a l b)) l₁✝ l₂✝ l₃✝ : List α a✝¹ : l₁✝ ~ l₂✝ a✝ : l₂✝ ~ l₃✝ a_ih✝¹ : HEq (List.rec b f l₁✝) (List.rec b f l₂✝) a_ih✝ : HEq (List.rec b f l₂✝) (List.rec b f l₃✝) ⊢ HEq (List.rec b f l₁✝) (List.rec b f l₃✝) [PROOFSTEP] case nil => rfl [GOAL] α : Type uu β✝ : Type vv l₁ l₂ : List α β : List α → Sort u_1 f : (a : α) → (l : List α) → β l → β (a :: l) b : β [] l l' : List α f_congr : ∀ {a : α} {l l' : List α} {b : β l} {b' : β l'}, l ~ l' → HEq b b' → HEq (f a l b) (f a l' b') f_swap : ∀ {a a' : α} {l : List α} {b : β l}, HEq (f a (a' :: l) (f a' l b)) (f a' (a :: l) (f a l b)) ⊢ HEq (List.rec b f []) (List.rec b f []) [PROOFSTEP] case nil => rfl [GOAL] α : Type uu β✝ : Type vv l₁ l₂ : List α β : List α → Sort u_1 f : (a : α) → (l : List α) → β l → β (a :: l) b : β [] l l' : List α f_congr : ∀ {a : α} {l l' : List α} {b : β l} {b' : β l'}, l ~ l' → HEq b b' → HEq (f a l b) (f a l' b') f_swap : ∀ {a a' : α} {l : List α} {b : β l}, 
HEq (f a (a' :: l) (f a' l b)) (f a' (a :: l) (f a l b)) ⊢ HEq (List.rec b f []) (List.rec b f []) [PROOFSTEP] rfl [GOAL] case cons α : Type uu β✝ : Type vv l₁ l₂ : List α β : List α → Sort u_1 f : (a : α) → (l : List α) → β l → β (a :: l) b : β [] l l' : List α f_congr : ∀ {a : α} {l l' : List α} {b : β l} {b' : β l'}, l ~ l' → HEq b b' → HEq (f a l b) (f a l' b') f_swap : ∀ {a a' : α} {l : List α} {b : β l}, HEq (f a (a' :: l) (f a' l b)) (f a' (a :: l) (f a l b)) x✝ : α l₁✝ l₂✝ : List α a✝ : l₁✝ ~ l₂✝ a_ih✝ : HEq (List.rec b f l₁✝) (List.rec b f l₂✝) ⊢ HEq (List.rec b f (x✝ :: l₁✝)) (List.rec b f (x✝ :: l₂✝)) case swap α : Type uu β✝ : Type vv l₁ l₂ : List α β : List α → Sort u_1 f : (a : α) → (l : List α) → β l → β (a :: l) b : β [] l l' : List α f_congr : ∀ {a : α} {l l' : List α} {b : β l} {b' : β l'}, l ~ l' → HEq b b' → HEq (f a l b) (f a l' b') f_swap : ∀ {a a' : α} {l : List α} {b : β l}, HEq (f a (a' :: l) (f a' l b)) (f a' (a :: l) (f a l b)) x✝ y✝ : α l✝ : List α ⊢ HEq (List.rec b f (y✝ :: x✝ :: l✝)) (List.rec b f (x✝ :: y✝ :: l✝)) case trans α : Type uu β✝ : Type vv l₁ l₂ : List α β : List α → Sort u_1 f : (a : α) → (l : List α) → β l → β (a :: l) b : β [] l l' : List α f_congr : ∀ {a : α} {l l' : List α} {b : β l} {b' : β l'}, l ~ l' → HEq b b' → HEq (f a l b) (f a l' b') f_swap : ∀ {a a' : α} {l : List α} {b : β l}, HEq (f a (a' :: l) (f a' l b)) (f a' (a :: l) (f a l b)) l₁✝ l₂✝ l₃✝ : List α a✝¹ : l₁✝ ~ l₂✝ a✝ : l₂✝ ~ l₃✝ a_ih✝¹ : HEq (List.rec b f l₁✝) (List.rec b f l₂✝) a_ih✝ : HEq (List.rec b f l₂✝) (List.rec b f l₃✝) ⊢ HEq (List.rec b f l₁✝) (List.rec b f l₃✝) [PROOFSTEP] case cons a l l' h ih => exact f_congr h ih [GOAL] α : Type uu β✝ : Type vv l₁ l₂ : List α β : List α → Sort u_1 f : (a : α) → (l : List α) → β l → β (a :: l) b : β [] l✝ l'✝ : List α f_congr : ∀ {a : α} {l l' : List α} {b : β l} {b' : β l'}, l ~ l' → HEq b b' → HEq (f a l b) (f a l' b') f_swap : ∀ {a a' : α} {l : List α} {b : β l}, HEq (f a (a' :: l) (f a' l b)) (f a' (a :: l) (f a l b)) a : α l l' : List α h : l ~ l' ih : HEq (List.rec b f l) (List.rec b f l') ⊢ HEq (List.rec b f (a :: l)) (List.rec b f (a :: l')) [PROOFSTEP] case cons a l l' h ih => exact f_congr h ih [GOAL] α : Type uu β✝ : Type vv l₁ l₂ : List α β : List α → Sort u_1 f : (a : α) → (l : List α) → β l → β (a :: l) b : β [] l✝ l'✝ : List α f_congr : ∀ {a : α} {l l' : List α} {b : β l} {b' : β l'}, l ~ l' → HEq b b' → HEq (f a l b) (f a l' b') f_swap : ∀ {a a' : α} {l : List α} {b : β l}, HEq (f a (a' :: l) (f a' l b)) (f a' (a :: l) (f a l b)) a : α l l' : List α h : l ~ l' ih : HEq (List.rec b f l) (List.rec b f l') ⊢ HEq (List.rec b f (a :: l)) (List.rec b f (a :: l')) [PROOFSTEP] exact f_congr h ih [GOAL] case swap α : Type uu β✝ : Type vv l₁ l₂ : List α β : List α → Sort u_1 f : (a : α) → (l : List α) → β l → β (a :: l) b : β [] l l' : List α f_congr : ∀ {a : α} {l l' : List α} {b : β l} {b' : β l'}, l ~ l' → HEq b b' → HEq (f a l b) (f a l' b') f_swap : ∀ {a a' : α} {l : List α} {b : β l}, HEq (f a (a' :: l) (f a' l b)) (f a' (a :: l) (f a l b)) x✝ y✝ : α l✝ : List α ⊢ HEq (List.rec b f (y✝ :: x✝ :: l✝)) (List.rec b f (x✝ :: y✝ :: l✝)) case trans α : Type uu β✝ : Type vv l₁ l₂ : List α β : List α → Sort u_1 f : (a : α) → (l : List α) → β l → β (a :: l) b : β [] l l' : List α f_congr : ∀ {a : α} {l l' : List α} {b : β l} {b' : β l'}, l ~ l' → HEq b b' → HEq (f a l b) (f a l' b') f_swap : ∀ {a a' : α} {l : List α} {b : β l}, HEq (f a (a' :: l) (f a' l b)) (f a' (a :: l) (f a l b)) l₁✝ l₂✝ l₃✝ : List α a✝¹ : l₁✝ ~ l₂✝ a✝ : l₂✝ ~ 
l₃✝ a_ih✝¹ : HEq (List.rec b f l₁✝) (List.rec b f l₂✝) a_ih✝ : HEq (List.rec b f l₂✝) (List.rec b f l₃✝) ⊢ HEq (List.rec b f l₁✝) (List.rec b f l₃✝) [PROOFSTEP] case swap a a' l => exact f_swap [GOAL] α : Type uu β✝ : Type vv l₁ l₂ : List α β : List α → Sort u_1 f : (a : α) → (l : List α) → β l → β (a :: l) b : β [] l✝ l' : List α f_congr : ∀ {a : α} {l l' : List α} {b : β l} {b' : β l'}, l ~ l' → HEq b b' → HEq (f a l b) (f a l' b') f_swap : ∀ {a a' : α} {l : List α} {b : β l}, HEq (f a (a' :: l) (f a' l b)) (f a' (a :: l) (f a l b)) a a' : α l : List α ⊢ HEq (List.rec b f (a' :: a :: l)) (List.rec b f (a :: a' :: l)) [PROOFSTEP] case swap a a' l => exact f_swap [GOAL] α : Type uu β✝ : Type vv l₁ l₂ : List α β : List α → Sort u_1 f : (a : α) → (l : List α) → β l → β (a :: l) b : β [] l✝ l' : List α f_congr : ∀ {a : α} {l l' : List α} {b : β l} {b' : β l'}, l ~ l' → HEq b b' → HEq (f a l b) (f a l' b') f_swap : ∀ {a a' : α} {l : List α} {b : β l}, HEq (f a (a' :: l) (f a' l b)) (f a' (a :: l) (f a l b)) a a' : α l : List α ⊢ HEq (List.rec b f (a' :: a :: l)) (List.rec b f (a :: a' :: l)) [PROOFSTEP] exact f_swap [GOAL] case trans α : Type uu β✝ : Type vv l₁ l₂ : List α β : List α → Sort u_1 f : (a : α) → (l : List α) → β l → β (a :: l) b : β [] l l' : List α f_congr : ∀ {a : α} {l l' : List α} {b : β l} {b' : β l'}, l ~ l' → HEq b b' → HEq (f a l b) (f a l' b') f_swap : ∀ {a a' : α} {l : List α} {b : β l}, HEq (f a (a' :: l) (f a' l b)) (f a' (a :: l) (f a l b)) l₁✝ l₂✝ l₃✝ : List α a✝¹ : l₁✝ ~ l₂✝ a✝ : l₂✝ ~ l₃✝ a_ih✝¹ : HEq (List.rec b f l₁✝) (List.rec b f l₂✝) a_ih✝ : HEq (List.rec b f l₂✝) (List.rec b f l₃✝) ⊢ HEq (List.rec b f l₁✝) (List.rec b f l₃✝) [PROOFSTEP] case trans l₁ l₂ l₃ _h₁ _h₂ ih₁ ih₂ => exact HEq.trans ih₁ ih₂ [GOAL] α : Type uu β✝ : Type vv l₁✝ l₂✝ : List α β : List α → Sort u_1 f : (a : α) → (l : List α) → β l → β (a :: l) b : β [] l l' : List α f_congr : ∀ {a : α} {l l' : List α} {b : β l} {b' : β l'}, l ~ l' → HEq b b' → HEq (f a l b) (f a l' b') f_swap : ∀ {a a' : α} {l : List α} {b : β l}, HEq (f a (a' :: l) (f a' l b)) (f a' (a :: l) (f a l b)) l₁ l₂ l₃ : List α _h₁ : l₁ ~ l₂ _h₂ : l₂ ~ l₃ ih₁ : HEq (List.rec b f l₁) (List.rec b f l₂) ih₂ : HEq (List.rec b f l₂) (List.rec b f l₃) ⊢ HEq (List.rec b f l₁) (List.rec b f l₃) [PROOFSTEP] case trans l₁ l₂ l₃ _h₁ _h₂ ih₁ ih₂ => exact HEq.trans ih₁ ih₂ [GOAL] α : Type uu β✝ : Type vv l₁✝ l₂✝ : List α β : List α → Sort u_1 f : (a : α) → (l : List α) → β l → β (a :: l) b : β [] l l' : List α f_congr : ∀ {a : α} {l l' : List α} {b : β l} {b' : β l'}, l ~ l' → HEq b b' → HEq (f a l b) (f a l' b') f_swap : ∀ {a a' : α} {l : List α} {b : β l}, HEq (f a (a' :: l) (f a' l b)) (f a' (a :: l) (f a l b)) l₁ l₂ l₃ : List α _h₁ : l₁ ~ l₂ _h₂ : l₂ ~ l₃ ih₁ : HEq (List.rec b f l₁) (List.rec b f l₂) ih₂ : HEq (List.rec b f l₂) (List.rec b f l₃) ⊢ HEq (List.rec b f l₁) (List.rec b f l₃) [PROOFSTEP] exact HEq.trans ih₁ ih₂ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α M : Monoid α l₁ l₂ : List α h : l₁ ~ l₂ hc : Pairwise Commute l₁ ⊢ prod l₁ = prod l₂ [PROOFSTEP] refine h.foldl_eq' ?_ _ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α M : Monoid α l₁ l₂ : List α h : l₁ ~ l₂ hc : Pairwise Commute l₁ ⊢ ∀ (x : α), x ∈ l₁ → ∀ (y : α), y ∈ l₁ → ∀ (z : α), z * x * y = z * y * x [PROOFSTEP] apply Pairwise.forall_of_forall [GOAL] case H α : Type uu β : Type vv l₁✝ l₂✝ : List α M : Monoid α l₁ l₂ : List α h : l₁ ~ l₂ hc : Pairwise Commute l₁ ⊢ Symmetric fun x y => ∀ (z : α), z * x * y = z * y * x [PROOFSTEP] intro x y h z [GOAL] case H α : Type uu 
β : Type vv l₁✝ l₂✝ : List α M : Monoid α l₁ l₂ : List α h✝ : l₁ ~ l₂ hc : Pairwise Commute l₁ x y : α h : ∀ (z : α), z * x * y = z * y * x z : α ⊢ z * y * x = z * x * y [PROOFSTEP] exact (h z).symm [GOAL] case H₁ α : Type uu β : Type vv l₁✝ l₂✝ : List α M : Monoid α l₁ l₂ : List α h : l₁ ~ l₂ hc : Pairwise Commute l₁ ⊢ ∀ (x : α), x ∈ l₁ → ∀ (z : α), z * x * x = z * x * x [PROOFSTEP] intros [GOAL] case H₁ α : Type uu β : Type vv l₁✝ l₂✝ : List α M : Monoid α l₁ l₂ : List α h : l₁ ~ l₂ hc : Pairwise Commute l₁ x✝ : α a✝ : x✝ ∈ l₁ z✝ : α ⊢ z✝ * x✝ * x✝ = z✝ * x✝ * x✝ [PROOFSTEP] rfl [GOAL] case H₂ α : Type uu β : Type vv l₁✝ l₂✝ : List α M : Monoid α l₁ l₂ : List α h : l₁ ~ l₂ hc : Pairwise Commute l₁ ⊢ Pairwise (fun x y => ∀ (z : α), z * x * y = z * y * x) l₁ [PROOFSTEP] apply hc.imp [GOAL] case H₂ α : Type uu β : Type vv l₁✝ l₂✝ : List α M : Monoid α l₁ l₂ : List α h : l₁ ~ l₂ hc : Pairwise Commute l₁ ⊢ ∀ {a b : α}, Commute a b → ∀ (z : α), z * a * b = z * b * a [PROOFSTEP] intro a b h z [GOAL] case H₂ α : Type uu β : Type vv l₁✝ l₂✝ : List α M : Monoid α l₁ l₂ : List α h✝ : l₁ ~ l₂ hc : Pairwise Commute l₁ a b : α h : Commute a b z : α ⊢ z * a * b = z * b * a [PROOFSTEP] rw [mul_assoc z, mul_assoc z, h] [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₁ l₂ r₁ r₂ : List α ⊢ l₁ ++ a :: r₁ ~ l₂ ++ a :: r₂ → l₁ ++ r₁ ~ l₂ ++ r₂ [PROOFSTEP] generalize e₁ : l₁ ++ a :: r₁ = s₁ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₁ l₂ r₁ r₂ s₁ : List α e₁ : l₁ ++ a :: r₁ = s₁ ⊢ s₁ ~ l₂ ++ a :: r₂ → l₁ ++ r₁ ~ l₂ ++ r₂ [PROOFSTEP] generalize e₂ : l₂ ++ a :: r₂ = s₂ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₁ l₂ r₁ r₂ s₁ : List α e₁ : l₁ ++ a :: r₁ = s₁ s₂ : List α e₂ : l₂ ++ a :: r₂ = s₂ ⊢ s₁ ~ s₂ → l₁ ++ r₁ ~ l₂ ++ r₂ [PROOFSTEP] intro p [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₁ l₂ r₁ r₂ s₁ : List α e₁ : l₁ ++ a :: r₁ = s₁ s₂ : List α e₂ : l₂ ++ a :: r₂ = s₂ p : s₁ ~ s₂ ⊢ l₁ ++ r₁ ~ l₂ ++ r₂ [PROOFSTEP] revert l₁ l₂ r₁ r₂ e₁ e₂ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α a : α s₁ s₂ : List α p : s₁ ~ s₂ ⊢ ∀ {l₁ l₂ r₁ r₂ : List α}, l₁ ++ a :: r₁ = s₁ → l₂ ++ a :: r₂ = s₂ → l₁ ++ r₁ ~ l₂ ++ r₂ [PROOFSTEP] clear l₁ l₂ β [GOAL] α : Type uu a : α s₁ s₂ : List α p : s₁ ~ s₂ ⊢ ∀ {l₁ l₂ r₁ r₂ : List α}, l₁ ++ a :: r₁ = s₁ → l₂ ++ a :: r₂ = s₂ → l₁ ++ r₁ ~ l₂ ++ r₂ [PROOFSTEP] show ∀ _ _ _ _, _ [GOAL] α : Type uu a : α s₁ s₂ : List α p : s₁ ~ s₂ ⊢ ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = s₁ → x_1 ++ a :: x_3 = s₂ → x ++ x_2 ~ x_1 ++ x_3 [PROOFSTEP] refine perm_induction_on p ?_ (fun x t₁ t₂ p IH => ?_) (fun x y t₁ t₂ p IH => ?_) fun t₁ t₂ t₃ p₁ p₂ IH₁ IH₂ => ?_ [GOAL] case refine_1 α : Type uu a : α s₁ s₂ : List α p : s₁ ~ s₂ ⊢ ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = [] → x_1 ++ a :: x_3 = [] → x ++ x_2 ~ x_1 ++ x_3 [PROOFSTEP] intro l₁ l₂ r₁ r₂ e₁ e₂ [GOAL] case refine_2 α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 ⊢ ∀ (x_1 x_2 x_3 x_4 : List α), x_1 ++ a :: x_3 = x :: t₁ → x_2 ++ a :: x_4 = x :: t₂ → x_1 ++ x_3 ~ x_2 ++ x_4 [PROOFSTEP] intro l₁ l₂ r₁ r₂ e₁ e₂ [GOAL] case refine_3 α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 ⊢ ∀ (x_1 x_2 x_3 x_4 : List α), x_1 ++ a :: x_3 = y :: x :: t₁ → x_2 ++ a :: x_4 = x :: y :: t₂ → x_1 ++ x_3 ~ x_2 ++ x_4 [PROOFSTEP] intro l₁ l₂ r₁ r₂ e₁ e₂ [GOAL] case 
refine_4 α : Type uu a : α s₁ s₂ : List α p : s₁ ~ s₂ t₁ t₂ t₃ : List α p₁ : t₁ ~ t₂ p₂ : t₂ ~ t₃ IH₁ : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 IH₂ : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₂ → x_1 ++ a :: x_3 = t₃ → x ++ x_2 ~ x_1 ++ x_3 ⊢ ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₃ → x ++ x_2 ~ x_1 ++ x_3 [PROOFSTEP] intro l₁ l₂ r₁ r₂ e₁ e₂ [GOAL] case refine_1 α : Type uu a : α s₁ s₂ : List α p : s₁ ~ s₂ l₁ l₂ r₁ r₂ : List α e₁ : l₁ ++ a :: r₁ = [] e₂ : l₂ ++ a :: r₂ = [] ⊢ l₁ ++ r₁ ~ l₂ ++ r₂ [PROOFSTEP] apply (not_mem_nil a).elim [GOAL] case refine_1 α : Type uu a : α s₁ s₂ : List α p : s₁ ~ s₂ l₁ l₂ r₁ r₂ : List α e₁ : l₁ ++ a :: r₁ = [] e₂ : l₂ ++ a :: r₂ = [] ⊢ a ∈ [] [PROOFSTEP] rw [← e₁] [GOAL] case refine_1 α : Type uu a : α s₁ s₂ : List α p : s₁ ~ s₂ l₁ l₂ r₁ r₂ : List α e₁ : l₁ ++ a :: r₁ = [] e₂ : l₂ ++ a :: r₂ = [] ⊢ a ∈ l₁ ++ a :: r₁ [PROOFSTEP] simp [GOAL] case refine_2 α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 l₁ l₂ r₁ r₂ : List α e₁ : l₁ ++ a :: r₁ = x :: t₁ e₂ : l₂ ++ a :: r₂ = x :: t₂ ⊢ l₁ ++ r₁ ~ l₂ ++ r₂ [PROOFSTEP] cases' l₁ with y l₁ [GOAL] case refine_2.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 l₂ r₁ r₂ : List α e₂ : l₂ ++ a :: r₂ = x :: t₂ e₁ : [] ++ a :: r₁ = x :: t₁ ⊢ [] ++ r₁ ~ l₂ ++ r₂ [PROOFSTEP] cases' l₂ with z l₂ [GOAL] case refine_2.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 l₂ r₁ r₂ : List α e₂ : l₂ ++ a :: r₂ = x :: t₂ y : α l₁ : List α e₁ : y :: l₁ ++ a :: r₁ = x :: t₁ ⊢ y :: l₁ ++ r₁ ~ l₂ ++ r₂ [PROOFSTEP] cases' l₂ with z l₂ [GOAL] case refine_2.nil.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α e₁ : [] ++ a :: r₁ = x :: t₁ e₂ : [] ++ a :: r₂ = x :: t₂ ⊢ [] ++ r₁ ~ [] ++ r₂ [PROOFSTEP] dsimp at e₁ e₂ [GOAL] case refine_2.nil.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α e₁ : [] ++ a :: r₁ = x :: t₁ z : α l₂ : List α e₂ : z :: l₂ ++ a :: r₂ = x :: t₂ ⊢ [] ++ r₁ ~ z :: l₂ ++ r₂ [PROOFSTEP] dsimp at e₁ e₂ [GOAL] case refine_2.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y : α l₁ : List α e₁ : y :: l₁ ++ a :: r₁ = x :: t₁ e₂ : [] ++ a :: r₂ = x :: t₂ ⊢ y :: l₁ ++ r₁ ~ [] ++ r₂ [PROOFSTEP] dsimp at e₁ e₂ [GOAL] case refine_2.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y : α l₁ : List α e₁ : y :: l₁ ++ a :: r₁ = x :: t₁ z : α l₂ : List α e₂ : z :: l₂ ++ a :: r₂ = x :: t₂ ⊢ y :: l₁ ++ r₁ ~ z :: l₂ ++ r₂ [PROOFSTEP] dsimp at e₁ e₂ [GOAL] case refine_2.nil.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : 
List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α e₁ : a :: r₁ = x :: t₁ e₂ : a :: r₂ = x :: t₂ ⊢ [] ++ r₁ ~ [] ++ r₂ [PROOFSTEP] injections [GOAL] case refine_2.nil.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α e₁ : a :: r₁ = x :: t₁ z : α l₂ : List α e₂ : z :: (l₂ ++ a :: r₂) = x :: t₂ ⊢ [] ++ r₁ ~ z :: l₂ ++ r₂ [PROOFSTEP] injections [GOAL] case refine_2.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y : α l₁ : List α e₁ : y :: (l₁ ++ a :: r₁) = x :: t₁ e₂ : a :: r₂ = x :: t₂ ⊢ y :: l₁ ++ r₁ ~ [] ++ r₂ [PROOFSTEP] injections [GOAL] case refine_2.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y : α l₁ : List α e₁ : y :: (l₁ ++ a :: r₁) = x :: t₁ z : α l₂ : List α e₂ : z :: (l₂ ++ a :: r₂) = x :: t₂ ⊢ y :: l₁ ++ r₁ ~ z :: l₂ ++ r₂ [PROOFSTEP] injections [GOAL] case refine_2.nil.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α head_eq✝¹ : a = x tail_eq✝¹ : r₁ = t₁ head_eq✝ : a = x tail_eq✝ : r₂ = t₂ ⊢ [] ++ r₁ ~ [] ++ r₂ [PROOFSTEP] subst x [GOAL] case refine_2.nil.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α z : α l₂ : List α head_eq✝¹ : a = x tail_eq✝¹ : r₁ = t₁ head_eq✝ : z = x tail_eq✝ : l₂ ++ a :: r₂ = t₂ ⊢ [] ++ r₁ ~ z :: l₂ ++ r₂ [PROOFSTEP] subst x [GOAL] case refine_2.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y : α l₁ : List α head_eq✝¹ : y = x tail_eq✝¹ : l₁ ++ a :: r₁ = t₁ head_eq✝ : a = x tail_eq✝ : r₂ = t₂ ⊢ y :: l₁ ++ r₁ ~ [] ++ r₂ [PROOFSTEP] subst x [GOAL] case refine_2.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y : α l₁ : List α z : α l₂ : List α head_eq✝¹ : y = x tail_eq✝¹ : l₁ ++ a :: r₁ = t₁ head_eq✝ : z = x tail_eq✝ : l₂ ++ a :: r₂ = t₂ ⊢ y :: l₁ ++ r₁ ~ z :: l₂ ++ r₂ [PROOFSTEP] subst x [GOAL] case refine_2.nil.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α tail_eq✝¹ : r₁ = t₁ tail_eq✝ : r₂ = t₂ head_eq✝ : a = a ⊢ [] ++ r₁ ~ [] ++ r₂ [PROOFSTEP] substs t₁ t₂ [GOAL] case refine_2.nil.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ r₁ r₂ : List α head_eq✝ : a = a p : r₁ ~ r₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = r₁ → x_1 ++ a :: x_3 = r₂ → x ++ x_2 ~ x_1 ++ x_3 ⊢ [] ++ r₁ ~ [] ++ r₂ [PROOFSTEP] exact p [GOAL] case refine_2.nil.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List 
α z : α l₂ : List α tail_eq✝¹ : r₁ = t₁ tail_eq✝ : l₂ ++ a :: r₂ = t₂ head_eq✝ : z = a ⊢ [] ++ r₁ ~ z :: l₂ ++ r₂ [PROOFSTEP] substs z t₁ t₂ [GOAL] case refine_2.nil.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ r₁ r₂ l₂ : List α p : r₁ ~ l₂ ++ a :: r₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = r₁ → x_1 ++ a :: x_3 = l₂ ++ a :: r₂ → x ++ x_2 ~ x_1 ++ x_3 ⊢ [] ++ r₁ ~ a :: l₂ ++ r₂ [PROOFSTEP] exact p.trans perm_middle [GOAL] case refine_2.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y : α l₁ : List α tail_eq✝¹ : l₁ ++ a :: r₁ = t₁ tail_eq✝ : r₂ = t₂ head_eq✝ : a = y ⊢ y :: l₁ ++ r₁ ~ [] ++ r₂ [PROOFSTEP] substs y t₁ t₂ [GOAL] case refine_2.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ r₁ r₂ l₁ : List α p : l₁ ++ a :: r₁ ~ r₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = l₁ ++ a :: r₁ → x_1 ++ a :: x_3 = r₂ → x ++ x_2 ~ x_1 ++ x_3 ⊢ a :: l₁ ++ r₁ ~ [] ++ r₂ [PROOFSTEP] exact perm_middle.symm.trans p [GOAL] case refine_2.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y : α l₁ : List α z : α l₂ : List α tail_eq✝¹ : l₁ ++ a :: r₁ = t₁ tail_eq✝ : l₂ ++ a :: r₂ = t₂ head_eq✝ : z = y ⊢ y :: l₁ ++ r₁ ~ z :: l₂ ++ r₂ [PROOFSTEP] substs z t₁ t₂ [GOAL] case refine_2.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ r₁ r₂ : List α y : α l₁ l₂ : List α p : l₁ ++ a :: r₁ ~ l₂ ++ a :: r₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = l₁ ++ a :: r₁ → x_1 ++ a :: x_3 = l₂ ++ a :: r₂ → x ++ x_2 ~ x_1 ++ x_3 ⊢ y :: l₁ ++ r₁ ~ y :: l₂ ++ r₂ [PROOFSTEP] exact (IH _ _ _ _ rfl rfl).cons y [GOAL] case refine_3 α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 l₁ l₂ r₁ r₂ : List α e₁ : l₁ ++ a :: r₁ = y :: x :: t₁ e₂ : l₂ ++ a :: r₂ = x :: y :: t₂ ⊢ l₁ ++ r₁ ~ l₂ ++ r₂ [PROOFSTEP] rcases l₁ with (_ | ⟨y, _ | ⟨z, l₁⟩⟩) [GOAL] case refine_3.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 l₂ r₁ r₂ : List α e₂ : l₂ ++ a :: r₂ = x :: y :: t₂ e₁ : [] ++ a :: r₁ = y :: x :: t₁ ⊢ [] ++ r₁ ~ l₂ ++ r₂ [PROOFSTEP] rcases l₂ with (_ | ⟨u, _ | ⟨v, l₂⟩⟩) [GOAL] case refine_3.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 l₂ r₁ r₂ : List α e₂ : l₂ ++ a :: r₂ = x :: y✝ :: t₂ y : α e₁ : [y] ++ a :: r₁ = y✝ :: x :: t₁ ⊢ [y] ++ r₁ ~ l₂ ++ r₂ [PROOFSTEP] rcases l₂ with (_ | ⟨u, _ | ⟨v, l₂⟩⟩) [GOAL] case refine_3.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 l₂ r₁ r₂ : List α e₂ : l₂ ++ a :: r₂ = x :: y✝ :: t₂ y z : α l₁ : List α e₁ : y :: z :: l₁ ++ a :: r₁ = y✝ :: x :: t₁ ⊢ y :: z :: l₁ ++ r₁ ~ l₂ ++ r₂ [PROOFSTEP] rcases l₂ with (_ | ⟨u, _ | ⟨v, l₂⟩⟩) [GOAL] case refine_3.nil.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ 
: List α e₁ : [] ++ a :: r₁ = y :: x :: t₁ e₂ : [] ++ a :: r₂ = x :: y :: t₂ ⊢ [] ++ r₁ ~ [] ++ r₂ [PROOFSTEP] dsimp at e₁ e₂ [GOAL] case refine_3.nil.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α e₁ : [] ++ a :: r₁ = y :: x :: t₁ u : α e₂ : [u] ++ a :: r₂ = x :: y :: t₂ ⊢ [] ++ r₁ ~ [u] ++ r₂ [PROOFSTEP] dsimp at e₁ e₂ [GOAL] case refine_3.nil.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α e₁ : [] ++ a :: r₁ = y :: x :: t₁ u v : α l₂ : List α e₂ : u :: v :: l₂ ++ a :: r₂ = x :: y :: t₂ ⊢ [] ++ r₁ ~ u :: v :: l₂ ++ r₂ [PROOFSTEP] dsimp at e₁ e₂ [GOAL] case refine_3.cons.nil.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y : α e₁ : [y] ++ a :: r₁ = y✝ :: x :: t₁ e₂ : [] ++ a :: r₂ = x :: y✝ :: t₂ ⊢ [y] ++ r₁ ~ [] ++ r₂ [PROOFSTEP] dsimp at e₁ e₂ [GOAL] case refine_3.cons.nil.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y : α e₁ : [y] ++ a :: r₁ = y✝ :: x :: t₁ u : α e₂ : [u] ++ a :: r₂ = x :: y✝ :: t₂ ⊢ [y] ++ r₁ ~ [u] ++ r₂ [PROOFSTEP] dsimp at e₁ e₂ [GOAL] case refine_3.cons.nil.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y : α e₁ : [y] ++ a :: r₁ = y✝ :: x :: t₁ u v : α l₂ : List α e₂ : u :: v :: l₂ ++ a :: r₂ = x :: y✝ :: t₂ ⊢ [y] ++ r₁ ~ u :: v :: l₂ ++ r₂ [PROOFSTEP] dsimp at e₁ e₂ [GOAL] case refine_3.cons.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y z : α l₁ : List α e₁ : y :: z :: l₁ ++ a :: r₁ = y✝ :: x :: t₁ e₂ : [] ++ a :: r₂ = x :: y✝ :: t₂ ⊢ y :: z :: l₁ ++ r₁ ~ [] ++ r₂ [PROOFSTEP] dsimp at e₁ e₂ [GOAL] case refine_3.cons.cons.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y z : α l₁ : List α e₁ : y :: z :: l₁ ++ a :: r₁ = y✝ :: x :: t₁ u : α e₂ : [u] ++ a :: r₂ = x :: y✝ :: t₂ ⊢ y :: z :: l₁ ++ r₁ ~ [u] ++ r₂ [PROOFSTEP] dsimp at e₁ e₂ [GOAL] case refine_3.cons.cons.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y z : α l₁ : List α e₁ : y :: z :: l₁ ++ a :: r₁ = y✝ :: x :: t₁ u v : α l₂ : List α e₂ : u :: v :: l₂ ++ a :: r₂ = x :: y✝ :: t₂ ⊢ y :: z :: l₁ ++ r₁ ~ u :: v :: l₂ ++ r₂ [PROOFSTEP] dsimp at e₁ e₂ [GOAL] case refine_3.nil.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α e₁ : a :: r₁ = y :: x :: t₁ e₂ : a :: r₂ = x :: y :: t₂ ⊢ [] ++ r₁ ~ [] ++ r₂ [PROOFSTEP] injections 
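-- The surrounding [GOAL]/[PROOFSTEP] states belong to the proof of the cancellation
-- lemma for permutations whose opening goal reads
--   l₁ ++ a :: r₁ ~ l₂ ++ a :: r₂ → l₁ ++ r₁ ~ l₂ ++ r₂
-- (in Mathlib this appears as `List.perm_inv_core`; that name and the import path
-- below are assumptions, not part of this trace). The case analysis leans heavily
-- on `perm_middle`, which does appear in the steps and moves a distinguished
-- element to the front of an append. A minimal standalone sketch of that use:
import Mathlib.Data.List.Perm

open List in
example {α : Type*} (a : α) (l r : List α) : l ++ a :: r ~ a :: (l ++ r) :=
  perm_middle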
[GOAL] case refine_3.nil.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α e₁ : a :: r₁ = y :: x :: t₁ u : α e₂ : u :: a :: r₂ = x :: y :: t₂ ⊢ [] ++ r₁ ~ [u] ++ r₂ [PROOFSTEP] injections [GOAL] case refine_3.nil.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α e₁ : a :: r₁ = y :: x :: t₁ u v : α l₂ : List α e₂ : u :: v :: (l₂ ++ a :: r₂) = x :: y :: t₂ ⊢ [] ++ r₁ ~ u :: v :: l₂ ++ r₂ [PROOFSTEP] injections [GOAL] case refine_3.cons.nil.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y : α e₁ : y :: a :: r₁ = y✝ :: x :: t₁ e₂ : a :: r₂ = x :: y✝ :: t₂ ⊢ [y] ++ r₁ ~ [] ++ r₂ [PROOFSTEP] injections [GOAL] case refine_3.cons.nil.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y : α e₁ : y :: a :: r₁ = y✝ :: x :: t₁ u : α e₂ : u :: a :: r₂ = x :: y✝ :: t₂ ⊢ [y] ++ r₁ ~ [u] ++ r₂ [PROOFSTEP] injections [GOAL] case refine_3.cons.nil.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y : α e₁ : y :: a :: r₁ = y✝ :: x :: t₁ u v : α l₂ : List α e₂ : u :: v :: (l₂ ++ a :: r₂) = x :: y✝ :: t₂ ⊢ [y] ++ r₁ ~ u :: v :: l₂ ++ r₂ [PROOFSTEP] injections [GOAL] case refine_3.cons.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y z : α l₁ : List α e₁ : y :: z :: (l₁ ++ a :: r₁) = y✝ :: x :: t₁ e₂ : a :: r₂ = x :: y✝ :: t₂ ⊢ y :: z :: l₁ ++ r₁ ~ [] ++ r₂ [PROOFSTEP] injections [GOAL] case refine_3.cons.cons.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y z : α l₁ : List α e₁ : y :: z :: (l₁ ++ a :: r₁) = y✝ :: x :: t₁ u : α e₂ : u :: a :: r₂ = x :: y✝ :: t₂ ⊢ y :: z :: l₁ ++ r₁ ~ [u] ++ r₂ [PROOFSTEP] injections [GOAL] case refine_3.cons.cons.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y z : α l₁ : List α e₁ : y :: z :: (l₁ ++ a :: r₁) = y✝ :: x :: t₁ u v : α l₂ : List α e₂ : u :: v :: (l₂ ++ a :: r₂) = x :: y✝ :: t₂ ⊢ y :: z :: l₁ ++ r₁ ~ u :: v :: l₂ ++ r₂ [PROOFSTEP] injections [GOAL] case refine_3.nil.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α head_eq✝¹ : a = y tail_eq✝¹ : r₁ = x :: t₁ head_eq✝ : a = x tail_eq✝ : r₂ = y :: t₂ ⊢ [] ++ r₁ ~ [] ++ r₂ [PROOFSTEP] substs x y [GOAL] case refine_3.nil.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: 
x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α u : α head_eq✝² : a = y tail_eq✝¹ : r₁ = x :: t₁ head_eq✝¹ : u = x head_eq✝ : a = y tail_eq✝ : r₂ = t₂ ⊢ [] ++ r₁ ~ [u] ++ r₂ [PROOFSTEP] substs x y [GOAL] case refine_3.nil.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α u v : α l₂ : List α head_eq✝² : a = y tail_eq✝¹ : r₁ = x :: t₁ head_eq✝¹ : u = x head_eq✝ : v = y tail_eq✝ : l₂ ++ a :: r₂ = t₂ ⊢ [] ++ r₁ ~ u :: v :: l₂ ++ r₂ [PROOFSTEP] substs x y [GOAL] case refine_3.cons.nil.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y : α head_eq✝² : y = y✝ head_eq✝¹ : a = x tail_eq✝¹ : r₁ = t₁ head_eq✝ : a = x tail_eq✝ : r₂ = y✝ :: t₂ ⊢ [y] ++ r₁ ~ [] ++ r₂ [PROOFSTEP] substs x y [GOAL] case refine_3.cons.nil.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y u : α head_eq✝³ : y = y✝ head_eq✝² : a = x tail_eq✝¹ : r₁ = t₁ head_eq✝¹ : u = x head_eq✝ : a = y✝ tail_eq✝ : r₂ = t₂ ⊢ [y] ++ r₁ ~ [u] ++ r₂ [PROOFSTEP] substs x y [GOAL] case refine_3.cons.nil.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y u v : α l₂ : List α head_eq✝³ : y = y✝ head_eq✝² : a = x tail_eq✝¹ : r₁ = t₁ head_eq✝¹ : u = x head_eq✝ : v = y✝ tail_eq✝ : l₂ ++ a :: r₂ = t₂ ⊢ [y] ++ r₁ ~ u :: v :: l₂ ++ r₂ [PROOFSTEP] substs x y [GOAL] case refine_3.cons.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y z : α l₁ : List α head_eq✝² : y = y✝ head_eq✝¹ : z = x tail_eq✝¹ : l₁ ++ a :: r₁ = t₁ head_eq✝ : a = x tail_eq✝ : r₂ = y✝ :: t₂ ⊢ y :: z :: l₁ ++ r₁ ~ [] ++ r₂ [PROOFSTEP] substs x y [GOAL] case refine_3.cons.cons.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y z : α l₁ : List α u : α head_eq✝³ : y = y✝ head_eq✝² : z = x tail_eq✝¹ : l₁ ++ a :: r₁ = t₁ head_eq✝¹ : u = x head_eq✝ : a = y✝ tail_eq✝ : r₂ = t₂ ⊢ y :: z :: l₁ ++ r₁ ~ [u] ++ r₂ [PROOFSTEP] substs x y [GOAL] case refine_3.cons.cons.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ x y✝ : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α y z : α l₁ : List α u v : α l₂ : List α head_eq✝³ : y = y✝ head_eq✝² : z = x tail_eq✝¹ : l₁ ++ a :: r₁ = t₁ head_eq✝¹ : u = x head_eq✝ : v = y✝ tail_eq✝ : l₂ ++ a :: r₂ = t₂ ⊢ y :: z :: l₁ ++ r₁ ~ u :: v :: l₂ ++ r₂ [PROOFSTEP] substs x y [GOAL] case refine_3.nil.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α tail_eq✝¹ : r₁ = a :: t₁ tail_eq✝ : r₂ = a :: t₂ ⊢ [] ++ r₁ ~ [] ++ r₂ [PROOFSTEP] substs r₁ r₂ [GOAL] case refine_3.nil.nil α : Type uu a : α s₁ s₂ : 
List α p✝ : s₁ ~ s₂ t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 ⊢ [] ++ a :: t₁ ~ [] ++ a :: t₂ [PROOFSTEP] exact p.cons a [GOAL] case refine_3.nil.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α u : α tail_eq✝¹ : r₂ = t₂ tail_eq✝ : r₁ = u :: t₁ head_eq✝ : a = a ⊢ [] ++ r₁ ~ [u] ++ r₂ [PROOFSTEP] substs r₁ r₂ [GOAL] case refine_3.nil.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 u : α head_eq✝ : a = a ⊢ [] ++ u :: t₁ ~ [u] ++ t₂ [PROOFSTEP] exact p.cons u [GOAL] case refine_3.nil.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α u v : α l₂ : List α tail_eq✝¹ : l₂ ++ a :: r₂ = t₂ tail_eq✝ : r₁ = u :: t₁ head_eq✝ : v = a ⊢ [] ++ r₁ ~ u :: v :: l₂ ++ r₂ [PROOFSTEP] substs r₁ v t₂ [GOAL] case refine_3.nil.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ t₁ r₂ : List α u : α l₂ : List α p : t₁ ~ l₂ ++ a :: r₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = l₂ ++ a :: r₂ → x ++ x_2 ~ x_1 ++ x_3 ⊢ [] ++ u :: t₁ ~ u :: a :: l₂ ++ r₂ [PROOFSTEP] exact (p.trans perm_middle).cons u [GOAL] case refine_3.cons.nil.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ y : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α tail_eq✝¹ : r₁ = t₁ tail_eq✝ : r₂ = y :: t₂ head_eq✝ : a = a ⊢ [y] ++ r₁ ~ [] ++ r₂ [PROOFSTEP] substs r₁ r₂ [GOAL] case refine_3.cons.nil.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ y : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 head_eq✝ : a = a ⊢ [y] ++ t₁ ~ [] ++ y :: t₂ [PROOFSTEP] exact p.cons y [GOAL] case refine_3.cons.nil.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ y : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α u : α tail_eq✝¹ : r₁ = t₁ head_eq✝¹ : a = y tail_eq✝ : r₂ = t₂ head_eq✝ : u = a ⊢ [y] ++ r₁ ~ [u] ++ r₂ [PROOFSTEP] substs r₁ r₂ y u [GOAL] case refine_3.cons.nil.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 ⊢ [a] ++ t₁ ~ [a] ++ t₂ [PROOFSTEP] exact p.cons a [GOAL] case refine_3.cons.nil.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ y : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α u v : α l₂ : List α tail_eq✝¹ : r₁ = t₁ head_eq✝¹ : v = y tail_eq✝ : l₂ ++ a :: r₂ = t₂ head_eq✝ : u = a ⊢ [y] ++ r₁ ~ u :: v :: l₂ ++ r₂ [PROOFSTEP] substs r₁ u v t₂ [GOAL] case refine_3.cons.nil.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ y : α t₁ r₂ l₂ : List α p : t₁ ~ l₂ ++ a :: r₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = l₂ ++ a :: r₂ → x ++ x_2 ~ x_1 ++ x_3 ⊢ [y] ++ t₁ ~ a :: y :: l₂ ++ r₂ [PROOFSTEP] exact ((p.trans perm_middle).cons y).trans (swap _ _ _) [GOAL] case refine_3.cons.cons.nil 
α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ y : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α z : α l₁ : List α tail_eq✝¹ : l₁ ++ a :: r₁ = t₁ tail_eq✝ : r₂ = y :: t₂ head_eq✝ : a = z ⊢ y :: z :: l₁ ++ r₁ ~ [] ++ r₂ [PROOFSTEP] substs r₂ z t₁ [GOAL] case refine_3.cons.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ y : α t₂ r₁ l₁ : List α p : l₁ ++ a :: r₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = l₁ ++ a :: r₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 ⊢ y :: a :: l₁ ++ r₁ ~ [] ++ y :: t₂ [PROOFSTEP] exact (perm_middle.symm.trans p).cons y [GOAL] case refine_3.cons.cons.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ y : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α z : α l₁ : List α u : α tail_eq✝¹ : l₁ ++ a :: r₁ = t₁ head_eq✝¹ : a = y tail_eq✝ : r₂ = t₂ head_eq✝ : u = z ⊢ y :: z :: l₁ ++ r₁ ~ [u] ++ r₂ [PROOFSTEP] substs r₂ y z t₁ [GOAL] case refine_3.cons.cons.cons.nil α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ t₂ r₁ l₁ : List α u : α p : l₁ ++ a :: r₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = l₁ ++ a :: r₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 ⊢ a :: u :: l₁ ++ r₁ ~ [u] ++ t₂ [PROOFSTEP] exact (swap _ _ _).trans ((perm_middle.symm.trans p).cons u) [GOAL] case refine_3.cons.cons.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ y : α t₁ t₂ : List α p : t₁ ~ t₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 r₁ r₂ : List α z : α l₁ : List α u v : α l₂ : List α tail_eq✝¹ : l₁ ++ a :: r₁ = t₁ head_eq✝¹ : v = y tail_eq✝ : l₂ ++ a :: r₂ = t₂ head_eq✝ : u = z ⊢ y :: z :: l₁ ++ r₁ ~ u :: v :: l₂ ++ r₂ [PROOFSTEP] substs u v t₁ t₂ [GOAL] case refine_3.cons.cons.cons.cons α : Type uu a : α s₁ s₂ : List α p✝ : s₁ ~ s₂ y : α r₁ r₂ : List α z : α l₁ l₂ : List α p : l₁ ++ a :: r₁ ~ l₂ ++ a :: r₂ IH : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = l₁ ++ a :: r₁ → x_1 ++ a :: x_3 = l₂ ++ a :: r₂ → x ++ x_2 ~ x_1 ++ x_3 ⊢ y :: z :: l₁ ++ r₁ ~ z :: y :: l₂ ++ r₂ [PROOFSTEP] exact (IH _ _ _ _ rfl rfl).swap' _ _ [GOAL] case refine_4 α : Type uu a : α s₁ s₂ : List α p : s₁ ~ s₂ t₁ t₂ t₃ : List α p₁ : t₁ ~ t₂ p₂ : t₂ ~ t₃ IH₁ : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 IH₂ : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₂ → x_1 ++ a :: x_3 = t₃ → x ++ x_2 ~ x_1 ++ x_3 l₁ l₂ r₁ r₂ : List α e₁ : l₁ ++ a :: r₁ = t₁ e₂ : l₂ ++ a :: r₂ = t₃ ⊢ l₁ ++ r₁ ~ l₂ ++ r₂ [PROOFSTEP] substs t₁ t₃ [GOAL] case refine_4 α : Type uu a : α s₁ s₂ : List α p : s₁ ~ s₂ t₂ l₁ l₂ r₁ r₂ : List α p₁ : l₁ ++ a :: r₁ ~ t₂ IH₁ : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = l₁ ++ a :: r₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 p₂ : t₂ ~ l₂ ++ a :: r₂ IH₂ : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₂ → x_1 ++ a :: x_3 = l₂ ++ a :: r₂ → x ++ x_2 ~ x_1 ++ x_3 ⊢ l₁ ++ r₁ ~ l₂ ++ r₂ [PROOFSTEP] have : a ∈ t₂ := p₁.subset (by simp) [GOAL] α : Type uu a : α s₁ s₂ : List α p : s₁ ~ s₂ t₂ l₁ l₂ r₁ r₂ : List α p₁ : l₁ ++ a :: r₁ ~ t₂ IH₁ : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = l₁ ++ a :: r₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 p₂ : t₂ ~ l₂ ++ a :: r₂ IH₂ : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₂ → x_1 ++ a :: x_3 = l₂ ++ a :: r₂ → x ++ x_2 ~ x_1 ++ x_3 ⊢ a ∈ l₁ ++ a :: r₁ [PROOFSTEP] simp [GOAL] case refine_4 α : Type uu a : α s₁ s₂ : List α p : s₁ ~ s₂ t₂ l₁ l₂ r₁ 
r₂ : List α p₁ : l₁ ++ a :: r₁ ~ t₂ IH₁ : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = l₁ ++ a :: r₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 p₂ : t₂ ~ l₂ ++ a :: r₂ IH₂ : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₂ → x_1 ++ a :: x_3 = l₂ ++ a :: r₂ → x ++ x_2 ~ x_1 ++ x_3 this : a ∈ t₂ ⊢ l₁ ++ r₁ ~ l₂ ++ r₂ [PROOFSTEP] rcases mem_split this with ⟨l₂, r₂, e₂⟩ [GOAL] case refine_4.intro.intro α : Type uu a : α s₁ s₂ : List α p : s₁ ~ s₂ t₂ l₁ l₂✝ r₁ r₂✝ : List α p₁ : l₁ ++ a :: r₁ ~ t₂ IH₁ : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = l₁ ++ a :: r₁ → x_1 ++ a :: x_3 = t₂ → x ++ x_2 ~ x_1 ++ x_3 p₂ : t₂ ~ l₂✝ ++ a :: r₂✝ IH₂ : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = t₂ → x_1 ++ a :: x_3 = l₂✝ ++ a :: r₂✝ → x ++ x_2 ~ x_1 ++ x_3 this : a ∈ t₂ l₂ r₂ : List α e₂ : t₂ = l₂ ++ a :: r₂ ⊢ l₁ ++ r₁ ~ l₂✝ ++ r₂✝ [PROOFSTEP] subst t₂ [GOAL] case refine_4.intro.intro α : Type uu a : α s₁ s₂ : List α p : s₁ ~ s₂ l₁ l₂✝ r₁ r₂✝ l₂ r₂ : List α p₁ : l₁ ++ a :: r₁ ~ l₂ ++ a :: r₂ IH₁ : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = l₁ ++ a :: r₁ → x_1 ++ a :: x_3 = l₂ ++ a :: r₂ → x ++ x_2 ~ x_1 ++ x_3 p₂ : l₂ ++ a :: r₂ ~ l₂✝ ++ a :: r₂✝ IH₂ : ∀ (x x_1 x_2 x_3 : List α), x ++ a :: x_2 = l₂ ++ a :: r₂ → x_1 ++ a :: x_3 = l₂✝ ++ a :: r₂✝ → x ++ x_2 ~ x_1 ++ x_3 this : a ∈ l₂ ++ a :: r₂ ⊢ l₁ ++ r₁ ~ l₂✝ ++ r₂✝ [PROOFSTEP] exact (IH₁ _ _ _ _ rfl rfl).trans (IH₂ _ _ _ _ rfl rfl) [GOAL] α : Type uu β : Type vv l₁ l₂ : List α o₁ o₂ : Option α ⊢ Option.toList o₁ ~ Option.toList o₂ ↔ o₁ = o₂ [PROOFSTEP] refine' ⟨fun p => _, fun e => e ▸ Perm.refl _⟩ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α o₁ o₂ : Option α p : Option.toList o₁ ~ Option.toList o₂ ⊢ o₁ = o₂ [PROOFSTEP] cases' o₁ with a [GOAL] case none α : Type uu β : Type vv l₁ l₂ : List α o₂ : Option α p : Option.toList none ~ Option.toList o₂ ⊢ none = o₂ [PROOFSTEP] cases' o₂ with b [GOAL] case some α : Type uu β : Type vv l₁ l₂ : List α o₂ : Option α a : α p : Option.toList (some a) ~ Option.toList o₂ ⊢ some a = o₂ [PROOFSTEP] cases' o₂ with b [GOAL] case none.none α : Type uu β : Type vv l₁ l₂ : List α p : Option.toList none ~ Option.toList none ⊢ none = none [PROOFSTEP] rfl [GOAL] case none.some α : Type uu β : Type vv l₁ l₂ : List α b : α p : Option.toList none ~ Option.toList (some b) ⊢ none = some b [PROOFSTEP] cases p.length_eq [GOAL] case some.none α : Type uu β : Type vv l₁ l₂ : List α a : α p : Option.toList (some a) ~ Option.toList none ⊢ some a = none [PROOFSTEP] cases p.length_eq [GOAL] case some.some α : Type uu β : Type vv l₁ l₂ : List α a b : α p : Option.toList (some a) ~ Option.toList (some b) ⊢ some a = some b [PROOFSTEP] exact Option.mem_toList.1 (p.symm.subset <| by simp) [GOAL] α : Type uu β : Type vv l₁ l₂ : List α a b : α p : Option.toList (some a) ~ Option.toList (some b) ⊢ b ∈ Option.toList (some b) [PROOFSTEP] simp [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₁ l₂ : List α x✝ : a :: l₁ <+~ a :: l₂ l : List α p : l ~ a :: l₁ s : l <+ a :: l₂ ⊢ l₁ <+~ l₂ [PROOFSTEP] cases' s with _ _ _ s' u _ _ s' [GOAL] case cons α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₁ l₂ : List α x✝ : a :: l₁ <+~ a :: l₂ l : List α p : l ~ a :: l₁ s' : l <+ l₂ ⊢ l₁ <+~ l₂ [PROOFSTEP] exact (p.subperm_left.2 <| (sublist_cons _ _).subperm).trans s'.subperm [GOAL] case cons₂ α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₁ l₂ : List α x✝ : a :: l₁ <+~ a :: l₂ u : List α p : a :: u ~ a :: l₁ s' : u <+ l₂ ⊢ l₁ <+~ l₂ [PROOFSTEP] exact ⟨u, p.cons_inv, s'⟩ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₁ l₂ : List α d₁ : Nodup l₁ h₁ : 
¬a ∈ l₁ h₂ : a ∈ l₂ s : l₁ <+~ l₂ ⊢ a :: l₁ <+~ l₂ [PROOFSTEP] rcases s with ⟨l, p, s⟩ [GOAL] case intro.intro α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₁ l₂ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ h₂ : a ∈ l₂ l : List α p : l ~ l₁ s : l <+ l₂ ⊢ a :: l₁ <+~ l₂ [PROOFSTEP] induction s generalizing l₁ [GOAL] case intro.intro.slnil α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₂ l l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ h₂ : a ∈ [] p : [] ~ l₁ ⊢ a :: l₁ <+~ [] case intro.intro.cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α a : α l₂ l l₁✝ l₂✝ : List α a✝¹ : α a✝ : l₁✝ <+ l₂✝ a_ih✝ : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ l₂✝ → l₁✝ ~ l₁ → a :: l₁ <+~ l₂✝ l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ h₂ : a ∈ a✝¹ :: l₂✝ p : l₁✝ ~ l₁ ⊢ a :: l₁ <+~ a✝¹ :: l₂✝ case intro.intro.cons₂ α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α a : α l₂ l l₁✝ l₂✝ : List α a✝¹ : α a✝ : l₁✝ <+ l₂✝ a_ih✝ : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ l₂✝ → l₁✝ ~ l₁ → a :: l₁ <+~ l₂✝ l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ h₂ : a ∈ a✝¹ :: l₂✝ p : a✝¹ :: l₁✝ ~ l₁ ⊢ a :: l₁ <+~ a✝¹ :: l₂✝ [PROOFSTEP] case slnil => cases h₂ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₂ l l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ h₂ : a ∈ [] p : [] ~ l₁ ⊢ a :: l₁ <+~ [] [PROOFSTEP] case slnil => cases h₂ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₂ l l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ h₂ : a ∈ [] p : [] ~ l₁ ⊢ a :: l₁ <+~ [] [PROOFSTEP] cases h₂ [GOAL] case intro.intro.cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α a : α l₂ l l₁✝ l₂✝ : List α a✝¹ : α a✝ : l₁✝ <+ l₂✝ a_ih✝ : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ l₂✝ → l₁✝ ~ l₁ → a :: l₁ <+~ l₂✝ l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ h₂ : a ∈ a✝¹ :: l₂✝ p : l₁✝ ~ l₁ ⊢ a :: l₁ <+~ a✝¹ :: l₂✝ case intro.intro.cons₂ α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α a : α l₂ l l₁✝ l₂✝ : List α a✝¹ : α a✝ : l₁✝ <+ l₂✝ a_ih✝ : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ l₂✝ → l₁✝ ~ l₁ → a :: l₁ <+~ l₂✝ l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ h₂ : a ∈ a✝¹ :: l₂✝ p : a✝¹ :: l₁✝ ~ l₁ ⊢ a :: l₁ <+~ a✝¹ :: l₂✝ [PROOFSTEP] case cons r₁ r₂ b s' ih => simp at h₂ cases' h₂ with e m · subst b exact ⟨a :: r₁, p.cons a, s'.cons₂ _⟩ · rcases ih d₁ h₁ m p with ⟨t, p', s'⟩ exact ⟨t, p', s'.cons _⟩ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₂ l r₁ r₂ : List α b : α s' : r₁ <+ r₂ ih : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ r₂ → r₁ ~ l₁ → a :: l₁ <+~ r₂ l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ h₂ : a ∈ b :: r₂ p : r₁ ~ l₁ ⊢ a :: l₁ <+~ b :: r₂ [PROOFSTEP] case cons r₁ r₂ b s' ih => simp at h₂ cases' h₂ with e m · subst b exact ⟨a :: r₁, p.cons a, s'.cons₂ _⟩ · rcases ih d₁ h₁ m p with ⟨t, p', s'⟩ exact ⟨t, p', s'.cons _⟩ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₂ l r₁ r₂ : List α b : α s' : r₁ <+ r₂ ih : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ r₂ → r₁ ~ l₁ → a :: l₁ <+~ r₂ l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ h₂ : a ∈ b :: r₂ p : r₁ ~ l₁ ⊢ a :: l₁ <+~ b :: r₂ [PROOFSTEP] simp at h₂ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₂ l r₁ r₂ : List α b : α s' : r₁ <+ r₂ ih : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ r₂ → r₁ ~ l₁ → a :: l₁ <+~ r₂ l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ p : r₁ ~ l₁ h₂ : a = b ∨ a ∈ r₂ ⊢ a :: l₁ <+~ b :: r₂ [PROOFSTEP] cases' h₂ with e m [GOAL] case inl α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₂ l r₁ r₂ : List α b : α s' : r₁ <+ r₂ ih : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ r₂ → r₁ ~ l₁ → a :: l₁ <+~ r₂ l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ p : r₁ ~ l₁ e : a = b ⊢ a :: l₁ <+~ b :: r₂ [PROOFSTEP] subst b [GOAL] case inl α : Type 
uu β : Type vv l₁✝ l₂✝ : List α a : α l₂ l r₁ r₂ : List α s' : r₁ <+ r₂ ih : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ r₂ → r₁ ~ l₁ → a :: l₁ <+~ r₂ l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ p : r₁ ~ l₁ ⊢ a :: l₁ <+~ a :: r₂ [PROOFSTEP] exact ⟨a :: r₁, p.cons a, s'.cons₂ _⟩ [GOAL] case inr α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₂ l r₁ r₂ : List α b : α s' : r₁ <+ r₂ ih : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ r₂ → r₁ ~ l₁ → a :: l₁ <+~ r₂ l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ p : r₁ ~ l₁ m : a ∈ r₂ ⊢ a :: l₁ <+~ b :: r₂ [PROOFSTEP] rcases ih d₁ h₁ m p with ⟨t, p', s'⟩ [GOAL] case inr.intro.intro α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₂ l r₁ r₂ : List α b : α s'✝ : r₁ <+ r₂ ih : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ r₂ → r₁ ~ l₁ → a :: l₁ <+~ r₂ l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ p : r₁ ~ l₁ m : a ∈ r₂ t : List α p' : t ~ a :: l₁ s' : t <+ r₂ ⊢ a :: l₁ <+~ b :: r₂ [PROOFSTEP] exact ⟨t, p', s'.cons _⟩ [GOAL] case intro.intro.cons₂ α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α a : α l₂ l l₁✝ l₂✝ : List α a✝¹ : α a✝ : l₁✝ <+ l₂✝ a_ih✝ : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ l₂✝ → l₁✝ ~ l₁ → a :: l₁ <+~ l₂✝ l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ h₂ : a ∈ a✝¹ :: l₂✝ p : a✝¹ :: l₁✝ ~ l₁ ⊢ a :: l₁ <+~ a✝¹ :: l₂✝ [PROOFSTEP] case cons₂ r₁ r₂ b _ ih => have bm : b ∈ l₁ := p.subset <| mem_cons_self _ _ have am : a ∈ r₂ := by simp only [find?, mem_cons] at h₂ exact h₂.resolve_left fun e => h₁ <| e.symm ▸ bm rcases mem_split bm with ⟨t₁, t₂, rfl⟩ have st : t₁ ++ t₂ <+ t₁ ++ b :: t₂ := by simp rcases ih (d₁.sublist st) (mt (fun x => st.subset x) h₁) am (Perm.cons_inv <| p.trans perm_middle) with ⟨t, p', s'⟩ exact ⟨b :: t, (p'.cons b).trans <| (swap _ _ _).trans (perm_middle.symm.cons a), s'.cons₂ _⟩ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₂ l r₁ r₂ : List α b : α a✝ : r₁ <+ r₂ ih : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ r₂ → r₁ ~ l₁ → a :: l₁ <+~ r₂ l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ h₂ : a ∈ b :: r₂ p : b :: r₁ ~ l₁ ⊢ a :: l₁ <+~ b :: r₂ [PROOFSTEP] case cons₂ r₁ r₂ b _ ih => have bm : b ∈ l₁ := p.subset <| mem_cons_self _ _ have am : a ∈ r₂ := by simp only [find?, mem_cons] at h₂ exact h₂.resolve_left fun e => h₁ <| e.symm ▸ bm rcases mem_split bm with ⟨t₁, t₂, rfl⟩ have st : t₁ ++ t₂ <+ t₁ ++ b :: t₂ := by simp rcases ih (d₁.sublist st) (mt (fun x => st.subset x) h₁) am (Perm.cons_inv <| p.trans perm_middle) with ⟨t, p', s'⟩ exact ⟨b :: t, (p'.cons b).trans <| (swap _ _ _).trans (perm_middle.symm.cons a), s'.cons₂ _⟩ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₂ l r₁ r₂ : List α b : α a✝ : r₁ <+ r₂ ih : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ r₂ → r₁ ~ l₁ → a :: l₁ <+~ r₂ l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ h₂ : a ∈ b :: r₂ p : b :: r₁ ~ l₁ ⊢ a :: l₁ <+~ b :: r₂ [PROOFSTEP] have bm : b ∈ l₁ := p.subset <| mem_cons_self _ _ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₂ l r₁ r₂ : List α b : α a✝ : r₁ <+ r₂ ih : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ r₂ → r₁ ~ l₁ → a :: l₁ <+~ r₂ l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ h₂ : a ∈ b :: r₂ p : b :: r₁ ~ l₁ bm : b ∈ l₁ ⊢ a :: l₁ <+~ b :: r₂ [PROOFSTEP] have am : a ∈ r₂ := by simp only [find?, mem_cons] at h₂ exact h₂.resolve_left fun e => h₁ <| e.symm ▸ bm [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₂ l r₁ r₂ : List α b : α a✝ : r₁ <+ r₂ ih : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ r₂ → r₁ ~ l₁ → a :: l₁ <+~ r₂ l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ h₂ : a ∈ b :: r₂ p : b :: r₁ ~ l₁ bm : b ∈ l₁ ⊢ a ∈ r₂ [PROOFSTEP] simp only [find?, mem_cons] at h₂ [GOAL] α : Type uu 
β : Type vv l₁✝ l₂✝ : List α a : α l₂ l r₁ r₂ : List α b : α a✝ : r₁ <+ r₂ ih : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ r₂ → r₁ ~ l₁ → a :: l₁ <+~ r₂ l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ p : b :: r₁ ~ l₁ bm : b ∈ l₁ h₂ : a = b ∨ a ∈ r₂ ⊢ a ∈ r₂ [PROOFSTEP] exact h₂.resolve_left fun e => h₁ <| e.symm ▸ bm [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α a : α l₂ l r₁ r₂ : List α b : α a✝ : r₁ <+ r₂ ih : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ r₂ → r₁ ~ l₁ → a :: l₁ <+~ r₂ l₁ : List α d₁ : Nodup l₁ h₁ : ¬a ∈ l₁ h₂ : a ∈ b :: r₂ p : b :: r₁ ~ l₁ bm : b ∈ l₁ am : a ∈ r₂ ⊢ a :: l₁ <+~ b :: r₂ [PROOFSTEP] rcases mem_split bm with ⟨t₁, t₂, rfl⟩ [GOAL] case intro.intro α : Type uu β : Type vv l₁ l₂✝ : List α a : α l₂ l r₁ r₂ : List α b : α a✝ : r₁ <+ r₂ ih : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ r₂ → r₁ ~ l₁ → a :: l₁ <+~ r₂ h₂ : a ∈ b :: r₂ am : a ∈ r₂ t₁ t₂ : List α d₁ : Nodup (t₁ ++ b :: t₂) h₁ : ¬a ∈ t₁ ++ b :: t₂ p : b :: r₁ ~ t₁ ++ b :: t₂ bm : b ∈ t₁ ++ b :: t₂ ⊢ a :: (t₁ ++ b :: t₂) <+~ b :: r₂ [PROOFSTEP] have st : t₁ ++ t₂ <+ t₁ ++ b :: t₂ := by simp [GOAL] α : Type uu β : Type vv l₁ l₂✝ : List α a : α l₂ l r₁ r₂ : List α b : α a✝ : r₁ <+ r₂ ih : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ r₂ → r₁ ~ l₁ → a :: l₁ <+~ r₂ h₂ : a ∈ b :: r₂ am : a ∈ r₂ t₁ t₂ : List α d₁ : Nodup (t₁ ++ b :: t₂) h₁ : ¬a ∈ t₁ ++ b :: t₂ p : b :: r₁ ~ t₁ ++ b :: t₂ bm : b ∈ t₁ ++ b :: t₂ ⊢ t₁ ++ t₂ <+ t₁ ++ b :: t₂ [PROOFSTEP] simp [GOAL] case intro.intro α : Type uu β : Type vv l₁ l₂✝ : List α a : α l₂ l r₁ r₂ : List α b : α a✝ : r₁ <+ r₂ ih : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ r₂ → r₁ ~ l₁ → a :: l₁ <+~ r₂ h₂ : a ∈ b :: r₂ am : a ∈ r₂ t₁ t₂ : List α d₁ : Nodup (t₁ ++ b :: t₂) h₁ : ¬a ∈ t₁ ++ b :: t₂ p : b :: r₁ ~ t₁ ++ b :: t₂ bm : b ∈ t₁ ++ b :: t₂ st : t₁ ++ t₂ <+ t₁ ++ b :: t₂ ⊢ a :: (t₁ ++ b :: t₂) <+~ b :: r₂ [PROOFSTEP] rcases ih (d₁.sublist st) (mt (fun x => st.subset x) h₁) am (Perm.cons_inv <| p.trans perm_middle) with ⟨t, p', s'⟩ [GOAL] case intro.intro.intro.intro α : Type uu β : Type vv l₁ l₂✝ : List α a : α l₂ l r₁ r₂ : List α b : α a✝ : r₁ <+ r₂ ih : ∀ {l₁ : List α}, Nodup l₁ → ¬a ∈ l₁ → a ∈ r₂ → r₁ ~ l₁ → a :: l₁ <+~ r₂ h₂ : a ∈ b :: r₂ am : a ∈ r₂ t₁ t₂ : List α d₁ : Nodup (t₁ ++ b :: t₂) h₁ : ¬a ∈ t₁ ++ b :: t₂ p : b :: r₁ ~ t₁ ++ b :: t₂ bm : b ∈ t₁ ++ b :: t₂ st : t₁ ++ t₂ <+ t₁ ++ b :: t₂ t : List α p' : t ~ a :: (t₁ ++ t₂) s' : t <+ r₂ ⊢ a :: (t₁ ++ b :: t₂) <+~ b :: r₂ [PROOFSTEP] exact ⟨b :: t, (p'.cons b).trans <| (swap _ _ _).trans (perm_middle.symm.cons a), s'.cons₂ _⟩ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ l₁ l₂ l : List α p : l ~ l₁ s : l <+ l₂ h : length l₁ < length l₂ ⊢ ∃ a, a :: l₁ <+~ l₂ [PROOFSTEP] suffices length l < length l₂ → ∃ a : α, a :: l <+~ l₂ from (this <| p.symm.length_eq ▸ h).imp fun a => (p.cons a).subperm_right.1 [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ l₁ l₂ l : List α p : l ~ l₁ s : l <+ l₂ h : length l₁ < length l₂ ⊢ length l < length l₂ → ∃ a, a :: l <+~ l₂ [PROOFSTEP] clear h p l₁ [GOAL] α : Type uu β : Type vv l₁ l₂✝ l₂ l : List α s : l <+ l₂ ⊢ length l < length l₂ → ∃ a, a :: l <+~ l₂ [PROOFSTEP] induction' s with l₁ l₂ a s IH _ _ b _ IH [GOAL] case slnil α : Type uu β : Type vv l₁ l₂✝ l₂ l : List α ⊢ length [] < length [] → ∃ a, [a] <+~ [] [PROOFSTEP] intro h [GOAL] case cons α : Type uu β : Type vv l₁✝ l₂✝¹ l₂✝ l l₁ l₂ : List α a : α s : l₁ <+ l₂ IH : length l₁ < length l₂ → ∃ a, a :: l₁ <+~ l₂ ⊢ length l₁ < length (a :: l₂) → ∃ a_2, a_2 :: l₁ <+~ a :: l₂ [PROOFSTEP] intro h [GOAL] case cons₂ α : Type uu β : Type vv l₁ l₂✝¹ l₂ l l₁✝ l₂✝ : List 
α b : α a✝ : l₁✝ <+ l₂✝ IH : length l₁✝ < length l₂✝ → ∃ a, a :: l₁✝ <+~ l₂✝ ⊢ length (b :: l₁✝) < length (b :: l₂✝) → ∃ a, a :: b :: l₁✝ <+~ b :: l₂✝ [PROOFSTEP] intro h [GOAL] case slnil α : Type uu β : Type vv l₁ l₂✝ l₂ l : List α h : length [] < length [] ⊢ ∃ a, [a] <+~ [] [PROOFSTEP] cases h [GOAL] case cons α : Type uu β : Type vv l₁✝ l₂✝¹ l₂✝ l l₁ l₂ : List α a : α s : l₁ <+ l₂ IH : length l₁ < length l₂ → ∃ a, a :: l₁ <+~ l₂ h : length l₁ < length (a :: l₂) ⊢ ∃ a_1, a_1 :: l₁ <+~ a :: l₂ [PROOFSTEP] cases' lt_or_eq_of_le (Nat.le_of_lt_succ h : length l₁ ≤ length l₂) with h h [GOAL] case cons.inl α : Type uu β : Type vv l₁✝ l₂✝¹ l₂✝ l l₁ l₂ : List α a : α s : l₁ <+ l₂ IH : length l₁ < length l₂ → ∃ a, a :: l₁ <+~ l₂ h✝ : length l₁ < length (a :: l₂) h : length l₁ < length l₂ ⊢ ∃ a_1, a_1 :: l₁ <+~ a :: l₂ [PROOFSTEP] exact (IH h).imp fun a s => s.trans (sublist_cons _ _).subperm [GOAL] case cons.inr α : Type uu β : Type vv l₁✝ l₂✝¹ l₂✝ l l₁ l₂ : List α a : α s : l₁ <+ l₂ IH : length l₁ < length l₂ → ∃ a, a :: l₁ <+~ l₂ h✝ : length l₁ < length (a :: l₂) h : length l₁ = length l₂ ⊢ ∃ a_1, a_1 :: l₁ <+~ a :: l₂ [PROOFSTEP] exact ⟨a, s.eq_of_length h ▸ Subperm.refl _⟩ [GOAL] case cons₂ α : Type uu β : Type vv l₁ l₂✝¹ l₂ l l₁✝ l₂✝ : List α b : α a✝ : l₁✝ <+ l₂✝ IH : length l₁✝ < length l₂✝ → ∃ a, a :: l₁✝ <+~ l₂✝ h : length (b :: l₁✝) < length (b :: l₂✝) ⊢ ∃ a, a :: b :: l₁✝ <+~ b :: l₂✝ [PROOFSTEP] exact (IH <| Nat.lt_of_succ_lt_succ h).imp fun a s => (swap _ _ _).subperm_right.1 <| (subperm_cons _).2 s [GOAL] α : Type uu β : Type vv l₁ l₂ : List α d : Nodup l₁ H : l₁ ⊆ l₂ ⊢ l₁ <+~ l₂ [PROOFSTEP] induction' d with a l₁' h d IH [GOAL] case nil α : Type uu β : Type vv l₁ l₂ : List α H✝ : l₁ ⊆ l₂ H : [] ⊆ l₂ ⊢ [] <+~ l₂ [PROOFSTEP] exact ⟨nil, Perm.nil, nil_sublist _⟩ [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α H✝ : l₁ ⊆ l₂ a : α l₁' : List α h : ∀ (a' : α), a' ∈ l₁' → a ≠ a' d : Pairwise (fun x x_1 => x ≠ x_1) l₁' IH : l₁' ⊆ l₂ → l₁' <+~ l₂ H : a :: l₁' ⊆ l₂ ⊢ a :: l₁' <+~ l₂ [PROOFSTEP] cases' forall_mem_cons.1 H with H₁ H₂ [GOAL] case cons.intro α : Type uu β : Type vv l₁ l₂ : List α H✝ : l₁ ⊆ l₂ a : α l₁' : List α h : ∀ (a' : α), a' ∈ l₁' → a ≠ a' d : Pairwise (fun x x_1 => x ≠ x_1) l₁' IH : l₁' ⊆ l₂ → l₁' <+~ l₂ H : a :: l₁' ⊆ l₂ H₁ : a ∈ l₂ H₂ : ∀ (x : α), x ∈ l₁' → x ∈ l₂ ⊢ a :: l₁' <+~ l₂ [PROOFSTEP] simp at h [GOAL] case cons.intro α : Type uu β : Type vv l₁ l₂ : List α H✝ : l₁ ⊆ l₂ a : α l₁' : List α d : Pairwise (fun x x_1 => x ≠ x_1) l₁' IH : l₁' ⊆ l₂ → l₁' <+~ l₂ H : a :: l₁' ⊆ l₂ H₁ : a ∈ l₂ H₂ : ∀ (x : α), x ∈ l₁' → x ∈ l₂ h : ¬a ∈ l₁' ⊢ a :: l₁' <+~ l₂ [PROOFSTEP] exact cons_subperm_of_mem d h H₁ (IH H₂) [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ l₁ l₂ l : List α d : Nodup l s₁ : l₁ <+ l s₂ : l₂ <+ l h : l₁ ~ l₂ ⊢ l₁ = l₂ [PROOFSTEP] induction' s₂ with l₂ l a s₂ IH l₂ l a _ IH generalizing l₁ [GOAL] case slnil α : Type uu β : Type vv l₁✝¹ l₂✝ l₁✝ l₂ l : List α d✝ : Nodup l s₁✝ : l₁✝ <+ l h✝ : l₁✝ ~ l₂ l₁ : List α d : Nodup [] s₁ : l₁ <+ [] h : l₁ ~ [] ⊢ l₁ = [] [PROOFSTEP] exact h.eq_nil [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ l₁✝ l₂✝ l✝ : List α d✝ : Nodup l✝ s₁✝ : l₁✝ <+ l✝ h✝ : l₁✝ ~ l₂✝ l₂ l : List α a : α s₂ : l₂ <+ l IH : ∀ {l₁ : List α}, Nodup l → l₁ <+ l → l₁ ~ l₂ → l₁ = l₂ l₁ : List α d : Nodup (a :: l) s₁ : l₁ <+ a :: l h : l₁ ~ l₂ ⊢ l₁ = l₂ [PROOFSTEP] simp at d [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ l₁✝ l₂✝ l✝ : List α d✝ : Nodup l✝ s₁✝ : l₁✝ <+ l✝ h✝ : l₁✝ ~ l₂✝ l₂ l : List α a : α s₂ : l₂ <+ l IH : ∀ {l₁ : List α}, Nodup l → l₁ 
<+ l → l₁ ~ l₂ → l₁ = l₂ l₁ : List α s₁ : l₁ <+ a :: l h : l₁ ~ l₂ d : ¬a ∈ l ∧ Nodup l ⊢ l₁ = l₂ [PROOFSTEP] cases' s₁ with _ _ _ s₁ l₁ _ _ s₁ [GOAL] case cons.cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ l₁✝ l₂✝ l✝ : List α d✝ : Nodup l✝ s₁✝ : l₁✝ <+ l✝ h✝ : l₁✝ ~ l₂✝ l₂ l : List α a : α s₂ : l₂ <+ l IH : ∀ {l₁ : List α}, Nodup l → l₁ <+ l → l₁ ~ l₂ → l₁ = l₂ l₁ : List α h : l₁ ~ l₂ d : ¬a ∈ l ∧ Nodup l s₁ : l₁ <+ l ⊢ l₁ = l₂ [PROOFSTEP] exact IH d.2 s₁ h [GOAL] case cons.cons₂ α : Type uu β : Type vv l₁✝¹ l₂✝¹ l₁✝ l₂✝ l✝ : List α d✝ : Nodup l✝ s₁✝ : l₁✝ <+ l✝ h✝ : l₁✝ ~ l₂✝ l₂ l : List α a : α s₂ : l₂ <+ l IH : ∀ {l₁ : List α}, Nodup l → l₁ <+ l → l₁ ~ l₂ → l₁ = l₂ d : ¬a ∈ l ∧ Nodup l l₁ : List α h : a :: l₁ ~ l₂ s₁ : l₁ <+ l ⊢ a :: l₁ = l₂ [PROOFSTEP] apply d.1.elim [GOAL] case cons.cons₂ α : Type uu β : Type vv l₁✝¹ l₂✝¹ l₁✝ l₂✝ l✝ : List α d✝ : Nodup l✝ s₁✝ : l₁✝ <+ l✝ h✝ : l₁✝ ~ l₂✝ l₂ l : List α a : α s₂ : l₂ <+ l IH : ∀ {l₁ : List α}, Nodup l → l₁ <+ l → l₁ ~ l₂ → l₁ = l₂ d : ¬a ∈ l ∧ Nodup l l₁ : List α h : a :: l₁ ~ l₂ s₁ : l₁ <+ l ⊢ a ∈ l [PROOFSTEP] exact Subperm.subset ⟨_, h.symm, s₂⟩ (mem_cons_self _ _) [GOAL] case cons₂ α : Type uu β : Type vv l₁✝¹ l₂✝¹ l₁✝ l₂✝ l✝ : List α d✝ : Nodup l✝ s₁✝ : l₁✝ <+ l✝ h✝ : l₁✝ ~ l₂✝ l₂ l : List α a : α a✝ : l₂ <+ l IH : ∀ {l₁ : List α}, Nodup l → l₁ <+ l → l₁ ~ l₂ → l₁ = l₂ l₁ : List α d : Nodup (a :: l) s₁ : l₁ <+ a :: l h : l₁ ~ a :: l₂ ⊢ l₁ = a :: l₂ [PROOFSTEP] simp at d [GOAL] case cons₂ α : Type uu β : Type vv l₁✝¹ l₂✝¹ l₁✝ l₂✝ l✝ : List α d✝ : Nodup l✝ s₁✝ : l₁✝ <+ l✝ h✝ : l₁✝ ~ l₂✝ l₂ l : List α a : α a✝ : l₂ <+ l IH : ∀ {l₁ : List α}, Nodup l → l₁ <+ l → l₁ ~ l₂ → l₁ = l₂ l₁ : List α s₁ : l₁ <+ a :: l h : l₁ ~ a :: l₂ d : ¬a ∈ l ∧ Nodup l ⊢ l₁ = a :: l₂ [PROOFSTEP] cases' s₁ with _ _ _ s₁ l₁ _ _ s₁ [GOAL] case cons₂.cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ l₁✝ l₂✝ l✝ : List α d✝ : Nodup l✝ s₁✝ : l₁✝ <+ l✝ h✝ : l₁✝ ~ l₂✝ l₂ l : List α a : α a✝ : l₂ <+ l IH : ∀ {l₁ : List α}, Nodup l → l₁ <+ l → l₁ ~ l₂ → l₁ = l₂ l₁ : List α h : l₁ ~ a :: l₂ d : ¬a ∈ l ∧ Nodup l s₁ : l₁ <+ l ⊢ l₁ = a :: l₂ [PROOFSTEP] apply d.1.elim [GOAL] case cons₂.cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ l₁✝ l₂✝ l✝ : List α d✝ : Nodup l✝ s₁✝ : l₁✝ <+ l✝ h✝ : l₁✝ ~ l₂✝ l₂ l : List α a : α a✝ : l₂ <+ l IH : ∀ {l₁ : List α}, Nodup l → l₁ <+ l → l₁ ~ l₂ → l₁ = l₂ l₁ : List α h : l₁ ~ a :: l₂ d : ¬a ∈ l ∧ Nodup l s₁ : l₁ <+ l ⊢ a ∈ l [PROOFSTEP] exact Subperm.subset ⟨_, h, s₁⟩ (mem_cons_self _ _) [GOAL] case cons₂.cons₂ α : Type uu β : Type vv l₁✝¹ l₂✝¹ l₁✝ l₂✝ l✝ : List α d✝ : Nodup l✝ s₁✝ : l₁✝ <+ l✝ h✝ : l₁✝ ~ l₂✝ l₂ l : List α a : α a✝ : l₂ <+ l IH : ∀ {l₁ : List α}, Nodup l → l₁ <+ l → l₁ ~ l₂ → l₁ = l₂ d : ¬a ∈ l ∧ Nodup l l₁ : List α h : a :: l₁ ~ a :: l₂ s₁ : l₁ <+ l ⊢ a :: l₁ = a :: l₂ [PROOFSTEP] rw [IH d.2 s₁ h.cons_inv] [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ l₁ l₂ l : List α d : Nodup l s₁ : l₁ <+ l s₂ : l₂ <+ l h : l₁ = l₂ ⊢ l₁ ~ l₂ [PROOFSTEP] rw [h] [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α a : α l₁ l₂ : List α p : l₁ ~ l₂ h₁ : ¬a ∈ l₁ ⊢ List.erase l₁ a ~ List.erase l₂ a [PROOFSTEP] have h₂ : a ∉ l₂ := mt p.mem_iff.2 h₁ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α a : α l₁ l₂ : List α p : l₁ ~ l₂ h₁ : ¬a ∈ l₁ h₂ : ¬a ∈ l₂ ⊢ List.erase l₁ a ~ List.erase l₂ a [PROOFSTEP] rw [erase_of_not_mem h₁, erase_of_not_mem h₂] [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α a : α l₁ l₂ : List α p : l₁ ~ l₂ h₁ : ¬a ∈ l₁ h₂ : ¬a ∈ l₂ ⊢ l₁ ~ l₂ [PROOFSTEP] exact p [GOAL] α : Type uu β : Type vv l₁ l₂ : 
List α inst✝ : DecidableEq α a : α l : List α ⊢ l <+~ a :: List.erase l a [PROOFSTEP] by_cases h : a ∈ l [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α a : α l : List α h : a ∈ l ⊢ l <+~ a :: List.erase l a [PROOFSTEP] exact (perm_cons_erase h).subperm [GOAL] case neg α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α a : α l : List α h : ¬a ∈ l ⊢ l <+~ a :: List.erase l a [PROOFSTEP] rw [erase_of_not_mem h] [GOAL] case neg α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α a : α l : List α h : ¬a ∈ l ⊢ l <+~ a :: l [PROOFSTEP] exact (sublist_cons _ _).subperm [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ t : List α h : l₁ ~ l₂ ⊢ List.diff l₁ t ~ List.diff l₂ t [PROOFSTEP] induction t generalizing l₁ l₂ h [GOAL] case nil α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α h : l₁ ~ l₂ ⊢ List.diff l₁ [] ~ List.diff l₂ [] [PROOFSTEP] simp [*, Perm.erase] [GOAL] case cons α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α head✝ : α tail✝ : List α tail_ih✝ : ∀ {l₁ l₂ : List α}, l₁ ~ l₂ → List.diff l₁ tail✝ ~ List.diff l₂ tail✝ l₁ l₂ : List α h : l₁ ~ l₂ ⊢ List.diff l₁ (head✝ :: tail✝) ~ List.diff l₂ (head✝ :: tail✝) [PROOFSTEP] simp [*, Perm.erase] [GOAL] α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α l t₁ t₂ : List α h : t₁ ~ t₂ ⊢ List.diff l t₁ = List.diff l t₂ [PROOFSTEP] induction h generalizing l [GOAL] case nil α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ l : List α ⊢ List.diff l [] = List.diff l [] [PROOFSTEP] first | simp [*, Perm.erase, erase_comm] [GOAL] case nil α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ l : List α ⊢ List.diff l [] = List.diff l [] [PROOFSTEP] simp [*, Perm.erase, erase_comm] [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α x✝ : α l₁✝ l₂✝ : List α a✝ : l₁✝ ~ l₂✝ a_ih✝ : ∀ (l : List α), List.diff l l₁✝ = List.diff l l₂✝ l : List α ⊢ List.diff l (x✝ :: l₁✝) = List.diff l (x✝ :: l₂✝) [PROOFSTEP] first | simp [*, Perm.erase, erase_comm] [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α x✝ : α l₁✝ l₂✝ : List α a✝ : l₁✝ ~ l₂✝ a_ih✝ : ∀ (l : List α), List.diff l l₁✝ = List.diff l l₂✝ l : List α ⊢ List.diff l (x✝ :: l₁✝) = List.diff l (x✝ :: l₂✝) [PROOFSTEP] simp [*, Perm.erase, erase_comm] [GOAL] case swap α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α x✝ y✝ : α l✝ l : List α ⊢ List.diff l (y✝ :: x✝ :: l✝) = List.diff l (x✝ :: y✝ :: l✝) [PROOFSTEP] first | simp [*, Perm.erase, erase_comm] [GOAL] case swap α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α x✝ y✝ : α l✝ l : List α ⊢ List.diff l (y✝ :: x✝ :: l✝) = List.diff l (x✝ :: y✝ :: l✝) [PROOFSTEP] simp [*, Perm.erase, erase_comm] [GOAL] case trans α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ l₁✝ l₂✝ l₃✝ : List α a✝¹ : l₁✝ ~ l₂✝ a✝ : l₂✝ ~ l₃✝ a_ih✝¹ : ∀ (l : List α), List.diff l l₁✝ = List.diff l l₂✝ a_ih✝ : ∀ (l : List α), List.diff l l₂✝ = List.diff l l₃✝ l : List α ⊢ List.diff l l₁✝ = List.diff l l₃✝ [PROOFSTEP] first | simp [*, Perm.erase, erase_comm] [GOAL] case trans α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ l₁✝ l₂✝ l₃✝ : List α a✝¹ : l₁✝ ~ l₂✝ a✝ : l₂✝ ~ l₃✝ a_ih✝¹ : ∀ (l : List α), List.diff l l₁✝ = List.diff l l₂✝ a_ih✝ : ∀ (l : List α), List.diff l l₂✝ = List.diff l l₃✝ l : List α ⊢ List.diff l l₁✝ = List.diff l l₃✝ [PROOFSTEP] simp [*, Perm.erase, 
erase_comm] [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α h : l₁ <+~ l₂ t : List α ⊢ List.diff l₁ t <+~ List.diff l₂ t [PROOFSTEP] induction t generalizing l₁ l₂ h [GOAL] case nil α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α h : l₁ <+~ l₂ ⊢ List.diff l₁ [] <+~ List.diff l₂ [] [PROOFSTEP] simp [*, Subperm.erase] [GOAL] case cons α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α head✝ : α tail✝ : List α tail_ih✝ : ∀ {l₁ l₂ : List α}, l₁ <+~ l₂ → List.diff l₁ tail✝ <+~ List.diff l₂ tail✝ l₁ l₂ : List α h : l₁ <+~ l₂ ⊢ List.diff l₁ (head✝ :: tail✝) <+~ List.diff l₂ (head✝ :: tail✝) [PROOFSTEP] simp [*, Subperm.erase] [GOAL] α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α a b : α l : List α ⊢ List.erase (a :: l) b <+~ a :: List.erase l b [PROOFSTEP] by_cases h : a = b [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α a b : α l : List α h : a = b ⊢ List.erase (a :: l) b <+~ a :: List.erase l b [PROOFSTEP] subst b [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α a : α l : List α ⊢ List.erase (a :: l) a <+~ a :: List.erase l a [PROOFSTEP] rw [erase_cons_head] [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α a : α l : List α ⊢ l <+~ a :: List.erase l a [PROOFSTEP] apply subperm_cons_erase [GOAL] case neg α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α a b : α l : List α h : ¬a = b ⊢ List.erase (a :: l) b <+~ a :: List.erase l b [PROOFSTEP] rw [erase_cons_tail _ h] [GOAL] α : Type uu β : Type vv l₁✝ l₂ : List α inst✝ : DecidableEq α a : α l₁ : List α ⊢ ∃ x, a :: l₁ <+ a :: List.diff l₁ [] [PROOFSTEP] simp [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α a : α l₁ : List α b : α l₂ : List α ⊢ List.diff (a :: l₁) (b :: l₂) <+~ a :: List.diff l₁ (b :: l₂) [PROOFSTEP] simp only [diff_cons] [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α a : α l₁ : List α b : α l₂ : List α ⊢ List.diff (List.erase (a :: l₁) b) l₂ <+~ a :: List.diff (List.erase l₁ b) l₂ [PROOFSTEP] refine' ((erase_cons_subperm_cons_erase a b l₁).diff_right l₂).trans _ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α a : α l₁ : List α b : α l₂ : List α ⊢ List.diff (a :: List.erase l₁ b) l₂ <+~ a :: List.diff (List.erase l₁ b) l₂ [PROOFSTEP] apply subperm_cons_diff [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ t : List α h : l₁ ~ l₂ ⊢ List.bagInter l₁ t ~ List.bagInter l₂ t [PROOFSTEP] induction' h with x _ _ _ _ x y _ _ _ _ _ _ ih_1 ih_2 generalizing t [GOAL] case nil α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ t✝ t : List α ⊢ List.bagInter [] t ~ List.bagInter [] t [PROOFSTEP] simp [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂ t✝ : List α x : α l₁✝ l₂✝ : List α a✝ : l₁✝ ~ l₂✝ a_ih✝ : ∀ (t : List α), List.bagInter l₁✝ t ~ List.bagInter l₂✝ t t : List α ⊢ List.bagInter (x :: l₁✝) t ~ List.bagInter (x :: l₂✝) t [PROOFSTEP] by_cases x ∈ t [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂ t✝ : List α x : α l₁✝ l₂✝ : List α a✝ : l₁✝ ~ l₂✝ a_ih✝ : ∀ (t : List α), List.bagInter l₁✝ t ~ List.bagInter l₂✝ t t : List α ⊢ List.bagInter (x :: l₁✝) t ~ List.bagInter (x :: l₂✝) t [PROOFSTEP] by_cases x ∈ t [GOAL] case pos α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂ t✝ : List α x : α l₁✝ l₂✝ : List α a✝ : l₁✝ ~ l₂✝ a_ih✝ : ∀ (t : List α), 
List.bagInter l₁✝ t ~ List.bagInter l₂✝ t t : List α h : x ∈ t ⊢ List.bagInter (x :: l₁✝) t ~ List.bagInter (x :: l₂✝) t [PROOFSTEP] simp [*, Perm.cons] [GOAL] case neg α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂ t✝ : List α x : α l₁✝ l₂✝ : List α a✝ : l₁✝ ~ l₂✝ a_ih✝ : ∀ (t : List α), List.bagInter l₁✝ t ~ List.bagInter l₂✝ t t : List α h : ¬x ∈ t ⊢ List.bagInter (x :: l₁✝) t ~ List.bagInter (x :: l₂✝) t [PROOFSTEP] simp [*, Perm.cons] [GOAL] case swap α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ t✝ : List α x y : α l✝ t : List α ⊢ List.bagInter (y :: x :: l✝) t ~ List.bagInter (x :: y :: l✝) t [PROOFSTEP] by_cases h : x = y [GOAL] case pos α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ t✝ : List α x y : α l✝ t : List α h : x = y ⊢ List.bagInter (y :: x :: l✝) t ~ List.bagInter (x :: y :: l✝) t [PROOFSTEP] simp [h] [GOAL] case neg α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ t✝ : List α x y : α l✝ t : List α h : ¬x = y ⊢ List.bagInter (y :: x :: l✝) t ~ List.bagInter (x :: y :: l✝) t [PROOFSTEP] by_cases xt : x ∈ t [GOAL] case pos α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ t✝ : List α x y : α l✝ t : List α h : ¬x = y xt : x ∈ t ⊢ List.bagInter (y :: x :: l✝) t ~ List.bagInter (x :: y :: l✝) t [PROOFSTEP] by_cases yt : y ∈ t [GOAL] case neg α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ t✝ : List α x y : α l✝ t : List α h : ¬x = y xt : ¬x ∈ t ⊢ List.bagInter (y :: x :: l✝) t ~ List.bagInter (x :: y :: l✝) t [PROOFSTEP] by_cases yt : y ∈ t [GOAL] case pos α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ t✝ : List α x y : α l✝ t : List α h : ¬x = y xt : x ∈ t yt : y ∈ t ⊢ List.bagInter (y :: x :: l✝) t ~ List.bagInter (x :: y :: l✝) t [PROOFSTEP] simp [xt, yt, mem_erase_of_ne h, mem_erase_of_ne (Ne.symm h), erase_comm, swap] [GOAL] case neg α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ t✝ : List α x y : α l✝ t : List α h : ¬x = y xt : x ∈ t yt : ¬y ∈ t ⊢ List.bagInter (y :: x :: l✝) t ~ List.bagInter (x :: y :: l✝) t [PROOFSTEP] simp [xt, yt, mt mem_of_mem_erase, Perm.cons] [GOAL] case pos α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ t✝ : List α x y : α l✝ t : List α h : ¬x = y xt : ¬x ∈ t yt : y ∈ t ⊢ List.bagInter (y :: x :: l✝) t ~ List.bagInter (x :: y :: l✝) t [PROOFSTEP] simp [xt, yt, mt mem_of_mem_erase, Perm.cons] [GOAL] case neg α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ t✝ : List α x y : α l✝ t : List α h : ¬x = y xt : ¬x ∈ t yt : ¬y ∈ t ⊢ List.bagInter (y :: x :: l✝) t ~ List.bagInter (x :: y :: l✝) t [PROOFSTEP] simp [xt, yt] [GOAL] case trans α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂ t✝ l₁✝ l₂✝ l₃✝ : List α a✝¹ : l₁✝ ~ l₂✝ a✝ : l₂✝ ~ l₃✝ ih_1 : ∀ (t : List α), List.bagInter l₁✝ t ~ List.bagInter l₂✝ t ih_2 : ∀ (t : List α), List.bagInter l₂✝ t ~ List.bagInter l₃✝ t t : List α ⊢ List.bagInter l₁✝ t ~ List.bagInter l₃✝ t [PROOFSTEP] exact (ih_1 _).trans (ih_2 _) [GOAL] α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α l t₁ t₂ : List α p : t₁ ~ t₂ ⊢ List.bagInter l t₁ = List.bagInter l t₂ [PROOFSTEP] induction' l with a l IH generalizing t₁ t₂ p [GOAL] case nil α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁✝ t₂✝ : List α p✝ : t₁✝ ~ t₂✝ t₁ t₂ : List α p : t₁ ~ t₂ ⊢ List.bagInter [] t₁ = List.bagInter [] t₂ [PROOFSTEP] simp [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α inst✝ : 
DecidableEq α t₁✝ t₂✝ : List α p✝ : t₁✝ ~ t₂✝ a : α l : List α IH : ∀ {t₁ t₂ : List α}, t₁ ~ t₂ → List.bagInter l t₁ = List.bagInter l t₂ t₁ t₂ : List α p : t₁ ~ t₂ ⊢ List.bagInter (a :: l) t₁ = List.bagInter (a :: l) t₂ [PROOFSTEP] by_cases h : a ∈ t₁ [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁✝ t₂✝ : List α p✝ : t₁✝ ~ t₂✝ a : α l : List α IH : ∀ {t₁ t₂ : List α}, t₁ ~ t₂ → List.bagInter l t₁ = List.bagInter l t₂ t₁ t₂ : List α p : t₁ ~ t₂ h : a ∈ t₁ ⊢ List.bagInter (a :: l) t₁ = List.bagInter (a :: l) t₂ [PROOFSTEP] simp [h, p.subset h, IH (p.erase _)] [GOAL] case neg α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁✝ t₂✝ : List α p✝ : t₁✝ ~ t₂✝ a : α l : List α IH : ∀ {t₁ t₂ : List α}, t₁ ~ t₂ → List.bagInter l t₁ = List.bagInter l t₂ t₁ t₂ : List α p : t₁ ~ t₂ h : ¬a ∈ t₁ ⊢ List.bagInter (a :: l) t₁ = List.bagInter (a :: l) t₂ [PROOFSTEP] simp [h, mt p.mem_iff.2 h, IH p] [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α H : ∀ (a : α), count a l₁ = count a l₂ ⊢ l₁ ~ l₂ [PROOFSTEP] induction' l₁ with a l₁ IH generalizing l₂ [GOAL] case nil α : Type uu β : Type vv l₁✝ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂✝ : List α H✝ : ∀ (a : α), count a l₁ = count a l₂✝ l₂ : List α H : ∀ (a : α), count a [] = count a l₂ ⊢ [] ~ l₂ [PROOFSTEP] cases' l₂ with b l₂ [GOAL] case nil.nil α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α H✝ : ∀ (a : α), count a l₁ = count a l₂ H : ∀ (a : α), count a [] = count a [] ⊢ [] ~ [] [PROOFSTEP] rfl [GOAL] case nil.cons α : Type uu β : Type vv l₁✝ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂✝ : List α H✝ : ∀ (a : α), count a l₁ = count a l₂✝ b : α l₂ : List α H : ∀ (a : α), count a [] = count a (b :: l₂) ⊢ [] ~ b :: l₂ [PROOFSTEP] specialize H b [GOAL] case nil.cons α : Type uu β : Type vv l₁✝ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂✝ : List α H✝ : ∀ (a : α), count a l₁ = count a l₂✝ b : α l₂ : List α H : count b [] = count b (b :: l₂) ⊢ [] ~ b :: l₂ [PROOFSTEP] simp at H [GOAL] case nil.cons α : Type uu β : Type vv l₁✝ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂✝ : List α H✝ : ∀ (a : α), count a l₁ = count a l₂✝ b : α l₂ : List α H : 0 = count b l₂ + 1 ⊢ [] ~ b :: l₂ [PROOFSTEP] contradiction [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁✝ l₂✝ : List α H✝ : ∀ (a : α), count a l₁✝ = count a l₂✝ a : α l₁ : List α IH : ∀ {l₂ : List α}, (∀ (a : α), count a l₁ = count a l₂) → l₁ ~ l₂ l₂ : List α H : ∀ (a_1 : α), count a_1 (a :: l₁) = count a_1 l₂ ⊢ a :: l₁ ~ l₂ [PROOFSTEP] have : a ∈ l₂ := count_pos.1 (by rw [← H]; simp) [GOAL] α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁✝ l₂✝ : List α H✝ : ∀ (a : α), count a l₁✝ = count a l₂✝ a : α l₁ : List α IH : ∀ {l₂ : List α}, (∀ (a : α), count a l₁ = count a l₂) → l₁ ~ l₂ l₂ : List α H : ∀ (a_1 : α), count a_1 (a :: l₁) = count a_1 l₂ ⊢ 0 < count a l₂ [PROOFSTEP] rw [← H] [GOAL] α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁✝ l₂✝ : List α H✝ : ∀ (a : α), count a l₁✝ = count a l₂✝ a : α l₁ : List α IH : ∀ {l₂ : List α}, (∀ (a : α), count a l₁ = count a l₂) → l₁ ~ l₂ l₂ : List α H : ∀ (a_1 : α), count a_1 (a :: l₁) = count a_1 l₂ ⊢ 0 < count a (a :: l₁) [PROOFSTEP] simp [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁✝ l₂✝ : List α H✝ : ∀ (a : α), count a l₁✝ = count a l₂✝ a : α l₁ : List α IH : ∀ {l₂ : List α}, (∀ (a : α), count a l₁ = count a l₂) → l₁ ~ l₂ l₂ : List α H : ∀ (a_1 : α), count a_1 (a :: l₁) = 
count a_1 l₂ this : a ∈ l₂ ⊢ a :: l₁ ~ l₂ [PROOFSTEP] refine' ((IH fun b => _).cons a).trans (perm_cons_erase this).symm [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁✝ l₂✝ : List α H✝ : ∀ (a : α), count a l₁✝ = count a l₂✝ a : α l₁ : List α IH : ∀ {l₂ : List α}, (∀ (a : α), count a l₁ = count a l₂) → l₁ ~ l₂ l₂ : List α H : ∀ (a_1 : α), count a_1 (a :: l₁) = count a_1 l₂ this : a ∈ l₂ b : α ⊢ count b l₁ = count b (List.erase l₂ a) [PROOFSTEP] specialize H b [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁✝ l₂✝ : List α H✝ : ∀ (a : α), count a l₁✝ = count a l₂✝ a : α l₁ : List α IH : ∀ {l₂ : List α}, (∀ (a : α), count a l₁ = count a l₂) → l₁ ~ l₂ l₂ : List α this : a ∈ l₂ b : α H : count b (a :: l₁) = count b l₂ ⊢ count b l₁ = count b (List.erase l₂ a) [PROOFSTEP] rw [(perm_cons_erase this).count_eq] at H [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁✝ l₂✝ : List α H✝ : ∀ (a : α), count a l₁✝ = count a l₂✝ a : α l₁ : List α IH : ∀ {l₂ : List α}, (∀ (a : α), count a l₁ = count a l₂) → l₁ ~ l₂ l₂ : List α this : a ∈ l₂ b : α H : count b (a :: l₁) = count b (a :: List.erase l₂ a) ⊢ count b l₁ = count b (List.erase l₂ a) [PROOFSTEP] by_cases h : b = a [GOAL] case pos α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁✝ l₂✝ : List α H✝ : ∀ (a : α), count a l₁✝ = count a l₂✝ a : α l₁ : List α IH : ∀ {l₂ : List α}, (∀ (a : α), count a l₁ = count a l₂) → l₁ ~ l₂ l₂ : List α this : a ∈ l₂ b : α H : count b (a :: l₁) = count b (a :: List.erase l₂ a) h : b = a ⊢ count b l₁ = count b (List.erase l₂ a) [PROOFSTEP] simpa [h] using H [GOAL] case neg α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁✝ l₂✝ : List α H✝ : ∀ (a : α), count a l₁✝ = count a l₂✝ a : α l₁ : List α IH : ∀ {l₂ : List α}, (∀ (a : α), count a l₁ = count a l₂) → l₁ ~ l₂ l₂ : List α this : a ∈ l₂ b : α H : count b (a :: l₁) = count b (a :: List.erase l₂ a) h : ¬b = a ⊢ count b l₁ = count b (List.erase l₂ a) [PROOFSTEP] simpa [h] using H [GOAL] α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α l : List α a b : α m n : ℕ h : a ≠ b ⊢ l ~ replicate m a ++ replicate n b ↔ count a l = m ∧ count b l = n ∧ l ⊆ [a, b] [PROOFSTEP] rw [perm_iff_count, ← Decidable.and_forall_ne a, ← Decidable.and_forall_ne b] [GOAL] α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α l : List α a b : α m n : ℕ h : a ≠ b ⊢ (count a l = count a (replicate m a ++ replicate n b) ∧ (b ≠ a → count b l = count b (replicate m a ++ replicate n b)) ∧ ∀ (b_1 : α), b_1 ≠ b → b_1 ≠ a → count b_1 l = count b_1 (replicate m a ++ replicate n b)) ↔ count a l = m ∧ count b l = n ∧ l ⊆ [a, b] [PROOFSTEP] suffices : l ⊆ [a, b] ↔ ∀ c, c ≠ b → c ≠ a → c ∉ l [GOAL] α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α l : List α a b : α m n : ℕ h : a ≠ b this : l ⊆ [a, b] ↔ ∀ (c : α), c ≠ b → c ≠ a → ¬c ∈ l ⊢ (count a l = count a (replicate m a ++ replicate n b) ∧ (b ≠ a → count b l = count b (replicate m a ++ replicate n b)) ∧ ∀ (b_1 : α), b_1 ≠ b → b_1 ≠ a → count b_1 l = count b_1 (replicate m a ++ replicate n b)) ↔ count a l = m ∧ count b l = n ∧ l ⊆ [a, b] case this α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α l : List α a b : α m n : ℕ h : a ≠ b ⊢ l ⊆ [a, b] ↔ ∀ (c : α), c ≠ b → c ≠ a → ¬c ∈ l [PROOFSTEP] {simp (config := { contextual := true }) [count_replicate, h, h.symm, this] } [GOAL] α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α l : List α a b : α m n : ℕ h : a ≠ b this : l ⊆ 
[a, b] ↔ ∀ (c : α), c ≠ b → c ≠ a → ¬c ∈ l ⊢ (count a l = count a (replicate m a ++ replicate n b) ∧ (b ≠ a → count b l = count b (replicate m a ++ replicate n b)) ∧ ∀ (b_1 : α), b_1 ≠ b → b_1 ≠ a → count b_1 l = count b_1 (replicate m a ++ replicate n b)) ↔ count a l = m ∧ count b l = n ∧ l ⊆ [a, b] [PROOFSTEP] simp (config := { contextual := true }) [count_replicate, h, h.symm, this] [GOAL] case this α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α l : List α a b : α m n : ℕ h : a ≠ b ⊢ l ⊆ [a, b] ↔ ∀ (c : α), c ≠ b → c ≠ a → ¬c ∈ l [PROOFSTEP] simp_rw [Ne.def, ← and_imp, ← not_or, Decidable.not_imp_not, subset_def, mem_cons, not_mem_nil, or_false, or_comm] [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α h : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂ ⊢ l₁ ++ List.diff l₂ l₁ ~ l₂ [PROOFSTEP] induction' l₁ with hd tl IH generalizing l₂ [GOAL] case nil α : Type uu β : Type vv l₁✝ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂✝ : List α h✝ : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂✝ l₂ : List α h : ∀ (x : α), x ∈ [] → count x [] ≤ count x l₂ ⊢ [] ++ List.diff l₂ [] ~ l₂ [PROOFSTEP] simp [GOAL] case cons α : Type uu β : Type vv l₁✝ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂✝ : List α h✝ : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂✝ hd : α tl : List α IH : ∀ {l₂ : List α}, (∀ (x : α), x ∈ tl → count x tl ≤ count x l₂) → tl ++ List.diff l₂ tl ~ l₂ l₂ : List α h : ∀ (x : α), x ∈ hd :: tl → count x (hd :: tl) ≤ count x l₂ ⊢ hd :: tl ++ List.diff l₂ (hd :: tl) ~ l₂ [PROOFSTEP] have : hd ∈ l₂ := by rw [← count_pos] exact lt_of_lt_of_le (count_pos.mpr (mem_cons_self _ _)) (h hd (mem_cons_self _ _)) [GOAL] α : Type uu β : Type vv l₁✝ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂✝ : List α h✝ : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂✝ hd : α tl : List α IH : ∀ {l₂ : List α}, (∀ (x : α), x ∈ tl → count x tl ≤ count x l₂) → tl ++ List.diff l₂ tl ~ l₂ l₂ : List α h : ∀ (x : α), x ∈ hd :: tl → count x (hd :: tl) ≤ count x l₂ ⊢ hd ∈ l₂ [PROOFSTEP] rw [← count_pos] [GOAL] α : Type uu β : Type vv l₁✝ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂✝ : List α h✝ : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂✝ hd : α tl : List α IH : ∀ {l₂ : List α}, (∀ (x : α), x ∈ tl → count x tl ≤ count x l₂) → tl ++ List.diff l₂ tl ~ l₂ l₂ : List α h : ∀ (x : α), x ∈ hd :: tl → count x (hd :: tl) ≤ count x l₂ ⊢ 0 < count hd l₂ [PROOFSTEP] exact lt_of_lt_of_le (count_pos.mpr (mem_cons_self _ _)) (h hd (mem_cons_self _ _)) [GOAL] case cons α : Type uu β : Type vv l₁✝ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂✝ : List α h✝ : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂✝ hd : α tl : List α IH : ∀ {l₂ : List α}, (∀ (x : α), x ∈ tl → count x tl ≤ count x l₂) → tl ++ List.diff l₂ tl ~ l₂ l₂ : List α h : ∀ (x : α), x ∈ hd :: tl → count x (hd :: tl) ≤ count x l₂ this : hd ∈ l₂ ⊢ hd :: tl ++ List.diff l₂ (hd :: tl) ~ l₂ [PROOFSTEP] replace := perm_cons_erase this [GOAL] case cons α : Type uu β : Type vv l₁✝ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂✝ : List α h✝ : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂✝ hd : α tl : List α IH : ∀ {l₂ : List α}, (∀ (x : α), x ∈ tl → count x tl ≤ count x l₂) → tl ++ List.diff l₂ tl ~ l₂ l₂ : List α h : ∀ (x : α), x ∈ hd :: tl → count x (hd :: tl) ≤ count x l₂ this : l₂ ~ hd :: List.erase l₂ hd ⊢ hd :: tl ++ List.diff l₂ (hd :: tl) ~ l₂ [PROOFSTEP] refine' Perm.trans _ this.symm [GOAL] case cons α : Type uu β : Type vv l₁✝ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂✝ : List α h✝ : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂✝ hd : α tl : List α IH : ∀ {l₂ : List 
α}, (∀ (x : α), x ∈ tl → count x tl ≤ count x l₂) → tl ++ List.diff l₂ tl ~ l₂ l₂ : List α h : ∀ (x : α), x ∈ hd :: tl → count x (hd :: tl) ≤ count x l₂ this : l₂ ~ hd :: List.erase l₂ hd ⊢ hd :: tl ++ List.diff l₂ (hd :: tl) ~ hd :: List.erase l₂ hd [PROOFSTEP] rw [cons_append, diff_cons, perm_cons] [GOAL] case cons α : Type uu β : Type vv l₁✝ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂✝ : List α h✝ : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂✝ hd : α tl : List α IH : ∀ {l₂ : List α}, (∀ (x : α), x ∈ tl → count x tl ≤ count x l₂) → tl ++ List.diff l₂ tl ~ l₂ l₂ : List α h : ∀ (x : α), x ∈ hd :: tl → count x (hd :: tl) ≤ count x l₂ this : l₂ ~ hd :: List.erase l₂ hd ⊢ tl ++ List.diff (List.erase l₂ hd) tl ~ List.erase l₂ hd [PROOFSTEP] refine' IH fun x hx => _ [GOAL] case cons α : Type uu β : Type vv l₁✝ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂✝ : List α h✝ : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂✝ hd : α tl : List α IH : ∀ {l₂ : List α}, (∀ (x : α), x ∈ tl → count x tl ≤ count x l₂) → tl ++ List.diff l₂ tl ~ l₂ l₂ : List α h : ∀ (x : α), x ∈ hd :: tl → count x (hd :: tl) ≤ count x l₂ this : l₂ ~ hd :: List.erase l₂ hd x : α hx : x ∈ tl ⊢ count x tl ≤ count x (List.erase l₂ hd) [PROOFSTEP] specialize h x (mem_cons_of_mem _ hx) [GOAL] case cons α : Type uu β : Type vv l₁✝ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂✝ : List α h✝ : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂✝ hd : α tl : List α IH : ∀ {l₂ : List α}, (∀ (x : α), x ∈ tl → count x tl ≤ count x l₂) → tl ++ List.diff l₂ tl ~ l₂ l₂ : List α this : l₂ ~ hd :: List.erase l₂ hd x : α hx : x ∈ tl h : count x (hd :: tl) ≤ count x l₂ ⊢ count x tl ≤ count x (List.erase l₂ hd) [PROOFSTEP] rw [perm_iff_count.mp this] at h [GOAL] case cons α : Type uu β : Type vv l₁✝ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂✝ : List α h✝ : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂✝ hd : α tl : List α IH : ∀ {l₂ : List α}, (∀ (x : α), x ∈ tl → count x tl ≤ count x l₂) → tl ++ List.diff l₂ tl ~ l₂ l₂ : List α this : l₂ ~ hd :: List.erase l₂ hd x : α hx : x ∈ tl h : count x (hd :: tl) ≤ count x (hd :: List.erase l₂ hd) ⊢ count x tl ≤ count x (List.erase l₂ hd) [PROOFSTEP] by_cases hx : x = hd [GOAL] case pos α : Type uu β : Type vv l₁✝ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂✝ : List α h✝ : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂✝ hd : α tl : List α IH : ∀ {l₂ : List α}, (∀ (x : α), x ∈ tl → count x tl ≤ count x l₂) → tl ++ List.diff l₂ tl ~ l₂ l₂ : List α this : l₂ ~ hd :: List.erase l₂ hd x : α hx✝ : x ∈ tl h : count x (hd :: tl) ≤ count x (hd :: List.erase l₂ hd) hx : x = hd ⊢ count x tl ≤ count x (List.erase l₂ hd) [PROOFSTEP] subst hd [GOAL] case pos α : Type uu β : Type vv l₁✝ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂✝ : List α h✝ : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂✝ tl : List α IH : ∀ {l₂ : List α}, (∀ (x : α), x ∈ tl → count x tl ≤ count x l₂) → tl ++ List.diff l₂ tl ~ l₂ l₂ : List α x : α hx : x ∈ tl this : l₂ ~ x :: List.erase l₂ x h : count x (x :: tl) ≤ count x (x :: List.erase l₂ x) ⊢ count x tl ≤ count x (List.erase l₂ x) [PROOFSTEP] simpa [Nat.succ_le_succ_iff] using h [GOAL] case neg α : Type uu β : Type vv l₁✝ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂✝ : List α h✝ : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂✝ hd : α tl : List α IH : ∀ {l₂ : List α}, (∀ (x : α), x ∈ tl → count x tl ≤ count x l₂) → tl ++ List.diff l₂ tl ~ l₂ l₂ : List α this : l₂ ~ hd :: List.erase l₂ hd x : α hx✝ : x ∈ tl h : count x (hd :: tl) ≤ count x (hd :: List.erase l₂ hd) hx : ¬x = hd ⊢ count x tl ≤ count x (List.erase l₂ hd) [PROOFSTEP] simpa [hx] 
using h [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α ⊢ l₁ <+~ l₂ ↔ ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂ [PROOFSTEP] refine' ⟨fun h x _ => Subperm.count_le h x, fun h => _⟩ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α h : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂ ⊢ l₁ <+~ l₂ [PROOFSTEP] suffices l₁ <+~ l₂.diff l₁ ++ l₁ by refine' this.trans (Perm.subperm _) exact perm_append_comm.trans (subperm_append_diff_self_of_count_le h) [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α h : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂ this : l₁ <+~ List.diff l₂ l₁ ++ l₁ ⊢ l₁ <+~ l₂ [PROOFSTEP] refine' this.trans (Perm.subperm _) [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α h : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂ this : l₁ <+~ List.diff l₂ l₁ ++ l₁ ⊢ List.diff l₂ l₁ ++ l₁ ~ l₂ [PROOFSTEP] exact perm_append_comm.trans (subperm_append_diff_self_of_count_le h) [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α h : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂ ⊢ l₁ <+~ List.diff l₂ l₁ ++ l₁ [PROOFSTEP] exact (subperm_append_right l₁).mpr nil_subperm [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ inst✝ : DecidableEq α✝ α : Type u_1 l : List α a : α x✝ : [a] <+~ l s : List α hla : s ~ [a] h : s <+ l ⊢ a ∈ l [PROOFSTEP] rwa [perm_singleton.mp hla, singleton_sublist] at h [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α h : l₁ <+~ l₂ x : α hx : count x l₁ < count x l₂ ⊢ x :: l₁ <+~ l₂ [PROOFSTEP] rw [subperm_ext_iff] at h ⊢ [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α h✝ : l₁ <+~ l₂ h : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂ x : α hx : count x l₁ < count x l₂ ⊢ ∀ (x_1 : α), x_1 ∈ x :: l₁ → count x_1 (x :: l₁) ≤ count x_1 l₂ [PROOFSTEP] intro y hy [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α h✝ : l₁ <+~ l₂ h : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂ x : α hx : count x l₁ < count x l₂ y : α hy : y ∈ x :: l₁ ⊢ count y (x :: l₁) ≤ count y l₂ [PROOFSTEP] by_cases hy' : y = x [GOAL] case pos α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α h✝ : l₁ <+~ l₂ h : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂ x : α hx : count x l₁ < count x l₂ y : α hy : y ∈ x :: l₁ hy' : y = x ⊢ count y (x :: l₁) ≤ count y l₂ [PROOFSTEP] subst x [GOAL] case pos α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α h✝ : l₁ <+~ l₂ h : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂ y : α hx : count y l₁ < count y l₂ hy : y ∈ y :: l₁ ⊢ count y (y :: l₁) ≤ count y l₂ [PROOFSTEP] simpa using Nat.succ_le_of_lt hx [GOAL] case neg α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α h✝ : l₁ <+~ l₂ h : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂ x : α hx : count x l₁ < count x l₂ y : α hy : y ∈ x :: l₁ hy' : ¬y = x ⊢ count y (x :: l₁) ≤ count y l₂ [PROOFSTEP] rw [count_cons_of_ne hy'] [GOAL] case neg α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α h✝ : l₁ <+~ l₂ h : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂ x : α hx : count x l₁ < count x l₂ y : α hy : y ∈ x :: l₁ hy' : ¬y = x ⊢ count y l₁ ≤ count y l₂ [PROOFSTEP] refine' h y _ [GOAL] case neg α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α h✝ : l₁ <+~ l₂ h : ∀ (x : α), x ∈ l₁ → count x l₁ ≤ count x l₂ x : α hx : count x l₁ < count x l₂ y : α 
hy : y ∈ x :: l₁ hy' : ¬y = x ⊢ y ∈ l₁ [PROOFSTEP] simpa [hy'] using hy [GOAL] α : Type uu β : Type vv l₁ l₂✝ : List α inst✝ : DecidableEq α b : α l₂ : List α h : [] ~ b :: l₂ ⊢ False [PROOFSTEP] have := h.nil_eq [GOAL] α : Type uu β : Type vv l₁ l₂✝ : List α inst✝ : DecidableEq α b : α l₂ : List α h : [] ~ b :: l₂ this : [] = b :: l₂ ⊢ False [PROOFSTEP] contradiction [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α p : l₁ ~ l₂ a : α h : a ∈ l₁ ⊢ count a (List.dedup l₁) = count a (List.dedup l₂) [PROOFSTEP] simp [nodup_dedup, h, p.subset h] [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ : List α p : l₁ ~ l₂ a : α h : ¬a ∈ l₁ ⊢ count a (List.dedup l₁) = count a (List.dedup l₂) [PROOFSTEP] simp [h, mt p.mem_iff.2 h] [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α a : α l₁ l₂ : List α p : l₁ ~ l₂ h : a ∈ l₁ ⊢ List.insert a l₁ ~ List.insert a l₂ [PROOFSTEP] simpa [h, p.subset h] using p [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α a : α l₁ l₂ : List α p : l₁ ~ l₂ h : ¬a ∈ l₁ ⊢ List.insert a l₁ ~ List.insert a l₂ [PROOFSTEP] simpa [h, mt p.mem_iff.2 h] using p.cons a [GOAL] α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α x y : α l : List α ⊢ List.insert x (List.insert y l) ~ List.insert y (List.insert x l) [PROOFSTEP] by_cases xl : x ∈ l [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α x y : α l : List α xl : x ∈ l ⊢ List.insert x (List.insert y l) ~ List.insert y (List.insert x l) [PROOFSTEP] by_cases yl : y ∈ l [GOAL] case neg α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α x y : α l : List α xl : ¬x ∈ l ⊢ List.insert x (List.insert y l) ~ List.insert y (List.insert x l) [PROOFSTEP] by_cases yl : y ∈ l [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α x y : α l : List α xl : x ∈ l yl : y ∈ l ⊢ List.insert x (List.insert y l) ~ List.insert y (List.insert x l) [PROOFSTEP] simp [xl, yl] [GOAL] case neg α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α x y : α l : List α xl : x ∈ l yl : ¬y ∈ l ⊢ List.insert x (List.insert y l) ~ List.insert y (List.insert x l) [PROOFSTEP] simp [xl, yl] [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α x y : α l : List α xl : ¬x ∈ l yl : y ∈ l ⊢ List.insert x (List.insert y l) ~ List.insert y (List.insert x l) [PROOFSTEP] simp [xl, yl] [GOAL] case neg α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α x y : α l : List α xl : ¬x ∈ l yl : ¬y ∈ l ⊢ List.insert x (List.insert y l) ~ List.insert y (List.insert x l) [PROOFSTEP] simp [xl, yl] [GOAL] case neg α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α x y : α l : List α xl : ¬x ∈ l yl : ¬y ∈ l ⊢ List.insert x (y :: l) ~ List.insert y (x :: l) [PROOFSTEP] by_cases xy : x = y [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α x y : α l : List α xl : ¬x ∈ l yl : ¬y ∈ l xy : x = y ⊢ List.insert x (y :: l) ~ List.insert y (x :: l) [PROOFSTEP] simp [xy] [GOAL] case neg α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α x y : α l : List α xl : ¬x ∈ l yl : ¬y ∈ l xy : ¬x = y ⊢ List.insert x (y :: l) ~ List.insert y (x :: l) [PROOFSTEP] simp [List.insert, xl, yl, xy, Ne.symm xy] [GOAL] case neg α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α x y : α l : List α xl : ¬x ∈ l yl : ¬y ∈ l xy : ¬x = y ⊢ x :: y :: l ~ y :: x :: l [PROOFSTEP] constructor [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ inst✝ : DecidableEq α✝ α : Type u_1 
x : α l : List α n : ℕ h : n ≤ length l ⊢ insertNth n x l ~ x :: l [PROOFSTEP] induction' l with _ _ l_ih generalizing n [GOAL] case nil α✝ : Type uu β : Type vv l₁ l₂ : List α✝ inst✝ : DecidableEq α✝ α : Type u_1 x : α l : List α n✝ : ℕ h✝ : n✝ ≤ length l n : ℕ h : n ≤ length [] ⊢ insertNth n x [] ~ [x] [PROOFSTEP] cases n [GOAL] case nil.zero α✝ : Type uu β : Type vv l₁ l₂ : List α✝ inst✝ : DecidableEq α✝ α : Type u_1 x : α l : List α n : ℕ h✝ : n ≤ length l h : zero ≤ length [] ⊢ insertNth zero x [] ~ [x] case nil.succ α✝ : Type uu β : Type vv l₁ l₂ : List α✝ inst✝ : DecidableEq α✝ α : Type u_1 x : α l : List α n : ℕ h✝ : n ≤ length l n✝ : ℕ h : succ n✝ ≤ length [] ⊢ insertNth (succ n✝) x [] ~ [x] [PROOFSTEP] rfl [GOAL] case nil.succ α✝ : Type uu β : Type vv l₁ l₂ : List α✝ inst✝ : DecidableEq α✝ α : Type u_1 x : α l : List α n : ℕ h✝ : n ≤ length l n✝ : ℕ h : succ n✝ ≤ length [] ⊢ insertNth (succ n✝) x [] ~ [x] [PROOFSTEP] cases h [GOAL] case cons α✝ : Type uu β : Type vv l₁ l₂ : List α✝ inst✝ : DecidableEq α✝ α : Type u_1 x : α l : List α n✝ : ℕ h✝ : n✝ ≤ length l head✝ : α tail✝ : List α l_ih : ∀ {n : ℕ}, n ≤ length tail✝ → insertNth n x tail✝ ~ x :: tail✝ n : ℕ h : n ≤ length (head✝ :: tail✝) ⊢ insertNth n x (head✝ :: tail✝) ~ x :: head✝ :: tail✝ [PROOFSTEP] cases n [GOAL] case cons.zero α✝ : Type uu β : Type vv l₁ l₂ : List α✝ inst✝ : DecidableEq α✝ α : Type u_1 x : α l : List α n : ℕ h✝ : n ≤ length l head✝ : α tail✝ : List α l_ih : ∀ {n : ℕ}, n ≤ length tail✝ → insertNth n x tail✝ ~ x :: tail✝ h : zero ≤ length (head✝ :: tail✝) ⊢ insertNth zero x (head✝ :: tail✝) ~ x :: head✝ :: tail✝ [PROOFSTEP] simp [insertNth] [GOAL] case cons.succ α✝ : Type uu β : Type vv l₁ l₂ : List α✝ inst✝ : DecidableEq α✝ α : Type u_1 x : α l : List α n : ℕ h✝ : n ≤ length l head✝ : α tail✝ : List α l_ih : ∀ {n : ℕ}, n ≤ length tail✝ → insertNth n x tail✝ ~ x :: tail✝ n✝ : ℕ h : succ n✝ ≤ length (head✝ :: tail✝) ⊢ insertNth (succ n✝) x (head✝ :: tail✝) ~ x :: head✝ :: tail✝ [PROOFSTEP] simp only [insertNth, modifyNthTail] [GOAL] case cons.succ α✝ : Type uu β : Type vv l₁ l₂ : List α✝ inst✝ : DecidableEq α✝ α : Type u_1 x : α l : List α n : ℕ h✝ : n ≤ length l head✝ : α tail✝ : List α l_ih : ∀ {n : ℕ}, n ≤ length tail✝ → insertNth n x tail✝ ~ x :: tail✝ n✝ : ℕ h : succ n✝ ≤ length (head✝ :: tail✝) ⊢ head✝ :: modifyNthTail (cons x) n✝ tail✝ ~ x :: head✝ :: tail✝ [PROOFSTEP] refine' Perm.trans (Perm.cons _ (l_ih _)) _ [GOAL] case cons.succ.refine'_1 α✝ : Type uu β : Type vv l₁ l₂ : List α✝ inst✝ : DecidableEq α✝ α : Type u_1 x : α l : List α n : ℕ h✝ : n ≤ length l head✝ : α tail✝ : List α l_ih : ∀ {n : ℕ}, n ≤ length tail✝ → insertNth n x tail✝ ~ x :: tail✝ n✝ : ℕ h : succ n✝ ≤ length (head✝ :: tail✝) ⊢ n✝ ≤ length tail✝ [PROOFSTEP] apply Nat.le_of_succ_le_succ h [GOAL] case cons.succ.refine'_2 α✝ : Type uu β : Type vv l₁ l₂ : List α✝ inst✝ : DecidableEq α✝ α : Type u_1 x : α l : List α n : ℕ h✝ : n ≤ length l head✝ : α tail✝ : List α l_ih : ∀ {n : ℕ}, n ≤ length tail✝ → insertNth n x tail✝ ~ x :: tail✝ n✝ : ℕ h : succ n✝ ≤ length (head✝ :: tail✝) ⊢ head✝ :: x :: tail✝ ~ x :: head✝ :: tail✝ [PROOFSTEP] apply Perm.swap [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ t₁ : List α h : l₁ ~ l₂ ⊢ l₁ ∪ t₁ ~ l₂ ∪ t₁ [PROOFSTEP] induction' h with a _ _ _ ih _ _ _ _ _ _ _ _ ih_1 ih_2 [GOAL] case nil α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ t₁ : List α ⊢ [] ∪ t₁ ~ [] ∪ t₁ [PROOFSTEP] try simp [GOAL] case nil α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : 
DecidableEq α l₁ l₂ t₁ : List α ⊢ [] ∪ t₁ ~ [] ∪ t₁ [PROOFSTEP] simp [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂ t₁ : List α a : α l₁✝ l₂✝ : List α a✝ : l₁✝ ~ l₂✝ ih : l₁✝ ∪ t₁ ~ l₂✝ ∪ t₁ ⊢ a :: l₁✝ ∪ t₁ ~ a :: l₂✝ ∪ t₁ [PROOFSTEP] try simp [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂ t₁ : List α a : α l₁✝ l₂✝ : List α a✝ : l₁✝ ~ l₂✝ ih : l₁✝ ∪ t₁ ~ l₂✝ ∪ t₁ ⊢ a :: l₁✝ ∪ t₁ ~ a :: l₂✝ ∪ t₁ [PROOFSTEP] simp [GOAL] case swap α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ t₁ : List α x✝ y✝ : α l✝ : List α ⊢ y✝ :: x✝ :: l✝ ∪ t₁ ~ x✝ :: y✝ :: l✝ ∪ t₁ [PROOFSTEP] try simp [GOAL] case swap α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ t₁ : List α x✝ y✝ : α l✝ : List α ⊢ y✝ :: x✝ :: l✝ ∪ t₁ ~ x✝ :: y✝ :: l✝ ∪ t₁ [PROOFSTEP] simp [GOAL] case trans α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂ t₁ l₁✝ l₂✝ l₃✝ : List α a✝¹ : l₁✝ ~ l₂✝ a✝ : l₂✝ ~ l₃✝ ih_1 : l₁✝ ∪ t₁ ~ l₂✝ ∪ t₁ ih_2 : l₂✝ ∪ t₁ ~ l₃✝ ∪ t₁ ⊢ l₁✝ ∪ t₁ ~ l₃✝ ∪ t₁ [PROOFSTEP] try simp [GOAL] case trans α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂ t₁ l₁✝ l₂✝ l₃✝ : List α a✝¹ : l₁✝ ~ l₂✝ a✝ : l₂✝ ~ l₃✝ ih_1 : l₁✝ ∪ t₁ ~ l₂✝ ∪ t₁ ih_2 : l₂✝ ∪ t₁ ~ l₃✝ ∪ t₁ ⊢ l₁✝ ∪ t₁ ~ l₃✝ ∪ t₁ [PROOFSTEP] simp [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂ t₁ : List α a : α l₁✝ l₂✝ : List α a✝ : l₁✝ ~ l₂✝ ih : l₁✝ ∪ t₁ ~ l₂✝ ∪ t₁ ⊢ List.insert a (l₁✝ ∪ t₁) ~ List.insert a (l₂✝ ∪ t₁) [PROOFSTEP] exact ih.insert a [GOAL] case swap α : Type uu β : Type vv l₁✝ l₂✝ : List α inst✝ : DecidableEq α l₁ l₂ t₁ : List α x✝ y✝ : α l✝ : List α ⊢ List.insert y✝ (List.insert x✝ (l✝ ∪ t₁)) ~ List.insert x✝ (List.insert y✝ (l✝ ∪ t₁)) [PROOFSTEP] apply perm_insert_swap [GOAL] case trans α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α inst✝ : DecidableEq α l₁ l₂ t₁ l₁✝ l₂✝ l₃✝ : List α a✝¹ : l₁✝ ~ l₂✝ a✝ : l₂✝ ~ l₃✝ ih_1 : l₁✝ ∪ t₁ ~ l₂✝ ∪ t₁ ih_2 : l₂✝ ∪ t₁ ~ l₃✝ ∪ t₁ ⊢ l₁✝ ∪ t₁ ~ l₃✝ ∪ t₁ [PROOFSTEP] exact ih_1.trans ih_2 [GOAL] α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α l t₁ t₂ : List α h : t₁ ~ t₂ ⊢ l ∪ t₁ ~ l ∪ t₂ [PROOFSTEP] induction l [GOAL] case nil α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α h : t₁ ~ t₂ ⊢ [] ∪ t₁ ~ [] ∪ t₂ [PROOFSTEP] simp [*, Perm.insert] [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α h : t₁ ~ t₂ head✝ : α tail✝ : List α tail_ih✝ : tail✝ ∪ t₁ ~ tail✝ ∪ t₂ ⊢ head✝ :: tail✝ ∪ t₁ ~ head✝ :: tail✝ ∪ t₂ [PROOFSTEP] simp [*, Perm.insert] [GOAL] α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α l t₁ t₂ : List α p : t₁ ~ t₂ a : α x✝ : a ∈ l ⊢ decide (a ∈ t₁) = true ↔ decide (a ∈ t₂) = true [PROOFSTEP] simpa using p.mem_iff [GOAL] α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α l t₁ t₂ : List α h : Disjoint t₁ t₂ ⊢ l ∩ (t₁ ++ t₂) ~ l ∩ t₁ ++ l ∩ t₂ [PROOFSTEP] induction l [GOAL] case nil α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α h : Disjoint t₁ t₂ ⊢ [] ∩ (t₁ ++ t₂) ~ [] ∩ t₁ ++ [] ∩ t₂ case cons α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α h : Disjoint t₁ t₂ head✝ : α tail✝ : List α tail_ih✝ : tail✝ ∩ (t₁ ++ t₂) ~ tail✝ ∩ t₁ ++ tail✝ ∩ t₂ ⊢ (head✝ :: tail✝) ∩ (t₁ ++ t₂) ~ (head✝ :: tail✝) ∩ t₁ ++ (head✝ :: tail✝) ∩ t₂ [PROOFSTEP] case nil => simp [GOAL] α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α h : Disjoint t₁ t₂ ⊢ [] ∩ (t₁ ++ t₂) 
~ [] ∩ t₁ ++ [] ∩ t₂ [PROOFSTEP] case nil => simp [GOAL] α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α h : Disjoint t₁ t₂ ⊢ [] ∩ (t₁ ++ t₂) ~ [] ∩ t₁ ++ [] ∩ t₂ [PROOFSTEP] simp [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α h : Disjoint t₁ t₂ head✝ : α tail✝ : List α tail_ih✝ : tail✝ ∩ (t₁ ++ t₂) ~ tail✝ ∩ t₁ ++ tail✝ ∩ t₂ ⊢ (head✝ :: tail✝) ∩ (t₁ ++ t₂) ~ (head✝ :: tail✝) ∩ t₁ ++ (head✝ :: tail✝) ∩ t₂ [PROOFSTEP] case cons x xs l_ih => by_cases h₁ : x ∈ t₁ · have h₂ : x ∉ t₂ := h h₁ simp [*] by_cases h₂ : x ∈ t₂ · simp only [*, inter_cons_of_not_mem, false_or_iff, mem_append, inter_cons_of_mem, not_false_iff] refine' Perm.trans (Perm.cons _ l_ih) _ change [x] ++ xs ∩ t₁ ++ xs ∩ t₂ ~ xs ∩ t₁ ++ ([x] ++ xs ∩ t₂) rw [← List.append_assoc] solve_by_elim [Perm.append_right, perm_append_comm] · simp [*] [GOAL] α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α h : Disjoint t₁ t₂ x : α xs : List α l_ih : xs ∩ (t₁ ++ t₂) ~ xs ∩ t₁ ++ xs ∩ t₂ ⊢ (x :: xs) ∩ (t₁ ++ t₂) ~ (x :: xs) ∩ t₁ ++ (x :: xs) ∩ t₂ [PROOFSTEP] case cons x xs l_ih => by_cases h₁ : x ∈ t₁ · have h₂ : x ∉ t₂ := h h₁ simp [*] by_cases h₂ : x ∈ t₂ · simp only [*, inter_cons_of_not_mem, false_or_iff, mem_append, inter_cons_of_mem, not_false_iff] refine' Perm.trans (Perm.cons _ l_ih) _ change [x] ++ xs ∩ t₁ ++ xs ∩ t₂ ~ xs ∩ t₁ ++ ([x] ++ xs ∩ t₂) rw [← List.append_assoc] solve_by_elim [Perm.append_right, perm_append_comm] · simp [*] [GOAL] α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α h : Disjoint t₁ t₂ x : α xs : List α l_ih : xs ∩ (t₁ ++ t₂) ~ xs ∩ t₁ ++ xs ∩ t₂ ⊢ (x :: xs) ∩ (t₁ ++ t₂) ~ (x :: xs) ∩ t₁ ++ (x :: xs) ∩ t₂ [PROOFSTEP] by_cases h₁ : x ∈ t₁ [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α h : Disjoint t₁ t₂ x : α xs : List α l_ih : xs ∩ (t₁ ++ t₂) ~ xs ∩ t₁ ++ xs ∩ t₂ h₁ : x ∈ t₁ ⊢ (x :: xs) ∩ (t₁ ++ t₂) ~ (x :: xs) ∩ t₁ ++ (x :: xs) ∩ t₂ [PROOFSTEP] have h₂ : x ∉ t₂ := h h₁ [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α h : Disjoint t₁ t₂ x : α xs : List α l_ih : xs ∩ (t₁ ++ t₂) ~ xs ∩ t₁ ++ xs ∩ t₂ h₁ : x ∈ t₁ h₂ : ¬x ∈ t₂ ⊢ (x :: xs) ∩ (t₁ ++ t₂) ~ (x :: xs) ∩ t₁ ++ (x :: xs) ∩ t₂ [PROOFSTEP] simp [*] [GOAL] case neg α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α h : Disjoint t₁ t₂ x : α xs : List α l_ih : xs ∩ (t₁ ++ t₂) ~ xs ∩ t₁ ++ xs ∩ t₂ h₁ : ¬x ∈ t₁ ⊢ (x :: xs) ∩ (t₁ ++ t₂) ~ (x :: xs) ∩ t₁ ++ (x :: xs) ∩ t₂ [PROOFSTEP] by_cases h₂ : x ∈ t₂ [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α h : Disjoint t₁ t₂ x : α xs : List α l_ih : xs ∩ (t₁ ++ t₂) ~ xs ∩ t₁ ++ xs ∩ t₂ h₁ : ¬x ∈ t₁ h₂ : x ∈ t₂ ⊢ (x :: xs) ∩ (t₁ ++ t₂) ~ (x :: xs) ∩ t₁ ++ (x :: xs) ∩ t₂ [PROOFSTEP] simp only [*, inter_cons_of_not_mem, false_or_iff, mem_append, inter_cons_of_mem, not_false_iff] [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α h : Disjoint t₁ t₂ x : α xs : List α l_ih : xs ∩ (t₁ ++ t₂) ~ xs ∩ t₁ ++ xs ∩ t₂ h₁ : ¬x ∈ t₁ h₂ : x ∈ t₂ ⊢ x :: xs ∩ (t₁ ++ t₂) ~ xs ∩ t₁ ++ x :: xs ∩ t₂ [PROOFSTEP] refine' Perm.trans (Perm.cons _ l_ih) _ [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α h : Disjoint t₁ t₂ x : α xs : List α l_ih : xs ∩ (t₁ ++ t₂) ~ xs ∩ t₁ ++ xs ∩ t₂ h₁ : ¬x ∈ t₁ h₂ : x ∈ t₂ ⊢ x :: (xs ∩ t₁ ++ xs ∩ t₂) ~ xs ∩ t₁ ++ x :: xs ∩ t₂ [PROOFSTEP] change [x] ++ 
xs ∩ t₁ ++ xs ∩ t₂ ~ xs ∩ t₁ ++ ([x] ++ xs ∩ t₂) [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α h : Disjoint t₁ t₂ x : α xs : List α l_ih : xs ∩ (t₁ ++ t₂) ~ xs ∩ t₁ ++ xs ∩ t₂ h₁ : ¬x ∈ t₁ h₂ : x ∈ t₂ ⊢ [x] ++ xs ∩ t₁ ++ xs ∩ t₂ ~ xs ∩ t₁ ++ ([x] ++ xs ∩ t₂) [PROOFSTEP] rw [← List.append_assoc] [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α h : Disjoint t₁ t₂ x : α xs : List α l_ih : xs ∩ (t₁ ++ t₂) ~ xs ∩ t₁ ++ xs ∩ t₂ h₁ : ¬x ∈ t₁ h₂ : x ∈ t₂ ⊢ [x] ++ xs ∩ t₁ ++ xs ∩ t₂ ~ xs ∩ t₁ ++ [x] ++ xs ∩ t₂ [PROOFSTEP] solve_by_elim [Perm.append_right, perm_append_comm] [GOAL] case neg α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α t₁ t₂ : List α h : Disjoint t₁ t₂ x : α xs : List α l_ih : xs ∩ (t₁ ++ t₂) ~ xs ∩ t₁ ++ xs ∩ t₂ h₁ : ¬x ∈ t₁ h₂ : ¬x ∈ t₂ ⊢ (x :: xs) ∩ (t₁ ++ t₂) ~ (x :: xs) ∩ t₁ ++ (x :: xs) ∩ t₂ [PROOFSTEP] simp [*] [GOAL] α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α R : α → α → Prop S : Symmetric R l₁✝ l₂✝ l₁ l₂ : List α p : l₁ ~ l₂ d : Pairwise R l₁ ⊢ Pairwise R l₂ [PROOFSTEP] induction' d with a l₁ h _ IH generalizing l₂ [GOAL] case nil α : Type uu β : Type vv l₁✝¹ l₂✝² : List α R : α → α → Prop S : Symmetric R l₁✝ l₂✝¹ l₁ l₂✝ : List α p✝ : l₁ ~ l₂✝ l₂ : List α p : [] ~ l₂ ⊢ Pairwise R l₂ [PROOFSTEP] rw [← p.nil_eq] [GOAL] case nil α : Type uu β : Type vv l₁✝¹ l₂✝² : List α R : α → α → Prop S : Symmetric R l₁✝ l₂✝¹ l₁ l₂✝ : List α p✝ : l₁ ~ l₂✝ l₂ : List α p : [] ~ l₂ ⊢ Pairwise R [] [PROOFSTEP] constructor [GOAL] case cons α : Type uu β : Type vv l₁✝² l₂✝² : List α R : α → α → Prop S : Symmetric R l₁✝¹ l₂✝¹ l₁✝ l₂✝ : List α p✝ : l₁✝ ~ l₂✝ a : α l₁ : List α h : ∀ (a' : α), a' ∈ l₁ → R a a' a✝ : Pairwise R l₁ IH : ∀ (l₂ : List α), l₁ ~ l₂ → Pairwise R l₂ l₂ : List α p : a :: l₁ ~ l₂ ⊢ Pairwise R l₂ [PROOFSTEP] have : a ∈ l₂ := p.subset (mem_cons_self _ _) [GOAL] case cons α : Type uu β : Type vv l₁✝² l₂✝² : List α R : α → α → Prop S : Symmetric R l₁✝¹ l₂✝¹ l₁✝ l₂✝ : List α p✝ : l₁✝ ~ l₂✝ a : α l₁ : List α h : ∀ (a' : α), a' ∈ l₁ → R a a' a✝ : Pairwise R l₁ IH : ∀ (l₂ : List α), l₁ ~ l₂ → Pairwise R l₂ l₂ : List α p : a :: l₁ ~ l₂ this : a ∈ l₂ ⊢ Pairwise R l₂ [PROOFSTEP] rcases mem_split this with ⟨s₂, t₂, rfl⟩ [GOAL] case cons.intro.intro α : Type uu β : Type vv l₁✝² l₂✝¹ : List α R : α → α → Prop S : Symmetric R l₁✝¹ l₂✝ l₁✝ l₂ : List α p✝ : l₁✝ ~ l₂ a : α l₁ : List α h : ∀ (a' : α), a' ∈ l₁ → R a a' a✝ : Pairwise R l₁ IH : ∀ (l₂ : List α), l₁ ~ l₂ → Pairwise R l₂ s₂ t₂ : List α p : a :: l₁ ~ s₂ ++ a :: t₂ this : a ∈ s₂ ++ a :: t₂ ⊢ Pairwise R (s₂ ++ a :: t₂) [PROOFSTEP] have p' := (p.trans perm_middle).cons_inv [GOAL] case cons.intro.intro α : Type uu β : Type vv l₁✝² l₂✝¹ : List α R : α → α → Prop S : Symmetric R l₁✝¹ l₂✝ l₁✝ l₂ : List α p✝ : l₁✝ ~ l₂ a : α l₁ : List α h : ∀ (a' : α), a' ∈ l₁ → R a a' a✝ : Pairwise R l₁ IH : ∀ (l₂ : List α), l₁ ~ l₂ → Pairwise R l₂ s₂ t₂ : List α p : a :: l₁ ~ s₂ ++ a :: t₂ this : a ∈ s₂ ++ a :: t₂ p' : l₁ ~ s₂ ++ t₂ ⊢ Pairwise R (s₂ ++ a :: t₂) [PROOFSTEP] refine' (pairwise_middle S).2 (pairwise_cons.2 ⟨fun b m => _, IH _ p'⟩) [GOAL] case cons.intro.intro α : Type uu β : Type vv l₁✝² l₂✝¹ : List α R : α → α → Prop S : Symmetric R l₁✝¹ l₂✝ l₁✝ l₂ : List α p✝ : l₁✝ ~ l₂ a : α l₁ : List α h : ∀ (a' : α), a' ∈ l₁ → R a a' a✝ : Pairwise R l₁ IH : ∀ (l₂ : List α), l₁ ~ l₂ → Pairwise R l₂ s₂ t₂ : List α p : a :: l₁ ~ s₂ ++ a :: t₂ this : a ∈ s₂ ++ a :: t₂ p' : l₁ ~ s₂ ++ t₂ b : α m : b ∈ s₂ ++ t₂ ⊢ R a b [PROOFSTEP] exact h _ (p'.symm.subset m) 
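-- A minimal usage sketch for the result the trace above just closed: a permutation
-- transports `Pairwise R` along a symmetric relation `R`. It assumes Mathlib's
-- `List.Perm.pairwise_iff`, which takes a `Symmetric R` argument exactly as the
-- `trans` case of this trace applies it (`(p₁.pairwise_iff _).1 H`); the concrete
-- relation `(· ≠ ·)` and the module path below are assumptions for illustration,
-- not part of the source trace.
import Mathlib.Data.List.Perm

example {l₁ l₂ : List ℕ} (p : l₁.Perm l₂) (h : l₁.Pairwise (· ≠ ·)) :
    l₂.Pairwise (· ≠ ·) :=
  -- `fun _ _ hab => Ne.symm hab` witnesses `Symmetric (· ≠ ·)`
  (p.pairwise_iff (fun _ _ hab => Ne.symm hab)).1 h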
[GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α l₁ l₂ : List (List α) h : l₁ ~ l₂ x₁ x₂ : List α xs : List (List α) ⊢ List.join (x₂ :: x₁ :: xs) ~ List.join (x₁ :: x₂ :: xs) [PROOFSTEP] simpa only [join, append_assoc] using perm_append_comm.append_right _ [GOAL] α : Type uu β : Type vv l₁ l₂ l : List α f g : α → List β h : ∀ (a : α), a ∈ l → f a ~ g a ⊢ Forall₂ (fun x x_1 => x ~ x_1) (List.map f l) (List.map g l) [PROOFSTEP] rwa [List.forall₂_map_right_iff, List.forall₂_map_left_iff, List.forall₂_same] [GOAL] α : Type uu β : Type vv l₁ l₂ l : List α f g : α → List β ⊢ List.bind l f ++ List.bind l g ~ List.bind l fun x => f x ++ g x [PROOFSTEP] induction' l with a l IH [GOAL] case nil α : Type uu β : Type vv l₁ l₂ : List α f g : α → List β ⊢ List.bind [] f ++ List.bind [] g ~ List.bind [] fun x => f x ++ g x [PROOFSTEP] simp [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α f g : α → List β a : α l : List α IH : List.bind l f ++ List.bind l g ~ List.bind l fun x => f x ++ g x ⊢ List.bind (a :: l) f ++ List.bind (a :: l) g ~ List.bind (a :: l) fun x => f x ++ g x [PROOFSTEP] simp [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α f g : α → List β a : α l : List α IH : List.bind l f ++ List.bind l g ~ List.bind l fun x => f x ++ g x ⊢ f a ++ (List.bind l f ++ (g a ++ List.bind l g)) ~ f a ++ (g a ++ List.bind l fun x => f x ++ g x) [PROOFSTEP] refine' (Perm.trans _ (IH.append_left _)).append_left _ [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α f g : α → List β a : α l : List α IH : List.bind l f ++ List.bind l g ~ List.bind l fun x => f x ++ g x ⊢ List.bind l f ++ (g a ++ List.bind l g) ~ g a ++ (List.bind l f ++ List.bind l g) [PROOFSTEP] rw [← append_assoc, ← append_assoc] [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α f g : α → List β a : α l : List α IH : List.bind l f ++ List.bind l g ~ List.bind l fun x => f x ++ g x ⊢ List.bind l f ++ g a ++ List.bind l g ~ g a ++ List.bind l f ++ List.bind l g [PROOFSTEP] exact perm_append_comm.append_right _ [GOAL] α : Type uu β : Type vv l₁ l₂ l : List α f : α → β g : α → List β ⊢ map f l ++ List.bind l g ~ List.bind l fun x => f x :: g x [PROOFSTEP] simpa [← map_eq_bind] using bind_append_perm l (fun x => [f x]) g [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Option α l₁ l₂ : List α H : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ p : l₁ ~ l₂ ⊢ lookmap f l₁ ~ lookmap f l₂ [PROOFSTEP] induction' p with a l₁ l₂ p IH a b l l₁ l₂ l₃ p₁ _ IH₁ IH₂ [GOAL] case nil α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Option α l₁ l₂ : List α H✝ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ H : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) [] ⊢ lookmap f [] ~ lookmap f [] [PROOFSTEP] simp [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α f : α → Option α l₁✝ l₂✝ : List α H✝ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁✝ a : α l₁ l₂ : List α p : l₁ ~ l₂ IH : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ → lookmap f l₁ ~ lookmap f l₂ H : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) (a :: l₁) ⊢ lookmap f (a :: l₁) ~ lookmap f (a :: l₂) [PROOFSTEP] cases h : f a [GOAL] case cons.none α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α f : α → Option α l₁✝ l₂✝ : List α H✝ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁✝ a : α l₁ l₂ : List α p : l₁ ~ l₂ IH : Pairwise (fun a b => ∀ (c : 
α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ → lookmap f l₁ ~ lookmap f l₂ H : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) (a :: l₁) h : f a = none ⊢ lookmap f (a :: l₁) ~ lookmap f (a :: l₂) [PROOFSTEP] simp [h] [GOAL] case cons.none α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α f : α → Option α l₁✝ l₂✝ : List α H✝ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁✝ a : α l₁ l₂ : List α p : l₁ ~ l₂ IH : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ → lookmap f l₁ ~ lookmap f l₂ H : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) (a :: l₁) h : f a = none ⊢ lookmap f l₁ ~ lookmap f l₂ [PROOFSTEP] exact IH (pairwise_cons.1 H).2 [GOAL] case cons.some α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α f : α → Option α l₁✝ l₂✝ : List α H✝ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁✝ a : α l₁ l₂ : List α p : l₁ ~ l₂ IH : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ → lookmap f l₁ ~ lookmap f l₂ H : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) (a :: l₁) val✝ : α h : f a = some val✝ ⊢ lookmap f (a :: l₁) ~ lookmap f (a :: l₂) [PROOFSTEP] simp [lookmap_cons_some _ _ h, p] [GOAL] case swap α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Option α l₁ l₂ : List α H✝ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ a b : α l : List α H : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) (b :: a :: l) ⊢ lookmap f (b :: a :: l) ~ lookmap f (a :: b :: l) [PROOFSTEP] cases' h₁ : f a with c [GOAL] case swap.none α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Option α l₁ l₂ : List α H✝ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ a b : α l : List α H : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) (b :: a :: l) h₁ : f a = none ⊢ lookmap f (b :: a :: l) ~ lookmap f (a :: b :: l) [PROOFSTEP] cases' h₂ : f b with d [GOAL] case swap.some α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Option α l₁ l₂ : List α H✝ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ a b : α l : List α H : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) (b :: a :: l) c : α h₁ : f a = some c ⊢ lookmap f (b :: a :: l) ~ lookmap f (a :: b :: l) [PROOFSTEP] cases' h₂ : f b with d [GOAL] case swap.none.none α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Option α l₁ l₂ : List α H✝ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ a b : α l : List α H : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) (b :: a :: l) h₁ : f a = none h₂ : f b = none ⊢ lookmap f (b :: a :: l) ~ lookmap f (a :: b :: l) [PROOFSTEP] simp [h₁, h₂] [GOAL] case swap.none.none α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Option α l₁ l₂ : List α H✝ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ a b : α l : List α H : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) (b :: a :: l) h₁ : f a = none h₂ : f b = none ⊢ b :: a :: lookmap f l ~ a :: b :: lookmap f l [PROOFSTEP] apply swap [GOAL] case swap.none.some α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Option α l₁ l₂ : List α H✝ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ a b : α l : List α H : Pairwise (fun a b => ∀ (c : 
α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) (b :: a :: l) h₁ : f a = none d : α h₂ : f b = some d ⊢ lookmap f (b :: a :: l) ~ lookmap f (a :: b :: l) [PROOFSTEP] simp [h₁, lookmap_cons_some _ _ h₂] [GOAL] case swap.none.some α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Option α l₁ l₂ : List α H✝ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ a b : α l : List α H : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) (b :: a :: l) h₁ : f a = none d : α h₂ : f b = some d ⊢ d :: a :: l ~ a :: d :: l [PROOFSTEP] apply swap [GOAL] case swap.some.none α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Option α l₁ l₂ : List α H✝ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ a b : α l : List α H : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) (b :: a :: l) c : α h₁ : f a = some c h₂ : f b = none ⊢ lookmap f (b :: a :: l) ~ lookmap f (a :: b :: l) [PROOFSTEP] simp [lookmap_cons_some _ _ h₁, h₂] [GOAL] case swap.some.none α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Option α l₁ l₂ : List α H✝ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ a b : α l : List α H : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) (b :: a :: l) c : α h₁ : f a = some c h₂ : f b = none ⊢ b :: c :: l ~ c :: b :: l [PROOFSTEP] apply swap [GOAL] case swap.some.some α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Option α l₁ l₂ : List α H✝ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ a b : α l : List α H : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) (b :: a :: l) c : α h₁ : f a = some c d : α h₂ : f b = some d ⊢ lookmap f (b :: a :: l) ~ lookmap f (a :: b :: l) [PROOFSTEP] simp [lookmap_cons_some _ _ h₁, lookmap_cons_some _ _ h₂] [GOAL] case swap.some.some α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Option α l₁ l₂ : List α H✝ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ a b : α l : List α H : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) (b :: a :: l) c : α h₁ : f a = some c d : α h₂ : f b = some d ⊢ d :: a :: l ~ c :: b :: l [PROOFSTEP] rcases(pairwise_cons.1 H).1 _ (mem_cons.2 (Or.inl rfl)) _ h₂ _ h₁ with ⟨rfl, rfl⟩ [GOAL] case swap.some.some.intro α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Option α l₁ l₂ : List α H✝ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ b : α l : List α d : α h₂ : f b = some d H : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) (b :: b :: l) h₁ : f b = some d ⊢ d :: b :: l ~ d :: b :: l [PROOFSTEP] exact Perm.refl _ [GOAL] case trans α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α f : α → Option α l₁✝ l₂✝ : List α H✝ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁✝ l₁ l₂ l₃ : List α p₁ : l₁ ~ l₂ a✝ : l₂ ~ l₃ IH₁ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ → lookmap f l₁ ~ lookmap f l₂ IH₂ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₂ → lookmap f l₂ ~ lookmap f l₃ H : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ ⊢ lookmap f l₁ ~ lookmap f l₃ [PROOFSTEP] refine' (IH₁ H).trans (IH₂ ((p₁.pairwise_iff _).1 H)) [GOAL] case trans α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α f : α → Option α l₁✝ l₂✝ : List α H✝ : Pairwise (fun a b => ∀ (c : α), c ∈ f 
a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁✝ l₁ l₂ l₃ : List α p₁ : l₁ ~ l₂ a✝ : l₂ ~ l₃ IH₁ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ → lookmap f l₁ ~ lookmap f l₂ IH₂ : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₂ → lookmap f l₂ ~ lookmap f l₃ H : Pairwise (fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d) l₁ ⊢ Symmetric fun a b => ∀ (c : α), c ∈ f a → ∀ (d : α), d ∈ f b → a = b ∧ c = d [PROOFSTEP] exact fun a b h c h₁ d h₂ => (h d h₂ c h₁).imp Eq.symm Eq.symm [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Prop inst✝ : DecidablePred f l₁ l₂ : List α H : Pairwise (fun a b => f a → f b → False) l₁ p : l₁ ~ l₂ ⊢ eraseP (fun b => decide (f b)) l₁ ~ eraseP (fun b => decide (f b)) l₂ [PROOFSTEP] induction' p with a l₁ l₂ p IH a b l l₁ l₂ l₃ p₁ _ IH₁ IH₂ [GOAL] case nil α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Prop inst✝ : DecidablePred f l₁ l₂ : List α H✝ : Pairwise (fun a b => f a → f b → False) l₁ H : Pairwise (fun a b => f a → f b → False) [] ⊢ eraseP (fun b => decide (f b)) [] ~ eraseP (fun b => decide (f b)) [] [PROOFSTEP] simp [GOAL] case cons α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α f : α → Prop inst✝ : DecidablePred f l₁✝ l₂✝ : List α H✝ : Pairwise (fun a b => f a → f b → False) l₁✝ a : α l₁ l₂ : List α p : l₁ ~ l₂ IH : Pairwise (fun a b => f a → f b → False) l₁ → eraseP (fun b => decide (f b)) l₁ ~ eraseP (fun b => decide (f b)) l₂ H : Pairwise (fun a b => f a → f b → False) (a :: l₁) ⊢ eraseP (fun b => decide (f b)) (a :: l₁) ~ eraseP (fun b => decide (f b)) (a :: l₂) [PROOFSTEP] by_cases h : f a [GOAL] case pos α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α f : α → Prop inst✝ : DecidablePred f l₁✝ l₂✝ : List α H✝ : Pairwise (fun a b => f a → f b → False) l₁✝ a : α l₁ l₂ : List α p : l₁ ~ l₂ IH : Pairwise (fun a b => f a → f b → False) l₁ → eraseP (fun b => decide (f b)) l₁ ~ eraseP (fun b => decide (f b)) l₂ H : Pairwise (fun a b => f a → f b → False) (a :: l₁) h : f a ⊢ eraseP (fun b => decide (f b)) (a :: l₁) ~ eraseP (fun b => decide (f b)) (a :: l₂) [PROOFSTEP] simp [h, p] [GOAL] case neg α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α f : α → Prop inst✝ : DecidablePred f l₁✝ l₂✝ : List α H✝ : Pairwise (fun a b => f a → f b → False) l₁✝ a : α l₁ l₂ : List α p : l₁ ~ l₂ IH : Pairwise (fun a b => f a → f b → False) l₁ → eraseP (fun b => decide (f b)) l₁ ~ eraseP (fun b => decide (f b)) l₂ H : Pairwise (fun a b => f a → f b → False) (a :: l₁) h : ¬f a ⊢ eraseP (fun b => decide (f b)) (a :: l₁) ~ eraseP (fun b => decide (f b)) (a :: l₂) [PROOFSTEP] simp [h] [GOAL] case neg α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α f : α → Prop inst✝ : DecidablePred f l₁✝ l₂✝ : List α H✝ : Pairwise (fun a b => f a → f b → False) l₁✝ a : α l₁ l₂ : List α p : l₁ ~ l₂ IH : Pairwise (fun a b => f a → f b → False) l₁ → eraseP (fun b => decide (f b)) l₁ ~ eraseP (fun b => decide (f b)) l₂ H : Pairwise (fun a b => f a → f b → False) (a :: l₁) h : ¬f a ⊢ eraseP (fun b => decide (f b)) l₁ ~ eraseP (fun b => decide (f b)) l₂ [PROOFSTEP] exact IH (pairwise_cons.1 H).2 [GOAL] case swap α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Prop inst✝ : DecidablePred f l₁ l₂ : List α H✝ : Pairwise (fun a b => f a → f b → False) l₁ a b : α l : List α H : Pairwise (fun a b => f a → f b → False) (b :: a :: l) ⊢ eraseP (fun b => decide (f b)) (b :: a :: l) ~ eraseP (fun b => decide (f b)) (a :: b :: l) [PROOFSTEP] by_cases h₁ : f a [GOAL] case pos α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Prop inst✝ : 
DecidablePred f l₁ l₂ : List α H✝ : Pairwise (fun a b => f a → f b → False) l₁ a b : α l : List α H : Pairwise (fun a b => f a → f b → False) (b :: a :: l) h₁ : f a ⊢ eraseP (fun b => decide (f b)) (b :: a :: l) ~ eraseP (fun b => decide (f b)) (a :: b :: l) [PROOFSTEP] by_cases h₂ : f b [GOAL] case neg α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Prop inst✝ : DecidablePred f l₁ l₂ : List α H✝ : Pairwise (fun a b => f a → f b → False) l₁ a b : α l : List α H : Pairwise (fun a b => f a → f b → False) (b :: a :: l) h₁ : ¬f a ⊢ eraseP (fun b => decide (f b)) (b :: a :: l) ~ eraseP (fun b => decide (f b)) (a :: b :: l) [PROOFSTEP] by_cases h₂ : f b [GOAL] case pos α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Prop inst✝ : DecidablePred f l₁ l₂ : List α H✝ : Pairwise (fun a b => f a → f b → False) l₁ a b : α l : List α H : Pairwise (fun a b => f a → f b → False) (b :: a :: l) h₁ : f a h₂ : f b ⊢ eraseP (fun b => decide (f b)) (b :: a :: l) ~ eraseP (fun b => decide (f b)) (a :: b :: l) [PROOFSTEP] simp [h₁, h₂] [GOAL] case neg α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Prop inst✝ : DecidablePred f l₁ l₂ : List α H✝ : Pairwise (fun a b => f a → f b → False) l₁ a b : α l : List α H : Pairwise (fun a b => f a → f b → False) (b :: a :: l) h₁ : f a h₂ : ¬f b ⊢ eraseP (fun b => decide (f b)) (b :: a :: l) ~ eraseP (fun b => decide (f b)) (a :: b :: l) [PROOFSTEP] simp [h₁, h₂] [GOAL] case pos α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Prop inst✝ : DecidablePred f l₁ l₂ : List α H✝ : Pairwise (fun a b => f a → f b → False) l₁ a b : α l : List α H : Pairwise (fun a b => f a → f b → False) (b :: a :: l) h₁ : ¬f a h₂ : f b ⊢ eraseP (fun b => decide (f b)) (b :: a :: l) ~ eraseP (fun b => decide (f b)) (a :: b :: l) [PROOFSTEP] simp [h₁, h₂] [GOAL] case neg α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Prop inst✝ : DecidablePred f l₁ l₂ : List α H✝ : Pairwise (fun a b => f a → f b → False) l₁ a b : α l : List α H : Pairwise (fun a b => f a → f b → False) (b :: a :: l) h₁ : ¬f a h₂ : ¬f b ⊢ eraseP (fun b => decide (f b)) (b :: a :: l) ~ eraseP (fun b => decide (f b)) (a :: b :: l) [PROOFSTEP] simp [h₁, h₂] [GOAL] case pos α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Prop inst✝ : DecidablePred f l₁ l₂ : List α H✝ : Pairwise (fun a b => f a → f b → False) l₁ a b : α l : List α H : Pairwise (fun a b => f a → f b → False) (b :: a :: l) h₁ : f a h₂ : f b ⊢ a :: l ~ b :: l [PROOFSTEP] cases (pairwise_cons.1 H).1 _ (mem_cons.2 (Or.inl rfl)) h₂ h₁ [GOAL] case neg α : Type uu β : Type vv l₁✝ l₂✝ : List α f : α → Prop inst✝ : DecidablePred f l₁ l₂ : List α H✝ : Pairwise (fun a b => f a → f b → False) l₁ a b : α l : List α H : Pairwise (fun a b => f a → f b → False) (b :: a :: l) h₁ : ¬f a h₂ : ¬f b ⊢ b :: a :: eraseP (fun b => decide (f b)) l ~ a :: b :: eraseP (fun b => decide (f b)) l [PROOFSTEP] apply swap [GOAL] case trans α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List α f : α → Prop inst✝ : DecidablePred f l₁✝ l₂✝ : List α H✝ : Pairwise (fun a b => f a → f b → False) l₁✝ l₁ l₂ l₃ : List α p₁ : l₁ ~ l₂ a✝ : l₂ ~ l₃ IH₁ : Pairwise (fun a b => f a → f b → False) l₁ → eraseP (fun b => decide (f b)) l₁ ~ eraseP (fun b => decide (f b)) l₂ IH₂ : Pairwise (fun a b => f a → f b → False) l₂ → eraseP (fun b => decide (f b)) l₂ ~ eraseP (fun b => decide (f b)) l₃ H : Pairwise (fun a b => f a → f b → False) l₁ ⊢ eraseP (fun b => decide (f b)) l₁ ~ eraseP (fun b => decide (f b)) l₃ [PROOFSTEP] refine' (IH₁ H).trans (IH₂ ((p₁.pairwise_iff _).1 H)) [GOAL] case trans α : Type uu β : Type vv l₁✝¹ l₂✝¹ : List 
α f : α → Prop inst✝ : DecidablePred f l₁✝ l₂✝ : List α H✝ : Pairwise (fun a b => f a → f b → False) l₁✝ l₁ l₂ l₃ : List α p₁ : l₁ ~ l₂ a✝ : l₂ ~ l₃ IH₁ : Pairwise (fun a b => f a → f b → False) l₁ → eraseP (fun b => decide (f b)) l₁ ~ eraseP (fun b => decide (f b)) l₂ IH₂ : Pairwise (fun a b => f a → f b → False) l₂ → eraseP (fun b => decide (f b)) l₂ ~ eraseP (fun b => decide (f b)) l₃ H : Pairwise (fun a b => f a → f b → False) l₁ ⊢ Symmetric fun a b => f a → f b → False [PROOFSTEP] exact fun a b h h₁ h₂ => h h₂ h₁ [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys ⊢ take n xs ~ List.inter ys (take n xs) [PROOFSTEP] simp only [List.inter] [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys ⊢ take n xs ~ List.filter (fun x => decide (x ∈ take n xs)) ys [PROOFSTEP] exact Perm.trans (show xs.take n ~ xs.filter (· ∈ xs.take n) by conv_lhs => rw [Nodup.take_eq_filter_mem ((Perm.nodup_iff h).2 h')]) (Perm.filter _ h) [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys ⊢ take n xs ~ List.filter (fun x => decide (x ∈ take n xs)) xs [PROOFSTEP] conv_lhs => rw [Nodup.take_eq_filter_mem ((Perm.nodup_iff h).2 h')] [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys | take n xs [PROOFSTEP] rw [Nodup.take_eq_filter_mem ((Perm.nodup_iff h).2 h')] [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys | take n xs [PROOFSTEP] rw [Nodup.take_eq_filter_mem ((Perm.nodup_iff h).2 h')] [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys | take n xs [PROOFSTEP] rw [Nodup.take_eq_filter_mem ((Perm.nodup_iff h).2 h')] [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys ⊢ drop n xs ~ List.inter ys (drop n xs) [PROOFSTEP] by_cases h'' : n ≤ xs.length [GOAL] case pos α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys h'' : n ≤ length xs ⊢ drop n xs ~ List.inter ys (drop n xs) [PROOFSTEP] let n' := xs.length - n [GOAL] case pos α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys h'' : n ≤ length xs n' : ℕ := length xs - n ⊢ drop n xs ~ List.inter ys (drop n xs) [PROOFSTEP] have h₀ : n = xs.length - n' := by rwa [tsub_tsub_cancel_of_le] [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys h'' : n ≤ length xs n' : ℕ := length xs - n ⊢ n = length xs - n' [PROOFSTEP] rwa [tsub_tsub_cancel_of_le] [GOAL] case pos α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys h'' : n ≤ length xs n' : ℕ := length xs - n h₀ : n = length xs - n' ⊢ drop n xs ~ List.inter ys (drop n xs) [PROOFSTEP] have h₁ : n' ≤ xs.length := by apply tsub_le_self [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys h'' : n ≤ length xs n' : ℕ := length xs - n h₀ : n = length xs - n' ⊢ n' ≤ length xs [PROOFSTEP] apply tsub_le_self [GOAL] case pos α✝ : Type uu β 
: Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys h'' : n ≤ length xs n' : ℕ := length xs - n h₀ : n = length xs - n' h₁ : n' ≤ length xs ⊢ drop n xs ~ List.inter ys (drop n xs) [PROOFSTEP] have h₂ : xs.drop n = (xs.reverse.take n').reverse := by rw [reverse_take _ h₁, h₀, reverse_reverse] [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys h'' : n ≤ length xs n' : ℕ := length xs - n h₀ : n = length xs - n' h₁ : n' ≤ length xs ⊢ drop n xs = reverse (take n' (reverse xs)) [PROOFSTEP] rw [reverse_take _ h₁, h₀, reverse_reverse] [GOAL] case pos α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys h'' : n ≤ length xs n' : ℕ := length xs - n h₀ : n = length xs - n' h₁ : n' ≤ length xs h₂ : drop n xs = reverse (take n' (reverse xs)) ⊢ drop n xs ~ List.inter ys (drop n xs) [PROOFSTEP] rw [h₂] [GOAL] case pos α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys h'' : n ≤ length xs n' : ℕ := length xs - n h₀ : n = length xs - n' h₁ : n' ≤ length xs h₂ : drop n xs = reverse (take n' (reverse xs)) ⊢ reverse (take n' (reverse xs)) ~ List.inter ys (reverse (take n' (reverse xs))) [PROOFSTEP] apply (reverse_perm _).trans [GOAL] case pos α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys h'' : n ≤ length xs n' : ℕ := length xs - n h₀ : n = length xs - n' h₁ : n' ≤ length xs h₂ : drop n xs = reverse (take n' (reverse xs)) ⊢ take n' (reverse xs) ~ List.inter ys (reverse (take n' (reverse xs))) [PROOFSTEP] rw [inter_reverse] [GOAL] case pos α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys h'' : n ≤ length xs n' : ℕ := length xs - n h₀ : n = length xs - n' h₁ : n' ≤ length xs h₂ : drop n xs = reverse (take n' (reverse xs)) ⊢ take n' (reverse xs) ~ List.inter ys (take n' (reverse xs)) [PROOFSTEP] apply Perm.take_inter _ _ h' [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys h'' : n ≤ length xs n' : ℕ := length xs - n h₀ : n = length xs - n' h₁ : n' ≤ length xs h₂ : drop n xs = reverse (take n' (reverse xs)) ⊢ reverse xs ~ ys [PROOFSTEP] apply (reverse_perm _).trans [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys h'' : n ≤ length xs n' : ℕ := length xs - n h₀ : n = length xs - n' h₁ : n' ≤ length xs h₂ : drop n xs = reverse (take n' (reverse xs)) ⊢ xs ~ ys [PROOFSTEP] assumption [GOAL] case neg α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys h'' : ¬n ≤ length xs ⊢ drop n xs ~ List.inter ys (drop n xs) [PROOFSTEP] have : drop n xs = [] := by apply eq_nil_of_length_eq_zero rw [length_drop, tsub_eq_zero_iff_le] apply le_of_not_ge h'' [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys h'' : ¬n ≤ length xs ⊢ drop n xs = [] [PROOFSTEP] apply eq_nil_of_length_eq_zero [GOAL] case x α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys h'' : ¬n ≤ length xs ⊢ length (drop n xs) = 0 [PROOFSTEP] rw [length_drop, 
tsub_eq_zero_iff_le] [GOAL] case x α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys h'' : ¬n ≤ length xs ⊢ length xs ≤ n [PROOFSTEP] apply le_of_not_ge h'' [GOAL] case neg α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n : ℕ h : xs ~ ys h' : Nodup ys h'' : ¬n ≤ length xs this : drop n xs = [] ⊢ drop n xs ~ List.inter ys (drop n xs) [PROOFSTEP] simp [this, List.inter] [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n m : ℕ h : xs ~ ys h' : Nodup ys ⊢ dropSlice n m xs ~ ys ∩ dropSlice n m xs [PROOFSTEP] simp only [dropSlice_eq] [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n m : ℕ h : xs ~ ys h' : Nodup ys ⊢ take n xs ++ drop (n + m) xs ~ ys ∩ (take n xs ++ drop (n + m) xs) [PROOFSTEP] have : n ≤ n + m := Nat.le_add_right _ _ [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n m : ℕ h : xs ~ ys h' : Nodup ys this : n ≤ n + m ⊢ take n xs ++ drop (n + m) xs ~ ys ∩ (take n xs ++ drop (n + m) xs) [PROOFSTEP] have h₂ := h.nodup_iff.2 h' [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n m : ℕ h : xs ~ ys h' : Nodup ys this : n ≤ n + m h₂ : Nodup xs ⊢ take n xs ++ drop (n + m) xs ~ ys ∩ (take n xs ++ drop (n + m) xs) [PROOFSTEP] apply Perm.trans _ (Perm.inter_append _).symm [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n m : ℕ h : xs ~ ys h' : Nodup ys this : n ≤ n + m h₂ : Nodup xs ⊢ take n xs ++ drop (n + m) xs ~ ys ∩ take n xs ++ ys ∩ drop (n + m) xs [PROOFSTEP] exact Perm.append (Perm.take_inter _ h h') (Perm.drop_inter _ h h') [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α xs ys : List α n m : ℕ h : xs ~ ys h' : Nodup ys this : n ≤ n + m h₂ : Nodup xs ⊢ Disjoint (take n xs) (drop (n + m) xs) [PROOFSTEP] exact disjoint_take_drop h₂ this [GOAL] α : Type uu β : Type vv l₁ l₂ : List α ⊢ ∀ {ts is l : List α}, l ∈ permutationsAux ts is → l ~ ts ++ is [PROOFSTEP] show ∀ (ts is l : List α), l ∈ permutationsAux ts is → l ~ ts ++ is [GOAL] α : Type uu β : Type vv l₁ l₂ : List α ⊢ ∀ (ts is l : List α), l ∈ permutationsAux ts is → l ~ ts ++ is [PROOFSTEP] refine' permutationsAux.rec (by simp) _ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α ⊢ ∀ (is l : List α), l ∈ permutationsAux [] is → l ~ [] ++ is [PROOFSTEP] simp [GOAL] α : Type uu β : Type vv l₁ l₂ : List α ⊢ ∀ (t : α) (ts is : List α), (∀ (l : List α), l ∈ permutationsAux ts (t :: is) → l ~ ts ++ t :: is) → (∀ (l : List α), l ∈ permutationsAux is [] → l ~ is ++ []) → ∀ (l : List α), l ∈ permutationsAux (t :: ts) is → l ~ t :: ts ++ is [PROOFSTEP] introv IH1 IH2 m [GOAL] α : Type uu β : Type vv l₁ l₂ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ∈ permutationsAux ts (t :: is) → l ~ ts ++ t :: is IH2 : ∀ (l : List α), l ∈ permutationsAux is [] → l ~ is ++ [] l : List α m : l ∈ permutationsAux (t :: ts) is ⊢ l ~ t :: ts ++ is [PROOFSTEP] rw [permutationsAux_cons, permutations, mem_foldr_permutationsAux2] at m [GOAL] α : Type uu β : Type vv l₁ l₂ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ∈ permutationsAux ts (t :: is) → l ~ ts ++ t :: is IH2 : ∀ (l : List α), l ∈ permutationsAux is [] → l ~ is ++ [] l : List α m : l ∈ permutationsAux ts (t :: is) ∨ ∃ l₁ l₂, l₁ ++ l₂ ∈ is :: permutationsAux is [] ∧ l₂ ≠ [] ∧ l = l₁ ++ t :: l₂ ++ ts ⊢ l 
~ t :: ts ++ is [PROOFSTEP] rcases m with (m | ⟨l₁, l₂, m, _, e⟩) [GOAL] case inl α : Type uu β : Type vv l₁ l₂ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ∈ permutationsAux ts (t :: is) → l ~ ts ++ t :: is IH2 : ∀ (l : List α), l ∈ permutationsAux is [] → l ~ is ++ [] l : List α m : l ∈ permutationsAux ts (t :: is) ⊢ l ~ t :: ts ++ is [PROOFSTEP] exact (IH1 _ m).trans perm_middle [GOAL] case inr.intro.intro.intro.intro α : Type uu β : Type vv l₁✝ l₂✝ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ∈ permutationsAux ts (t :: is) → l ~ ts ++ t :: is IH2 : ∀ (l : List α), l ∈ permutationsAux is [] → l ~ is ++ [] l l₁ l₂ : List α m : l₁ ++ l₂ ∈ is :: permutationsAux is [] left✝ : l₂ ≠ [] e : l = l₁ ++ t :: l₂ ++ ts ⊢ l ~ t :: ts ++ is [PROOFSTEP] subst e [GOAL] case inr.intro.intro.intro.intro α : Type uu β : Type vv l₁✝ l₂✝ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ∈ permutationsAux ts (t :: is) → l ~ ts ++ t :: is IH2 : ∀ (l : List α), l ∈ permutationsAux is [] → l ~ is ++ [] l₁ l₂ : List α m : l₁ ++ l₂ ∈ is :: permutationsAux is [] left✝ : l₂ ≠ [] ⊢ l₁ ++ t :: l₂ ++ ts ~ t :: ts ++ is [PROOFSTEP] have p : l₁ ++ l₂ ~ is := by simp [permutations] at m cases' m with e m · simp [e] exact is.append_nil ▸ IH2 _ m [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ∈ permutationsAux ts (t :: is) → l ~ ts ++ t :: is IH2 : ∀ (l : List α), l ∈ permutationsAux is [] → l ~ is ++ [] l₁ l₂ : List α m : l₁ ++ l₂ ∈ is :: permutationsAux is [] left✝ : l₂ ≠ [] ⊢ l₁ ++ l₂ ~ is [PROOFSTEP] simp [permutations] at m [GOAL] α : Type uu β : Type vv l₁✝ l₂✝ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ∈ permutationsAux ts (t :: is) → l ~ ts ++ t :: is IH2 : ∀ (l : List α), l ∈ permutationsAux is [] → l ~ is ++ [] l₁ l₂ : List α left✝ : l₂ ≠ [] m : l₁ ++ l₂ = is ∨ l₁ ++ l₂ ∈ permutationsAux is [] ⊢ l₁ ++ l₂ ~ is [PROOFSTEP] cases' m with e m [GOAL] case inl α : Type uu β : Type vv l₁✝ l₂✝ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ∈ permutationsAux ts (t :: is) → l ~ ts ++ t :: is IH2 : ∀ (l : List α), l ∈ permutationsAux is [] → l ~ is ++ [] l₁ l₂ : List α left✝ : l₂ ≠ [] e : l₁ ++ l₂ = is ⊢ l₁ ++ l₂ ~ is [PROOFSTEP] simp [e] [GOAL] case inr α : Type uu β : Type vv l₁✝ l₂✝ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ∈ permutationsAux ts (t :: is) → l ~ ts ++ t :: is IH2 : ∀ (l : List α), l ∈ permutationsAux is [] → l ~ is ++ [] l₁ l₂ : List α left✝ : l₂ ≠ [] m : l₁ ++ l₂ ∈ permutationsAux is [] ⊢ l₁ ++ l₂ ~ is [PROOFSTEP] exact is.append_nil ▸ IH2 _ m [GOAL] case inr.intro.intro.intro.intro α : Type uu β : Type vv l₁✝ l₂✝ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ∈ permutationsAux ts (t :: is) → l ~ ts ++ t :: is IH2 : ∀ (l : List α), l ∈ permutationsAux is [] → l ~ is ++ [] l₁ l₂ : List α m : l₁ ++ l₂ ∈ is :: permutationsAux is [] left✝ : l₂ ≠ [] p : l₁ ++ l₂ ~ is ⊢ l₁ ++ t :: l₂ ++ ts ~ t :: ts ++ is [PROOFSTEP] exact ((perm_middle.trans (p.cons _)).append_right _).trans (perm_append_comm.cons _) [GOAL] α : Type uu β : Type vv l₁ l₂ : List α ⊢ ∀ (ts is : List α), length (permutationsAux ts is) + (length is)! = (length ts + length is)! [PROOFSTEP] refine' permutationsAux.rec (by simp) _ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α ⊢ ∀ (is : List α), length (permutationsAux [] is) + (length is)! = (length [] + length is)! [PROOFSTEP] simp [GOAL] α : Type uu β : Type vv l₁ l₂ : List α ⊢ ∀ (t : α) (ts is : List α), length (permutationsAux ts (t :: is)) + (length (t :: is))! 
= (length ts + length (t :: is))! → length (permutationsAux is []) + (length [])! = (length is + length [])! → length (permutationsAux (t :: ts) is) + (length is)! = (length (t :: ts) + length is)! [PROOFSTEP] intro t ts is IH1 IH2 [GOAL] α : Type uu β : Type vv l₁ l₂ : List α t : α ts is : List α IH1 : length (permutationsAux ts (t :: is)) + (length (t :: is))! = (length ts + length (t :: is))! IH2 : length (permutationsAux is []) + (length [])! = (length is + length [])! ⊢ length (permutationsAux (t :: ts) is) + (length is)! = (length (t :: ts) + length is)! [PROOFSTEP] have IH2 : length (permutationsAux is nil) + 1 = is.length ! := by simpa using IH2 [GOAL] α : Type uu β : Type vv l₁ l₂ : List α t : α ts is : List α IH1 : length (permutationsAux ts (t :: is)) + (length (t :: is))! = (length ts + length (t :: is))! IH2 : length (permutationsAux is []) + (length [])! = (length is + length [])! ⊢ length (permutationsAux is []) + 1 = (length is)! [PROOFSTEP] simpa using IH2 [GOAL] α : Type uu β : Type vv l₁ l₂ : List α t : α ts is : List α IH1 : length (permutationsAux ts (t :: is)) + (length (t :: is))! = (length ts + length (t :: is))! IH2✝ : length (permutationsAux is []) + (length [])! = (length is + length [])! IH2 : length (permutationsAux is []) + 1 = (length is)! ⊢ length (permutationsAux (t :: ts) is) + (length is)! = (length (t :: ts) + length is)! [PROOFSTEP] simp [Nat.factorial, Nat.add_succ, mul_comm] at IH1 [GOAL] α : Type uu β : Type vv l₁ l₂ : List α t : α ts is : List α IH2✝ : length (permutationsAux is []) + (length [])! = (length is + length [])! IH2 : length (permutationsAux is []) + 1 = (length is)! IH1 : length (permutationsAux ts (t :: is)) + (length is)! * succ (length is) = (length ts + length is)! * succ (length ts + length is) ⊢ length (permutationsAux (t :: ts) is) + (length is)! = (length (t :: ts) + length is)! 
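-- Editor's note (not part of the extracted trace): the goal state above is the
-- counting identity `length (permutationsAux ts is) + (length is)! = (length ts + length is)!`.
-- Specialized to `is = []` (using `permutations ts = ts :: permutationsAux ts []`)
-- it yields the familiar `(permutations l).length = (length l)!`. A minimal sanity
-- check of that consequence, assuming Mathlib's `List.permutations` is in scope;
-- kept as comments so the GOAL/PROOFSTEP stream stays machine-readable:
-- #eval [1, 2, 3].permutations.length     -- expected: 6 = 3!
-- #eval [1, 2, 3, 4].permutations.length  -- expected: 24 = 4!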
[PROOFSTEP] rw [permutationsAux_cons, length_foldr_permutationsAux2' _ _ _ _ _ fun l m => (perm_of_mem_permutations m).length_eq, permutations, length, length, IH2, Nat.succ_add, Nat.factorial_succ, mul_comm (_ + 1), ← Nat.succ_eq_add_one, ← IH1, add_comm (_ * _), add_assoc, Nat.mul_succ, mul_comm] [GOAL] α : Type uu β : Type vv l₁ l₂ is l : List α H : l ~ [] ++ is → (∃ ts' x, l = ts' ++ is) ∨ l ∈ permutationsAux is [] ⊢ l ~ is → l ∈ permutations is [PROOFSTEP] simpa [permutations, perm_nil] using H [GOAL] α : Type uu β : Type vv l₁ l₂ : List α ⊢ ∀ {ts is l : List α}, l ~ is ++ ts → (∃ is' x, l = is' ++ ts) ∨ l ∈ permutationsAux ts is [PROOFSTEP] show ∀ (ts is l : List α), l ~ is ++ ts → (∃ (is' : _) (_ : is' ~ is), l = is' ++ ts) ∨ l ∈ permutationsAux ts is [GOAL] α : Type uu β : Type vv l₁ l₂ : List α ⊢ ∀ (ts is l : List α), l ~ is ++ ts → (∃ is' x, l = is' ++ ts) ∨ l ∈ permutationsAux ts is [PROOFSTEP] refine' permutationsAux.rec (by simp) _ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α ⊢ ∀ (is l : List α), l ~ is ++ [] → (∃ is' x, l = is' ++ []) ∨ l ∈ permutationsAux [] is [PROOFSTEP] simp [GOAL] α : Type uu β : Type vv l₁ l₂ : List α ⊢ ∀ (t : α) (ts is : List α), (∀ (l : List α), l ~ t :: is ++ ts → (∃ is' x, l = is' ++ ts) ∨ l ∈ permutationsAux ts (t :: is)) → (∀ (l : List α), l ~ [] ++ is → (∃ is' x, l = is' ++ is) ∨ l ∈ permutationsAux is []) → ∀ (l : List α), l ~ is ++ t :: ts → (∃ is' x, l = is' ++ t :: ts) ∨ l ∈ permutationsAux (t :: ts) is [PROOFSTEP] intro t ts is IH1 IH2 l p [GOAL] α : Type uu β : Type vv l₁ l₂ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ~ t :: is ++ ts → (∃ is' x, l = is' ++ ts) ∨ l ∈ permutationsAux ts (t :: is) IH2 : ∀ (l : List α), l ~ [] ++ is → (∃ is' x, l = is' ++ is) ∨ l ∈ permutationsAux is [] l : List α p : l ~ is ++ t :: ts ⊢ (∃ is' x, l = is' ++ t :: ts) ∨ l ∈ permutationsAux (t :: ts) is [PROOFSTEP] rw [permutationsAux_cons, mem_foldr_permutationsAux2] [GOAL] α : Type uu β : Type vv l₁ l₂ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ~ t :: is ++ ts → (∃ is' x, l = is' ++ ts) ∨ l ∈ permutationsAux ts (t :: is) IH2 : ∀ (l : List α), l ~ [] ++ is → (∃ is' x, l = is' ++ is) ∨ l ∈ permutationsAux is [] l : List α p : l ~ is ++ t :: ts ⊢ (∃ is' x, l = is' ++ t :: ts) ∨ l ∈ permutationsAux ts (t :: is) ∨ ∃ l₁ l₂, l₁ ++ l₂ ∈ permutations is ∧ l₂ ≠ [] ∧ l = l₁ ++ t :: l₂ ++ ts [PROOFSTEP] rcases IH1 _ (p.trans perm_middle) with (⟨is', p', e⟩ | m) [GOAL] case inl.intro.intro α : Type uu β : Type vv l₁ l₂ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ~ t :: is ++ ts → (∃ is' x, l = is' ++ ts) ∨ l ∈ permutationsAux ts (t :: is) IH2 : ∀ (l : List α), l ~ [] ++ is → (∃ is' x, l = is' ++ is) ∨ l ∈ permutationsAux is [] l : List α p : l ~ is ++ t :: ts is' : List α p' : is' ~ t :: is e : l = is' ++ ts ⊢ (∃ is' x, l = is' ++ t :: ts) ∨ l ∈ permutationsAux ts (t :: is) ∨ ∃ l₁ l₂, l₁ ++ l₂ ∈ permutations is ∧ l₂ ≠ [] ∧ l = l₁ ++ t :: l₂ ++ ts [PROOFSTEP] clear p [GOAL] case inl.intro.intro α : Type uu β : Type vv l₁ l₂ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ~ t :: is ++ ts → (∃ is' x, l = is' ++ ts) ∨ l ∈ permutationsAux ts (t :: is) IH2 : ∀ (l : List α), l ~ [] ++ is → (∃ is' x, l = is' ++ is) ∨ l ∈ permutationsAux is [] l is' : List α p' : is' ~ t :: is e : l = is' ++ ts ⊢ (∃ is' x, l = is' ++ t :: ts) ∨ l ∈ permutationsAux ts (t :: is) ∨ ∃ l₁ l₂, l₁ ++ l₂ ∈ permutations is ∧ l₂ ≠ [] ∧ l = l₁ ++ t :: l₂ ++ ts [PROOFSTEP] subst e [GOAL] case inl.intro.intro α : Type uu β : Type vv l₁ l₂ : List α t : α ts is : List α IH1 : 
∀ (l : List α), l ~ t :: is ++ ts → (∃ is' x, l = is' ++ ts) ∨ l ∈ permutationsAux ts (t :: is) IH2 : ∀ (l : List α), l ~ [] ++ is → (∃ is' x, l = is' ++ is) ∨ l ∈ permutationsAux is [] is' : List α p' : is' ~ t :: is ⊢ (∃ is'_1 x, is' ++ ts = is'_1 ++ t :: ts) ∨ is' ++ ts ∈ permutationsAux ts (t :: is) ∨ ∃ l₁ l₂, l₁ ++ l₂ ∈ permutations is ∧ l₂ ≠ [] ∧ is' ++ ts = l₁ ++ t :: l₂ ++ ts [PROOFSTEP] rcases mem_split (p'.symm.subset (mem_cons_self _ _)) with ⟨l₁, l₂, e⟩ [GOAL] case inl.intro.intro.intro.intro α : Type uu β : Type vv l₁✝ l₂✝ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ~ t :: is ++ ts → (∃ is' x, l = is' ++ ts) ∨ l ∈ permutationsAux ts (t :: is) IH2 : ∀ (l : List α), l ~ [] ++ is → (∃ is' x, l = is' ++ is) ∨ l ∈ permutationsAux is [] is' : List α p' : is' ~ t :: is l₁ l₂ : List α e : is' = l₁ ++ t :: l₂ ⊢ (∃ is'_1 x, is' ++ ts = is'_1 ++ t :: ts) ∨ is' ++ ts ∈ permutationsAux ts (t :: is) ∨ ∃ l₁ l₂, l₁ ++ l₂ ∈ permutations is ∧ l₂ ≠ [] ∧ is' ++ ts = l₁ ++ t :: l₂ ++ ts [PROOFSTEP] subst is' [GOAL] case inl.intro.intro.intro.intro α : Type uu β : Type vv l₁✝ l₂✝ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ~ t :: is ++ ts → (∃ is' x, l = is' ++ ts) ∨ l ∈ permutationsAux ts (t :: is) IH2 : ∀ (l : List α), l ~ [] ++ is → (∃ is' x, l = is' ++ is) ∨ l ∈ permutationsAux is [] l₁ l₂ : List α p' : l₁ ++ t :: l₂ ~ t :: is ⊢ (∃ is' x, l₁ ++ t :: l₂ ++ ts = is' ++ t :: ts) ∨ l₁ ++ t :: l₂ ++ ts ∈ permutationsAux ts (t :: is) ∨ ∃ l₁_1 l₂_1, l₁_1 ++ l₂_1 ∈ permutations is ∧ l₂_1 ≠ [] ∧ l₁ ++ t :: l₂ ++ ts = l₁_1 ++ t :: l₂_1 ++ ts [PROOFSTEP] have p := (perm_middle.symm.trans p').cons_inv [GOAL] case inl.intro.intro.intro.intro α : Type uu β : Type vv l₁✝ l₂✝ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ~ t :: is ++ ts → (∃ is' x, l = is' ++ ts) ∨ l ∈ permutationsAux ts (t :: is) IH2 : ∀ (l : List α), l ~ [] ++ is → (∃ is' x, l = is' ++ is) ∨ l ∈ permutationsAux is [] l₁ l₂ : List α p' : l₁ ++ t :: l₂ ~ t :: is p : l₁ ++ l₂ ~ is ⊢ (∃ is' x, l₁ ++ t :: l₂ ++ ts = is' ++ t :: ts) ∨ l₁ ++ t :: l₂ ++ ts ∈ permutationsAux ts (t :: is) ∨ ∃ l₁_1 l₂_1, l₁_1 ++ l₂_1 ∈ permutations is ∧ l₂_1 ≠ [] ∧ l₁ ++ t :: l₂ ++ ts = l₁_1 ++ t :: l₂_1 ++ ts [PROOFSTEP] cases' l₂ with a l₂' [GOAL] case inl.intro.intro.intro.intro.nil α : Type uu β : Type vv l₁✝ l₂ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ~ t :: is ++ ts → (∃ is' x, l = is' ++ ts) ∨ l ∈ permutationsAux ts (t :: is) IH2 : ∀ (l : List α), l ~ [] ++ is → (∃ is' x, l = is' ++ is) ∨ l ∈ permutationsAux is [] l₁ : List α p' : l₁ ++ [t] ~ t :: is p : l₁ ++ [] ~ is ⊢ (∃ is' x, l₁ ++ [t] ++ ts = is' ++ t :: ts) ∨ l₁ ++ [t] ++ ts ∈ permutationsAux ts (t :: is) ∨ ∃ l₁_1 l₂, l₁_1 ++ l₂ ∈ permutations is ∧ l₂ ≠ [] ∧ l₁ ++ [t] ++ ts = l₁_1 ++ t :: l₂ ++ ts [PROOFSTEP] exact Or.inl ⟨l₁, by simpa using p⟩ [GOAL] α : Type uu β : Type vv l₁✝ l₂ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ~ t :: is ++ ts → (∃ is' x, l = is' ++ ts) ∨ l ∈ permutationsAux ts (t :: is) IH2 : ∀ (l : List α), l ~ [] ++ is → (∃ is' x, l = is' ++ is) ∨ l ∈ permutationsAux is [] l₁ : List α p' : l₁ ++ [t] ~ t :: is p : l₁ ++ [] ~ is ⊢ ∃ x, l₁ ++ [t] ++ ts = l₁ ++ t :: ts [PROOFSTEP] simpa using p [GOAL] case inl.intro.intro.intro.intro.cons α : Type uu β : Type vv l₁✝ l₂ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ~ t :: is ++ ts → (∃ is' x, l = is' ++ ts) ∨ l ∈ permutationsAux ts (t :: is) IH2 : ∀ (l : List α), l ~ [] ++ is → (∃ is' x, l = is' ++ is) ∨ l ∈ permutationsAux is [] l₁ : List α a : α l₂' : List α p' : l₁ ++ t :: a :: l₂' ~ 
t :: is p : l₁ ++ a :: l₂' ~ is ⊢ (∃ is' x, l₁ ++ t :: a :: l₂' ++ ts = is' ++ t :: ts) ∨ l₁ ++ t :: a :: l₂' ++ ts ∈ permutationsAux ts (t :: is) ∨ ∃ l₁_1 l₂, l₁_1 ++ l₂ ∈ permutations is ∧ l₂ ≠ [] ∧ l₁ ++ t :: a :: l₂' ++ ts = l₁_1 ++ t :: l₂ ++ ts [PROOFSTEP] exact Or.inr (Or.inr ⟨l₁, a :: l₂', mem_permutations_of_perm_lemma (IH2 _) p, by simp⟩) [GOAL] α : Type uu β : Type vv l₁✝ l₂ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ~ t :: is ++ ts → (∃ is' x, l = is' ++ ts) ∨ l ∈ permutationsAux ts (t :: is) IH2 : ∀ (l : List α), l ~ [] ++ is → (∃ is' x, l = is' ++ is) ∨ l ∈ permutationsAux is [] l₁ : List α a : α l₂' : List α p' : l₁ ++ t :: a :: l₂' ~ t :: is p : l₁ ++ a :: l₂' ~ is ⊢ a :: l₂' ≠ [] ∧ l₁ ++ t :: a :: l₂' ++ ts = l₁ ++ t :: a :: l₂' ++ ts [PROOFSTEP] simp [GOAL] case inr α : Type uu β : Type vv l₁ l₂ : List α t : α ts is : List α IH1 : ∀ (l : List α), l ~ t :: is ++ ts → (∃ is' x, l = is' ++ ts) ∨ l ∈ permutationsAux ts (t :: is) IH2 : ∀ (l : List α), l ~ [] ++ is → (∃ is' x, l = is' ++ is) ∨ l ∈ permutationsAux is [] l : List α p : l ~ is ++ t :: ts m : l ∈ permutationsAux ts (t :: is) ⊢ (∃ is' x, l = is' ++ t :: ts) ∨ l ∈ permutationsAux ts (t :: is) ∨ ∃ l₁ l₂, l₁ ++ l₂ ∈ permutations is ∧ l₂ ≠ [] ∧ l = l₁ ++ t :: l₂ ++ ts [PROOFSTEP] exact Or.inr (Or.inl m) [GOAL] α✝ : Type uu β : Type vv l₁ l₂ : List α✝ α : Type u_1 inst✝ : DecidableEq α ⊢ List.beq = fun a b => decide (a = b) [PROOFSTEP] funext l₁ l₂ [GOAL] case h.h α✝ : Type uu β : Type vv l₁✝ l₂✝ : List α✝ α : Type u_1 inst✝ : DecidableEq α l₁ l₂ : List α ⊢ List.beq l₁ l₂ = decide (l₁ = l₂) [PROOFSTEP] show (l₁ == l₂) = _ [GOAL] case h.h α✝ : Type uu β : Type vv l₁✝ l₂✝ : List α✝ α : Type u_1 inst✝ : DecidableEq α l₁ l₂ : List α ⊢ (l₁ == l₂) = decide (l₁ = l₂) [PROOFSTEP] rw [Bool.eq_iff_eq_true_iff, @beq_iff_eq _ (_), decide_eq_true_iff] [GOAL] α : Type uu β : Type vv l₁ l₂ : List α a b : α l : List α ⊢ List.bind (permutations'Aux a l) (permutations'Aux b) ~ List.bind (permutations'Aux b l) (permutations'Aux a) [PROOFSTEP] induction' l with c l ih [GOAL] case nil α : Type uu β : Type vv l₁ l₂ : List α a b : α ⊢ List.bind (permutations'Aux a []) (permutations'Aux b) ~ List.bind (permutations'Aux b []) (permutations'Aux a) [PROOFSTEP] simp [swap] [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α a b c : α l : List α ih : List.bind (permutations'Aux a l) (permutations'Aux b) ~ List.bind (permutations'Aux b l) (permutations'Aux a) ⊢ List.bind (permutations'Aux a (c :: l)) (permutations'Aux b) ~ List.bind (permutations'Aux b (c :: l)) (permutations'Aux a) [PROOFSTEP] simp [permutations'Aux] [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α a b c : α l : List α ih : List.bind (permutations'Aux a l) (permutations'Aux b) ~ List.bind (permutations'Aux b l) (permutations'Aux a) ⊢ (b :: a :: c :: l) :: (a :: b :: c :: l) :: (map (cons a ∘ cons c) (permutations'Aux b l) ++ List.bind (map (cons c) (permutations'Aux a l)) (permutations'Aux b)) ~ (a :: b :: c :: l) :: (b :: a :: c :: l) :: (map (cons b ∘ cons c) (permutations'Aux a l) ++ List.bind (map (cons c) (permutations'Aux b l)) (permutations'Aux a)) [PROOFSTEP] apply Perm.swap' [GOAL] case cons.p α : Type uu β : Type vv l₁ l₂ : List α a b c : α l : List α ih : List.bind (permutations'Aux a l) (permutations'Aux b) ~ List.bind (permutations'Aux b l) (permutations'Aux a) ⊢ map (cons a ∘ cons c) (permutations'Aux b l) ++ List.bind (map (cons c) (permutations'Aux a l)) (permutations'Aux b) ~ map (cons b ∘ cons c) (permutations'Aux a l) ++ List.bind (map (cons 
c) (permutations'Aux b l)) (permutations'Aux a) [PROOFSTEP] have : ∀ a b, (map (cons c) (permutations'Aux a l)).bind (permutations'Aux b) ~ map (cons b ∘ cons c) (permutations'Aux a l) ++ map (cons c) ((permutations'Aux a l).bind (permutations'Aux b)) := by intros a' b' simp only [map_bind, permutations'Aux] show List.bind (permutations'Aux _ l) (fun a => ([b' :: c :: a] ++ map (cons c) (permutations'Aux _ a))) ~ _ refine' (bind_append_perm _ (fun x => [b' :: c :: x]) _).symm.trans _ rw [← map_eq_bind, ← bind_map] exact Perm.refl _ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α a b c : α l : List α ih : List.bind (permutations'Aux a l) (permutations'Aux b) ~ List.bind (permutations'Aux b l) (permutations'Aux a) ⊢ ∀ (a b : α), List.bind (map (cons c) (permutations'Aux a l)) (permutations'Aux b) ~ map (cons b ∘ cons c) (permutations'Aux a l) ++ map (cons c) (List.bind (permutations'Aux a l) (permutations'Aux b)) [PROOFSTEP] intros a' b' [GOAL] α : Type uu β : Type vv l₁ l₂ : List α a b c : α l : List α ih : List.bind (permutations'Aux a l) (permutations'Aux b) ~ List.bind (permutations'Aux b l) (permutations'Aux a) a' b' : α ⊢ List.bind (map (cons c) (permutations'Aux a' l)) (permutations'Aux b') ~ map (cons b' ∘ cons c) (permutations'Aux a' l) ++ map (cons c) (List.bind (permutations'Aux a' l) (permutations'Aux b')) [PROOFSTEP] simp only [map_bind, permutations'Aux] [GOAL] α : Type uu β : Type vv l₁ l₂ : List α a b c : α l : List α ih : List.bind (permutations'Aux a l) (permutations'Aux b) ~ List.bind (permutations'Aux b l) (permutations'Aux a) a' b' : α ⊢ (List.bind (permutations'Aux a' l) fun a => (b' :: c :: a) :: map (cons c) (permutations'Aux b' a)) ~ map (cons b' ∘ cons c) (permutations'Aux a' l) ++ map (cons c) (List.bind (permutations'Aux a' l) (permutations'Aux b')) [PROOFSTEP] show List.bind (permutations'Aux _ l) (fun a => ([b' :: c :: a] ++ map (cons c) (permutations'Aux _ a))) ~ _ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α a b c : α l : List α ih : List.bind (permutations'Aux a l) (permutations'Aux b) ~ List.bind (permutations'Aux b l) (permutations'Aux a) a' b' : α ⊢ (List.bind (permutations'Aux a' l) fun a => [b' :: c :: a] ++ map (cons c) (permutations'Aux b' a)) ~ map (cons b' ∘ cons c) (permutations'Aux a' l) ++ map (cons c) (List.bind (permutations'Aux a' l) (permutations'Aux b')) [PROOFSTEP] refine' (bind_append_perm _ (fun x => [b' :: c :: x]) _).symm.trans _ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α a b c : α l : List α ih : List.bind (permutations'Aux a l) (permutations'Aux b) ~ List.bind (permutations'Aux b l) (permutations'Aux a) a' b' : α ⊢ ((List.bind (permutations'Aux a' l) fun x => [b' :: c :: x]) ++ List.bind (permutations'Aux a' l) fun a => map (cons c) (permutations'Aux b' a)) ~ map (cons b' ∘ cons c) (permutations'Aux a' l) ++ map (cons c) (List.bind (permutations'Aux a' l) (permutations'Aux b')) [PROOFSTEP] rw [← map_eq_bind, ← bind_map] [GOAL] α : Type uu β : Type vv l₁ l₂ : List α a b c : α l : List α ih : List.bind (permutations'Aux a l) (permutations'Aux b) ~ List.bind (permutations'Aux b l) (permutations'Aux a) a' b' : α ⊢ map (fun x => b' :: c :: x) (permutations'Aux a' l) ++ map (cons c) (List.bind (permutations'Aux a' l) fun a => permutations'Aux b' a) ~ map (cons b' ∘ cons c) (permutations'Aux a' l) ++ map (cons c) (List.bind (permutations'Aux a' l) (permutations'Aux b')) [PROOFSTEP] exact Perm.refl _ [GOAL] case cons.p α : Type uu β : Type vv l₁ l₂ : List α a b c : α l : List α ih : List.bind (permutations'Aux a l) (permutations'Aux b) ~ 
List.bind (permutations'Aux b l) (permutations'Aux a) this : ∀ (a b : α), List.bind (map (cons c) (permutations'Aux a l)) (permutations'Aux b) ~ map (cons b ∘ cons c) (permutations'Aux a l) ++ map (cons c) (List.bind (permutations'Aux a l) (permutations'Aux b)) ⊢ map (cons a ∘ cons c) (permutations'Aux b l) ++ List.bind (map (cons c) (permutations'Aux a l)) (permutations'Aux b) ~ map (cons b ∘ cons c) (permutations'Aux a l) ++ List.bind (map (cons c) (permutations'Aux b l)) (permutations'Aux a) [PROOFSTEP] refine' (((this _ _).append_left _).trans _).trans ((this _ _).append_left _).symm [GOAL] case cons.p α : Type uu β : Type vv l₁ l₂ : List α a b c : α l : List α ih : List.bind (permutations'Aux a l) (permutations'Aux b) ~ List.bind (permutations'Aux b l) (permutations'Aux a) this : ∀ (a b : α), List.bind (map (cons c) (permutations'Aux a l)) (permutations'Aux b) ~ map (cons b ∘ cons c) (permutations'Aux a l) ++ map (cons c) (List.bind (permutations'Aux a l) (permutations'Aux b)) ⊢ map (cons a ∘ cons c) (permutations'Aux b l) ++ (map (cons b ∘ cons c) (permutations'Aux a l) ++ map (cons c) (List.bind (permutations'Aux a l) (permutations'Aux b))) ~ map (cons b ∘ cons c) (permutations'Aux a l) ++ (map (cons a ∘ cons c) (permutations'Aux b l) ++ map (cons c) (List.bind (permutations'Aux b l) (permutations'Aux a))) [PROOFSTEP] rw [← append_assoc, ← append_assoc] [GOAL] case cons.p α : Type uu β : Type vv l₁ l₂ : List α a b c : α l : List α ih : List.bind (permutations'Aux a l) (permutations'Aux b) ~ List.bind (permutations'Aux b l) (permutations'Aux a) this : ∀ (a b : α), List.bind (map (cons c) (permutations'Aux a l)) (permutations'Aux b) ~ map (cons b ∘ cons c) (permutations'Aux a l) ++ map (cons c) (List.bind (permutations'Aux a l) (permutations'Aux b)) ⊢ map (cons a ∘ cons c) (permutations'Aux b l) ++ map (cons b ∘ cons c) (permutations'Aux a l) ++ map (cons c) (List.bind (permutations'Aux a l) (permutations'Aux b)) ~ map (cons b ∘ cons c) (permutations'Aux a l) ++ map (cons a ∘ cons c) (permutations'Aux b l) ++ map (cons c) (List.bind (permutations'Aux b l) (permutations'Aux a)) [PROOFSTEP] exact perm_append_comm.append (ih.map _) [GOAL] α : Type uu β : Type vv l₁ l₂ s t : List α p : s ~ t ⊢ List.permutations' s ~ List.permutations' t [PROOFSTEP] induction' p with a s t _ IH a b l s t u _ _ IH₁ IH₂ [GOAL] case nil α : Type uu β : Type vv l₁ l₂ s t : List α ⊢ List.permutations' [] ~ List.permutations' [] [PROOFSTEP] simp [GOAL] case cons α : Type uu β : Type vv l₁ l₂ s✝ t✝ : List α a : α s t : List α a✝ : s ~ t IH : List.permutations' s ~ List.permutations' t ⊢ List.permutations' (a :: s) ~ List.permutations' (a :: t) [PROOFSTEP] exact IH.bind_right _ [GOAL] case swap α : Type uu β : Type vv l₁ l₂ s t : List α a b : α l : List α ⊢ List.permutations' (b :: a :: l) ~ List.permutations' (a :: b :: l) [PROOFSTEP] dsimp [permutations'] [GOAL] case swap α : Type uu β : Type vv l₁ l₂ s t : List α a b : α l : List α ⊢ List.bind (List.bind (List.permutations' l) (permutations'Aux a)) (permutations'Aux b) ~ List.bind (List.bind (List.permutations' l) (permutations'Aux b)) (permutations'Aux a) [PROOFSTEP] rw [bind_assoc, bind_assoc] [GOAL] case swap α : Type uu β : Type vv l₁ l₂ s t : List α a b : α l : List α ⊢ (List.bind (List.permutations' l) fun x => List.bind (permutations'Aux a x) (permutations'Aux b)) ~ List.bind (List.permutations' l) fun x => List.bind (permutations'Aux b x) (permutations'Aux a) [PROOFSTEP] apply Perm.bind_left [GOAL] case swap.h α : Type uu β : Type vv l₁ l₂ s t : List α 
a b : α l : List α ⊢ ∀ (a_1 : List α), a_1 ∈ List.permutations' l → List.bind (permutations'Aux a a_1) (permutations'Aux b) ~ List.bind (permutations'Aux b a_1) (permutations'Aux a) [PROOFSTEP] intro l' _ [GOAL] case swap.h α : Type uu β : Type vv l₁ l₂ s t : List α a b : α l l' : List α a✝ : l' ∈ List.permutations' l ⊢ List.bind (permutations'Aux a l') (permutations'Aux b) ~ List.bind (permutations'Aux b l') (permutations'Aux a) [PROOFSTEP] apply perm_permutations'Aux_comm [GOAL] case trans α : Type uu β : Type vv l₁ l₂ s✝ t✝ s t u : List α a✝¹ : s ~ t a✝ : t ~ u IH₁ : List.permutations' s ~ List.permutations' t IH₂ : List.permutations' t ~ List.permutations' u ⊢ List.permutations' s ~ List.permutations' u [PROOFSTEP] exact IH₁.trans IH₂ [GOAL] α : Type uu β : Type vv l₁ l₂ ts : List α ⊢ permutations ts ~ permutations' ts [PROOFSTEP] obtain ⟨n, h⟩ : ∃ n, length ts < n := ⟨_, Nat.lt_succ_self _⟩ [GOAL] case intro α : Type uu β : Type vv l₁ l₂ ts : List α n : ℕ h : length ts < n ⊢ permutations ts ~ permutations' ts [PROOFSTEP] induction' n with n IH generalizing ts [GOAL] case intro.zero α : Type uu β : Type vv l₁ l₂ ts✝ : List α n : ℕ h✝ : length ts✝ < n ts : List α h : length ts < zero ⊢ permutations ts ~ permutations' ts [PROOFSTEP] cases h [GOAL] case intro.succ α : Type uu β : Type vv l₁ l₂ ts✝ : List α n✝ : ℕ h✝ : length ts✝ < n✝ n : ℕ IH : ∀ (ts : List α), length ts < n → permutations ts ~ permutations' ts ts : List α h : length ts < succ n ⊢ permutations ts ~ permutations' ts [PROOFSTEP] refine' List.reverseRecOn ts (fun _ => _) (fun ts t _ h => _) h [GOAL] case intro.succ.refine'_1 α : Type uu β : Type vv l₁ l₂ ts✝ : List α n✝ : ℕ h✝ : length ts✝ < n✝ n : ℕ IH : ∀ (ts : List α), length ts < n → permutations ts ~ permutations' ts ts : List α h : length ts < succ n x✝ : length [] < succ n ⊢ permutations [] ~ permutations' [] [PROOFSTEP] simp [permutations] [GOAL] case intro.succ.refine'_2 α : Type uu β : Type vv l₁ l₂ ts✝¹ : List α n✝ : ℕ h✝¹ : length ts✝¹ < n✝ n : ℕ IH : ∀ (ts : List α), length ts < n → permutations ts ~ permutations' ts ts✝ : List α h✝ : length ts✝ < succ n ts : List α t : α x✝ : length ts < succ n → permutations ts ~ permutations' ts h : length (ts ++ [t]) < succ n ⊢ permutations (ts ++ [t]) ~ permutations' (ts ++ [t]) [PROOFSTEP] rw [← concat_eq_append, length_concat, Nat.succ_lt_succ_iff] at h [GOAL] case intro.succ.refine'_2 α : Type uu β : Type vv l₁ l₂ ts✝¹ : List α n✝ : ℕ h✝¹ : length ts✝¹ < n✝ n : ℕ IH : ∀ (ts : List α), length ts < n → permutations ts ~ permutations' ts ts✝ : List α h✝ : length ts✝ < succ n ts : List α t : α x✝ : length ts < succ n → permutations ts ~ permutations' ts h : length ts < n ⊢ permutations (ts ++ [t]) ~ permutations' (ts ++ [t]) [PROOFSTEP] have IH₂ := (IH ts.reverse (by rwa [length_reverse])).trans (reverse_perm _).permutations' [GOAL] α : Type uu β : Type vv l₁ l₂ ts✝¹ : List α n✝ : ℕ h✝¹ : length ts✝¹ < n✝ n : ℕ IH : ∀ (ts : List α), length ts < n → permutations ts ~ permutations' ts ts✝ : List α h✝ : length ts✝ < succ n ts : List α t : α x✝ : length ts < succ n → permutations ts ~ permutations' ts h : length ts < n ⊢ length (reverse ts) < n [PROOFSTEP] rwa [length_reverse] [GOAL] case intro.succ.refine'_2 α : Type uu β : Type vv l₁ l₂ ts✝¹ : List α n✝ : ℕ h✝¹ : length ts✝¹ < n✝ n : ℕ IH : ∀ (ts : List α), length ts < n → permutations ts ~ permutations' ts ts✝ : List α h✝ : length ts✝ < succ n ts : List α t : α x✝ : length ts < succ n → permutations ts ~ permutations' ts h : length ts < n IH₂ : permutations (reverse ts) ~ 
permutations' ts ⊢ permutations (ts ++ [t]) ~ permutations' (ts ++ [t]) [PROOFSTEP] simp only [permutations_append, foldr_permutationsAux2, permutationsAux_nil, permutationsAux_cons, append_nil] [GOAL] case intro.succ.refine'_2 α : Type uu β : Type vv l₁ l₂ ts✝¹ : List α n✝ : ℕ h✝¹ : length ts✝¹ < n✝ n : ℕ IH : ∀ (ts : List α), length ts < n → permutations ts ~ permutations' ts ts✝ : List α h✝ : length ts✝ < succ n ts : List α t : α x✝ : length ts < succ n → permutations ts ~ permutations' ts h : length ts < n IH₂ : permutations (reverse ts) ~ permutations' ts ⊢ (map (fun x => x ++ [t]) (permutations ts) ++ List.bind (permutations (reverse ts)) fun y => (permutationsAux2 t [] [] y id).snd) ~ permutations' (ts ++ [t]) [PROOFSTEP] refine' (perm_append_comm.trans ((IH₂.bind_right _).append ((IH _ h).map _))).trans (Perm.trans _ perm_append_comm.permutations') [GOAL] case intro.succ.refine'_2 α : Type uu β : Type vv l₁ l₂ ts✝¹ : List α n✝ : ℕ h✝¹ : length ts✝¹ < n✝ n : ℕ IH : ∀ (ts : List α), length ts < n → permutations ts ~ permutations' ts ts✝ : List α h✝ : length ts✝ < succ n ts : List α t : α x✝ : length ts < succ n → permutations ts ~ permutations' ts h : length ts < n IH₂ : permutations (reverse ts) ~ permutations' ts ⊢ (List.bind (permutations' ts) fun y => (permutationsAux2 t [] [] y id).snd) ++ map (fun x => x ++ [t]) (permutations' ts) ~ permutations' ([t] ++ ts) [PROOFSTEP] rw [map_eq_bind, singleton_append, permutations'] [GOAL] case intro.succ.refine'_2 α : Type uu β : Type vv l₁ l₂ ts✝¹ : List α n✝ : ℕ h✝¹ : length ts✝¹ < n✝ n : ℕ IH : ∀ (ts : List α), length ts < n → permutations ts ~ permutations' ts ts✝ : List α h✝ : length ts✝ < succ n ts : List α t : α x✝ : length ts < succ n → permutations ts ~ permutations' ts h : length ts < n IH₂ : permutations (reverse ts) ~ permutations' ts ⊢ ((List.bind (permutations' ts) fun y => (permutationsAux2 t [] [] y id).snd) ++ List.bind (permutations' ts) fun x => [x ++ [t]]) ~ List.bind (permutations' ts) (permutations'Aux t) [PROOFSTEP] refine' (bind_append_perm _ _ _).trans _ [GOAL] case intro.succ.refine'_2 α : Type uu β : Type vv l₁ l₂ ts✝¹ : List α n✝ : ℕ h✝¹ : length ts✝¹ < n✝ n : ℕ IH : ∀ (ts : List α), length ts < n → permutations ts ~ permutations' ts ts✝ : List α h✝ : length ts✝ < succ n ts : List α t : α x✝ : length ts < succ n → permutations ts ~ permutations' ts h : length ts < n IH₂ : permutations (reverse ts) ~ permutations' ts ⊢ (List.bind (permutations' ts) fun x => (permutationsAux2 t [] [] x id).snd ++ [x ++ [t]]) ~ List.bind (permutations' ts) (permutations'Aux t) [PROOFSTEP] refine' Perm.of_eq _ [GOAL] case intro.succ.refine'_2 α : Type uu β : Type vv l₁ l₂ ts✝¹ : List α n✝ : ℕ h✝¹ : length ts✝¹ < n✝ n : ℕ IH : ∀ (ts : List α), length ts < n → permutations ts ~ permutations' ts ts✝ : List α h✝ : length ts✝ < succ n ts : List α t : α x✝ : length ts < succ n → permutations ts ~ permutations' ts h : length ts < n IH₂ : permutations (reverse ts) ~ permutations' ts ⊢ (List.bind (permutations' ts) fun x => (permutationsAux2 t [] [] x id).snd ++ [x ++ [t]]) = List.bind (permutations' ts) (permutations'Aux t) [PROOFSTEP] congr [GOAL] case intro.succ.refine'_2.e_b α : Type uu β : Type vv l₁ l₂ ts✝¹ : List α n✝ : ℕ h✝¹ : length ts✝¹ < n✝ n : ℕ IH : ∀ (ts : List α), length ts < n → permutations ts ~ permutations' ts ts✝ : List α h✝ : length ts✝ < succ n ts : List α t : α x✝ : length ts < succ n → permutations ts ~ permutations' ts h : length ts < n IH₂ : permutations (reverse ts) ~ permutations' ts ⊢ (fun x => (permutationsAux2 t 
[] [] x id).snd ++ [x ++ [t]]) = permutations'Aux t [PROOFSTEP] funext _ [GOAL] case intro.succ.refine'_2.e_b.h α : Type uu β : Type vv l₁ l₂ ts✝¹ : List α n✝ : ℕ h✝¹ : length ts✝¹ < n✝ n : ℕ IH : ∀ (ts : List α), length ts < n → permutations ts ~ permutations' ts ts✝ : List α h✝ : length ts✝ < succ n ts : List α t : α x✝¹ : length ts < succ n → permutations ts ~ permutations' ts h : length ts < n IH₂ : permutations (reverse ts) ~ permutations' ts x✝ : List α ⊢ (permutationsAux2 t [] [] x✝ id).snd ++ [x✝ ++ [t]] = permutations'Aux t x✝ [PROOFSTEP] rw [permutations'Aux_eq_permutationsAux2, permutationsAux2_append] [GOAL] α : Type uu β : Type vv l₁ l₂ s : List α x : α n : ℕ hn : n < length (permutations'Aux x s) ⊢ nthLe (permutations'Aux x s) n hn = insertNth n x s [PROOFSTEP] induction' s with y s IH generalizing n [GOAL] case nil α : Type uu β : Type vv l₁ l₂ s : List α x : α n✝ : ℕ hn✝ : n✝ < length (permutations'Aux x s) n : ℕ hn : n < length (permutations'Aux x []) ⊢ nthLe (permutations'Aux x []) n hn = insertNth n x [] [PROOFSTEP] simp only [length, zero_add, lt_one_iff] at hn [GOAL] case nil α : Type uu β : Type vv l₁ l₂ s : List α x : α n✝ : ℕ hn✝¹ : n✝ < length (permutations'Aux x s) n : ℕ hn✝ : n < length (permutations'Aux x []) hn : n = 0 ⊢ nthLe (permutations'Aux x []) n hn✝ = insertNth n x [] [PROOFSTEP] simp [hn] [GOAL] case cons α : Type uu β : Type vv l₁ l₂ s✝ : List α x : α n✝ : ℕ hn✝ : n✝ < length (permutations'Aux x s✝) y : α s : List α IH : ∀ (n : ℕ) (hn : n < length (permutations'Aux x s)), nthLe (permutations'Aux x s) n hn = insertNth n x s n : ℕ hn : n < length (permutations'Aux x (y :: s)) ⊢ nthLe (permutations'Aux x (y :: s)) n hn = insertNth n x (y :: s) [PROOFSTEP] cases n [GOAL] case cons.zero α : Type uu β : Type vv l₁ l₂ s✝ : List α x : α n : ℕ hn✝ : n < length (permutations'Aux x s✝) y : α s : List α IH : ∀ (n : ℕ) (hn : n < length (permutations'Aux x s)), nthLe (permutations'Aux x s) n hn = insertNth n x s hn : zero < length (permutations'Aux x (y :: s)) ⊢ nthLe (permutations'Aux x (y :: s)) zero hn = insertNth zero x (y :: s) [PROOFSTEP] simp [nthLe] [GOAL] case cons.succ α : Type uu β : Type vv l₁ l₂ s✝ : List α x : α n : ℕ hn✝ : n < length (permutations'Aux x s✝) y : α s : List α IH : ∀ (n : ℕ) (hn : n < length (permutations'Aux x s)), nthLe (permutations'Aux x s) n hn = insertNth n x s n✝ : ℕ hn : succ n✝ < length (permutations'Aux x (y :: s)) ⊢ nthLe (permutations'Aux x (y :: s)) (succ n✝) hn = insertNth (succ n✝) x (y :: s) [PROOFSTEP] simpa [nthLe] using IH _ _ [GOAL] α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α l : List α x : α ⊢ count (x :: l) (permutations'Aux x l) = length (takeWhile (fun b => decide ((fun x x_1 => x = x_1) x b)) l) + 1 [PROOFSTEP] induction' l with y l IH generalizing x [GOAL] case nil α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α x✝ x : α ⊢ count [x] (permutations'Aux x []) = length (takeWhile (fun b => decide ((fun x x_1 => x = x_1) x b)) []) + 1 [PROOFSTEP] simp [takeWhile, count] [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α x✝ y : α l : List α IH : ∀ (x : α), count (x :: l) (permutations'Aux x l) = length (takeWhile (fun b => decide ((fun x x_1 => x = x_1) x b)) l) + 1 x : α ⊢ count (x :: y :: l) (permutations'Aux x (y :: l)) = length (takeWhile (fun b => decide ((fun x x_1 => x = x_1) x b)) (y :: l)) + 1 [PROOFSTEP] rw [permutations'Aux, DecEq_eq, count_cons_self] [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α x✝ y : α l : List α IH 
: ∀ (x : α), count (x :: l) (permutations'Aux x l) = length (takeWhile (fun b => decide ((fun x x_1 => x = x_1) x b)) l) + 1 x : α ⊢ count (x :: y :: l) (map (cons y) (permutations'Aux x l)) + 1 = length (takeWhile (fun b => decide ((fun x x_1 => x = x_1) x b)) (y :: l)) + 1 [PROOFSTEP] by_cases hx : x = y [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α x✝ y : α l : List α IH : ∀ (x : α), count (x :: l) (permutations'Aux x l) = length (takeWhile (fun b => decide ((fun x x_1 => x = x_1) x b)) l) + 1 x : α hx : x = y ⊢ count (x :: y :: l) (map (cons y) (permutations'Aux x l)) + 1 = length (takeWhile (fun b => decide ((fun x x_1 => x = x_1) x b)) (y :: l)) + 1 [PROOFSTEP] subst hx [GOAL] case pos α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α x✝ : α l : List α IH : ∀ (x : α), count (x :: l) (permutations'Aux x l) = length (takeWhile (fun b => decide ((fun x x_1 => x = x_1) x b)) l) + 1 x : α ⊢ count (x :: x :: l) (map (cons x) (permutations'Aux x l)) + 1 = length (takeWhile (fun b => decide ((fun x x_1 => x = x_1) x b)) (x :: l)) + 1 [PROOFSTEP] simpa [takeWhile, Nat.succ_inj', DecEq_eq] using IH _ [GOAL] case neg α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α x✝ y : α l : List α IH : ∀ (x : α), count (x :: l) (permutations'Aux x l) = length (takeWhile (fun b => decide ((fun x x_1 => x = x_1) x b)) l) + 1 x : α hx : ¬x = y ⊢ count (x :: y :: l) (map (cons y) (permutations'Aux x l)) + 1 = length (takeWhile (fun b => decide ((fun x x_1 => x = x_1) x b)) (y :: l)) + 1 [PROOFSTEP] rw [takeWhile] [GOAL] case neg α : Type uu β : Type vv l₁ l₂ : List α inst✝ : DecidableEq α x✝ y : α l : List α IH : ∀ (x : α), count (x :: l) (permutations'Aux x l) = length (takeWhile (fun b => decide ((fun x x_1 => x = x_1) x b)) l) + 1 x : α hx : ¬x = y ⊢ count (x :: y :: l) (map (cons y) (permutations'Aux x l)) + 1 = length (match decide ((fun x x_1 => x = x_1) x y) with | true => y :: takeWhile (fun b => decide ((fun x x_1 => x = x_1) x b)) l | false => []) + 1 [PROOFSTEP] simp only [mem_map, cons.injEq, Ne.symm hx, false_and, and_false, exists_false, not_false_iff, count_eq_zero_of_not_mem, zero_add, hx, decide_False, length_nil] [GOAL] α : Type uu β : Type vv l₁ l₂ s : List α x : α ⊢ length (permutations'Aux x s) = length s + 1 [PROOFSTEP] induction' s with y s IH [GOAL] case nil α : Type uu β : Type vv l₁ l₂ : List α x : α ⊢ length (permutations'Aux x []) = length [] + 1 [PROOFSTEP] simp [GOAL] case cons α : Type uu β : Type vv l₁ l₂ : List α x y : α s : List α IH : length (permutations'Aux x s) = length s + 1 ⊢ length (permutations'Aux x (y :: s)) = length (y :: s) + 1 [PROOFSTEP] simpa using IH [GOAL] α : Type uu β : Type vv l₁ l₂ s : List α x : α ⊢ 0 < length (permutations'Aux x s) [PROOFSTEP] simp [GOAL] α : Type uu β : Type vv l₁ l₂ : List α x : α ⊢ Function.Injective (permutations'Aux x) [PROOFSTEP] intro s t h [GOAL] α : Type uu β : Type vv l₁ l₂ : List α x : α s t : List α h : permutations'Aux x s = permutations'Aux x t ⊢ s = t [PROOFSTEP] apply insertNth_injective s.length x [GOAL] case a α : Type uu β : Type vv l₁ l₂ : List α x : α s t : List α h : permutations'Aux x s = permutations'Aux x t ⊢ insertNth (length s) x s = insertNth (length s) x t [PROOFSTEP] have hl : s.length = t.length := by simpa using congr_arg length h [GOAL] α : Type uu β : Type vv l₁ l₂ : List α x : α s t : List α h : permutations'Aux x s = permutations'Aux x t ⊢ length s = length t [PROOFSTEP] simpa using congr_arg length h [GOAL] case a α : Type uu β : Type vv l₁ l₂ : 
List α x : α s t : List α h : permutations'Aux x s = permutations'Aux x t hl : length s = length t ⊢ insertNth (length s) x s = insertNth (length s) x t [PROOFSTEP] rw [← nthLe_permutations'Aux s x s.length (by simp), ← nthLe_permutations'Aux t x s.length (by simp [hl])] [GOAL] α : Type uu β : Type vv l₁ l₂ : List α x : α s t : List α h : permutations'Aux x s = permutations'Aux x t hl : length s = length t ⊢ length s < length (permutations'Aux x s) [PROOFSTEP] simp [GOAL] α : Type uu β : Type vv l₁ l₂ : List α x : α s t : List α h : permutations'Aux x s = permutations'Aux x t hl : length s = length t ⊢ length s < length (permutations'Aux x t) [PROOFSTEP] simp [hl] [GOAL] case a α : Type uu β : Type vv l₁ l₂ : List α x : α s t : List α h : permutations'Aux x s = permutations'Aux x t hl : length s = length t ⊢ nthLe (permutations'Aux x s) (length s) (_ : length s < length (permutations'Aux x s)) = nthLe (permutations'Aux x t) (length s) (_ : length s < length (permutations'Aux x t)) [PROOFSTEP] simp [h, hl] [GOAL] α : Type uu β : Type vv l₁ l₂ s : List α x : α hx : ¬x ∈ s ⊢ Nodup (permutations'Aux x s) [PROOFSTEP] induction' s with y s IH [GOAL] case nil α : Type uu β : Type vv l₁ l₂ s : List α x : α hx✝ : ¬x ∈ s hx : ¬x ∈ [] ⊢ Nodup (permutations'Aux x []) [PROOFSTEP] simp [GOAL] case cons α : Type uu β : Type vv l₁ l₂ s✝ : List α x : α hx✝ : ¬x ∈ s✝ y : α s : List α IH : ¬x ∈ s → Nodup (permutations'Aux x s) hx : ¬x ∈ y :: s ⊢ Nodup (permutations'Aux x (y :: s)) [PROOFSTEP] simp only [not_or, mem_cons] at hx [GOAL] case cons α : Type uu β : Type vv l₁ l₂ s✝ : List α x : α hx✝ : ¬x ∈ s✝ y : α s : List α IH : ¬x ∈ s → Nodup (permutations'Aux x s) hx : ¬x = y ∧ ¬x ∈ s ⊢ Nodup (permutations'Aux x (y :: s)) [PROOFSTEP] simp only [permutations'Aux, nodup_cons, mem_map, cons.injEq, exists_eq_right_right, not_and] [GOAL] case cons α : Type uu β : Type vv l₁ l₂ s✝ : List α x : α hx✝ : ¬x ∈ s✝ y : α s : List α IH : ¬x ∈ s → Nodup (permutations'Aux x s) hx : ¬x = y ∧ ¬x ∈ s ⊢ (y :: s ∈ permutations'Aux x s → ¬y = x) ∧ Nodup (map (cons y) (permutations'Aux x s)) [PROOFSTEP] refine' ⟨fun _ => Ne.symm hx.left, _⟩ [GOAL] case cons α : Type uu β : Type vv l₁ l₂ s✝ : List α x : α hx✝ : ¬x ∈ s✝ y : α s : List α IH : ¬x ∈ s → Nodup (permutations'Aux x s) hx : ¬x = y ∧ ¬x ∈ s ⊢ Nodup (map (cons y) (permutations'Aux x s)) [PROOFSTEP] rw [nodup_map_iff] [GOAL] case cons α : Type uu β : Type vv l₁ l₂ s✝ : List α x : α hx✝ : ¬x ∈ s✝ y : α s : List α IH : ¬x ∈ s → Nodup (permutations'Aux x s) hx : ¬x = y ∧ ¬x ∈ s ⊢ Nodup (permutations'Aux x s) [PROOFSTEP] exact IH hx.right [GOAL] case cons α : Type uu β : Type vv l₁ l₂ s✝ : List α x : α hx✝ : ¬x ∈ s✝ y : α s : List α IH : ¬x ∈ s → Nodup (permutations'Aux x s) hx : ¬x = y ∧ ¬x ∈ s ⊢ Function.Injective (cons y) [PROOFSTEP] simp [GOAL] α : Type uu β : Type vv l₁ l₂ s : List α x : α ⊢ Nodup (permutations'Aux x s) ↔ ¬x ∈ s [PROOFSTEP] refine' ⟨fun h => _, nodup_permutations'Aux_of_not_mem _ _⟩ [GOAL] α : Type uu β : Type vv l₁ l₂ s : List α x : α h : Nodup (permutations'Aux x s) ⊢ ¬x ∈ s [PROOFSTEP] intro H [GOAL] α : Type uu β : Type vv l₁ l₂ s : List α x : α h : Nodup (permutations'Aux x s) H : x ∈ s ⊢ False [PROOFSTEP] obtain ⟨k, hk, hk'⟩ := nthLe_of_mem H [GOAL] case intro.intro α : Type uu β : Type vv l₁ l₂ s : List α x : α h : Nodup (permutations'Aux x s) H : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x ⊢ False [PROOFSTEP] rw [nodup_iff_nthLe_inj] at h [GOAL] case intro.intro α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length 
(permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x ⊢ False [PROOFSTEP] suffices k = k + 1 by simp at this [GOAL] α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x this : k = k + 1 ⊢ False [PROOFSTEP] simp at this [GOAL] case intro.intro α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x ⊢ k = k + 1 [PROOFSTEP] refine' h k (k + 1) _ _ _ [GOAL] case intro.intro.refine'_1 α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x ⊢ k < length (permutations'Aux x s) [PROOFSTEP] simpa [Nat.lt_succ_iff] using hk.le [GOAL] case intro.intro.refine'_2 α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x ⊢ k + 1 < length (permutations'Aux x s) [PROOFSTEP] simpa using hk [GOAL] case intro.intro.refine'_3 α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x ⊢ nthLe (permutations'Aux x s) k (_ : k < length (permutations'Aux x s)) = nthLe (permutations'Aux x s) (k + 1) (_ : k + 1 < length (permutations'Aux x s)) [PROOFSTEP] rw [nthLe_permutations'Aux, nthLe_permutations'Aux] [GOAL] case intro.intro.refine'_3 α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x ⊢ insertNth k x s = insertNth (k + 1) x s [PROOFSTEP] have hl : length (insertNth k x s) = length (insertNth (k + 1) x s) := by rw [length_insertNth _ _ hk.le, length_insertNth _ _ (Nat.succ_le_of_lt hk)] [GOAL] α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x ⊢ length (insertNth k x s) = length (insertNth (k + 1) x s) [PROOFSTEP] rw [length_insertNth _ _ hk.le, length_insertNth _ _ (Nat.succ_le_of_lt hk)] [GOAL] case intro.intro.refine'_3 α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x hl : length 
(insertNth k x s) = length (insertNth (k + 1) x s) ⊢ insertNth k x s = insertNth (k + 1) x s [PROOFSTEP] refine' ext_nthLe hl fun n hn hn' => _ [GOAL] case intro.intro.refine'_3 α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x hl : length (insertNth k x s) = length (insertNth (k + 1) x s) n : ℕ hn : n < length (insertNth k x s) hn' : n < length (insertNth (k + 1) x s) ⊢ nthLe (insertNth k x s) n hn = nthLe (insertNth (k + 1) x s) n hn' [PROOFSTEP] rcases lt_trichotomy n k with (H | rfl | H) [GOAL] case intro.intro.refine'_3.inl α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H✝ : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x hl : length (insertNth k x s) = length (insertNth (k + 1) x s) n : ℕ hn : n < length (insertNth k x s) hn' : n < length (insertNth (k + 1) x s) H : n < k ⊢ nthLe (insertNth k x s) n hn = nthLe (insertNth (k + 1) x s) n hn' [PROOFSTEP] rw [nthLe_insertNth_of_lt _ _ _ _ H (H.trans hk), nthLe_insertNth_of_lt _ _ _ _ (H.trans (Nat.lt_succ_self _))] [GOAL] case intro.intro.refine'_3.inr.inl α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H : x ∈ s n : ℕ hk : n < length s hk' : nthLe s n hk = x hl : length (insertNth n x s) = length (insertNth (n + 1) x s) hn : n < length (insertNth n x s) hn' : n < length (insertNth (n + 1) x s) ⊢ nthLe (insertNth n x s) n hn = nthLe (insertNth (n + 1) x s) n hn' [PROOFSTEP] rw [nthLe_insertNth_self _ _ _ hk.le, nthLe_insertNth_of_lt _ _ _ _ (Nat.lt_succ_self _) hk, hk'] [GOAL] case intro.intro.refine'_3.inr.inr α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H✝ : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x hl : length (insertNth k x s) = length (insertNth (k + 1) x s) n : ℕ hn : n < length (insertNth k x s) hn' : n < length (insertNth (k + 1) x s) H : k < n ⊢ nthLe (insertNth k x s) n hn = nthLe (insertNth (k + 1) x s) n hn' [PROOFSTEP] rcases(Nat.succ_le_of_lt H).eq_or_lt with (rfl | H') [GOAL] case intro.intro.refine'_3.inr.inr.inl α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H✝ : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x hl : length (insertNth k x s) = length (insertNth (k + 1) x s) hn : succ k < length (insertNth k x s) hn' : succ k < length (insertNth (k + 1) x s) H : k < succ k ⊢ nthLe (insertNth k x s) (succ k) hn = nthLe (insertNth (k + 1) x s) (succ k) hn' [PROOFSTEP] rw [nthLe_insertNth_self _ _ _ (Nat.succ_le_of_lt hk)] [GOAL] case intro.intro.refine'_3.inr.inr.inl α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i 
= j H✝ : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x hl : length (insertNth k x s) = length (insertNth (k + 1) x s) hn : succ k < length (insertNth k x s) hn' : succ k < length (insertNth (k + 1) x s) H : k < succ k ⊢ nthLe (insertNth k x s) (succ k) hn = x [PROOFSTEP] convert hk' using 1 [GOAL] case h.e'_2 α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H✝ : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x hl : length (insertNth k x s) = length (insertNth (k + 1) x s) hn : succ k < length (insertNth k x s) hn' : succ k < length (insertNth (k + 1) x s) H : k < succ k ⊢ nthLe (insertNth k x s) (succ k) hn = nthLe s k hk [PROOFSTEP] exact nthLe_insertNth_add_succ _ _ _ 0 _ [GOAL] case intro.intro.refine'_3.inr.inr.inr α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H✝ : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x hl : length (insertNth k x s) = length (insertNth (k + 1) x s) n : ℕ hn : n < length (insertNth k x s) hn' : n < length (insertNth (k + 1) x s) H : k < n H' : succ k < n ⊢ nthLe (insertNth k x s) n hn = nthLe (insertNth (k + 1) x s) n hn' [PROOFSTEP] obtain ⟨m, rfl⟩ := Nat.exists_eq_add_of_lt H' [GOAL] case intro.intro.refine'_3.inr.inr.inr.intro α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H✝ : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x hl : length (insertNth k x s) = length (insertNth (k + 1) x s) m : ℕ hn : succ k + m + 1 < length (insertNth k x s) hn' : succ k + m + 1 < length (insertNth (k + 1) x s) H : k < succ k + m + 1 H' : succ k < succ k + m + 1 ⊢ nthLe (insertNth k x s) (succ k + m + 1) hn = nthLe (insertNth (k + 1) x s) (succ k + m + 1) hn' [PROOFSTEP] erw [length_insertNth _ _ hk.le, Nat.succ_lt_succ_iff, Nat.succ_add] at hn [GOAL] case intro.intro.refine'_3.inr.inr.inr.intro α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H✝ : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x hl : length (insertNth k x s) = length (insertNth (k + 1) x s) m : ℕ hn✝ : succ k + m + 1 < length (insertNth k x s) hn : succ (k + m) < length s hn' : succ k + m + 1 < length (insertNth (k + 1) x s) H : k < succ k + m + 1 H' : succ k < succ k + m + 1 ⊢ nthLe (insertNth k x s) (succ k + m + 1) hn✝ = nthLe (insertNth (k + 1) x s) (succ k + m + 1) hn' [PROOFSTEP] rw [nthLe_insertNth_add_succ] [GOAL] case intro.intro.refine'_3.inr.inr.inr.intro α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H✝ : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x hl : length (insertNth k x s) = length (insertNth (k + 1) x s) m : ℕ hn✝ : succ k + m + 1 < length (insertNth k x s) hn : succ (k + m) < length s hn' : succ k + m + 1 < length (insertNth (k + 1) x s) H : k < succ k + m + 1 H' : succ k < succ k + m + 1 ⊢ 
nthLe (insertNth k x s) (succ k + m + 1) hn✝ = nthLe s (k + 1 + m) ?intro.intro.refine'_3.inr.inr.inr.intro.hk' case intro.intro.refine'_3.inr.inr.inr.intro.hk' α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H✝ : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x hl : length (insertNth k x s) = length (insertNth (k + 1) x s) m : ℕ hn✝ : succ k + m + 1 < length (insertNth k x s) hn : succ (k + m) < length s hn' : succ k + m + 1 < length (insertNth (k + 1) x s) H : k < succ k + m + 1 H' : succ k < succ k + m + 1 ⊢ k + 1 + m < length s [PROOFSTEP] convert nthLe_insertNth_add_succ s x k m.succ (by simpa using hn) using 2 [GOAL] α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H✝ : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x hl : length (insertNth k x s) = length (insertNth (k + 1) x s) m : ℕ hn✝ : succ k + m + 1 < length (insertNth k x s) hn : succ (k + m) < length s hn' : succ k + m + 1 < length (insertNth (k + 1) x s) H : k < succ k + m + 1 H' : succ k < succ k + m + 1 ⊢ k + succ m < length s [PROOFSTEP] simpa using hn [GOAL] case h.e'_2.h.e'_3 α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H✝ : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x hl : length (insertNth k x s) = length (insertNth (k + 1) x s) m : ℕ hn✝ : succ k + m + 1 < length (insertNth k x s) hn : succ (k + m) < length s hn' : succ k + m + 1 < length (insertNth (k + 1) x s) H : k < succ k + m + 1 H' : succ k < succ k + m + 1 ⊢ succ k + m + 1 = k + succ m + 1 [PROOFSTEP] simp [Nat.add_succ, Nat.succ_add] [GOAL] case h.e'_3.h.e'_3 α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H✝ : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x hl : length (insertNth k x s) = length (insertNth (k + 1) x s) m : ℕ hn✝ : succ k + m + 1 < length (insertNth k x s) hn : succ (k + m) < length s hn' : succ k + m + 1 < length (insertNth (k + 1) x s) H : k < succ k + m + 1 H' : succ k < succ k + m + 1 ⊢ k + 1 + m = k + succ m [PROOFSTEP] simp [add_left_comm, add_comm] [GOAL] case intro.intro.refine'_3.inr.inr.inr.intro.hk' α : Type uu β : Type vv l₁ l₂ s : List α x : α h : ∀ (i j : ℕ) (h₁ : i < length (permutations'Aux x s)) (h₂ : j < length (permutations'Aux x s)), nthLe (permutations'Aux x s) i h₁ = nthLe (permutations'Aux x s) j h₂ → i = j H✝ : x ∈ s k : ℕ hk : k < length s hk' : nthLe s k hk = x hl : length (insertNth k x s) = length (insertNth (k + 1) x s) m : ℕ hn✝ : succ k + m + 1 < length (insertNth k x s) hn : succ (k + m) < length s hn' : succ k + m + 1 < length (insertNth (k + 1) x s) H : k < succ k + m + 1 H' : succ k < succ k + m + 1 ⊢ k + 1 + m < length s [PROOFSTEP] simpa [Nat.succ_add] using hn [GOAL] α : Type uu β : Type vv l₁ l₂ s : List α hs : Nodup s ⊢ Nodup (permutations s) [PROOFSTEP] rw [(permutations_perm_permutations' s).nodup_iff] [GOAL] α : Type uu β : Type vv l₁ l₂ s : List α hs : Nodup s ⊢ Nodup (permutations' s) 
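-- Editor's note (not part of the extracted trace): the goal above was obtained by
-- rewriting with `(permutations_perm_permutations' s).nodup_iff`, which transports
-- `Nodup` across the permutation `permutations s ~ permutations' s`; the step below
-- then argues by induction on `hs : Nodup s`. A hedged concrete instance of the
-- statement being proved, assuming Mathlib's decidability instances for `Nodup`;
-- kept as a comment to preserve the trace format:
-- example : ([1, 2, 3].permutations).Nodup := by decide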
[PROOFSTEP] induction' hs with x l h h' IH [GOAL] case nil α : Type uu β : Type vv l₁ l₂ s : List α ⊢ Nodup (permutations' []) [PROOFSTEP] simp [GOAL] case cons α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) ⊢ Nodup (permutations' (x :: l)) [PROOFSTEP] rw [permutations'] [GOAL] case cons α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) ⊢ Nodup (List.bind (permutations' l) (permutations'Aux x)) [PROOFSTEP] rw [nodup_bind] [GOAL] case cons α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) ⊢ (∀ (x_1 : List α), x_1 ∈ permutations' l → Nodup (permutations'Aux x x_1)) ∧ Pairwise (fun a b => Disjoint (permutations'Aux x a) (permutations'Aux x b)) (permutations' l) [PROOFSTEP] constructor [GOAL] case cons.left α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) ⊢ ∀ (x_1 : List α), x_1 ∈ permutations' l → Nodup (permutations'Aux x x_1) [PROOFSTEP] intro ys hy [GOAL] case cons.left α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) ys : List α hy : ys ∈ permutations' l ⊢ Nodup (permutations'Aux x ys) [PROOFSTEP] rw [mem_permutations'] at hy [GOAL] case cons.left α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) ys : List α hy : ys ~ l ⊢ Nodup (permutations'Aux x ys) [PROOFSTEP] rw [nodup_permutations'Aux_iff, hy.mem_iff] [GOAL] case cons.left α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) ys : List α hy : ys ~ l ⊢ ¬x ∈ l [PROOFSTEP] exact fun H => h x H rfl [GOAL] case cons.right α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) ⊢ Pairwise (fun a b => Disjoint (permutations'Aux x a) (permutations'Aux x b)) (permutations' l) [PROOFSTEP] refine' IH.pairwise_of_forall_ne fun as ha bs hb H => _ [GOAL] case cons.right α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ∈ permutations' l bs : List α hb : bs ∈ permutations' l H : as ≠ bs ⊢ Disjoint (permutations'Aux x as) (permutations'Aux x bs) [PROOFSTEP] rw [disjoint_iff_ne] [GOAL] case cons.right α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ∈ permutations' l bs : List α hb : bs ∈ permutations' l H : as ≠ bs ⊢ ∀ (a : List α), a ∈ permutations'Aux x as → ∀ (b : List α), b ∈ permutations'Aux x bs → a ≠ b [PROOFSTEP] rintro a ha' b hb' rfl [GOAL] case cons.right α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ∈ permutations' l bs : List α hb : bs ∈ permutations' l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : 
a ∈ permutations'Aux x bs ⊢ False [PROOFSTEP] obtain ⟨⟨n, hn⟩, hn'⟩ := get_of_mem ha' [GOAL] case cons.right.intro.mk α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ∈ permutations' l bs : List α hb : bs ∈ permutations' l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn : n < length (permutations'Aux x as) hn' : get (permutations'Aux x as) { val := n, isLt := hn } = a ⊢ False [PROOFSTEP] obtain ⟨⟨m, hm⟩, hm'⟩ := get_of_mem hb' [GOAL] case cons.right.intro.mk.intro.mk α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ∈ permutations' l bs : List α hb : bs ∈ permutations' l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn : n < length (permutations'Aux x as) hn' : get (permutations'Aux x as) { val := n, isLt := hn } = a m : ℕ hm : m < length (permutations'Aux x bs) hm' : get (permutations'Aux x bs) { val := m, isLt := hm } = a ⊢ False [PROOFSTEP] rw [mem_permutations'] at ha hb [GOAL] case cons.right.intro.mk.intro.mk α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn : n < length (permutations'Aux x as) hn' : get (permutations'Aux x as) { val := n, isLt := hn } = a m : ℕ hm : m < length (permutations'Aux x bs) hm' : get (permutations'Aux x bs) { val := m, isLt := hm } = a ⊢ False [PROOFSTEP] have hl : as.length = bs.length := (ha.trans hb.symm).length_eq [GOAL] case cons.right.intro.mk.intro.mk α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn : n < length (permutations'Aux x as) hn' : get (permutations'Aux x as) { val := n, isLt := hn } = a m : ℕ hm : m < length (permutations'Aux x bs) hm' : get (permutations'Aux x bs) { val := m, isLt := hm } = a hl : length as = length bs ⊢ False [PROOFSTEP] simp only [Nat.lt_succ_iff, length_permutations'Aux] at hn hm [GOAL] case cons.right.intro.mk.intro.mk α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn✝ : n < length (permutations'Aux x as) hn' : get (permutations'Aux x as) { val := n, isLt := hn✝ } = a m : ℕ hm✝ : m < length (permutations'Aux x bs) hm' : get (permutations'Aux x bs) { val := m, isLt := hm✝ } = a hl : length as = length bs hn : n ≤ length as hm : m ≤ length bs ⊢ False [PROOFSTEP] rw [← nthLe, nthLe_permutations'Aux] at hn' hm' [GOAL] case cons.right.intro.mk.intro.mk α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux 
x bs n : ℕ hn✝ : n < length (permutations'Aux x as) hn' : insertNth n x as = a m : ℕ hm✝ : m < length (permutations'Aux x bs) hm' : insertNth m x bs = a hl : length as = length bs hn : n ≤ length as hm : m ≤ length bs ⊢ False [PROOFSTEP] have hx : nthLe (insertNth n x as) m (by rwa [length_insertNth _ _ hn, Nat.lt_succ_iff, hl]) = x := by simp [hn', ← hm', hm] [GOAL] α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn✝ : n < length (permutations'Aux x as) hn' : insertNth n x as = a m : ℕ hm✝ : m < length (permutations'Aux x bs) hm' : insertNth m x bs = a hl : length as = length bs hn : n ≤ length as hm : m ≤ length bs ⊢ m < length (insertNth n x as) [PROOFSTEP] rwa [length_insertNth _ _ hn, Nat.lt_succ_iff, hl] [GOAL] α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn✝ : n < length (permutations'Aux x as) hn' : insertNth n x as = a m : ℕ hm✝ : m < length (permutations'Aux x bs) hm' : insertNth m x bs = a hl : length as = length bs hn : n ≤ length as hm : m ≤ length bs ⊢ nthLe (insertNth n x as) m (_ : m < length (insertNth n x as)) = x [PROOFSTEP] simp [hn', ← hm', hm] [GOAL] case cons.right.intro.mk.intro.mk α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn✝ : n < length (permutations'Aux x as) hn' : insertNth n x as = a m : ℕ hm✝ : m < length (permutations'Aux x bs) hm' : insertNth m x bs = a hl : length as = length bs hn : n ≤ length as hm : m ≤ length bs hx : nthLe (insertNth n x as) m (_ : m < length (insertNth n x as)) = x ⊢ False [PROOFSTEP] have hx' : nthLe (insertNth m x bs) n (by rwa [length_insertNth _ _ hm, Nat.lt_succ_iff, ← hl]) = x := by simp [hm', ← hn', hn] [GOAL] α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn✝ : n < length (permutations'Aux x as) hn' : insertNth n x as = a m : ℕ hm✝ : m < length (permutations'Aux x bs) hm' : insertNth m x bs = a hl : length as = length bs hn : n ≤ length as hm : m ≤ length bs hx : nthLe (insertNth n x as) m (_ : m < length (insertNth n x as)) = x ⊢ n < length (insertNth m x bs) [PROOFSTEP] rwa [length_insertNth _ _ hm, Nat.lt_succ_iff, ← hl] [GOAL] α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn✝ : n < length (permutations'Aux x as) hn' : insertNth n x as = a m : ℕ hm✝ : m < length (permutations'Aux x bs) hm' : insertNth m x bs = a hl : length as = length bs hn : n ≤ length as hm : m ≤ length bs hx : 
nthLe (insertNth n x as) m (_ : m < length (insertNth n x as)) = x ⊢ nthLe (insertNth m x bs) n (_ : n < length (insertNth m x bs)) = x [PROOFSTEP] simp [hm', ← hn', hn] [GOAL] case cons.right.intro.mk.intro.mk α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn✝ : n < length (permutations'Aux x as) hn' : insertNth n x as = a m : ℕ hm✝ : m < length (permutations'Aux x bs) hm' : insertNth m x bs = a hl : length as = length bs hn : n ≤ length as hm : m ≤ length bs hx : nthLe (insertNth n x as) m (_ : m < length (insertNth n x as)) = x hx' : nthLe (insertNth m x bs) n (_ : n < length (insertNth m x bs)) = x ⊢ False [PROOFSTEP] rcases lt_trichotomy n m with (ht | ht | ht) [GOAL] case cons.right.intro.mk.intro.mk.inl α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn✝ : n < length (permutations'Aux x as) hn' : insertNth n x as = a m : ℕ hm✝ : m < length (permutations'Aux x bs) hm' : insertNth m x bs = a hl : length as = length bs hn : n ≤ length as hm : m ≤ length bs hx : nthLe (insertNth n x as) m (_ : m < length (insertNth n x as)) = x hx' : nthLe (insertNth m x bs) n (_ : n < length (insertNth m x bs)) = x ht : n < m ⊢ False [PROOFSTEP] suffices x ∈ bs by exact h x (hb.subset this) rfl [GOAL] α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn✝ : n < length (permutations'Aux x as) hn' : insertNth n x as = a m : ℕ hm✝ : m < length (permutations'Aux x bs) hm' : insertNth m x bs = a hl : length as = length bs hn : n ≤ length as hm : m ≤ length bs hx : nthLe (insertNth n x as) m (_ : m < length (insertNth n x as)) = x hx' : nthLe (insertNth m x bs) n (_ : n < length (insertNth m x bs)) = x ht : n < m this : x ∈ bs ⊢ False [PROOFSTEP] exact h x (hb.subset this) rfl [GOAL] case cons.right.intro.mk.intro.mk.inl α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn✝ : n < length (permutations'Aux x as) hn' : insertNth n x as = a m : ℕ hm✝ : m < length (permutations'Aux x bs) hm' : insertNth m x bs = a hl : length as = length bs hn : n ≤ length as hm : m ≤ length bs hx : nthLe (insertNth n x as) m (_ : m < length (insertNth n x as)) = x hx' : nthLe (insertNth m x bs) n (_ : n < length (insertNth m x bs)) = x ht : n < m ⊢ x ∈ bs [PROOFSTEP] rw [← hx', nthLe_insertNth_of_lt _ _ _ _ ht (ht.trans_le hm)] [GOAL] case cons.right.intro.mk.intro.mk.inl α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ 
permutations'Aux x bs n : ℕ hn✝ : n < length (permutations'Aux x as) hn' : insertNth n x as = a m : ℕ hm✝ : m < length (permutations'Aux x bs) hm' : insertNth m x bs = a hl : length as = length bs hn : n ≤ length as hm : m ≤ length bs hx : nthLe (insertNth n x as) m (_ : m < length (insertNth n x as)) = x hx' : nthLe (insertNth m x bs) n (_ : n < length (insertNth m x bs)) = x ht : n < m ⊢ nthLe bs n (_ : n < length bs) ∈ bs [PROOFSTEP] exact nthLe_mem _ _ _ [GOAL] case cons.right.intro.mk.intro.mk.inr.inl α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn✝ : n < length (permutations'Aux x as) hn' : insertNth n x as = a m : ℕ hm✝ : m < length (permutations'Aux x bs) hm' : insertNth m x bs = a hl : length as = length bs hn : n ≤ length as hm : m ≤ length bs hx : nthLe (insertNth n x as) m (_ : m < length (insertNth n x as)) = x hx' : nthLe (insertNth m x bs) n (_ : n < length (insertNth m x bs)) = x ht : n = m ⊢ False [PROOFSTEP] simp only [ht] at hm' hn' [GOAL] case cons.right.intro.mk.intro.mk.inr.inl α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn✝ : n < length (permutations'Aux x as) m : ℕ hm✝ : m < length (permutations'Aux x bs) hm' : insertNth m x bs = a hl : length as = length bs hn : n ≤ length as hm : m ≤ length bs hx : nthLe (insertNth n x as) m (_ : m < length (insertNth n x as)) = x hx' : nthLe (insertNth m x bs) n (_ : n < length (insertNth m x bs)) = x ht : n = m hn' : insertNth m x as = a ⊢ False [PROOFSTEP] rw [← hm'] at hn' [GOAL] case cons.right.intro.mk.intro.mk.inr.inl α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn✝ : n < length (permutations'Aux x as) m : ℕ hm✝ : m < length (permutations'Aux x bs) hm' : insertNth m x bs = a hl : length as = length bs hn : n ≤ length as hm : m ≤ length bs hx : nthLe (insertNth n x as) m (_ : m < length (insertNth n x as)) = x hx' : nthLe (insertNth m x bs) n (_ : n < length (insertNth m x bs)) = x ht : n = m hn' : insertNth m x as = insertNth m x bs ⊢ False [PROOFSTEP] exact H (insertNth_injective _ _ hn') [GOAL] case cons.right.intro.mk.intro.mk.inr.inr α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn✝ : n < length (permutations'Aux x as) hn' : insertNth n x as = a m : ℕ hm✝ : m < length (permutations'Aux x bs) hm' : insertNth m x bs = a hl : length as = length bs hn : n ≤ length as hm : m ≤ length bs hx : nthLe (insertNth n x as) m (_ : m < length (insertNth n x as)) = x hx' : nthLe (insertNth m x bs) n (_ : n < length (insertNth m x bs)) = x ht : m < n ⊢ False [PROOFSTEP] suffices x ∈ as by exact h x (ha.subset this) rfl [GOAL] α : Type uu β 
: Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn✝ : n < length (permutations'Aux x as) hn' : insertNth n x as = a m : ℕ hm✝ : m < length (permutations'Aux x bs) hm' : insertNth m x bs = a hl : length as = length bs hn : n ≤ length as hm : m ≤ length bs hx : nthLe (insertNth n x as) m (_ : m < length (insertNth n x as)) = x hx' : nthLe (insertNth m x bs) n (_ : n < length (insertNth m x bs)) = x ht : m < n this : x ∈ as ⊢ False [PROOFSTEP] exact h x (ha.subset this) rfl [GOAL] case cons.right.intro.mk.intro.mk.inr.inr α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn✝ : n < length (permutations'Aux x as) hn' : insertNth n x as = a m : ℕ hm✝ : m < length (permutations'Aux x bs) hm' : insertNth m x bs = a hl : length as = length bs hn : n ≤ length as hm : m ≤ length bs hx : nthLe (insertNth n x as) m (_ : m < length (insertNth n x as)) = x hx' : nthLe (insertNth m x bs) n (_ : n < length (insertNth m x bs)) = x ht : m < n ⊢ x ∈ as [PROOFSTEP] rw [← hx, nthLe_insertNth_of_lt _ _ _ _ ht (ht.trans_le hn)] [GOAL] case cons.right.intro.mk.intro.mk.inr.inr α : Type uu β : Type vv l₁ l₂ s : List α x : α l : List α h : ∀ (a' : α), a' ∈ l → x ≠ a' h' : Pairwise (fun x x_1 => x ≠ x_1) l IH : Nodup (permutations' l) as : List α ha : as ~ l bs : List α hb : bs ~ l H : as ≠ bs a : List α ha' : a ∈ permutations'Aux x as hb' : a ∈ permutations'Aux x bs n : ℕ hn✝ : n < length (permutations'Aux x as) hn' : insertNth n x as = a m : ℕ hm✝ : m < length (permutations'Aux x bs) hm' : insertNth m x bs = a hl : length as = length bs hn : n ≤ length as hm : m ≤ length bs hx : nthLe (insertNth n x as) m (_ : m < length (insertNth n x as)) = x hx' : nthLe (insertNth m x bs) n (_ : n < length (insertNth m x bs)) = x ht : m < n ⊢ nthLe as m (_ : m < length as) ∈ as [PROOFSTEP] exact nthLe_mem _ _ _
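The case analysis in the proof above turns entirely on how insertNth shifts indices. As a quick sanity check, here is a sketch in Lean 4 against mathlib4's List.insertNth (the name used in the proof states; newer mathlib versions rename it to List.insertIdx), with example lists chosen purely for illustration:

-- Sketch: insertNth k x s puts x at index k and shifts later entries right by one.
example : List.insertNth 1 9 [5, 6, 7] = [5, 9, 6, 7] := rfl
example : List.insertNth 2 9 [5, 6, 7] = [5, 6, 9, 7] := rfl

Indices below k are untouched, the new element sits at index k, and an entry formerly at index k + m reappears at index k + m + 1, which is exactly the three-way split (below, at, above the insertion point) that the nthLe reasoning above performs.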
// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. //------------------------------------------------------------------------------ // KeypointSpatialIndex.h // // Allows construction, querying, and removal of Keypoint entries based on pixel position. //------------------------------------------------------------------------------ #pragma once #include "Utils/thread_memory.h" #include <memory> #include <vector> #include <functional> #include <opencv2/features2d/features2d.hpp> #include <gsl/span> namespace mage { class KeypointSpatialIndex { struct Impl; public: KeypointSpatialIndex(gsl::span<const cv::KeyPoint> keypoints); void Query(const cv::Point2f& center, int octave, float radius, temp::vector<size_t>& results) const; void Remove(const cv::KeyPoint& keypoint, size_t value); ~KeypointSpatialIndex(); private: static constexpr float octaveSpacing = 100; static constexpr float octaveQueryRange = 1; std::unique_ptr<Impl> m_impl; }; }
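For orientation, here is a brute-force reference in C++ for the query this index accelerates. It is a sketch, not part of the original header: it returns a std::vector instead of the unshown temp::vector from Utils/thread_memory.h, and it matches keypoints on the exact octave only (how the real Impl uses octaveSpacing and octaveQueryRange to mix neighbouring octaves is not visible from the header).

#include <opencv2/features2d/features2d.hpp>
#include <cstddef>
#include <vector>

// Brute-force reference for KeypointSpatialIndex::Query (sketch; exact-octave match assumed).
std::vector<size_t> QueryBruteForce(const std::vector<cv::KeyPoint>& keypoints,
                                    const cv::Point2f& center, int octave, float radius)
{
    std::vector<size_t> results;
    const float radiusSq = radius * radius;
    for (size_t i = 0; i < keypoints.size(); ++i)
    {
        const cv::Point2f d = keypoints[i].pt - center;
        if (keypoints[i].octave == octave && d.dot(d) <= radiusSq)
            results.push_back(i); // indices into the original keypoint span
    }
    return results;
}

The real class presumably trades this O(n) scan for a spatial structure behind the pimpl; the reference is only meant to pin down the query's semantics.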
module Impute using BSON using CSV using DataDeps using Distances using IterTools using Missings using NamedDims using NearestNeighbors using Random using Statistics using StatsBase using TableOperations using Tables: Tables, materializer, istable using Base.Iterators using LinearAlgebra using LinearAlgebra: Diagonal include("utils.jl") include("declaremissings.jl") include("imputors.jl") include("filter.jl") include("validators.jl") include("chain.jl") include("deprecated.jl") include("functional.jl") include("data.jl") __init__() = register_datadep() end # module
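A minimal usage sketch for the module above. The helper names (Impute.interp, Impute.locf, Impute.nocb) are the ones documented in the Impute.jl README; verify against the installed version:

using Impute

v = [1.0, missing, 3.0, missing, missing, 6.0]

Impute.interp(v)  # linear interpolation between observed neighbours
Impute.locf(v)    # last observation carried forward
Impute.nocb(v)    # next observation carried backward

# The curried forms compose, as in the README's chained example:
Impute.interp(v) |> Impute.locf() |> Impute.nocb()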
Cropped jackets are versatile. However, there are a few rules to follow when buying one. A cropped jacket can be worn in more than one way, so you need to choose wisely when buying a cropped black jacket. The good thing about these jackets is that you can wear them to work and even on casual occasions. If you are looking for something a bit different from a sweater, then you should consider buying a cropped jacket. The cropped black jacket is one of the great designs that could be missing from your wardrobe. Here are just a few things you should look for when purchasing one. The material of the jacket matters a lot when you are buying it. You need to be familiar with the different materials to make the right choice. While cotton jackets are suitable for warm weather, jackets made of wool are more formal. Make sure that you choose a material that will last for a long time. You do not want to buy something that will get worn out within the first few months of wearing it. Women's jackets differ in length. Therefore, it is advisable to go for a jacket that fits perfectly. Cropped jackets lie just a few inches above your waist. You can also opt for waist-length jackets that will draw attention to your waist. Hip-length jackets reach over the hip and can flatter your figure. When choosing the length of the jacket, make sure you go for something you are comfortable wearing. For instance, short ladies will not look good in jackets that are a few inches above the waist. Even as you shop for a practical jacket, it is important to remain stylish. Cropped jackets come in a variety of designs and styles, so you should buy something that will give you that one-of-a-kind look. A good jacket is not only comfortable but stylish as well. Furthermore, when you purchase a blazer with a great style, you will stand out from the rest. Go ahead and choose a blazer that makes you feel confident and looks elegant. This entry was posted on January 26, 2018 by Maxine Eastman.
/******************************************************************** created: 2012/08/27 created: 27:8:2012 15:04 filename: VietesFormulaeTest.cpp file path: test file base: VietesFormulaeTest file ext: cpp author: Han Hu purpose: *********************************************************************/ #include "GAGPL/MATH/VietesFormulae.h" #include <boost/foreach.hpp> #include <iostream> int main() { using namespace msmath; using namespace std; double mycoef[] = {3, 2, 0, 5, 1}; PolyCoef poly_eq(mycoef, mycoef + sizeof(mycoef) / sizeof(double)); VietesFormulae formulae(poly_eq); EleSymPolyVec ele_values = formulae.getElementarySymmetricFunctionFromCoef(); // The correct value should be: 1, -5, 0, -2, 3. for(size_t i=0; i<ele_values.size(); i++) { cout << i << ": " << ele_values.at(i) << endl; } cout << "The End!" << endl; cin.get(); }
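For reference, the expected output in the comment is an instance of Vieta's formulas. Reading mycoef in ascending order, p(x) = a_0 + a_1 x + a_2 x^2 + a_3 x^3 + a_4 x^4 with (a_0, ..., a_4) = (3, 2, 0, 5, 1), the elementary symmetric polynomials of the roots r_1, ..., r_4 satisfy

e_k(r_1, \dots, r_4) = (-1)^k \frac{a_{4-k}}{a_4}

which gives (e_0, \dots, e_4) = (1, -5, 0, -2, 3), matching the values the test prints.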
subsection \<open>Definite Assignment\<close> theory DefiniteAssignment imports WellType begin text \<open>Definite Assignment Analysis (cf. 16) The definite assignment analysis approximates the sets of local variables that will be assigned at a certain point of evaluation, and ensures that we will only read variables which previously were assigned. It should conform to the following idea: If the evaluation of a term completes normally (no abruption (exception, break, continue, return) appeared), the set of local variables calculated by the analysis is a subset of the variables that were actually assigned during evaluation. To get more precise information about the sets of assigned variables the analysis includes the following optimisations: \begin{itemize} \item Inside of a while loop we also take care of the variables assigned before break statements, since the break causes the while loop to continue normally. \item For conditional statements we take care of constant conditions to statically determine the path of evaluation. \item Inside a distinct path of a conditional statement we know which boolean value the condition has evaluated to, and so can retrieve more information about the variables assigned during evaluation of the boolean condition. \end{itemize} Since in our model of Java the return values of methods are stored in a local variable we also ensure that every path of (normal) evaluation will assign the result variable, or in the sense of real Java every path ends up in a return instruction. Not covered yet: \begin{itemize} \item analysis of definite unassigned \item special treatment of final fields \end{itemize} \<close> subsubsection \<open>Correct nesting of jump statements\<close> text \<open>For definite assignment it becomes crucial that jumps (break, continue, return) are nested correctly, i.e. a continue jump is nested in a matching while statement, a break jump is nested in a proper label statement, a class initialiser does not terminate abruptly with a return. With this we can for example ensure that evaluation of an expression will never end up with a jump, since no breaks, continues or returns are allowed in an expression.\<close> primrec jumpNestingOkS :: "jump set \<Rightarrow> stmt \<Rightarrow> bool" where "jumpNestingOkS jmps (Skip) = True" | "jumpNestingOkS jmps (Expr e) = True" | "jumpNestingOkS jmps (j\<bullet> s) = jumpNestingOkS ({j} \<union> jmps) s" | "jumpNestingOkS jmps (c1;;c2) = (jumpNestingOkS jmps c1 \<and> jumpNestingOkS jmps c2)" | "jumpNestingOkS jmps (If(e) c1 Else c2) = (jumpNestingOkS jmps c1 \<and> jumpNestingOkS jmps c2)" | "jumpNestingOkS jmps (l\<bullet> While(e) c) = jumpNestingOkS ({Cont l} \<union> jmps) c" \<comment> \<open>The label of the while loop only handles continue jumps.
Breaks are only handled by \<^term>\<open>Lab\<close>\<close> | "jumpNestingOkS jmps (Jmp j) = (j \<in> jmps)" | "jumpNestingOkS jmps (Throw e) = True" | "jumpNestingOkS jmps (Try c1 Catch(C vn) c2) = (jumpNestingOkS jmps c1 \<and> jumpNestingOkS jmps c2)" | "jumpNestingOkS jmps (c1 Finally c2) = (jumpNestingOkS jmps c1 \<and> jumpNestingOkS jmps c2)" | "jumpNestingOkS jmps (Init C) = True" \<comment> \<open>wellformedness of the program must ensure that for all initializers jumpNestingOkS {} holds\<close> \<comment> \<open>Dummy analysis for intermediate smallstep term \<^term>\<open>FinA\<close>\<close> | "jumpNestingOkS jmps (FinA a c) = False" definition jumpNestingOk :: "jump set \<Rightarrow> term \<Rightarrow> bool" where "jumpNestingOk jmps t = (case t of In1 se \<Rightarrow> (case se of Inl e \<Rightarrow> True | Inr s \<Rightarrow> jumpNestingOkS jmps s) | In2 v \<Rightarrow> True | In3 es \<Rightarrow> True)" lemma jumpNestingOk_expr_simp [simp]: "jumpNestingOk jmps (In1l e) = True" by (simp add: jumpNestingOk_def) lemma jumpNestingOk_expr_simp1 [simp]: "jumpNestingOk jmps \<langle>e::expr\<rangle> = True" by (simp add: inj_term_simps) lemma jumpNestingOk_stmt_simp [simp]: "jumpNestingOk jmps (In1r s) = jumpNestingOkS jmps s" by (simp add: jumpNestingOk_def) lemma jumpNestingOk_stmt_simp1 [simp]: "jumpNestingOk jmps \<langle>s::stmt\<rangle> = jumpNestingOkS jmps s" by (simp add: inj_term_simps) lemma jumpNestingOk_var_simp [simp]: "jumpNestingOk jmps (In2 v) = True" by (simp add: jumpNestingOk_def) lemma jumpNestingOk_var_simp1 [simp]: "jumpNestingOk jmps \<langle>v::var\<rangle> = True" by (simp add: inj_term_simps) lemma jumpNestingOk_expr_list_simp [simp]: "jumpNestingOk jmps (In3 es) = True" by (simp add: jumpNestingOk_def) lemma jumpNestingOk_expr_list_simp1 [simp]: "jumpNestingOk jmps \<langle>es::expr list\<rangle> = True" by (simp add: inj_term_simps) subsubsection \<open>Calculation of assigned variables for boolean expressions\<close> subsection \<open>Very restricted calculation fallback calculation\<close> primrec the_LVar_name :: "var \<Rightarrow> lname" where "the_LVar_name (LVar n) = n" primrec assignsE :: "expr \<Rightarrow> lname set" and assignsV :: "var \<Rightarrow> lname set" and assignsEs:: "expr list \<Rightarrow> lname set" where "assignsE (NewC c) = {}" | "assignsE (NewA t e) = assignsE e" | "assignsE (Cast t e) = assignsE e" | "assignsE (e InstOf r) = assignsE e" | "assignsE (Lit val) = {}" | "assignsE (UnOp unop e) = assignsE e" | "assignsE (BinOp binop e1 e2) = (if binop=CondAnd \<or> binop=CondOr then (assignsE e1) else (assignsE e1) \<union> (assignsE e2))" | "assignsE (Super) = {}" | "assignsE (Acc v) = assignsV v" | "assignsE (v:=e) = (assignsV v) \<union> (assignsE e) \<union> (if \<exists> n. v=(LVar n) then {the_LVar_name v} else {})" | "assignsE (b?
e1 : e2) = (assignsE b) \<union> ((assignsE e1) \<inter> (assignsE e2))" | "assignsE ({accC,statT,mode}objRef\<cdot>mn({pTs}args)) = (assignsE objRef) \<union> (assignsEs args)" \<comment> \<open>Only dummy analysis for intermediate expressions \<^term>\<open>Methd\<close>, \<^term>\<open>Body\<close>, \<^term>\<open>InsInitE\<close> and \<^term>\<open>Callee\<close>\<close> | "assignsE (Methd C sig) = {}" | "assignsE (Body C s) = {}" | "assignsE (InsInitE s e) = {}" | "assignsE (Callee l e) = {}" | "assignsV (LVar n) = {}" | "assignsV ({accC,statDeclC,stat}objRef..fn) = assignsE objRef" | "assignsV (e1.[e2]) = assignsE e1 \<union> assignsE e2" | "assignsEs [] = {}" | "assignsEs (e#es) = assignsE e \<union> assignsEs es" definition assigns :: "term \<Rightarrow> lname set" where "assigns t = (case t of In1 se \<Rightarrow> (case se of Inl e \<Rightarrow> assignsE e | Inr s \<Rightarrow> {}) | In2 v \<Rightarrow> assignsV v | In3 es \<Rightarrow> assignsEs es)" lemma assigns_expr_simp [simp]: "assigns (In1l e) = assignsE e" by (simp add: assigns_def) lemma assigns_expr_simp1 [simp]: "assigns (\<langle>e\<rangle>) = assignsE e" by (simp add: inj_term_simps) lemma assigns_stmt_simp [simp]: "assigns (In1r s) = {}" by (simp add: assigns_def) lemma assigns_stmt_simp1 [simp]: "assigns (\<langle>s::stmt\<rangle>) = {}" by (simp add: inj_term_simps) lemma assigns_var_simp [simp]: "assigns (In2 v) = assignsV v" by (simp add: assigns_def) lemma assigns_var_simp1 [simp]: "assigns (\<langle>v\<rangle>) = assignsV v" by (simp add: inj_term_simps) lemma assigns_expr_list_simp [simp]: "assigns (In3 es) = assignsEs es" by (simp add: assigns_def) lemma assigns_expr_list_simp1 [simp]: "assigns (\<langle>es\<rangle>) = assignsEs es" by (simp add: inj_term_simps) subsection "Analysis of constant expressions" primrec constVal :: "expr \<Rightarrow> val option" where "constVal (NewC c) = None" | "constVal (NewA t e) = None" | "constVal (Cast t e) = None" | "constVal (Inst e r) = None" | "constVal (Lit val) = Some val" | "constVal (UnOp unop e) = (case (constVal e) of None \<Rightarrow> None | Some v \<Rightarrow> Some (eval_unop unop v))" | "constVal (BinOp binop e1 e2) = (case (constVal e1) of None \<Rightarrow> None | Some v1 \<Rightarrow> (case (constVal e2) of None \<Rightarrow> None | Some v2 \<Rightarrow> Some (eval_binop binop v1 v2)))" | "constVal (Super) = None" | "constVal (Acc v) = None" | "constVal (Ass v e) = None" | "constVal (Cond b e1 e2) = (case (constVal b) of None \<Rightarrow> None | Some bv\<Rightarrow> (case the_Bool bv of True \<Rightarrow> (case (constVal e2) of None \<Rightarrow> None | Some v \<Rightarrow> constVal e1) | False\<Rightarrow> (case (constVal e1) of None \<Rightarrow> None | Some v \<Rightarrow> constVal e2)))" \<comment> \<open>Note that \<open>constVal (Cond b e1 e2)\<close> is stricter than it could be. It requires that all three expressions are constant even if we can decide which branch to choose, provided the constant value of \<^term>\<open>b\<close>\<close> | "constVal (Call accC statT mode objRef mn pTs args) = None" | "constVal (Methd C sig) = None" | "constVal (Body C s) = None" | "constVal (InsInitE s e) = None" | "constVal (Callee l e) = None" lemma constVal_Some_induct [consumes 1, case_names Lit UnOp BinOp CondL CondR]: assumes const: "constVal e = Some v" and hyp_Lit: "\<And> v. P (Lit v)" and hyp_UnOp: "\<And> unop e'. P e' \<Longrightarrow> P (UnOp unop e')" and hyp_BinOp: "\<And> binop e1 e2.
\<lbrakk>P e1; P e2\<rbrakk> \<Longrightarrow> P (BinOp binop e1 e2)" and hyp_CondL: "\<And> b bv e1 e2. \<lbrakk>constVal b = Some bv; the_Bool bv; P b; P e1\<rbrakk> \<Longrightarrow> P (b? e1 : e2)" and hyp_CondR: "\<And> b bv e1 e2. \<lbrakk>constVal b = Some bv; \<not>the_Bool bv; P b; P e2\<rbrakk> \<Longrightarrow> P (b? e1 : e2)" shows "P e" proof - have "\<And> v. constVal e = Some v \<Longrightarrow> P e" proof (induct e) case Lit show ?case by (rule hyp_Lit) next case UnOp thus ?case by (auto intro: hyp_UnOp) next case BinOp thus ?case by (auto intro: hyp_BinOp) next case (Cond b e1 e2) then obtain v where v: "constVal (b ? e1 : e2) = Some v" by blast then obtain bv where bv: "constVal b = Some bv" by simp show ?case proof (cases "the_Bool bv") case True with Cond show ?thesis using v bv by (auto intro: hyp_CondL) next case False with Cond show ?thesis using v bv by (auto intro: hyp_CondR) qed qed (simp_all add: hyp_Lit) with const show ?thesis by blast qed lemma assignsE_const_simp: "constVal e = Some v \<Longrightarrow> assignsE e = {}" by (induct rule: constVal_Some_induct) simp_all subsection \<open>Main analysis for boolean expressions\<close> text \<open>Assigned local variables after evaluating the expression if it evaluates to a specific boolean value. If the expression cannot evaluate to a \<^term>\<open>Boolean\<close> value UNIV is returned. If we expect true/false the opposite constant false/true will also lead to UNIV.\<close> primrec assigns_if :: "bool \<Rightarrow> expr \<Rightarrow> lname set" where "assigns_if b (NewC c) = UNIV" \<comment> \<open>can never evaluate to Boolean\<close> | "assigns_if b (NewA t e) = UNIV" \<comment> \<open>can never evaluate to Boolean\<close> | "assigns_if b (Cast t e) = assigns_if b e" | "assigns_if b (Inst e r) = assignsE e" \<comment> \<open>Inst has type Boolean but e is a reference type\<close> | "assigns_if b (Lit val) = (if val=Bool b then {} else UNIV)" | "assigns_if b (UnOp unop e) = (case constVal (UnOp unop e) of None \<Rightarrow> (if unop = UNot then assigns_if (\<not>b) e else UNIV) | Some v \<Rightarrow> (if v=Bool b then {} else UNIV))" | "assigns_if b (BinOp binop e1 e2) = (case constVal (BinOp binop e1 e2) of None \<Rightarrow> (if binop=CondAnd then (case b of True \<Rightarrow> assigns_if True e1 \<union> assigns_if True e2 | False \<Rightarrow> assigns_if False e1 \<inter> (assigns_if True e1 \<union> assigns_if False e2)) else (if binop=CondOr then (case b of True \<Rightarrow> assigns_if True e1 \<inter> (assigns_if False e1 \<union> assigns_if True e2) | False \<Rightarrow> assigns_if False e1 \<union> assigns_if False e2) else assignsE e1 \<union> assignsE e2)) | Some v \<Rightarrow> (if v=Bool b then {} else UNIV))" | "assigns_if b (Super) = UNIV" \<comment> \<open>can never evaluate to Boolean\<close> | "assigns_if b (Acc v) = (assignsV v)" | "assigns_if b (v := e) = (assignsE (Ass v e))" | "assigns_if b (c? 
e1 : e2) = (assignsE c) \<union> (case (constVal c) of None \<Rightarrow> (assigns_if b e1) \<inter> (assigns_if b e2) | Some bv \<Rightarrow> (case the_Bool bv of True \<Rightarrow> assigns_if b e1 | False \<Rightarrow> assigns_if b e2))" | "assigns_if b ({accC,statT,mode}objRef\<cdot>mn({pTs}args)) = assignsE ({accC,statT,mode}objRef\<cdot>mn({pTs}args)) " \<comment> \<open>Only dummy analysis for intermediate expressions \<^term>\<open>Methd\<close>, \<^term>\<open>Body\<close>, \<^term>\<open>InsInitE\<close> and \<^term>\<open>Callee\<close>\<close> | "assigns_if b (Methd C sig) = {}" | "assigns_if b (Body C s) = {}" | "assigns_if b (InsInitE s e) = {}" | "assigns_if b (Callee l e) = {}" lemma assigns_if_const_b_simp: assumes boolConst: "constVal e = Some (Bool b)" (is "?Const b e") shows "assigns_if b e = {}" (is "?Ass b e") proof - have "\<And> b. ?Const b e \<Longrightarrow> ?Ass b e" proof (induct e) case Lit thus ?case by simp next case UnOp thus ?case by simp next case (BinOp binop) thus ?case by (cases binop) (simp_all) next case (Cond c e1 e2 b) note hyp_c = \<open>\<And> b. ?Const b c \<Longrightarrow> ?Ass b c\<close> note hyp_e1 = \<open>\<And> b. ?Const b e1 \<Longrightarrow> ?Ass b e1\<close> note hyp_e2 = \<open>\<And> b. ?Const b e2 \<Longrightarrow> ?Ass b e2\<close> note const = \<open>constVal (c ? e1 : e2) = Some (Bool b)\<close> then obtain bv where bv: "constVal c = Some bv" by simp hence emptyC: "assignsE c = {}" by (rule assignsE_const_simp) show ?case proof (cases "the_Bool bv") case True with const bv have "?Const b e1" by simp hence "?Ass b e1" by (rule hyp_e1) with emptyC bv True show ?thesis by simp next case False with const bv have "?Const b e2" by simp hence "?Ass b e2" by (rule hyp_e2) with emptyC bv False show ?thesis by simp qed qed (simp_all) with boolConst show ?thesis by blast qed lemma assigns_if_const_not_b_simp: assumes boolConst: "constVal e = Some (Bool b)" (is "?Const b e") shows "assigns_if (\<not>b) e = UNIV" (is "?Ass b e") proof - have "\<And> b. ?Const b e \<Longrightarrow> ?Ass b e" proof (induct e) case Lit thus ?case by simp next case UnOp thus ?case by simp next case (BinOp binop) thus ?case by (cases binop) (simp_all) next case (Cond c e1 e2 b) note hyp_c = \<open>\<And> b. ?Const b c \<Longrightarrow> ?Ass b c\<close> note hyp_e1 = \<open>\<And> b. ?Const b e1 \<Longrightarrow> ?Ass b e1\<close> note hyp_e2 = \<open>\<And> b. ?Const b e2 \<Longrightarrow> ?Ass b e2\<close> note const = \<open>constVal (c ? e1 : e2) = Some (Bool b)\<close> then obtain bv where bv: "constVal c = Some bv" by simp show ?case proof (cases "the_Bool bv") case True with const bv have "?Const b e1" by simp hence "?Ass b e1" by (rule hyp_e1) with bv True show ?thesis by simp next case False with const bv have "?Const b e2" by simp hence "?Ass b e2" by (rule hyp_e2) with bv False show ?thesis by simp qed qed (simp_all) with boolConst show ?thesis by blast qed subsection \<open>Lifting set operations to range of tables (map to a set)\<close> definition union_ts :: "('a,'b) tables \<Rightarrow> ('a,'b) tables \<Rightarrow> ('a,'b) tables" ("_ \<Rightarrow>\<union> _" [67,67] 65) where "A \<Rightarrow>\<union> B = (\<lambda> k. A k \<union> B k)" definition intersect_ts :: "('a,'b) tables \<Rightarrow> ('a,'b) tables \<Rightarrow> ('a,'b) tables" ("_ \<Rightarrow>\<inter> _" [72,72] 71) where "A \<Rightarrow>\<inter> B = (\<lambda>k. 
A k \<inter> B k)" definition all_union_ts :: "('a,'b) tables \<Rightarrow> 'b set \<Rightarrow> ('a,'b) tables" (infixl "\<Rightarrow>\<union>\<^sub>\<forall>" 40) where "(A \<Rightarrow>\<union>\<^sub>\<forall> B) = (\<lambda> k. A k \<union> B)" subsubsection \<open>Binary union of tables\<close> lemma union_ts_iff [simp]: "(c \<in> (A \<Rightarrow>\<union> B) k) = (c \<in> A k \<or> c \<in> B k)" by (unfold union_ts_def) blast lemma union_tsI1 [elim?]: "c \<in> A k \<Longrightarrow> c \<in> (A \<Rightarrow>\<union> B) k" by simp lemma union_tsI2 [elim?]: "c \<in> B k \<Longrightarrow> c \<in> (A \<Rightarrow>\<union> B) k" by simp lemma union_tsCI [intro!]: "(c \<notin> B k \<Longrightarrow> c \<in> A k) \<Longrightarrow> c \<in> (A \<Rightarrow>\<union> B) k" by auto lemma union_tsE [elim!]: "\<lbrakk>c \<in> (A \<Rightarrow>\<union> B) k; (c \<in> A k \<Longrightarrow> P); (c \<in> B k \<Longrightarrow> P)\<rbrakk> \<Longrightarrow> P" by (unfold union_ts_def) blast subsubsection \<open>Binary intersection of tables\<close> lemma intersect_ts_iff [simp]: "c \<in> (A \<Rightarrow>\<inter> B) k = (c \<in> A k \<and> c \<in> B k)" by (unfold intersect_ts_def) blast lemma intersect_tsI [intro!]: "\<lbrakk>c \<in> A k; c \<in> B k\<rbrakk> \<Longrightarrow> c \<in> (A \<Rightarrow>\<inter> B) k" by simp lemma intersect_tsD1: "c \<in> (A \<Rightarrow>\<inter> B) k \<Longrightarrow> c \<in> A k" by simp lemma intersect_tsD2: "c \<in> (A \<Rightarrow>\<inter> B) k \<Longrightarrow> c \<in> B k" by simp lemma intersect_tsE [elim!]: "\<lbrakk>c \<in> (A \<Rightarrow>\<inter> B) k; \<lbrakk>c \<in> A k; c \<in> B k\<rbrakk> \<Longrightarrow> P\<rbrakk> \<Longrightarrow> P" by simp subsubsection \<open>All-Union of tables and set\<close> lemma all_union_ts_iff [simp]: "(c \<in> (A \<Rightarrow>\<union>\<^sub>\<forall> B) k) = (c \<in> A k \<or> c \<in> B)" by (unfold all_union_ts_def) blast lemma all_union_tsI1 [elim?]: "c \<in> A k \<Longrightarrow> c \<in> (A \<Rightarrow>\<union>\<^sub>\<forall> B) k" by simp lemma all_union_tsI2 [elim?]: "c \<in> B \<Longrightarrow> c \<in> (A \<Rightarrow>\<union>\<^sub>\<forall> B) k" by simp lemma all_union_tsCI [intro!]: "(c \<notin> B \<Longrightarrow> c \<in> A k) \<Longrightarrow> c \<in> (A \<Rightarrow>\<union>\<^sub>\<forall> B) k" by auto lemma all_union_tsE [elim!]: "\<lbrakk>c \<in> (A \<Rightarrow>\<union>\<^sub>\<forall> B) k; (c \<in> A k \<Longrightarrow> P); (c \<in> B \<Longrightarrow> P)\<rbrakk> \<Longrightarrow> P" by (unfold all_union_ts_def) blast subsubsection "The rules of definite assignment" type_synonym breakass = "(label, lname) tables" \<comment> \<open>Mapping from a break label to the set of variables that will be assigned if the evaluation terminates with this break\<close> record assigned = nrm :: "lname set" \<comment> \<open>Definitely assigned variables for normal completion\<close> brk :: "breakass" \<comment> \<open>Definitely assigned variables for abrupt completion with a break\<close> definition rmlab :: "'a \<Rightarrow> ('a,'b) tables \<Rightarrow> ('a,'b) tables" where "rmlab k A = (\<lambda>x. if x=k then UNIV else A x)" (* definition setbrk :: "breakass \<Rightarrow> assigned \<Rightarrow> breakass set" where "setbrk b A = {b} \<union> {a| a. a\<in> brk A \<and> lab a \<noteq> lab b}" *) definition range_inter_ts :: "('a,'b) tables \<Rightarrow> 'b set" ("\<Rightarrow>\<Inter>_" 80) where "\<Rightarrow>\<Inter>A = {x |x. \<forall> k.
x \<in> A k}" text \<open> In \<open>E\<turnstile> B \<guillemotright>t\<guillemotright> A\<close>, \<open>B\<close> denotes the ''assigned'' variables before evaluating term \<open>t\<close>, whereas \<open>A\<close> denotes the ''assigned'' variables after evaluating term \<open>t\<close>. The environment \<^term>\<open>E\<close> is only needed for the conditional \<open>_ ? _ : _\<close>. The definite assignment rules refer to the typing rules here to distinguish boolean and other expressions. \<close> inductive da :: "env \<Rightarrow> lname set \<Rightarrow> term \<Rightarrow> assigned \<Rightarrow> bool" ("_\<turnstile> _ \<guillemotright>_\<guillemotright> _" [65,65,65,65] 71) where Skip: "Env\<turnstile> B \<guillemotright>\<langle>Skip\<rangle>\<guillemotright> \<lparr>nrm=B,brk=\<lambda> l. UNIV\<rparr>" | Expr: "Env\<turnstile> B \<guillemotright>\<langle>e\<rangle>\<guillemotright> A \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>Expr e\<rangle>\<guillemotright> A" | Lab: "\<lbrakk>Env\<turnstile> B \<guillemotright>\<langle>c\<rangle>\<guillemotright> C; nrm A = nrm C \<inter> (brk C) l; brk A = rmlab l (brk C)\<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>Break l\<bullet> c\<rangle>\<guillemotright> A" | Comp: "\<lbrakk>Env\<turnstile> B \<guillemotright>\<langle>c1\<rangle>\<guillemotright> C1; Env\<turnstile> nrm C1 \<guillemotright>\<langle>c2\<rangle>\<guillemotright> C2; nrm A = nrm C2; brk A = (brk C1) \<Rightarrow>\<inter> (brk C2)\<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>c1;; c2\<rangle>\<guillemotright> A" | If: "\<lbrakk>Env\<turnstile> B \<guillemotright>\<langle>e\<rangle>\<guillemotright> E; Env\<turnstile> (B \<union> assigns_if True e) \<guillemotright>\<langle>c1\<rangle>\<guillemotright> C1; Env\<turnstile> (B \<union> assigns_if False e) \<guillemotright>\<langle>c2\<rangle>\<guillemotright> C2; nrm A = nrm C1 \<inter> nrm C2; brk A = brk C1 \<Rightarrow>\<inter> brk C2 \<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>If(e) c1 Else c2\<rangle>\<guillemotright> A" \<comment> \<open>Note that \<^term>\<open>E\<close> is not further used, because we take the specialized sets that also consider if the expression evaluates to true or false. Inside of \<^term>\<open>e\<close> there is no {\tt break} or {\tt finally}, so the break map of \<^term>\<open>E\<close> will be the trivial one. So \<^term>\<open>Env\<turnstile>B \<guillemotright>\<langle>e\<rangle>\<guillemotright> E\<close> is just used to ensure the definite assignment in expression \<^term>\<open>e\<close>. Notice the implicit analysis of a constant boolean expression \<^term>\<open>e\<close> in this rule. For example, if \<^term>\<open>e\<close> is constantly \<^term>\<open>True\<close> then \<^term>\<open>assigns_if False e = UNIV\<close> and therefore \<^term>\<open>nrm C2=UNIV\<close>. So finally \<^term>\<open>nrm A = nrm C1\<close>. For the break maps this trick works too, because the trivial break map will map all labels to \<^term>\<open>UNIV\<close>. In the example, if no break occurs in \<^term>\<open>c2\<close> the break maps will trivially map to \<^term>\<open>UNIV\<close> and if a break occurs it will map to \<^term>\<open>UNIV\<close> too, because \<^term>\<open>assigns_if False e = UNIV\<close>.
So in the intersection of the break maps the path \<^term>\<open>c2\<close> will have no contribution.\<close> | Loop: "\<lbrakk>Env\<turnstile> B \<guillemotright>\<langle>e\<rangle>\<guillemotright> E; Env\<turnstile> (B \<union> assigns_if True e) \<guillemotright>\<langle>c\<rangle>\<guillemotright> C; nrm A = nrm C \<inter> (B \<union> assigns_if False e); brk A = brk C\<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>l\<bullet> While(e) c\<rangle>\<guillemotright> A" \<comment> \<open>The \<open>Loop\<close> rule resembles some of the ideas of the \<open>If\<close> rule. For the \<^term>\<open>nrm A\<close> the set \<^term>\<open>B \<union> assigns_if False e\<close> will be \<^term>\<open>UNIV\<close> if the condition is constantly true. To normally exit the while loop, we must consider the body \<^term>\<open>c\<close> to be completed normally (\<^term>\<open>nrm C\<close>) or with a break. But in this model, the label \<^term>\<open>l\<close> of the loop only handles continue labels, not break labels. The break label will be handled by an enclosing \<^term>\<open>Lab\<close> statement. So we don't have to handle the breaks specially.\<close> | Jmp: "\<lbrakk>jump=Ret \<longrightarrow> Result \<in> B; nrm A = UNIV; brk A = (case jump of Break l \<Rightarrow> \<lambda> k. if k=l then B else UNIV | Cont l \<Rightarrow> \<lambda> k. UNIV | Ret \<Rightarrow> \<lambda> k. UNIV)\<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>Jmp jump\<rangle>\<guillemotright> A" \<comment> \<open>In case of a break to label \<^term>\<open>l\<close> the corresponding break set is all variables assigned before the break. The assigned variables for normal completion of the \<^term>\<open>Jmp\<close> are \<^term>\<open>UNIV\<close>, because the statement will never complete normally. For continue and return the break map is the trivial one. In case of a return we ensure that the result value is assigned.\<close> | Throw: "\<lbrakk>Env\<turnstile> B \<guillemotright>\<langle>e\<rangle>\<guillemotright> E; nrm A = UNIV; brk A = (\<lambda> l. UNIV)\<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>Throw e\<rangle>\<guillemotright> A" | Try: "\<lbrakk>Env\<turnstile> B \<guillemotright>\<langle>c1\<rangle>\<guillemotright> C1; Env\<lparr>lcl := (lcl Env)(VName vn\<mapsto>Class C)\<rparr>\<turnstile> (B \<union> {VName vn}) \<guillemotright>\<langle>c2\<rangle>\<guillemotright> C2; nrm A = nrm C1 \<inter> nrm C2; brk A = brk C1 \<Rightarrow>\<inter> brk C2\<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>Try c1 Catch(C vn) c2\<rangle>\<guillemotright> A" | Fin: "\<lbrakk>Env\<turnstile> B \<guillemotright>\<langle>c1\<rangle>\<guillemotright> C1; Env\<turnstile> B \<guillemotright>\<langle>c2\<rangle>\<guillemotright> C2; nrm A = nrm C1 \<union> nrm C2; brk A = ((brk C1) \<Rightarrow>\<union>\<^sub>\<forall> (nrm C2)) \<Rightarrow>\<inter> (brk C2)\<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>c1 Finally c2\<rangle>\<guillemotright> A" \<comment> \<open>The set of assigned variables before execution of \<^term>\<open>c2\<close> is the same as before execution of \<^term>\<open>c1\<close>, because \<^term>\<open>c1\<close> could throw an exception and so we can't guarantee that any variable will be assigned in \<^term>\<open>c1\<close>. The \<open>Finally\<close> statement completes normally if both \<^term>\<open>c1\<close> and \<^term>\<open>c2\<close> complete normally.
If \<^term>\<open>c1\<close> completes abruptly with a break, then \<^term>\<open>c2\<close> also will be executed and may terminate normally or with a break. The overall break map then is the intersection of the maps of both paths. If \<^term>\<open>c2\<close> terminates normally we have to extend all break sets in \<^term>\<open>brk C1\<close> with \<^term>\<open>nrm C2\<close> (\<open>\<Rightarrow>\<union>\<^sub>\<forall>\<close>). If \<^term>\<open>c2\<close> exits with a break this break will appear in the overall result state. We don't know if \<^term>\<open>c1\<close> completed normally or abruptly (maybe with an exception not only a break) so \<^term>\<open>c1\<close> has no contribution to the break map following this path.\<close> \<comment> \<open>Evaluation of expressions and the break sets of definite assignment: Thinking of a Java expression we assume that we can never have a break statement inside of an expression. So for all expressions the break sets could be set to the trivial one: \<^term>\<open>\<lambda> l. UNIV\<close>. But we can't trivially prove that evaluating an expression will never result in a break, although Java expressions already syntactically don't allow nested statements in them. The reason is the nested class initialization statements which are inserted by the evaluation rules. So to prove the absence of a break we need to ensure that the initialization statements will never end up in a break. In a wellformed initialization statement, of course, where breaks are nested correctly inside of \<^term>\<open>Lab\<close> or \<^term>\<open>Loop\<close> statements, evaluation of the whole initialization statement will never result in a break, because this break will be handled inside of the statement. But for simplicity we haven't added the analysis of the correct nesting of breaks in the typing judgments right now. So we have decided to adjust the rules of definite assignment to fit to these circumstances. If an initialization is involved during evaluation of the expression (evaluation rules \<open>FVar\<close>, \<open>NewC\<close> and \<open>NewA\<close>\<close> | Init: "Env\<turnstile> B \<guillemotright>\<langle>Init C\<rangle>\<guillemotright> \<lparr>nrm=B,brk=\<lambda> l. UNIV\<rparr>" \<comment> \<open>Wellformedness of a program will ensure that every static initialiser is definitely assigned and the jumps are nested correctly. The case here for \<^term>\<open>Init\<close> is just for convenience, to get a proper precondition for the induction hypothesis in various proofs, so that we don't have to expand the initialisation at every point where it is triggered by the evaluation rules.\<close> | NewC: "Env\<turnstile> B \<guillemotright>\<langle>NewC C\<rangle>\<guillemotright> \<lparr>nrm=B,brk=\<lambda> l. UNIV\<rparr>" | NewA: "Env\<turnstile> B \<guillemotright>\<langle>e\<rangle>\<guillemotright> A \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>New T[e]\<rangle>\<guillemotright> A" | Cast: "Env\<turnstile> B \<guillemotright>\<langle>e\<rangle>\<guillemotright> A \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>Cast T e\<rangle>\<guillemotright> A" | Inst: "Env\<turnstile> B \<guillemotright>\<langle>e\<rangle>\<guillemotright> A \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>e InstOf T\<rangle>\<guillemotright> A" | Lit: "Env\<turnstile> B \<guillemotright>\<langle>Lit v\<rangle>\<guillemotright> \<lparr>nrm=B,brk=\<lambda> l.
UNIV\<rparr>" | UnOp: "Env\<turnstile> B \<guillemotright>\<langle>e\<rangle>\<guillemotright> A \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>UnOp unop e\<rangle>\<guillemotright> A" | CondAnd: "\<lbrakk>Env\<turnstile> B \<guillemotright>\<langle>e1\<rangle>\<guillemotright> E1; Env\<turnstile> (B \<union> assigns_if True e1) \<guillemotright>\<langle>e2\<rangle>\<guillemotright> E2; nrm A = B \<union> (assigns_if True (BinOp CondAnd e1 e2) \<inter> assigns_if False (BinOp CondAnd e1 e2)); brk A = (\<lambda> l. UNIV) \<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>BinOp CondAnd e1 e2\<rangle>\<guillemotright> A" | CondOr: "\<lbrakk>Env\<turnstile> B \<guillemotright>\<langle>e1\<rangle>\<guillemotright> E1; Env\<turnstile> (B \<union> assigns_if False e1) \<guillemotright>\<langle>e2\<rangle>\<guillemotright> E2; nrm A = B \<union> (assigns_if True (BinOp CondOr e1 e2) \<inter> assigns_if False (BinOp CondOr e1 e2)); brk A = (\<lambda> l. UNIV) \<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>BinOp CondOr e1 e2\<rangle>\<guillemotright> A" | BinOp: "\<lbrakk>Env\<turnstile> B \<guillemotright>\<langle>e1\<rangle>\<guillemotright> E1; Env\<turnstile> nrm E1 \<guillemotright>\<langle>e2\<rangle>\<guillemotright> A; binop \<noteq> CondAnd; binop \<noteq> CondOr\<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>BinOp binop e1 e2\<rangle>\<guillemotright> A" | Super: "This \<in> B \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>Super\<rangle>\<guillemotright> \<lparr>nrm=B,brk=\<lambda> l. UNIV\<rparr>" | AccLVar: "\<lbrakk>vn \<in> B; nrm A = B; brk A = (\<lambda> k. UNIV)\<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>Acc (LVar vn)\<rangle>\<guillemotright> A" \<comment> \<open>To properly access a local variable we have to test the definite assignment here. The variable must occur in the set \<^term>\<open>B\<close>\<close> | Acc: "\<lbrakk>\<forall> vn. v \<noteq> LVar vn; Env\<turnstile> B \<guillemotright>\<langle>v\<rangle>\<guillemotright> A\<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>Acc v\<rangle>\<guillemotright> A" | AssLVar: "\<lbrakk>Env\<turnstile> B \<guillemotright>\<langle>e\<rangle>\<guillemotright> E; nrm A = nrm E \<union> {vn}; brk A = brk E\<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>(LVar vn) := e\<rangle>\<guillemotright> A" | Ass: "\<lbrakk>\<forall> vn. v \<noteq> LVar vn; Env\<turnstile> B \<guillemotright>\<langle>v\<rangle>\<guillemotright> V; Env\<turnstile> nrm V \<guillemotright>\<langle>e\<rangle>\<guillemotright> A\<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>v := e\<rangle>\<guillemotright> A" | CondBool: "\<lbrakk>Env\<turnstile>(c ? e1 : e2)\<Colon>-(PrimT Boolean); Env\<turnstile> B \<guillemotright>\<langle>c\<rangle>\<guillemotright> C; Env\<turnstile> (B \<union> assigns_if True c) \<guillemotright>\<langle>e1\<rangle>\<guillemotright> E1; Env\<turnstile> (B \<union> assigns_if False c) \<guillemotright>\<langle>e2\<rangle>\<guillemotright> E2; nrm A = B \<union> (assigns_if True (c ? e1 : e2) \<inter> assigns_if False (c ? e1 : e2)); brk A = (\<lambda> l. UNIV)\<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>c ? e1 : e2\<rangle>\<guillemotright> A" | Cond: "\<lbrakk>\<not> Env\<turnstile>(c ? 
e1 : e2)\<Colon>-(PrimT Boolean); Env\<turnstile> B \<guillemotright>\<langle>c\<rangle>\<guillemotright> C; Env\<turnstile> (B \<union> assigns_if True c) \<guillemotright>\<langle>e1\<rangle>\<guillemotright> E1; Env\<turnstile> (B \<union> assigns_if False c) \<guillemotright>\<langle>e2\<rangle>\<guillemotright> E2; nrm A = nrm E1 \<inter> nrm E2; brk A = (\<lambda> l. UNIV)\<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>c ? e1 : e2\<rangle>\<guillemotright> A"
| Call: "\<lbrakk>Env\<turnstile> B \<guillemotright>\<langle>e\<rangle>\<guillemotright> E; Env\<turnstile> nrm E \<guillemotright>\<langle>args\<rangle>\<guillemotright> A\<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>{accC,statT,mode}e\<cdot>mn({pTs}args)\<rangle>\<guillemotright> A"
\<comment> \<open>The interplay of \<^term>\<open>Call\<close>, \<^term>\<open>Methd\<close> and \<^term>\<open>Body\<close>: Why rules for \<^term>\<open>Methd\<close> and \<^term>\<open>Body\<close> at all? Note that a Java source program will not include bare \<^term>\<open>Methd\<close> or \<^term>\<open>Body\<close> terms. These terms are just introduced during evaluation. So definite assignment of \<^term>\<open>Call\<close> does not consider \<^term>\<open>Methd\<close> or \<^term>\<open>Body\<close> at all. So for definite assignment alone we could omit the rules for \<^term>\<open>Methd\<close> and \<^term>\<open>Body\<close>. But since evaluation of the method invocation is split up into three rules we must ensure that we have enough information about the call even in the \<^term>\<open>Body\<close> term to make sure that we can prove type safety. Also we must be able to transport this information from \<^term>\<open>Call\<close> to \<^term>\<open>Methd\<close> and then further to \<^term>\<open>Body\<close> during evaluation to establish the definite assignment of \<^term>\<open>Methd\<close> during evaluation of \<^term>\<open>Call\<close>, and of \<^term>\<open>Body\<close> during evaluation of \<^term>\<open>Methd\<close>. This is necessary since definite assignment will be a precondition for each induction hypothesis coming out of the evaluation rules, and therefore we have to establish the definite assignment of the sub-evaluation during the type-safety proof. Note that well-typedness is also a precondition for type-safety and so we can omit some assertions that are already ensured by well-typedness.\<close>
| Methd: "\<lbrakk>methd (prg Env) D sig = Some m; Env\<turnstile> B \<guillemotright>\<langle>Body (declclass m) (stmt (mbody (mthd m)))\<rangle>\<guillemotright> A \<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>Methd D sig\<rangle>\<guillemotright> A"
| Body: "\<lbrakk>Env\<turnstile> B \<guillemotright>\<langle>c\<rangle>\<guillemotright> C; jumpNestingOkS {Ret} c; Result \<in> nrm C; nrm A = B; brk A = (\<lambda> l. UNIV)\<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>Body D c\<rangle>\<guillemotright> A"
\<comment> \<open>Note that \<^term>\<open>A\<close> is not correlated to \<^term>\<open>C\<close>. If the body statement returns abruptly with return, evaluation of \<^term>\<open>Body\<close> will absorb this return and complete normally. So we cannot trivially get the assigned variables of the body statement since it has not completed normally or with a break. If the body completes normally we guarantee that the result variable is set with this rule.
But if the body completes abruptly with a return we can't guarantee that the result variable is set here, since definite assignment only talks about normal completion and breaks. So for a return the \<^term>\<open>Jump\<close> rule ensures that the result variable is set and then this information must be carried over to the \<^term>\<open>Body\<close> rule by the conformance predicate of the state.\<close> | LVar: "Env\<turnstile> B \<guillemotright>\<langle>LVar vn\<rangle>\<guillemotright> \<lparr>nrm=B, brk=\<lambda> l. UNIV\<rparr>" | FVar: "Env\<turnstile> B \<guillemotright>\<langle>e\<rangle>\<guillemotright> A \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>{accC,statDeclC,stat}e..fn\<rangle>\<guillemotright> A" | AVar: "\<lbrakk>Env\<turnstile> B \<guillemotright>\<langle>e1\<rangle>\<guillemotright> E1; Env\<turnstile> nrm E1 \<guillemotright>\<langle>e2\<rangle>\<guillemotright> A\<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>e1.[e2]\<rangle>\<guillemotright> A" | Nil: "Env\<turnstile> B \<guillemotright>\<langle>[]::expr list\<rangle>\<guillemotright> \<lparr>nrm=B, brk=\<lambda> l. UNIV\<rparr>" | Cons: "\<lbrakk>Env\<turnstile> B \<guillemotright>\<langle>e::expr\<rangle>\<guillemotright> E; Env\<turnstile> nrm E \<guillemotright>\<langle>es\<rangle>\<guillemotright> A\<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>e#es\<rangle>\<guillemotright> A" declare inj_term_sym_simps [simp] declare assigns_if.simps [simp del] declare split_paired_All [simp del] split_paired_Ex [simp del] setup \<open>map_theory_simpset (fn ctxt => ctxt delloop "split_all_tac")\<close> inductive_cases da_elim_cases [cases set]: "Env\<turnstile> B \<guillemotright>\<langle>Skip\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1r Skip\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>Expr e\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1r (Expr e)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>l\<bullet> c\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1r (l\<bullet> c)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>c1;; c2\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1r (c1;; c2)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>If(e) c1 Else c2\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1r (If(e) c1 Else c2)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>l\<bullet> While(e) c\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1r (l\<bullet> While(e) c)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>Jmp jump\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1r (Jmp jump)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>Throw e\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1r (Throw e)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>Try c1 Catch(C vn) c2\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1r (Try c1 Catch(C vn) c2)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>c1 Finally c2\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1r (c1 Finally c2)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>Init C\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1r (Init C)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>NewC C\<rangle>\<guillemotright> 
A" "Env\<turnstile> B \<guillemotright>In1l (NewC C)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>New T[e]\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1l (New T[e])\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>Cast T e\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1l (Cast T e)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>e InstOf T\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1l (e InstOf T)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>Lit v\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1l (Lit v)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>UnOp unop e\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1l (UnOp unop e)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>BinOp binop e1 e2\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1l (BinOp binop e1 e2)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>Super\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1l (Super)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>Acc v\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1l (Acc v)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>v := e\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1l (v := e)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>c ? e1 : e2\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1l (c ? e1 : e2)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>{accC,statT,mode}e\<cdot>mn({pTs}args)\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1l ({accC,statT,mode}e\<cdot>mn({pTs}args))\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>Methd C sig\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1l (Methd C sig)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>Body D c\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In1l (Body D c)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>LVar vn\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In2 (LVar vn)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>{accC,statDeclC,stat}e..fn\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In2 ({accC,statDeclC,stat}e..fn)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>e1.[e2]\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In2 (e1.[e2])\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>[]::expr list\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In3 ([]::expr list)\<guillemotright> A" "Env\<turnstile> B \<guillemotright>\<langle>e#es\<rangle>\<guillemotright> A" "Env\<turnstile> B \<guillemotright>In3 (e#es)\<guillemotright> A" declare inj_term_sym_simps [simp del] declare assigns_if.simps [simp] declare split_paired_All [simp] split_paired_Ex [simp] setup \<open>map_theory_simpset (fn ctxt => ctxt addloop ("split_all_tac", split_all_tac))\<close> (* To be able to eliminate both the versions with the overloaded brackets: (B \<guillemotright>\<langle>Skip\<rangle>\<guillemotright> A) and with the explicit constructor (B \<guillemotright>In1r Skip\<guillemotright> A), every rule appears in both versions *) lemma da_Skip: "A = \<lparr>nrm=B,brk=\<lambda> l. 
UNIV\<rparr> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>Skip\<rangle>\<guillemotright> A" by (auto intro: da.Skip) lemma da_NewC: "A = \<lparr>nrm=B,brk=\<lambda> l. UNIV\<rparr> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>NewC C\<rangle>\<guillemotright> A" by (auto intro: da.NewC) lemma da_Lit: "A = \<lparr>nrm=B,brk=\<lambda> l. UNIV\<rparr> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>Lit v\<rangle>\<guillemotright> A" by (auto intro: da.Lit) lemma da_Super: "\<lbrakk>This \<in> B;A = \<lparr>nrm=B,brk=\<lambda> l. UNIV\<rparr>\<rbrakk> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>Super\<rangle>\<guillemotright> A" by (auto intro: da.Super) lemma da_Init: "A = \<lparr>nrm=B,brk=\<lambda> l. UNIV\<rparr> \<Longrightarrow> Env\<turnstile> B \<guillemotright>\<langle>Init C\<rangle>\<guillemotright> A" by (auto intro: da.Init) (* For boolean expressions: The following holds: "assignsE e \<subseteq> assigns_if True e \<inter> assigns_if False e" but not vice versa: "assigns_if True e \<inter> assigns_if False e \<subseteq> assignsE e" Example: e = ((x < 5) || (y = true)) && (y = true) = ( a || b ) && c assigns_if True a = {} assigns_if False a = {} assigns_if True b = {y} assigns_if False b = {y} assigns_if True c = {y} assigns_if False c = {y} assigns_if True (a || b) = assigns_if True a \<inter> (assigns_if False a \<union> assigns_if True b) = {} \<inter> ({} \<union> {y}) = {} assigns_if False (a || b) = assigns_if False a \<union> assigns_if False b = {} \<union> {y} = {y} assigns_ifE True e = assigns_if True (a || b) \<union> assigns_if True c = {} \<union> {y} = {y} assigns_ifE False e = assigns_if False (a || b) \<inter> (assigns_if True (a || b) \<union> assigns_if False c) = {y} \<inter> ({} \<union> {y}) = {y} assignsE e = {} *) lemma assignsE_subseteq_assigns_ifs: assumes boolEx: "E\<turnstile>e\<Colon>-PrimT Boolean" (is "?Boolean e") shows "assignsE e \<subseteq> assigns_if True e \<inter> assigns_if False e" (is "?Incl e") proof - obtain vv where ex_lit: "E\<turnstile>Lit vv\<Colon>- PrimT Boolean" using typeof.simps(2) wt.Lit by blast have "?Boolean e \<Longrightarrow> ?Incl e" proof (induct e) case (Cast T e) have "E\<turnstile>e\<Colon>- (PrimT Boolean)" proof - from \<open>E\<turnstile>(Cast T e)\<Colon>- (PrimT Boolean)\<close> obtain Te where "E\<turnstile>e\<Colon>-Te" "prg E\<turnstile>Te\<preceq>? PrimT Boolean" by cases simp thus ?thesis by - (drule cast_Boolean2,simp) qed with Cast.hyps show ?case by simp next case (Lit val) thus ?case by - (erule wt_elim_cases, cases "val", auto simp add: empty_dt_def) next case (UnOp unop e) thus ?case by - (erule wt_elim_cases,cases unop, (fastforce simp add: assignsE_const_simp)+) next case (BinOp binop e1 e2) from BinOp.prems obtain e1T e2T where "E\<turnstile>e1\<Colon>-e1T" and "E\<turnstile>e2\<Colon>-e2T" and "wt_binop (prg E) binop e1T e2T" and "(binop_type binop) = Boolean" by (elim wt_elim_cases) simp with BinOp.hyps show ?case by - (cases binop, auto simp add: assignsE_const_simp) next case (Cond c e1 e2) note hyp_c = \<open>?Boolean c \<Longrightarrow> ?Incl c\<close> note hyp_e1 = \<open>?Boolean e1 \<Longrightarrow> ?Incl e1\<close> note hyp_e2 = \<open>?Boolean e2 \<Longrightarrow> ?Incl e2\<close> note wt = \<open>E\<turnstile>(c ? 
e1 : e2)\<Colon>-PrimT Boolean\<close> then obtain boolean_c: "E\<turnstile>c\<Colon>-PrimT Boolean" and boolean_e1: "E\<turnstile>e1\<Colon>-PrimT Boolean" and boolean_e2: "E\<turnstile>e2\<Colon>-PrimT Boolean" by (elim wt_elim_cases) (auto dest: widen_Boolean2) show ?case proof (cases "constVal c") case None with boolean_e1 boolean_e2 show ?thesis using hyp_e1 hyp_e2 by (auto) next case (Some bv) show ?thesis proof (cases "the_Bool bv") case True with Some show ?thesis using hyp_e1 boolean_e1 by auto next case False with Some show ?thesis using hyp_e2 boolean_e2 by auto qed qed next show "\<And>x. E\<turnstile>Lit vv\<Colon>-PrimT Boolean" by (rule ex_lit) qed (simp_all add: ex_lit) with boolEx show ?thesis by blast qed (* Trick: If you have a rule with the abstract term injections: e.g: da.Skip "B \<guillemotright>\<langle>Skip\<rangle>\<guillemotright> A" and the current goal state as an concrete injection: e.g: "B \<guillemotright>In1r Skip\<guillemotright> A" you can convert the rule by: da.Skip [simplified] if inj_term_simps is in the simpset *) lemma rmlab_same_label [simp]: "(rmlab l A) l = UNIV" by (simp add: rmlab_def) lemma rmlab_same_label1 [simp]: "l=l' \<Longrightarrow> (rmlab l A) l' = UNIV" by (simp add: rmlab_def) lemma rmlab_other_label [simp]: "l\<noteq>l'\<Longrightarrow> (rmlab l A) l' = A l'" by (auto simp add: rmlab_def) lemma range_inter_ts_subseteq [intro]: "\<forall> k. A k \<subseteq> B k \<Longrightarrow> \<Rightarrow>\<Inter>A \<subseteq> \<Rightarrow>\<Inter>B" by (auto simp add: range_inter_ts_def) lemma range_inter_ts_subseteq': "\<forall> k. A k \<subseteq> B k \<Longrightarrow> x \<in> \<Rightarrow>\<Inter>A \<Longrightarrow> x \<in> \<Rightarrow>\<Inter>B" by (auto simp add: range_inter_ts_def) lemma da_monotone: assumes da: "Env\<turnstile> B \<guillemotright>t\<guillemotright> A" and "B \<subseteq> B'" and da': "Env\<turnstile> B' \<guillemotright>t\<guillemotright> A'" shows "(nrm A \<subseteq> nrm A') \<and> (\<forall> l. (brk A l \<subseteq> brk A' l))" proof - from da have "\<And> B' A'. \<lbrakk>Env\<turnstile> B' \<guillemotright>t\<guillemotright> A'; B \<subseteq> B'\<rbrakk> \<Longrightarrow> (nrm A \<subseteq> nrm A') \<and> (\<forall> l. (brk A l \<subseteq> brk A' l))" (is "PROP ?Hyp Env B t A") proof (induct) case Skip then show ?case by cases simp next case Expr from Expr.prems Expr.hyps show ?case by cases simp next case (Lab Env B c C A l B' A') note A = \<open>nrm A = nrm C \<inter> brk C l\<close> \<open>brk A = rmlab l (brk C)\<close> note \<open>PROP ?Hyp Env B \<langle>c\<rangle> C\<close> moreover note \<open>B \<subseteq> B'\<close> moreover obtain C' where "Env\<turnstile> B' \<guillemotright>\<langle>c\<rangle>\<guillemotright> C'" and A': "nrm A' = nrm C' \<inter> brk C' l" "brk A' = rmlab l (brk C')" using Lab.prems by cases simp ultimately have "nrm C \<subseteq> nrm C'" and hyp_brk: "(\<forall>l. 
brk C l \<subseteq> brk C' l)" by auto then have "nrm C \<inter> brk C l \<subseteq> nrm C' \<inter> brk C' l" by auto moreover { fix l' from hyp_brk have "rmlab l (brk C) l' \<subseteq> rmlab l (brk C') l'" by (cases "l=l'") simp_all } moreover note A A' ultimately show ?case by simp next case (Comp Env B c1 C1 c2 C2 A B' A') note A = \<open>nrm A = nrm C2\<close> \<open>brk A = brk C1 \<Rightarrow>\<inter> brk C2\<close> from \<open>Env\<turnstile> B' \<guillemotright>\<langle>c1;; c2\<rangle>\<guillemotright> A'\<close> obtain C1' C2' where da_c1: "Env\<turnstile> B' \<guillemotright>\<langle>c1\<rangle>\<guillemotright> C1'" and da_c2: "Env\<turnstile> nrm C1' \<guillemotright>\<langle>c2\<rangle>\<guillemotright> C2'" and A': "nrm A' = nrm C2'" "brk A' = brk C1' \<Rightarrow>\<inter> brk C2'" by cases auto note \<open>PROP ?Hyp Env B \<langle>c1\<rangle> C1\<close> moreover note \<open>B \<subseteq> B'\<close> moreover note da_c1 ultimately have C1': "nrm C1 \<subseteq> nrm C1'" "(\<forall>l. brk C1 l \<subseteq> brk C1' l)" by auto note \<open>PROP ?Hyp Env (nrm C1) \<langle>c2\<rangle> C2\<close> with da_c2 C1' have C2': "nrm C2 \<subseteq> nrm C2'" "(\<forall>l. brk C2 l \<subseteq> brk C2' l)" by auto with A A' C1' show ?case by auto next case (If Env B e E c1 C1 c2 C2 A B' A') note A = \<open>nrm A = nrm C1 \<inter> nrm C2\<close> \<open>brk A = brk C1 \<Rightarrow>\<inter> brk C2\<close> from \<open>Env\<turnstile> B' \<guillemotright>\<langle>If(e) c1 Else c2\<rangle>\<guillemotright> A'\<close> obtain C1' C2' where da_c1: "Env\<turnstile> B' \<union> assigns_if True e \<guillemotright>\<langle>c1\<rangle>\<guillemotright> C1'" and da_c2: "Env\<turnstile> B' \<union> assigns_if False e \<guillemotright>\<langle>c2\<rangle>\<guillemotright> C2'" and A': "nrm A' = nrm C1' \<inter> nrm C2'" "brk A' = brk C1' \<Rightarrow>\<inter> brk C2'" by cases auto note \<open>PROP ?Hyp Env (B \<union> assigns_if True e) \<langle>c1\<rangle> C1\<close> moreover note B' = \<open>B \<subseteq> B'\<close> moreover note da_c1 ultimately obtain C1': "nrm C1 \<subseteq> nrm C1'" "(\<forall>l. brk C1 l \<subseteq> brk C1' l)" by blast note \<open>PROP ?Hyp Env (B \<union> assigns_if False e) \<langle>c2\<rangle> C2\<close> with da_c2 B' obtain C2': "nrm C2 \<subseteq> nrm C2'" "(\<forall>l. brk C2 l \<subseteq> brk C2' l)" by blast with A A' C1' show ?case by auto next case (Loop Env B e E c C A l B' A') note A = \<open>nrm A = nrm C \<inter> (B \<union> assigns_if False e)\<close> \<open>brk A = brk C\<close> from \<open>Env\<turnstile> B' \<guillemotright>\<langle>l\<bullet> While(e) c\<rangle>\<guillemotright> A'\<close> obtain C' where da_c': "Env\<turnstile> B' \<union> assigns_if True e \<guillemotright>\<langle>c\<rangle>\<guillemotright> C'" and A': "nrm A' = nrm C' \<inter> (B' \<union> assigns_if False e)" "brk A' = brk C'" by cases auto note \<open>PROP ?Hyp Env (B \<union> assigns_if True e) \<langle>c\<rangle> C\<close> moreover note B' = \<open>B \<subseteq> B'\<close> moreover note da_c' ultimately obtain C': "nrm C \<subseteq> nrm C'" "(\<forall>l. 
brk C l \<subseteq> brk C' l)" by blast with A A' B' have "nrm A \<subseteq> nrm A'" by blast moreover { fix l' have "brk A l' \<subseteq> brk A' l'" proof (cases "constVal e") case None with A A' C' show ?thesis by (cases "l=l'") auto next case (Some bv) with A A' C' show ?thesis by (cases "the_Bool bv", cases "l=l'") auto qed } ultimately show ?case by auto next case (Jmp jump B A Env B' A') thus ?case by (elim da_elim_cases) (auto split: jump.splits) next case Throw thus ?case by (elim da_elim_cases) auto next case (Try Env B c1 C1 vn C c2 C2 A B' A') note A = \<open>nrm A = nrm C1 \<inter> nrm C2\<close> \<open>brk A = brk C1 \<Rightarrow>\<inter> brk C2\<close> from \<open>Env\<turnstile> B' \<guillemotright>\<langle>Try c1 Catch(C vn) c2\<rangle>\<guillemotright> A'\<close> obtain C1' C2' where da_c1': "Env\<turnstile> B' \<guillemotright>\<langle>c1\<rangle>\<guillemotright> C1'" and da_c2': "Env\<lparr>lcl := (lcl Env)(VName vn\<mapsto>Class C)\<rparr>\<turnstile> B' \<union> {VName vn} \<guillemotright>\<langle>c2\<rangle>\<guillemotright> C2'" and A': "nrm A' = nrm C1' \<inter> nrm C2'" "brk A' = brk C1' \<Rightarrow>\<inter> brk C2'" by cases auto note \<open>PROP ?Hyp Env B \<langle>c1\<rangle> C1\<close> moreover note B' = \<open>B \<subseteq> B'\<close> moreover note da_c1' ultimately obtain C1': "nrm C1 \<subseteq> nrm C1'" "(\<forall>l. brk C1 l \<subseteq> brk C1' l)" by blast note \<open>PROP ?Hyp (Env\<lparr>lcl := (lcl Env)(VName vn\<mapsto>Class C)\<rparr>) (B \<union> {VName vn}) \<langle>c2\<rangle> C2\<close> with B' da_c2' obtain "nrm C2 \<subseteq> nrm C2'" "(\<forall>l. brk C2 l \<subseteq> brk C2' l)" by blast with C1' A A' show ?case by auto next case (Fin Env B c1 C1 c2 C2 A B' A') note A = \<open>nrm A = nrm C1 \<union> nrm C2\<close> \<open>brk A = (brk C1 \<Rightarrow>\<union>\<^sub>\<forall> nrm C2) \<Rightarrow>\<inter> (brk C2)\<close> from \<open>Env\<turnstile> B' \<guillemotright>\<langle>c1 Finally c2\<rangle>\<guillemotright> A'\<close> obtain C1' C2' where da_c1': "Env\<turnstile> B' \<guillemotright>\<langle>c1\<rangle>\<guillemotright> C1'" and da_c2': "Env\<turnstile> B' \<guillemotright>\<langle>c2\<rangle>\<guillemotright> C2'" and A': "nrm A' = nrm C1' \<union> nrm C2'" "brk A' = (brk C1' \<Rightarrow>\<union>\<^sub>\<forall> nrm C2') \<Rightarrow>\<inter> (brk C2')" by cases auto note \<open>PROP ?Hyp Env B \<langle>c1\<rangle> C1\<close> moreover note B' = \<open>B \<subseteq> B'\<close> moreover note da_c1' ultimately obtain C1': "nrm C1 \<subseteq> nrm C1'" "(\<forall>l. brk C1 l \<subseteq> brk C1' l)" by blast note hyp_c2 = \<open>PROP ?Hyp Env B \<langle>c2\<rangle> C2\<close> from da_c2' B' obtain "nrm C2 \<subseteq> nrm C2'" "(\<forall>l. brk C2 l \<subseteq> brk C2' l)" by - (drule hyp_c2,auto) with A A' C1' show ?case by auto next case Init thus ?case by (elim da_elim_cases) auto next case NewC thus ?case by (elim da_elim_cases) auto next case NewA thus ?case by (elim da_elim_cases) auto next case Cast thus ?case by (elim da_elim_cases) auto next case Inst thus ?case by (elim da_elim_cases) auto next case Lit thus ?case by (elim da_elim_cases) auto next case UnOp thus ?case by (elim da_elim_cases) auto next case (CondAnd Env B e1 E1 e2 E2 A B' A') note A = \<open>nrm A = B \<union> assigns_if True (BinOp CondAnd e1 e2) \<inter> assigns_if False (BinOp CondAnd e1 e2)\<close> \<open>brk A = (\<lambda>l. 
UNIV)\<close> from \<open>Env\<turnstile> B' \<guillemotright>\<langle>BinOp CondAnd e1 e2\<rangle>\<guillemotright> A'\<close> obtain A': "nrm A' = B' \<union> assigns_if True (BinOp CondAnd e1 e2) \<inter> assigns_if False (BinOp CondAnd e1 e2)" "brk A' = (\<lambda>l. UNIV)" by cases auto note B' = \<open>B \<subseteq> B'\<close> with A A' show ?case by auto next case CondOr thus ?case by (elim da_elim_cases) auto next case BinOp thus ?case by (elim da_elim_cases) auto next case Super thus ?case by (elim da_elim_cases) auto next case AccLVar thus ?case by (elim da_elim_cases) auto next case Acc thus ?case by (elim da_elim_cases) auto next case AssLVar thus ?case by (elim da_elim_cases) auto next case Ass thus ?case by (elim da_elim_cases) auto next case (CondBool Env c e1 e2 B C E1 E2 A B' A') note A = \<open>nrm A = B \<union> assigns_if True (c ? e1 : e2) \<inter> assigns_if False (c ? e1 : e2)\<close> \<open>brk A = (\<lambda>l. UNIV)\<close> note \<open>Env\<turnstile> (c ? e1 : e2)\<Colon>- (PrimT Boolean)\<close> moreover note \<open>Env\<turnstile> B' \<guillemotright>\<langle>c ? e1 : e2\<rangle>\<guillemotright> A'\<close> ultimately obtain A': "nrm A' = B' \<union> assigns_if True (c ? e1 : e2) \<inter> assigns_if False (c ? e1 : e2)" "brk A' = (\<lambda>l. UNIV)" by (elim da_elim_cases) (auto simp add: inj_term_simps) (* inj_term_simps needed to handle wt (defined without \<langle>\<rangle>) *) note B' = \<open>B \<subseteq> B'\<close> with A A' show ?case by auto next case (Cond Env c e1 e2 B C E1 E2 A B' A') note A = \<open>nrm A = nrm E1 \<inter> nrm E2\<close> \<open>brk A = (\<lambda>l. UNIV)\<close> note not_bool = \<open>\<not> Env\<turnstile> (c ? e1 : e2)\<Colon>- (PrimT Boolean)\<close> from \<open>Env\<turnstile> B' \<guillemotright>\<langle>c ? e1 : e2\<rangle>\<guillemotright> A'\<close> obtain E1' E2' where da_e1': "Env\<turnstile> B' \<union> assigns_if True c \<guillemotright>\<langle>e1\<rangle>\<guillemotright> E1'" and da_e2': "Env\<turnstile> B' \<union> assigns_if False c \<guillemotright>\<langle>e2\<rangle>\<guillemotright> E2'" and A': "nrm A' = nrm E1' \<inter> nrm E2'" "brk A' = (\<lambda>l. UNIV)" using not_bool by (elim da_elim_cases) (auto simp add: inj_term_simps) (* inj_term_simps needed to handle wt (defined without \<langle>\<rangle>) *) note \<open>PROP ?Hyp Env (B \<union> assigns_if True c) \<langle>e1\<rangle> E1\<close> moreover note B' = \<open>B \<subseteq> B'\<close> moreover note da_e1' ultimately obtain E1': "nrm E1 \<subseteq> nrm E1'" "(\<forall>l. brk E1 l \<subseteq> brk E1' l)" by blast note \<open>PROP ?Hyp Env (B \<union> assigns_if False c) \<langle>e2\<rangle> E2\<close> with B' da_e2' obtain "nrm E2 \<subseteq> nrm E2'" "(\<forall>l. brk E2 l \<subseteq> brk E2' l)" by blast with E1' A A' show ?case by auto next case Call from Call.prems and Call.hyps show ?case by cases auto next case Methd thus ?case by (elim da_elim_cases) auto next case Body thus ?case by (elim da_elim_cases) auto next case LVar thus ?case by (elim da_elim_cases) auto next case FVar thus ?case by (elim da_elim_cases) auto next case AVar thus ?case by (elim da_elim_cases) auto next case Nil thus ?case by (elim da_elim_cases) auto next case Cons thus ?case by (elim da_elim_cases) auto qed from this [OF da' \<open>B \<subseteq> B'\<close>] show ?thesis . qed lemma da_weaken: assumes da: "Env\<turnstile> B \<guillemotright>t\<guillemotright> A" and "B \<subseteq> B'" shows "\<exists> A'. 
Env \<turnstile> B' \<guillemotright>t\<guillemotright> A'" proof - note assigned.select_convs [Pure.intro] from da have "\<And> B'. B \<subseteq> B' \<Longrightarrow> \<exists> A'. Env\<turnstile> B' \<guillemotright>t\<guillemotright> A'" (is "PROP ?Hyp Env B t") proof (induct) case Skip thus ?case by (iprover intro: da.Skip) next case Expr thus ?case by (iprover intro: da.Expr) next case (Lab Env B c C A l B') note \<open>PROP ?Hyp Env B \<langle>c\<rangle>\<close> moreover note B' = \<open>B \<subseteq> B'\<close> ultimately obtain C' where "Env\<turnstile> B' \<guillemotright>\<langle>c\<rangle>\<guillemotright> C'" by iprover then obtain A' where "Env\<turnstile> B' \<guillemotright>\<langle>Break l\<bullet> c\<rangle>\<guillemotright> A'" by (iprover intro: da.Lab) thus ?case .. next case (Comp Env B c1 C1 c2 C2 A B') note da_c1 = \<open>Env\<turnstile> B \<guillemotright>\<langle>c1\<rangle>\<guillemotright> C1\<close> note \<open>PROP ?Hyp Env B \<langle>c1\<rangle>\<close> moreover note B' = \<open>B \<subseteq> B'\<close> ultimately obtain C1' where da_c1': "Env\<turnstile> B' \<guillemotright>\<langle>c1\<rangle>\<guillemotright> C1'" by iprover with da_c1 B' have "nrm C1 \<subseteq> nrm C1'" by (rule da_monotone [elim_format]) simp moreover note \<open>PROP ?Hyp Env (nrm C1) \<langle>c2\<rangle>\<close> ultimately obtain C2' where "Env\<turnstile> nrm C1' \<guillemotright>\<langle>c2\<rangle>\<guillemotright> C2'" by iprover with da_c1' obtain A' where "Env\<turnstile> B' \<guillemotright>\<langle>c1;; c2\<rangle>\<guillemotright> A'" by (iprover intro: da.Comp) thus ?case .. next case (If Env B e E c1 C1 c2 C2 A B') note B' = \<open>B \<subseteq> B'\<close> obtain E' where "Env\<turnstile> B' \<guillemotright>\<langle>e\<rangle>\<guillemotright> E'" proof - have "PROP ?Hyp Env B \<langle>e\<rangle>" by (rule If.hyps) with B' show ?thesis using that by iprover qed moreover obtain C1' where "Env\<turnstile> (B' \<union> assigns_if True e) \<guillemotright>\<langle>c1\<rangle>\<guillemotright> C1'" proof - from B' have "(B \<union> assigns_if True e) \<subseteq> (B' \<union> assigns_if True e)" by blast moreover have "PROP ?Hyp Env (B \<union> assigns_if True e) \<langle>c1\<rangle>" by (rule If.hyps) ultimately show ?thesis using that by iprover qed moreover obtain C2' where "Env\<turnstile> (B' \<union> assigns_if False e) \<guillemotright>\<langle>c2\<rangle>\<guillemotright> C2'" proof - from B' have "(B \<union> assigns_if False e) \<subseteq> (B' \<union> assigns_if False e)" by blast moreover have "PROP ?Hyp Env (B \<union> assigns_if False e) \<langle>c2\<rangle>" by (rule If.hyps) ultimately show ?thesis using that by iprover qed ultimately obtain A' where "Env\<turnstile> B' \<guillemotright>\<langle>If(e) c1 Else c2\<rangle>\<guillemotright> A'" by (iprover intro: da.If) thus ?case .. 
next case (Loop Env B e E c C A l B') note B' = \<open>B \<subseteq> B'\<close> obtain E' where "Env\<turnstile> B' \<guillemotright>\<langle>e\<rangle>\<guillemotright> E'" proof - have "PROP ?Hyp Env B \<langle>e\<rangle>" by (rule Loop.hyps) with B' show ?thesis using that by iprover qed moreover obtain C' where "Env\<turnstile> (B' \<union> assigns_if True e) \<guillemotright>\<langle>c\<rangle>\<guillemotright> C'" proof - from B' have "(B \<union> assigns_if True e) \<subseteq> (B' \<union> assigns_if True e)" by blast moreover have "PROP ?Hyp Env (B \<union> assigns_if True e) \<langle>c\<rangle>" by (rule Loop.hyps) ultimately show ?thesis using that by iprover qed ultimately obtain A' where "Env\<turnstile> B' \<guillemotright>\<langle>l\<bullet> While(e) c\<rangle>\<guillemotright> A'" by (iprover intro: da.Loop ) thus ?case .. next case (Jmp jump B A Env B') note B' = \<open>B \<subseteq> B'\<close> with Jmp.hyps have "jump = Ret \<longrightarrow> Result \<in> B' " by auto moreover obtain A'::assigned where "nrm A' = UNIV" "brk A' = (case jump of Break l \<Rightarrow> \<lambda>k. if k = l then B' else UNIV | Cont l \<Rightarrow> \<lambda>k. UNIV | Ret \<Rightarrow> \<lambda>k. UNIV)" by iprover ultimately have "Env\<turnstile> B' \<guillemotright>\<langle>Jmp jump\<rangle>\<guillemotright> A'" by (rule da.Jmp) thus ?case .. next case Throw thus ?case by (iprover intro: da.Throw ) next case (Try Env B c1 C1 vn C c2 C2 A B') note B' = \<open>B \<subseteq> B'\<close> obtain C1' where "Env\<turnstile> B' \<guillemotright>\<langle>c1\<rangle>\<guillemotright> C1'" proof - have "PROP ?Hyp Env B \<langle>c1\<rangle>" by (rule Try.hyps) with B' show ?thesis using that by iprover qed moreover obtain C2' where "Env\<lparr>lcl := (lcl Env)(VName vn\<mapsto>Class C)\<rparr>\<turnstile> B' \<union> {VName vn} \<guillemotright>\<langle>c2\<rangle>\<guillemotright> C2'" proof - from B' have "B \<union> {VName vn} \<subseteq> B' \<union> {VName vn}" by blast moreover have "PROP ?Hyp (Env\<lparr>lcl := (lcl Env)(VName vn\<mapsto>Class C)\<rparr>) (B \<union> {VName vn}) \<langle>c2\<rangle>" by (rule Try.hyps) ultimately show ?thesis using that by iprover qed ultimately obtain A' where "Env\<turnstile> B' \<guillemotright>\<langle>Try c1 Catch(C vn) c2\<rangle>\<guillemotright> A'" by (iprover intro: da.Try ) thus ?case .. next case (Fin Env B c1 C1 c2 C2 A B') note B' = \<open>B \<subseteq> B'\<close> obtain C1' where C1': "Env\<turnstile> B' \<guillemotright>\<langle>c1\<rangle>\<guillemotright> C1'" proof - have "PROP ?Hyp Env B \<langle>c1\<rangle>" by (rule Fin.hyps) with B' show ?thesis using that by iprover qed moreover obtain C2' where "Env\<turnstile> B' \<guillemotright>\<langle>c2\<rangle>\<guillemotright> C2'" proof - have "PROP ?Hyp Env B \<langle>c2\<rangle>" by (rule Fin.hyps) with B' show ?thesis using that by iprover qed ultimately obtain A' where "Env\<turnstile> B' \<guillemotright>\<langle>c1 Finally c2\<rangle>\<guillemotright> A'" by (iprover intro: da.Fin ) thus ?case .. 
next case Init thus ?case by (iprover intro: da.Init) next case NewC thus ?case by (iprover intro: da.NewC) next case NewA thus ?case by (iprover intro: da.NewA) next case Cast thus ?case by (iprover intro: da.Cast) next case Inst thus ?case by (iprover intro: da.Inst) next case Lit thus ?case by (iprover intro: da.Lit) next case UnOp thus ?case by (iprover intro: da.UnOp) next case (CondAnd Env B e1 E1 e2 E2 A B') note B' = \<open>B \<subseteq> B'\<close> obtain E1' where "Env\<turnstile> B' \<guillemotright>\<langle>e1\<rangle>\<guillemotright> E1'" proof - have "PROP ?Hyp Env B \<langle>e1\<rangle>" by (rule CondAnd.hyps) with B' show ?thesis using that by iprover qed moreover obtain E2' where "Env\<turnstile> B' \<union> assigns_if True e1 \<guillemotright>\<langle>e2\<rangle>\<guillemotright> E2'" proof - from B' have "B \<union> assigns_if True e1 \<subseteq> B' \<union> assigns_if True e1" by blast moreover have "PROP ?Hyp Env (B \<union> assigns_if True e1) \<langle>e2\<rangle>" by (rule CondAnd.hyps) ultimately show ?thesis using that by iprover qed ultimately obtain A' where "Env\<turnstile> B' \<guillemotright>\<langle>BinOp CondAnd e1 e2\<rangle>\<guillemotright> A'" by (iprover intro: da.CondAnd) thus ?case .. next case (CondOr Env B e1 E1 e2 E2 A B') note B' = \<open>B \<subseteq> B'\<close> obtain E1' where "Env\<turnstile> B' \<guillemotright>\<langle>e1\<rangle>\<guillemotright> E1'" proof - have "PROP ?Hyp Env B \<langle>e1\<rangle>" by (rule CondOr.hyps) with B' show ?thesis using that by iprover qed moreover obtain E2' where "Env\<turnstile> B' \<union> assigns_if False e1 \<guillemotright>\<langle>e2\<rangle>\<guillemotright> E2'" proof - from B' have "B \<union> assigns_if False e1 \<subseteq> B' \<union> assigns_if False e1" by blast moreover have "PROP ?Hyp Env (B \<union> assigns_if False e1) \<langle>e2\<rangle>" by (rule CondOr.hyps) ultimately show ?thesis using that by iprover qed ultimately obtain A' where "Env\<turnstile> B' \<guillemotright>\<langle>BinOp CondOr e1 e2\<rangle>\<guillemotright> A'" by (iprover intro: da.CondOr) thus ?case .. next case (BinOp Env B e1 E1 e2 A binop B') note B' = \<open>B \<subseteq> B'\<close> obtain E1' where E1': "Env\<turnstile> B' \<guillemotright>\<langle>e1\<rangle>\<guillemotright> E1'" proof - have "PROP ?Hyp Env B \<langle>e1\<rangle>" by (rule BinOp.hyps) with B' show ?thesis using that by iprover qed moreover obtain A' where "Env\<turnstile> nrm E1' \<guillemotright>\<langle>e2\<rangle>\<guillemotright> A'" proof - have "Env\<turnstile> B \<guillemotright>\<langle>e1\<rangle>\<guillemotright> E1" by (rule BinOp.hyps) from this B' E1' have "nrm E1 \<subseteq> nrm E1'" by (rule da_monotone [THEN conjE]) moreover have "PROP ?Hyp Env (nrm E1) \<langle>e2\<rangle>" by (rule BinOp.hyps) ultimately show ?thesis using that by iprover qed ultimately have "Env\<turnstile> B' \<guillemotright>\<langle>BinOp binop e1 e2\<rangle>\<guillemotright> A'" using BinOp.hyps by (iprover intro: da.BinOp) thus ?case .. 
next case (Super B Env B') note B' = \<open>B \<subseteq> B'\<close> with Super.hyps have "This \<in> B'" by auto thus ?case by (iprover intro: da.Super) next case (AccLVar vn B A Env B') note \<open>vn \<in> B\<close> moreover note \<open>B \<subseteq> B'\<close> ultimately have "vn \<in> B'" by auto thus ?case by (iprover intro: da.AccLVar) next case Acc thus ?case by (iprover intro: da.Acc) next case (AssLVar Env B e E A vn B') note B' = \<open>B \<subseteq> B'\<close> then obtain E' where "Env\<turnstile> B' \<guillemotright>\<langle>e\<rangle>\<guillemotright> E'" by (rule AssLVar.hyps [elim_format]) iprover then obtain A' where "Env\<turnstile> B' \<guillemotright>\<langle>LVar vn:=e\<rangle>\<guillemotright> A'" by (iprover intro: da.AssLVar) thus ?case .. next case (Ass v Env B V e A B') note B' = \<open>B \<subseteq> B'\<close> note \<open>\<forall>vn. v \<noteq> LVar vn\<close> moreover obtain V' where V': "Env\<turnstile> B' \<guillemotright>\<langle>v\<rangle>\<guillemotright> V'" proof - have "PROP ?Hyp Env B \<langle>v\<rangle>" by (rule Ass.hyps) with B' show ?thesis using that by iprover qed moreover obtain A' where "Env\<turnstile> nrm V' \<guillemotright>\<langle>e\<rangle>\<guillemotright> A'" proof - have "Env\<turnstile> B \<guillemotright>\<langle>v\<rangle>\<guillemotright> V" by (rule Ass.hyps) from this B' V' have "nrm V \<subseteq> nrm V'" by (rule da_monotone [THEN conjE]) moreover have "PROP ?Hyp Env (nrm V) \<langle>e\<rangle>" by (rule Ass.hyps) ultimately show ?thesis using that by iprover qed ultimately have "Env\<turnstile> B' \<guillemotright>\<langle>v := e\<rangle>\<guillemotright> A'" by (iprover intro: da.Ass) thus ?case .. next case (CondBool Env c e1 e2 B C E1 E2 A B') note B' = \<open>B \<subseteq> B'\<close> note \<open>Env\<turnstile>(c ? e1 : e2)\<Colon>-(PrimT Boolean)\<close> moreover obtain C' where C': "Env\<turnstile> B' \<guillemotright>\<langle>c\<rangle>\<guillemotright> C'" proof - have "PROP ?Hyp Env B \<langle>c\<rangle>" by (rule CondBool.hyps) with B' show ?thesis using that by iprover qed moreover obtain E1' where "Env\<turnstile> B' \<union> assigns_if True c \<guillemotright>\<langle>e1\<rangle>\<guillemotright> E1'" proof - from B' have "(B \<union> assigns_if True c) \<subseteq> (B' \<union> assigns_if True c)" by blast moreover have "PROP ?Hyp Env (B \<union> assigns_if True c) \<langle>e1\<rangle>" by (rule CondBool.hyps) ultimately show ?thesis using that by iprover qed moreover obtain E2' where "Env\<turnstile> B' \<union> assigns_if False c \<guillemotright>\<langle>e2\<rangle>\<guillemotright> E2'" proof - from B' have "(B \<union> assigns_if False c) \<subseteq> (B' \<union> assigns_if False c)" by blast moreover have "PROP ?Hyp Env (B \<union> assigns_if False c) \<langle>e2\<rangle>" by(rule CondBool.hyps) ultimately show ?thesis using that by iprover qed ultimately obtain A' where "Env\<turnstile> B' \<guillemotright>\<langle>c ? e1 : e2\<rangle>\<guillemotright> A'" by (iprover intro: da.CondBool) thus ?case .. next case (Cond Env c e1 e2 B C E1 E2 A B') note B' = \<open>B \<subseteq> B'\<close> note \<open>\<not> Env\<turnstile>(c ? 
e1 : e2)\<Colon>-(PrimT Boolean)\<close> moreover obtain C' where C': "Env\<turnstile> B' \<guillemotright>\<langle>c\<rangle>\<guillemotright> C'" proof - have "PROP ?Hyp Env B \<langle>c\<rangle>" by (rule Cond.hyps) with B' show ?thesis using that by iprover qed moreover obtain E1' where "Env\<turnstile> B' \<union> assigns_if True c \<guillemotright>\<langle>e1\<rangle>\<guillemotright> E1'" proof - from B' have "(B \<union> assigns_if True c) \<subseteq> (B' \<union> assigns_if True c)" by blast moreover have "PROP ?Hyp Env (B \<union> assigns_if True c) \<langle>e1\<rangle>" by (rule Cond.hyps) ultimately show ?thesis using that by iprover qed moreover obtain E2' where "Env\<turnstile> B' \<union> assigns_if False c \<guillemotright>\<langle>e2\<rangle>\<guillemotright> E2'" proof - from B' have "(B \<union> assigns_if False c) \<subseteq> (B' \<union> assigns_if False c)" by blast moreover have "PROP ?Hyp Env (B \<union> assigns_if False c) \<langle>e2\<rangle>" by (rule Cond.hyps) ultimately show ?thesis using that by iprover qed ultimately obtain A' where "Env\<turnstile> B' \<guillemotright>\<langle>c ? e1 : e2\<rangle>\<guillemotright> A'" by (iprover intro: da.Cond) thus ?case .. next case (Call Env B e E args A accC statT mode mn pTs B') note B' = \<open>B \<subseteq> B'\<close> obtain E' where E': "Env\<turnstile> B' \<guillemotright>\<langle>e\<rangle>\<guillemotright> E'" proof - have "PROP ?Hyp Env B \<langle>e\<rangle>" by (rule Call.hyps) with B' show ?thesis using that by iprover qed moreover obtain A' where "Env\<turnstile> nrm E' \<guillemotright>\<langle>args\<rangle>\<guillemotright> A'" proof - have "Env\<turnstile> B \<guillemotright>\<langle>e\<rangle>\<guillemotright> E" by (rule Call.hyps) from this B' E' have "nrm E \<subseteq> nrm E'" by (rule da_monotone [THEN conjE]) moreover have "PROP ?Hyp Env (nrm E) \<langle>args\<rangle>" by (rule Call.hyps) ultimately show ?thesis using that by iprover qed ultimately have "Env\<turnstile> B' \<guillemotright>\<langle>{accC,statT,mode}e\<cdot>mn( {pTs}args)\<rangle>\<guillemotright> A'" by (iprover intro: da.Call) thus ?case .. next case Methd thus ?case by (iprover intro: da.Methd) next case (Body Env B c C A D B') note B' = \<open>B \<subseteq> B'\<close> obtain C' where C': "Env\<turnstile> B' \<guillemotright>\<langle>c\<rangle>\<guillemotright> C'" and nrm_C': "nrm C \<subseteq> nrm C'" proof - have "Env\<turnstile> B \<guillemotright>\<langle>c\<rangle>\<guillemotright> C" by (rule Body.hyps) moreover note B' moreover from B' obtain C' where da_c: "Env\<turnstile> B' \<guillemotright>\<langle>c\<rangle>\<guillemotright> C'" by (rule Body.hyps [elim_format]) blast ultimately have "nrm C \<subseteq> nrm C'" by (rule da_monotone [THEN conjE]) with da_c that show ?thesis by iprover qed moreover note \<open>Result \<in> nrm C\<close> with nrm_C' have "Result \<in> nrm C'" by blast moreover note \<open>jumpNestingOkS {Ret} c\<close> ultimately obtain A' where "Env\<turnstile> B' \<guillemotright>\<langle>Body D c\<rangle>\<guillemotright> A'" by (iprover intro: da.Body) thus ?case .. 
next case LVar thus ?case by (iprover intro: da.LVar)
next case FVar thus ?case by (iprover intro: da.FVar)
next case (AVar Env B e1 E1 e2 A B') note B' = \<open>B \<subseteq> B'\<close> obtain E1' where E1': "Env\<turnstile> B' \<guillemotright>\<langle>e1\<rangle>\<guillemotright> E1'" proof - have "PROP ?Hyp Env B \<langle>e1\<rangle>" by (rule AVar.hyps) with B' show ?thesis using that by iprover qed moreover obtain A' where "Env\<turnstile> nrm E1' \<guillemotright>\<langle>e2\<rangle>\<guillemotright> A'" proof - have "Env\<turnstile> B \<guillemotright>\<langle>e1\<rangle>\<guillemotright> E1" by (rule AVar.hyps) from this B' E1' have "nrm E1 \<subseteq> nrm E1'" by (rule da_monotone [THEN conjE]) moreover have "PROP ?Hyp Env (nrm E1) \<langle>e2\<rangle>" by (rule AVar.hyps) ultimately show ?thesis using that by iprover qed ultimately have "Env\<turnstile> B' \<guillemotright>\<langle>e1.[e2]\<rangle>\<guillemotright> A'" by (iprover intro: da.AVar) thus ?case ..
next case Nil thus ?case by (iprover intro: da.Nil)
next case (Cons Env B e E es A B') note B' = \<open>B \<subseteq> B'\<close> obtain E' where E': "Env\<turnstile> B' \<guillemotright>\<langle>e\<rangle>\<guillemotright> E'" proof - have "PROP ?Hyp Env B \<langle>e\<rangle>" by (rule Cons.hyps) with B' show ?thesis using that by iprover qed moreover obtain A' where "Env\<turnstile> nrm E' \<guillemotright>\<langle>es\<rangle>\<guillemotright> A'" proof - have "Env\<turnstile> B \<guillemotright>\<langle>e\<rangle>\<guillemotright> E" by (rule Cons.hyps) from this B' E' have "nrm E \<subseteq> nrm E'" by (rule da_monotone [THEN conjE]) moreover have "PROP ?Hyp Env (nrm E) \<langle>es\<rangle>" by (rule Cons.hyps) ultimately show ?thesis using that by iprover qed ultimately have "Env\<turnstile> B' \<guillemotright>\<langle>e # es\<rangle>\<guillemotright> A'" by (iprover intro: da.Cons) thus ?case ..
qed from this [OF \<open>B \<subseteq> B'\<close>] show ?thesis . qed
(* Remarks about the proof style:
"by (rule <Case>.hyps)" vs "."
--------------------------
with <Case>.hyps you state more precisely where the rule comes from
. takes all assumptions into account, but looks more "light"
and is more resistant for cut-and-paste proofs in different cases.
"intro: da.intros" vs "da.<Case>"
---------------------------------
The first is more convenient for cut and paste between cases,
the second is more informative for the reader
*)
corollary da_weakenE [consumes 2]: assumes da: "Env\<turnstile> B \<guillemotright>t\<guillemotright> A" and B': "B \<subseteq> B'" and ex_mono: "\<And> A'. \<lbrakk>Env\<turnstile> B' \<guillemotright>t\<guillemotright> A'; nrm A \<subseteq> nrm A'; \<And> l. brk A l \<subseteq> brk A' l\<rbrakk> \<Longrightarrow> P" shows "P" proof - from da B' obtain A' where A': "Env\<turnstile> B' \<guillemotright>t\<guillemotright> A'" by (rule da_weaken [elim_format]) iprover with da B' have "nrm A \<subseteq> nrm A' \<and> (\<forall> l. brk A l \<subseteq> brk A' l)" by (rule da_monotone) with A' ex_mono show ?thesis by iprover qed end
Med School Phone on the Campus Payphones node
Phone Number: (530) 756-9834
Location: On your left side as you walk towards the Health Sciences Bookstore, across from the Health Sciences Library
Description: Your standard MCI campus phone
Charge: $0.50. Remember when phone calls were only 20 cents?
\documentclass[a4paper,12pt]{llncs}
\usepackage{latexsym}
\newcommand\comment[1]{}
\renewcommand\comment[1]{#1} % to print enclosed text
\renewcommand\comment[1]{} % to not print enclosed text
\begin{document}
\title{Formalised Induction Patterns for Admissibility Proofs in Coq}
\author{Jeremy E.\ Dawson\\ etc}
% \date{}
\maketitle
\abstract{ This document describes general reasoning patterns for inductive proofs of properties such as invertibility of rules and contraction admissibility, and specifically describes their implementation in Coq. }
\section{Introduction}
Properties such as weakening admissibility, exchange admissibility, invertibility of certain rules, contraction admissibility, and cut admissibility are generally proved by induction, though there are variations on the exact forms of induction available. For example, one may use induction on the height (or size) of a proof, which can sometimes require that a previously proved property such as weakening admissibility be proved as height-preserving weakening admissibility. Alternatively, one may use induction on the ``proof'' itself, ie assume that a formula or sequent which appears earlier in the proof satisfies the required property. It is useful to have general reasoning patterns for inductive proofs, formalising the variety of manners of induction available.
\section{\texttt{derrec}, \texttt{derl}, \texttt{adm}}\label{s-dda}
We use the framework previously implemented in Isabelle, described in \cite[Appendix A.1]{dawson-gore-gls}, which we recapitulate briefly here, with reference to its implementation in Coq. An inference rule is represented as a list of premises and a conclusion, where the premises and conclusion are of some unspecified type. (For all our illustrations in this work, they will be sequents whose antecedents and succedents are lists of modal formulae.) This suggests using \texttt{rules : list X -> X -> Prop} (as we did initially), but we found that in Coq we could define the height or size of a proof object only if we generally used \texttt{Type} instead of \texttt{Prop}.

The predicates \texttt{derrec} and \texttt{derl} have the following meanings: \texttt{derrec rules prems concl} means that the conclusion \texttt{concl} is derivable from premises (or assumptions) \texttt{prems} using rules \texttt{rules}; \texttt{derl rules ps c} means that there is a derived rule (proof tree) with premises (leaves) \texttt{ps} and conclusion (root) \texttt{c}, constructed using rules \texttt{rules}. Their inductive definitions also require defining \texttt{dersrec} and \texttt{dersl}, which involve a list of conclusions.
The inductive definitions follow:
\begin{verbatim}
Inductive derrec (X : Type) (rules : list X -> X -> Type)
  (prems : X -> Type) : X -> Type :=
    dpI : forall concl : X, prems concl -> derrec rules prems concl
  | derI : forall (ps : list X) (concl : X),
      rules ps concl -> dersrec rules prems ps ->
      derrec rules prems concl
with dersrec (X : Type) (rules : list X -> X -> Type)
  (prems : X -> Type) : list X -> Type :=
    dlNil : dersrec rules prems []
  | dlCons : forall (seq : X) (seqs : list X),
      derrec rules prems seq -> dersrec rules prems seqs ->
      dersrec rules prems (seq :: seqs)

Inductive derl (X : Type) (rules : list X -> X -> Type)
  : list X -> X -> Type :=
    asmI : forall p : X, derl rules [p] p
  | dtderI : forall (pss ps : list X) (concl : X),
      rules ps concl -> dersl rules pss ps -> derl rules pss concl
with dersl (X : Type) (rules : list X -> X -> Type)
  : list X -> list X -> Type :=
    dtNil : dersl rules [] []
  | dtCons : forall (ps : list X) (c : X) (pss cs : list X),
      derl rules ps c -> dersl rules pss cs ->
      dersl rules (ps ++ pss) (c :: cs)
\end{verbatim}
We get useful results like the following:
\begin{verbatim}
derl_derrec_trans : forall (X : Type) (rules : list X -> X -> Type)
  (prems : X -> Type) (rps : list X) (concl : X),
  derl rules rps concl -> dersrec rules prems rps ->
  derrec rules prems concl

derrec_derl_deriv : forall (X : Type) (rules : list X -> X -> Type)
  (prems : X -> Type) (concl : X),
  derrec (derl rules) prems concl -> derrec rules prems concl

derl_deriv': forall (X : Type) (rules : list X -> X -> Type)
  (prems : list X) (concl : X),
  derl (derl rules) prems concl -> derl rules prems concl

derrec_derrec : forall (X : Type) (rules : list X -> X -> Type)
  (prems : X -> Type) (c : X),
  derrec rules (derrec rules prems) c -> derrec rules prems c
\end{verbatim}
We also define \emph{admissible} rules: a rule is \emph{admissible} if, assuming its premises are derivable (from no assumptions), its conclusion is likewise derivable.
\begin{verbatim}
Inductive adm (X : Type) (rules : list X -> X -> Type)
  (ps : list X) (c : X) : Type :=
    admI : (dersrec rules (emptyT (X:=X)) ps ->
            derrec rules (emptyT (X:=X)) c) -> adm rules ps c
\end{verbatim}
Note that \texttt{adm}, unlike \texttt{derl}, is not monotonic in the rules used. From this we get the following results:
\begin{verbatim}
adm_adm: forall (X : Type) (rules : list X -> X -> Type)
  (ps : list X) (c : X), adm (adm rules) ps c -> adm rules ps c

derl_sub_adm: forall (X : Type) (rules : list X -> X -> Type)
  (ps : list X) (c : X), derl rules ps c -> adm rules ps c

derl_adm: forall (X : Type) (rules : list X -> X -> Type)
  (ps : list X) (c : X), derl (adm rules) ps c -> adm rules ps c

derrec_adm: forall (X : Type) (rls : list X -> X -> Type) (c : X),
  derrec (adm rls) (emptyT (X:=X)) c -> derrec rls (emptyT (X:=X)) c

TODO - adm (derl rules) vs adm rules
\end{verbatim}
\section{Our induction patterns}
In previous work we defined a general lemma which enables proof of a property of a pair of proved sequents, notably cut-admissibility. That general lemma, simplified, applies also to proving similar properties of a single proved sequent, such as contraction admissibility, or invertibility of a particular rule. A particular problem with admissibility proofs is that when, for example, one logic is obtained from another by adding additional rules, and the steps in proving an admissibility result are similar for the rules common to both logics, it nevertheless requires extra effort to formalise the proofs in a way which enables the similar parts of the proofs to be reused.
Our focus here is to set out induction patterns and relevant lemmas useful for inductive proofs of admissibility properties, which permit the relevant portions of the proof to be re-used for other calculi containing some of the same rules.
\comment{ NB - it seems I did something pretty similar in Isabelle, I think not written up, unless it's in the stuff we did with Jesse, in ~jeremy/isabelle/2005/seqms/ctr.thy see the definition of \verb|inv_stepm| as follows
\begin{verbatim}
primrec (* step in invertibility ; means that, if premises (ps) derivable,
  and can apply (invs_of irls) to premises, then can apply
  (invs_of irls) to conclusion (concl) *)
  "inv_step derivs irls (ps, concl) =
    (set ps <= derivs -->
      (ALL p : set ps. invs_of irls p <= derivs) -->
      invs_of irls concl <= derivs)"
primrec (* variation on inv_step which will often be true and monotonic,
  whereby the inversions of the conclusion are derivable from
  the inversions of the premises *)
  "inv_stepm drls irls (ps, concl) =
    (invs_of irls concl <= derrec drls
      (set ps Un UNION (set ps) (invs_of irls)))"
\end{verbatim}
(here, \verb|invs_of irls| means the result of changing a sequent by inverting some rule in irls) }
The simplest example of this is probably substitution admissibility: for example, to show that if $A \land B \vdash A$ is derivable, then $C \land D \vdash C$ is derivable. The proof of this is obvious: you just go through the proof, changing every $A$ to $C$ and every $B$ to $D$. Even this example raises two points about the formal calculus used: each individual rule must permit these substitutions; in particular, the ``axiom'' rule must be $A \vdash A$ (for any formula $A$), rather than $p \vdash p$ (for any primitive proposition $p$). Note that in describing the implementation, the word ``rule'' implies specific premises and conclusion, for example, one of the substitution instances of a ``rule'' as in Figure~\ref{k4-rules}.

So we formalise this argument: we define a relation $R$ between sequents: for example, $R~s_1~s_2$ could mean that $s_2$ is obtained from $s_1$ by changing all occurrences of $A$ to $C$, and all $B$ to $D$ (or, applying any substitution uniformly). Then the relevant requirement on the set of rules is that for any rule $\displaystyle \frac{P_1 \ldots P_n} {Q}$, and for any $Q'$ such that $R~Q~Q'$, there exist modified premises ${P'_1 \ldots P'_n}$, each related by $R$ to some premise $P_i$, and a rule $\displaystyle \frac {P'_1 \ldots P'_n} {Q'}$. This obviously permits changing $A$ to $C$ and $B$ to $D$ throughout the entire proof. So we define \texttt{can\_trf\_rules} to express this property of a set of rules, and the theorem \texttt{der\_trf} expresses the use of that property.
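To fix intuitions before the formal definition, the substitution relation $R$ of this example might be rendered in Coq roughly as follows (a minimal sketch: the formula type, the fixed substitution \texttt{subst} and the pair representation of sequents are illustrative assumptions, not part of our development):
\begin{verbatim}
(* Hedged sketch of the relation R for the substitution example.
   [formula] and [subst] are assumed parameters, not our code. *)
Require Import List.
Import ListNotations.

Parameter formula : Type.
Parameter subst : formula -> formula. (* a fixed uniform substitution *)

(* a sequent: antecedent and succedent lists *)
Definition seq : Type := (list formula * list formula)%type.

(* subR s s' : s' is s with the substitution applied throughout *)
Inductive subR : seq -> seq -> Type :=
  subRI : forall G D, subR (G, D) (map subst G, map subst D).
\end{verbatim}
With \texttt{subR} as the relation $R$, the per-rule obligation \texttt{can\_trf\_rules} (defined next) just says that each rule remains a rule after the substitution is applied throughout.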
\begin{definition}[\texttt{can-trf-rules}]\label{def-can-trf-rules}
text of defn
\end{definition}
\begin{theorem}[\texttt{der-trf}] \label{t-der-trf}
text of thm
\end{theorem}
\begin{verbatim}
can_trf_rules = fun (sty : Type) (R : sty -> sty -> Type)
  (rules : list sty -> sty -> Type) (ps : list sty) (c : sty) =>
  forall c' : sty, R c c' ->
    {ps' : list sty & rules ps' c' *
      ForallT (fun p' : sty => {p : sty & InT p ps * R p p'}) ps'}
  : forall sty : Type, (sty -> sty -> Type) ->
      (list sty -> sty -> Type) -> list sty -> sty -> Type

der_trf : forall (sty : Type) (R : sty -> sty -> Type)
  (rules : list sty -> sty -> Type),
  (forall (ps : list sty) (c : sty),
    rules ps c -> can_trf_rules R rules ps c) ->
  forall concl : sty, derrec rules (emptyT (X:=sty)) concl ->
  forall concl' : sty, R concl concl' ->
    derrec rules (emptyT (X:=sty)) concl'
\end{verbatim}
\subsection{Extensions and generalizations}
We use as a running example a sequent calculus for the modal logic K4, with a minimal set of logical connectives ($\to$ and $\bot$) and modal operator $\Box$. A sequent is represented as a list of formulae on either side of the turnstile $\vdash$. There is no explicit weakening rule (except that the ($\Box$) rule incorporates weakening (in arbitrary positions) in its conclusion), and no explicit exchange rule.
\begin{figure}\label{k4-rules}
GIVE RULES
\caption{Rules of our sequent calculus for K4}
\end{figure}
TO DO describe the weakening theorems, including the gen\_ext predicate, and the exchange theorems.

In various places we found that we needed extensions to this theorem.
\subsubsection{Reflexive closure of $R$}\label{s-refl-clos}
For example the K4 ($\Box$) rule allows weakening (at arbitrary points) in the antecedent of the conclusion. Consider the inductive proof of exchange (which we define as the interchanging of two sublists which are not necessarily adjacent), in the case where we are exchanging two sublists both of which consist entirely of formulae which were ``weakened in'' in the use of the ($\Box$) rule. In this case there are two rule occurrences whose conclusions are related by the exchange relation, but which have the same premise, rather than related premises. So we extend the definition and theorem above to the case where the premises are related by the reflexive closure of the relation $R$, thus:
\begin{theorem}[\texttt{der-trf-rc}]\label{t-der-trf-rc}
\end{theorem}
\begin{verbatim}
can_trf_rules_rc = fun (sty : Type) (R : sty -> sty -> Type)
  (rules : list sty -> sty -> Type) (ps : list sty) (c : sty) =>
  forall c' : sty, R c c' ->
    {ps' : list sty & rules ps' c' *
      ForallT (fun p' : sty =>
        {p : sty & InT p ps * clos_reflT R p p'}) ps'}
  : forall sty : Type, (sty -> sty -> Type) ->
      (list sty -> sty -> Type) -> list sty -> sty -> Type

der_trf_rc : forall (sty : Type) (R : sty -> sty -> Type)
  (rules : list sty -> sty -> Type),
  (forall (ps : list sty) (c : sty),
    rules ps c -> can_trf_rules_rc R rules ps c) ->
  forall concl : sty, derrec rules (emptyT (X:=sty)) concl ->
  forall concl' : sty, R concl concl' ->
    derrec rules (emptyT (X:=sty)) concl'
\end{verbatim}
Note that it would be trivial to infer \texttt{can\_trf\_rules sty (clos\_reflT R) rules ps c} from \texttt{can\_trf\_rules sty R rules ps c} if we had defined \texttt{can\_trf\_rules} to require that \texttt{ps} and \texttt{c} were premises and conclusion of a rule, and in this case this definition and theorem would not be necessary. But some of our other results would then be more complicated.
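For reference, the \texttt{Type}-valued reflexive closure \texttt{clos\_reflT} used in \texttt{can\_trf\_rules\_rc} can be defined as below (a hedged sketch: the constructor names are our assumption here, and the actual definition in the development may differ in inessential ways):
\begin{verbatim}
(* Reflexive closure of a Type-valued relation (sketch;
   constructor names assumed) *)
Inductive clos_reflT {X : Type} (R : X -> X -> Type) : X -> X -> Type :=
    rT_step : forall x y, R x y -> clos_reflT R x y
  | rT_refl : forall x, clos_reflT R x x.
\end{verbatim}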
\subsubsection{Reflexive transitive closure of $R$}\label{s-refl-trans-clos} The ($\Box$) rule (omitting the weakening incorporated into it) is $\displaystyle \frac{\Gamma, \Box\Gamma \vdash A}{\Box\Gamma \vdash \Box A}$. To show the admissibility of exchange, we consider an exchange in $\Gamma$, to give $\Gamma'$. The corresponding rule has premise $\Gamma', \Box\Gamma' \vdash A$ --- that is, $\Gamma, \Box\Gamma \vdash A$ subjected to \emph{two} exchanges. We therefore needed to extend the results of \S\ref{s-refl-clos} to reflexive transitive closure (and were able to), as follows: \begin{theorem}[\texttt{der-trf-rtc}]\label{t-der-trf-rtc} \end{theorem}
\begin{verbatim}
can_trf_rules_rtc =
fun (sty : Type) (R : sty -> sty -> Type)
  (rules : list sty -> sty -> Type) (ps : list sty) (c : sty) =>
forall c' : sty, R c c' ->
  {ps' : list sty & rules ps' c' *
    ForallT (fun p' : sty => {p : sty & InT p ps * clos_refl_transT_n1 R p p'}) ps'}
  : forall sty : Type, (sty -> sty -> Type) ->
    (list sty -> sty -> Type) -> list sty -> sty -> Type

der_trf_rtc :
forall (sty : Type) (R : sty -> sty -> Type) (rules : list sty -> sty -> Type),
  (forall (ps : list sty) (c : sty), rules ps c -> can_trf_rules_rtc R rules ps c) ->
  forall concl : sty, derrec rules (emptyT (X:=sty)) concl ->
  forall concl' : sty, clos_refl_transT_n1 R concl concl' ->
  derrec rules (emptyT (X:=sty)) concl'
\end{verbatim}
\subsubsection{Related rule is a derived rule}\label{s-der} In proving invertibility of a rule there are some special cases. Suppose we are proving that if $\Gamma \vdash A \to B$ is derivable then $\Gamma, A \vdash B$ is derivable (ie, proving that the ($\vdash\to$) rule is invertible). Suppose then that the last rule of the proof is exactly $\displaystyle \frac {\Gamma, A \vdash B} {\Gamma \vdash A \to B}$. That is, given a rule $p/c$ and $c'$, we want to find $p'$ such that $p'/c'$ is a rule and $R\ p\ p'$, but here $c'$ is exactly $p$ (and our $p'$ will be the same also). So here the relation between $p$ and $p'$ is the reflexive closure of $R$, and the relationship between $p'$ and $c'$ is like a reflexive closure of inference rule application.
Now, we have defined \texttt{derl}, which is like a reflexive transitive closure of rule application, and it is in fact easy to extend the main result to \texttt{derl}, thus (this differs from \texttt{der-trf}, Theorem~\ref{t-der-trf}, only by the insertion of the single word \texttt{derl}): \begin{theorem}[\texttt{der-trf-derl}]\label{t-der-trf-derl} \end{theorem}
\begin{verbatim}
der_trf_derl :
forall (sty : Type) (R : sty -> sty -> Type) (rules : list sty -> sty -> Type),
  (forall (ps : list sty) (c : sty), rules ps c -> can_trf_rules R (derl rules) ps c) ->
  forall concl : sty, derrec rules (emptyT (X:=sty)) concl ->
  forall concl' : sty, R concl concl' -> derrec rules (emptyT (X:=sty)) concl'
\end{verbatim}
But we also need a result allowing for the possibility that $p = p'$, and we can do this: \begin{theorem}[\texttt{der-trf-rc-derl}]\label{t-der-trf-rc-derl} \end{theorem}
\begin{verbatim}
der_trf_rc_derl :
forall (sty : Type) (R : sty -> sty -> Type) (rules : list sty -> sty -> Type),
  (forall (ps : list sty) (c : sty), rules ps c -> can_trf_rules_rc R (derl rules) ps c) ->
  forall concl : sty, derrec rules (emptyT (X:=sty)) concl ->
  forall concl' : sty, R concl concl' -> derrec rules (emptyT (X:=sty)) concl'
\end{verbatim}
Notice that at this point we have extended Theorem~\ref{t-der-trf}, on the one hand, to \texttt{derl} and to the reflexive closure of $R$ (in Theorem~\ref{t-der-trf-rc-derl}), and, on the other hand, to reflexive transitive closure (in Theorem~\ref{t-der-trf-rtc}). Can we make both extensions together? We have tried (to some extent), and so far have been unable to do so. \subsubsection{Related rule is only an admissible rule}\label{s-rc-adm} Again we consider proving the invertibility of the ($\vdash\to$) rule, but here in the case where, for example, the last rule of the proof is $\displaystyle \frac p c = \frac {\Gamma, A \vdash B} {\Gamma \vdash A \to B}$, as before, but the inversion which we want to prove is $c' = {A, \Gamma \vdash B}$. So here we need to set $p' = {\Gamma, A \vdash B}$, and the relationship between $p'$ and $c'$ is that $p'/c'$ is an admissible rule, though not even a derivable rule. \begin{theorem}[\texttt{der-trf-rc-adm}]\label{t-der-trf-rc-adm} \end{theorem}
\begin{verbatim}
der_trf_rc_adm :
forall (sty : Type) (R : sty -> sty -> Type) (rules : list sty -> sty -> Type),
  (forall (ps : list sty) (c : sty), rules ps c -> can_trf_rules_rc R (adm rules) ps c) ->
  forall concl : sty, derrec rules (emptyT (X:=sty)) concl ->
  forall concl' : sty, R concl concl' -> derrec rules (emptyT (X:=sty)) concl'
\end{verbatim}
\subsection{Height-preserving admissibility} In our implementation in Isabelle of the framework described in \S\ref{s-dda}, we defined \texttt{derl} and \texttt{derrec} as inductively defined sets. We also defined a datatype representing a derivation tree, as follows
\begin{verbatim}
datatype 'a dertree = Der 'a ('a dertree list) | Unf 'a
\end{verbatim}
(where \texttt{Unf} represents an unproved leaf). We then defined a predicate testing whether a tree is valid according to a set of rules (which requires that there is no unproved leaf, and that every \texttt{Der}-node is an instance of one of the given rules). We then had to prove that a sequent is derivable if and only if it is the root of a valid derivation tree. With a derivation tree ``object'', we can define its height, size (number of rules), etc, and prove theorems about these.
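For example, writing $h$ for the height of a derivation tree (an illustrative definition, not a name taken from our sources, and with the convention $\max\emptyset = 0$), a natural structural definition over this datatype is
\[ h(\mathtt{Unf}\ a) = 0, \qquad h(\mathtt{Der}\ a\ [t_1,\ldots,t_n]) = 1 + \max\{h(t_1),\ldots,h(t_n)\}. \]
The size (number of rule applications) is defined analogously, with $1 + \sum_i \mathit{size}(t_i)$ at a \texttt{Der}-node.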
Alternatively, we could have dealt with such concepts as height, size, etc, by defining new inductive sets (eg, sequents derivable in at most $n$ steps), but this requires a new inductive definition for each such property which may become of interest. In Coq the inductive definition provides a ``proof object'', whose height (etc) can be defined. This does require that the types of \texttt{derl}, \texttt{derrec}, etc, use \texttt{Type}, whereas our first implementation of these used \texttt{Prop}.
\begin{verbatim}
derrec : forall X : Type, (list X -> X -> Type) -> (X -> Type) -> X -> Type
derl : forall X : Type, (list X -> X -> Type) -> list X -> X -> Type
\end{verbatim}
It also requires that relevant proved theorems (such as those proving some instance of \texttt{derrec \ldots \ldots \ldots}) finish with \texttt{Defined} rather than \texttt{Qed}. We proved a theorem corresponding to Theorem~\ref{t-der-trf}, but with the additional property that the derivation tree for the related conclusion is no higher than that of the original conclusion.
\begin{verbatim}
der_trf_ht :
forall (sty : Type) (R : sty -> sty -> Type) (rules : list sty -> sty -> Type),
  (forall (ps : list sty) (c : sty), rules ps c -> can_trf_rules R rules ps c) ->
  forall (concl : sty) (D : derrec rules (emptyT (X:=sty)) concl) (concl' : sty),
  R concl concl' ->
  {D' : derrec rules (emptyT (X:=sty)) concl' & derrec_height D' <= derrec_height D}
\end{verbatim}
There was also another problem: for the derivation tree in Isabelle, if you take off the last rule, you get a list of subtrees, each of which derives one premise of the last rule. In Coq, these subtrees all prove different things, so they have different types, so they cannot be made into a list. So what we did instead was define \texttt{in\_dersrec}, which expresses that a derivation (of type \texttt{derrec \ldots \ldots \ldots}) is one of the derivations in an object of type \texttt{dersrec \ldots \ldots \ldots}. Likewise we defined \texttt{allPder}, which expresses that all the derivations in an object of type \texttt{dersrec \ldots \ldots \ldots} satisfy some property.
\begin{verbatim}
in_dersrec :
forall (X : Type) (rules : rlsT X) (prems : X -> Type) (concl : X),
  derrec rules prems concl ->
  forall concls : list X, dersrec rules prems concls -> Type
allPder :
forall (X : Type) (rules : rlsT X) (prems : X -> Type),
  (forall x : X, derrec rules prems x -> Type) ->
  forall concls : list X, dersrec rules prems concls -> Type
\end{verbatim}
However, manipulating these functions proved difficult, and we suspect that at some point it may become necessary to implement a derivation tree object as we have described doing in Isabelle. In particular there were problems, which we do not fully understand, that were solved by using \texttt{dependent destruction} (which relies on an axiom, essentially uniqueness of identity proofs, whose use we would prefer to avoid). \section{Coq proofs about K4} We now describe the proofs about the calculus for K4 presented in Figure~\ref{k4-rules}. \subsection{Formalisation of the rules} DISCUSS, eg: \begin{itemize} \item \texttt{seqrule}: applies contexts (left and right, antecedent and succedent) to premises and conclusion (illustrated schematically at the end of this subsection) \item \texttt{cgerule}: applies weakening (inserting formulae anywhere in the antecedent and succedent lists) to the conclusion \item \texttt{princrule} \item \texttt{K4prrule}: the ($\Box$) rule without the weakening \item \texttt{sing\_empty} \end{itemize} We then proved a weakening admissibility result, not using the framework described here.
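Schematically, our reading of \texttt{seqrule} as described above is that it turns a base rule into all of its context-extended instances, for one fixed choice of context lists $\Phi, \Psi$ (antecedent) and $\Theta, \Xi$ (succedent) per rule instance (an illustrative display, not a definition from the development):
\[ \frac{U_1 \vdash V_1 \quad \cdots \quad U_n \vdash V_n}{U \vdash V} \;\leadsto\; \frac{\Phi, U_1, \Psi \vdash \Theta, V_1, \Xi \quad \cdots \quad \Phi, U_n, \Psi \vdash \Theta, V_n, \Xi}{\Phi, U, \Psi \vdash \Theta, V, \Xi} \]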
We next describe the proofs of further admissibility rules, showing how we used the functions described earlier, and how the intermediate results would be equally useful in similar proofs about a different logic, such as S4. \subsection{Exchange} The following lemma is meant for the classical rules, but it is formulated to apply to any set of rules constructed, by extending premises and conclusion uniformly with contexts, from base rules whose antecedents satisfy \texttt{sing\_empty}.
\begin{verbatim}
exchL_std_rule: forall W (rules : rlsT (Seql W)),
  (forall ps U S, rules ps (U, S) -> sing_empty U) ->
  forall ps c, seqrule rules ps c ->
  can_trf_rules (fst_rel (swapped (T:=W))) (seqrule rules) ps c
\end{verbatim}
The relation \texttt{swapped} relates a list to the result of swapping two of its sublists, and so \texttt{fst\_rel (@swapped W)} is the relation between sequents whose antecedent lists are so related. Here, \texttt{sing\_empty U} means that the list $U$ is either a singleton or empty. As an easy corollary of this, we get exchange for classical propositional logic.
\begin{verbatim}
exchL_cpl: forall V concl, derrec (seqrule (@princrule V)) (@emptyT _) concl ->
  forall concl', fst_rel (@swapped _) concl concl' ->
  derrec (seqrule (@princrule V)) (@emptyT _) concl'.
\end{verbatim}
Then, adding the ($\Box$) rule, we get
\begin{verbatim}
exchL_K4: forall V ps c, cgerule (@K4prrule V) ps c ->
  can_trf_rules_rtc (fst_rel (@swapped _)) (cgerule (@K4prrule V)) ps c.
\end{verbatim}
Then we put these results together, using these further lemmas: \begin{lemma}[\texttt{can\_trf\_rules\_rtc}]\label{l-can-trf-rules-rtc} \texttt{can\_trf\_rules\_rtc} is monotonic in the rule set. \end{lemma} \begin{lemma}[\texttt{can\_trf\_rules\_imp\_rtc}]\label{l-can-trf-rules-imp-rtc} \texttt{can\_trf\_rules R rules ps c} implies \texttt{can\_trf\_rules\_rtc R rules ps c}. \end{lemma} Then we use Theorem~\ref{t-der-trf-rtc} to get the result: \begin{theorem}[\texttt{exchL\_rtc, exchL}]\label{t-exchL} If \texttt{concl} is derivable, then \texttt{concl'}, obtained from \texttt{concl} by exchange(s) in the antecedent, is also derivable. \end{theorem} \subsection{Invertibility of the classical propositional rules} From here on it is pretty incomplete. First we proved the invertibility of the ($\vdash\to$) rule. This uses Theorem~\ref{t-der-trf-rc-adm} and the following lemma. \begin{lemma}[\texttt{can\_trf\_ImpRinv}]\label{t-can-trf-ImpRinv} \end{lemma}
\begin{verbatim}
Lemma can_trf_ImpRinv V ps c: @K4rules V ps c ->
  can_trf_rules_rc (seqrel (@ImpRinv V)) (adm (@K4rules V)) ps c.
\end{verbatim}
The proof of this lemma involves five cases, one for each rule, each one being a separate lemma.\footnote{As things stand right now, we need to massage these somewhat to make them equally easily usable for other logics like S4.} For the ($\Box$) rule, the formula to be inverted can only be a formula added in the weakening aspect of the rule, so there is no real difficulty there. For the axiom rule, the only problem case is where the formula to be inverted is the formula appearing on both sides (eg $C \to D \vdash C \to D$, so the inversion gives $C, C \to D \vdash D$). Here we need the related rule to be a derivable (or admissible) rule, rather than simply a rule of the calculus. The ($\bot\vdash$) case is easy, because inversion of $C \to D$ won't change the fact that there is a $\bot$ in the antecedent. The remaining two rules produce a lot of cases. Firstly, where the last rule is also ($\vdash\to$), involving the same formula, the issue described in \S\ref{s-rc-adm} arises.
Then there are a multitude of cases, according to where the principal formulae of the last rule and the formula to be inverted appear in the contexts of both the last rule and the inversion relation. These need some effort in programming Coq Ltac tactics to handle all the cases. Finally, using Theorem~\ref{t-der-trf-rc-adm} we get \begin{theorem}[\texttt{K4\_ImpRinv}]\label{t-K4-ImpRinv} \end{theorem}
\begin{verbatim}
Theorem K4_ImpRinv V concl: derrec (@K4rules V) (@emptyT _) concl ->
  forall concl', seqrel (@ImpRinv V) concl concl' ->
  derrec (@K4rules V) (@emptyT _) concl'.
\end{verbatim}
Trying to generalise the above: first, we adapted the proof for the case where the last rule is ($\vdash\to$) and the rule being inverted is ($\to\vdash$) to a case where the last rule has (in its conclusion) any singleton on the right, and the rule being inverted has (in its conclusion) any singleton on the left. We did this successfully for any such rules that are singletons (bearing in mind that a ``rule'' like ($\vdash\to$) is actually lots of rules). But we then had difficulty manipulating this to recover the previous results. Hopefully this won't be too hard; we have to think about it further. (The problem is that we cannot simply say that a rule equals the union of its rule instances, and substitute this as the argument of \texttt{can\_trf\_rules\_rc}, because the ``='' really means that if there is a proof object of one then there is a proof object of the other, we think.) \begin{thebibliography}{10} \bibitem{dawson-gore-gls} J.~E. Dawson and R.~Gor{\'e}. \newblock Generic Methods for Formalising Sequent Calculi Applied to Provability Logic. \newblock In {\em Proc.\ Logic for Programming, Artificial Intelligence and Reasoning (LPAR 2010)}, LNCS 6397, pages 263--277, 2010. \end{thebibliography} \end{document}
MY HONEST REVIEW OF RODAN AND FIELDS | AND WHY I STILL USE TULA TOO. This post was first published on October 31, 2017, and was updated in 2019. I remember when my friend from high school got off the train and I saw her skin looking better than it had in all the time I've known her. She told me she had started using Rodan and Fields and was selling it too, so being the supportive friend I am, I had to try it. There are definitely products I can't live without, but there were some I couldn't live with either. There were also some products I was using that just couldn't be replaced by one of the most sought-after product lines out there. I'll cut right to the chase and tell you that the Redefine line from Rodan and Fields really does work. I do think you need to use every piece for it to work its magic. My skin is more even-toned than it has ever been in my life. That said, there are some products I don't think lived up to the hype, either. I do love the Redefine cleanser; I think it is one of the key pieces to evening out my skin. That said, it did not remove my makeup. This could be a deal-breaker for some of you, and I don't blame you. I'm not much of a double-cleansing girl either. Because of this, it was important for me to start double-cleansing, but I needed to find something super gentle so my skin wouldn't get irritated. I found a few alternatives I like much better. Most recently I also started using the Colleen Rothschild Cleansing Balm and it has become my holy grail cleanser. It's a one-stop shop and makes my skin glow. I don't even need my Clarisonic! Use code SUNSETS20 to save 20%. This is another one of my favorite R+F products. It has helped my (and Mike's) skin become so much less red over time. I never go a day without it, and I love having it on hand when my allergies flare or when I do any sort of microdermabrasion. Recently I was lucky enough to try Tula's Dual-Phase Skin Reviving Treatment Pads. Tula sent them to me and asked me to use them for the first time live on Instastories. I was so excited for a company to allow me to do an honest, in-the-moment review. These claim to be an at-home facial for sensitive skin, and knowing my problems with the R+F paste, I thought I'd give them a try. I've used them twice since, and both times they gave me the SAME results as R+F, with much less redness. I'm actually going to make a permanent switch. You can see a live video of me using them below! I actually didn't like the lash boost. I think I might be the only one on the planet. Did it work? Yes, it did. Did I pay a major price with sandpaper eyes that were puffy and red... oh yes. I opted to discontinue use because it was truly unbearable. I've also used Neulash and had a less severe reaction. That said, since then I've started using castor or argan oil on my lashes and it is doing a nice job. I also struggled with the eye cream, which made me sad since it is supposed to be a miracle product. It made my eyes burn terribly too. I do find now that I can sometimes use it, if I apply it correctly and put it nowhere near my eyes. Makes sense, no? Luckily I found a few other eye products that I love that don't irritate my eyes at all. This eye cream depuffs my eyes and it is amazing. Even the new R+F eye cream aimed at puffiness hasn't given me as good a result as this does. Again, I do have sensitive eyes, so many products are just too strong. For moisturizing my eye area, I love the Tula Revitalizing Eye Cream.
It makes them so soft and doesn't create the least bit of irritation. I also love Josie Maran's Devotion Creamy Argan Milk Eye Serum for when my sensitive eyes need a dose of extra moisture. I LOVE layering this on at night as it can be used on eyelids too. It has never burned or bothered my eyes at all. One of the things I felt was missing from the Redefine line was a plumping of my skin. With Tula's Volume Defense Deep Wrinkle Serum, you see results instantly: your skin wrinkles are less noticeable right away. I didn't see any of that with Rodan and Fields until I bought the Active Hydration serum. This product definitely plumps skin, but it is SUPER pricey, especially when I know I can get similar results from both the Tula Volume Defense Deep Wrinkle Serum and the Kefir Moisture Repair Pressed Serum. I love the results I have gotten with Rodan and Fields, but I can't say there haven't been some challenges too. I also know that R+F is an expensive investment, and I like that many of my tried-and-true Tula products are a great, more budget-friendly option for those who can't afford Rodan and Fields. If you're going to make an investment and don't have sensitive skin, buy the Redefine four-piece kit and call it a day. Then supplement with Tula or anything else you think you may need. I love doing this with Tula because it doesn't clash with any of the other products I'm using either. Since originally posting this review, I have stopped using Rodan and Fields. I've only been using Tula and Colleen Rothschild on my skin. I can't say it is as even overall, but it is much less irritated, which is worth it to me! Just wondering how long the Tula discount code is valid. You listed a couple of different code options, sunsets20 and sunsetsholiday20, and neither seems to work. Any ideas? Thanks! Hi! The Tula codes just changed with November and the new code is SUNSETSHOLIDAY. That one should work throughout the holiday season; let me know if you still have any trouble! Hey girl! The Redefine Daily Cleansing Mask is NOT meant to remove make-up. It's best to remove make-up with a make-up removing cleanser or a make-up removing wipe.
From Perennial.program_proof Require Import grove_prelude. From Goose.github_com.mit_pdos.gokv.simplepb Require Export admin. From Perennial.program_proof.grove_shared Require Import urpc_proof urpc_spec. From Perennial.goose_lang.lib Require Import waitgroup. From iris.base_logic Require Export lib.ghost_var mono_nat. From iris.algebra Require Import dfrac_agree mono_list. From Perennial.program_proof.simplepb Require Import pb_definitions config_proof pb_setstate_proof pb_getstate_proof pb_becomeprimary_proof pb_makeclerk_proof. Section config_global. Context {pb_record:PBRecord}. Notation pbG := (pbG (pb_record:=pb_record)). Notation OpType := (pb_OpType pb_record). Context `{!gooseGlobalGS Σ}. Context `{!configG Σ}. Context `{!pbG Σ}. Definition adminN := nroot .@ "admin". Definition is_conf_inv γpb γconf : iProp Σ := inv adminN (∃ epoch conf (confγs:list pb_server_names) epoch_lb, "Hepoch" ∷ config_proof.own_epoch γconf epoch ∗ "Hconf" ∷ own_config γconf conf ∗ "#His_conf" ∷ is_epoch_config γpb epoch_lb confγs ∗ "#His_conf_prop" ∷ is_epoch_config_proposal γpb epoch_lb confγs ∗ "#His_hosts" ∷ ([∗ list] γsrv ; host ∈ confγs ; conf, is_pb_host host γpb γsrv) ∗ "#His_lbs" ∷ (∀ γsrv, ⌜γsrv ∈ confγs⌝ → pb_ghost.is_epoch_lb γsrv epoch_lb) ∗ "Hunused" ∷ ([∗ set] epoch' ∈ (fin_to_set u64), ⌜int.nat epoch < int.nat epoch'⌝ → config_proposal_unset γpb epoch' ∗ config_unset γpb epoch' ∗ own_proposal_unused γpb epoch' ∗ own_init_proposal_unused γpb epoch') ∗ "Hunset_or_set" ∷ (config_unset γpb epoch ∨ ⌜int.nat epoch_lb = int.nat epoch⌝) ∗ "#His_skip" ∷ (∀ epoch_skip, ⌜int.nat epoch_lb < int.nat epoch_skip⌝ → ⌜int.nat epoch_skip < int.nat epoch⌝ → is_epoch_skipped γpb epoch_skip) ) . (* before calling this lemma, have to already allocate pb ghost state *) Lemma config_ghost_init_2 γsys conf confγs : ([∗ list] γsrv ; host ∈ confγs ; conf, is_pb_host host γsys γsrv) -∗ pb_init_config γsys confγs ={⊤}=∗ ∃ γconf, is_conf_inv γsys γconf ∗ makeConfigServer_pre γconf conf. Proof. iIntros "#Hhosts Hinitconf". iMod (config_ghost_init conf) as (γconf) "(Hconfpre & Hepoch & Hconf)". iExists _; iFrame "Hconfpre". iMod (inv_alloc with "[-]") as "$"; last done. iNext. iExists (U64 0), conf, confγs, (U64 0). iFrame. iNamed "Hinitconf". iFrame "∗#%". iSplitR. { iRight. done. } iIntros (???). exfalso. word. Qed. End config_global. Section admin_proof. Context {pb_record:PBRecord}. Notation pbG := (pbG (pb_record:=pb_record)). Notation OpType := (pb_OpType pb_record). Notation has_op_encoding := (pb_has_op_encoding pb_record). Notation has_snap_encoding := (pb_has_snap_encoding pb_record). Notation compute_reply := (pb_compute_reply pb_record). Notation wp_Clerk__GetState := (wp_Clerk__GetState (pb_record:=pb_record)). Notation wp_Clerk__SetState := (wp_Clerk__SetState (pb_record:=pb_record)). Context `{!heapGS Σ}. Context `{!pbG Σ}. Context `{!configG Σ}. Context `{!waitgroupG Σ}. Definition is_conf_host confHost γpb : iProp Σ := ∃ γconf, config_proof.is_host confHost γconf ∗ is_conf_inv γpb γconf. Definition is_Clerk2 ck γpb : iProp Σ := ∃ γconf, "#Hinv" ∷ is_conf_inv γpb γconf ∗ "#Hck" ∷ is_Clerk ck γconf. Lemma wp_MakeClerk2 (configHost:u64) γpb : {{{ is_conf_host configHost γpb }}} config.MakeClerk #configHost {{{ ck, RET #ck; is_Clerk2 ck γpb }}}. Proof. iIntros (Φ) "#Hhost HΦ". iDestruct "Hhost" as (?) "[Hhost Hinv]". wp_apply (config_proof.wp_MakeClerk with "[$Hhost]"). iIntros. iApply "HΦ". iExists _; iFrame "#". Qed. 
Lemma wp_Clerk__GetConfig2 ck γpb Φ : is_Clerk2 ck γpb -∗ □(∀ confγs (conf:list u64) config_sl, (is_slice_small config_sl uint64T 1 conf ∗ ([∗ list] γsrv ; host ∈ confγs ; conf, is_pb_host host γpb γsrv) -∗ Φ (slice_val config_sl)%V )) -∗ WP config.Clerk__GetConfig #ck {{ Φ }} .
Proof. iIntros "#Hck #HΦ". iNamed "Hck". wp_apply (wp_Clerk__GetConfig with "[$Hck]"). iModIntro. iIntros "Hlc". iInv "Hinv" as "Hi" "Hclose". iMod (lc_fupd_elim_later with "Hlc Hi") as "Hi". iApply fupd_mask_intro. { set_solver. } iIntros "Hmask". iNamed "Hi". iExists _. iFrame. iIntros "Hconfig". iMod "Hmask". iMod ("Hclose" with "[-]"). { iNext. iExists _, _, _, _. iFrame "∗#%". } iModIntro. iIntros (?) "Hconf". iApply "HΦ". iFrame "∗#". Qed.
Lemma wp_Clerk__GetEpochAndConfig2 ck γpb Φ : is_Clerk2 ck γpb -∗ □(∀ (epoch epoch_lb:u64) confγs (conf:list u64) config_sl, (is_slice_small config_sl uint64T 1 conf ∗ config_proposal_unset γpb epoch ∗ own_proposal_unused γpb epoch ∗ own_init_proposal_unused γpb epoch ∗ is_epoch_config γpb epoch_lb confγs ∗ (∀ epoch_skip, ⌜int.nat epoch_lb < int.nat epoch_skip⌝ → ⌜int.nat epoch_skip < int.nat epoch⌝ → is_epoch_skipped γpb epoch_skip) ∗ ([∗ list] γsrv ; host ∈ confγs ; conf, is_pb_host host γpb γsrv) ∗ (∀ γsrv, ⌜γsrv ∈ confγs⌝ → pb_ghost.is_epoch_lb γsrv epoch_lb)) -∗ Φ (#epoch, slice_val config_sl)%V ) -∗ WP config.Clerk__GetEpochAndConfig #ck {{ Φ }} .
Proof. iIntros "#Hck #HΦ". iNamed "Hck". wp_apply (wp_Clerk__GetEpochAndConfig with "[$Hck]"). iModIntro. iIntros "Hlc". iInv "Hinv" as "Hi" "Hclose". iMod (lc_fupd_elim_later with "Hlc Hi") as "Hi". iApply fupd_mask_intro. { set_solver. } iIntros "Hmask". iNamed "Hi". iExists _, _. iFrame. iIntros "%Hno_overflow Hepoch Hconf". iMod "Hmask". (* Hunset becomes skipped, and the first unused becomes unset. *) iDestruct (big_sepS_elem_of_acc_impl (word.add epoch (U64 1)) with "Hunused") as "[Hunset_new Hunused]". { set_solver. } iSpecialize ("Hunset_new" with "[]"). { done. } iDestruct "Hunset_new" as "(Hunset_new & Hunset_new2 & Hprop)". iDestruct "Hunset_or_set" as "[Hunset|%Hset]". { iMod (own_update with "Hunset") as "Hskip1". { apply singleton_update. apply dfrac_agree_persist. } iDestruct "Hskip1" as "#Hskip1". iMod ("Hclose" with "[Hunset_new2 Hunused Hepoch Hconf]"). { iNext. iExists _, _, _, _. iFrame "∗#". iSplitL "Hunused". { iApply "Hunused". { iModIntro. iIntros (???) "H". iIntros. iApply "H". iPureIntro. word. } { iIntros. exfalso. word. } } iIntros (???). assert (int.nat epoch_skip = int.nat epoch ∨ int.nat epoch_skip < int.nat epoch ∨ int.nat epoch_skip >= int.nat (word.add epoch (U64 1))) as Hineq. { word. } destruct Hineq as [Heq|[Hineq|Hineq]]. { replace (epoch_skip) with (epoch) by word. iFrame "Hskip1". } { iApply "His_skip". { done. } { done. } } { exfalso. word. } } iModIntro. iIntros. iApply "HΦ". iDestruct "Hprop" as "[$ $]". iFrame "∗#". (* TODO: repetitive proof *) iIntros. assert (int.nat epoch_skip = int.nat epoch ∨ int.nat epoch_skip < int.nat epoch ∨ int.nat epoch_skip >= int.nat (word.add epoch (U64 1))) as Hineq. { word. } destruct Hineq as [Heq|[Hineq|Hineq]]. { replace (epoch_skip) with (epoch) by word. iFrame "Hskip1". } { iApply "His_skip". { done. } { done. } } { exfalso. word. } } { iClear "His_skip". iMod ("Hclose" with "[Hunset_new2 Hunused Hepoch Hconf]"). { iNext. iExists _, _, _, _. iFrame "∗#". iSplitL "Hunused". { iApply "Hunused". { iModIntro. iIntros (???) "H". iIntros. iApply "H". iPureIntro. word. } { iIntros. exfalso. word. } } iIntros (???). exfalso. rewrite Hset in H.
replace (int.nat (word.add epoch 1%Z)) with (int.nat epoch + 1) in H0 by word. word. } iModIntro. iIntros. iApply "HΦ". iDestruct "Hprop" as "[$ $]". iFrame "∗#". iIntros (???). exfalso. rewrite Hset in H. replace (int.nat (word.add epoch 1%Z)) with (int.nat epoch + 1) in H0 by word. word. } Qed. Lemma wp_Clerk__WriteConfig2 ck γpb Φ config_sl conf confγ epoch : is_Clerk2 ck γpb -∗ is_slice_small config_sl uint64T 1 conf -∗ is_epoch_config_proposal γpb epoch confγ -∗ ([∗ list] γsrv ; host ∈ confγ ; conf, is_pb_host host γpb γsrv) -∗ (∀ γsrv, ⌜γsrv ∈ confγ⌝ → pb_ghost.is_epoch_lb γsrv epoch) -∗ □ (∀ (err:u64), (if (decide (err = U64 0)) then is_epoch_config γpb epoch confγ else True) -∗ is_slice_small config_sl uint64T 1 conf -∗ Φ #err) -∗ WP config.Clerk__WriteConfig #ck #epoch (slice_val config_sl) {{ Φ }} . Proof. iIntros "#Hck Hsl #Hconf_prop #Hhosts #Hlbs #HΦ". iNamed "Hck". wp_apply (wp_Clerk__WriteConfig with "Hck Hsl"); last first. { iIntros. iApply ("HΦ" with "[] [$]"). destruct (decide _). { exfalso. done. } done. } iModIntro. iIntros "Hlc". iInv "Hinv" as "Hi" "Hclose". iMod (lc_fupd_elim_later with "Hlc Hi") as "Hi". iApply fupd_mask_intro. { set_solver. } iIntros "Hmask". iNamed "Hi". iExists _. iFrame "∗". destruct (decide (_)); last first. { (* write failed because of stale epoch. *) iIntros "Hepoch". iMod "Hmask" as "_". iMod ("Hclose" with "[Hepoch Hconf Hunused Hunset_or_set]"). { iNext. iExists _, _, _, _. iFrame "∗#". } iModIntro. iIntros (??) "Hsl". wp_pures. iApply ("HΦ" with "[] Hsl"). destruct (decide (_)). { exfalso. done. } done. } { (* successful write *) rewrite e. iExists _. iFrame. iIntros "Hconf Hepoch". iMod "Hmask" as "_". iDestruct "Hunset_or_set" as "[Hunset|%Hset]"; last first. { (* config was already set before *) replace (epoch) with (epoch_lb) by word. iDestruct "Hconf_prop" as "[Hconf_prop %Hle]". iDestruct "His_conf_prop" as "[His_conf_prop _]". iDestruct (own_valid_2 with "His_conf_prop Hconf_prop") as %Hvalid. rewrite singleton_op in Hvalid. rewrite singleton_valid in Hvalid. rewrite dfrac_agree_op_valid in Hvalid. replace confγ with confγs in * by naive_solver. iMod ("Hclose" with "[Hepoch Hconf Hunused]"). { iNext. iExists _, _, _, _. iFrame "∗#". iSplitL; first done. by iRight. } iApply "HΦ". iModIntro. iFrame "#". } { (* config is being set for the first time *) iMod (own_update with "Hunset") as "Hset". { apply singleton_update. apply cmra_update_exclusive. instantiate (1:=(to_dfrac_agree (DfracOwn 1) ((Some confγ) : (leibnizO _)))). done. } iMod (own_update with "Hset") as "Hset". { apply singleton_update. apply dfrac_agree_persist. } iDestruct "Hset" as "#Hset". iMod ("Hclose" with "[Hconf Hepoch Hunused]"). { iNext. iExists _, _, _, _. iFrame "∗#". iDestruct "Hconf_prop" as "[_ %Hineq]". iSplitR; first done. iSplitL. { by iRight. } iIntros (???). exfalso. word. } iApply "HΦ". iDestruct "Hconf_prop" as "[_ %Hineq]". iFrame "#". done. } } Qed. Lemma wp_Reconfig γ (configHost:u64) (servers:list u64) (servers_sl:Slice.t) server_γs : {{{ "Hservers_sl" ∷ is_slice servers_sl uint64T 1 servers ∗ "#Hhost" ∷ ([∗ list] γsrv ; host ∈ server_γs ; servers, is_pb_host host γ γsrv) ∗ "#Hconf_host" ∷ is_conf_host configHost γ }}} EnterNewConfig #configHost (slice_val servers_sl) {{{ (err:u64), RET #err; True }}}. Proof using waitgroupG0. iIntros (Φ) "Hpre HΦ". iNamed "Hpre". wp_call. wp_apply (wp_slice_len). wp_pures. iDestruct (is_slice_sz with "Hservers_sl") as %Hservers_sz. wp_if_destruct. { by iApply "HΦ". } wp_apply (wp_MakeClerk2 with "Hconf_host"). 
iIntros (ck) "#Hck". wp_pures. wp_bind (Clerk__GetEpochAndConfig _). iApply (wp_frame_wand with "[HΦ Hservers_sl]"). { iNamedAccu. } wp_apply (wp_Clerk__GetEpochAndConfig2 with "[$Hck]"). iModIntro. iIntros (?????) "Hpost1". iNamed 1. wp_pures. unfold prelude.Data.randomUint64. wp_pures. set (s:=(u64_instance.u64.(word.add) (U64 0) (U64 1))). generalize s as randId. clear s. intros randId. wp_apply (wp_slice_len). wp_pures. iDestruct "Hpost1" as "(Hconf_sl & Hconf_unset & Hprop & Hinit & #His_conf & #Hskip & #His_hosts & #Hlb)". iAssert (⌜length conf ≠ 0⌝)%I as %Hold_conf_ne. { iDestruct "His_conf" as "[_ %Hconfγ_nz]". iDestruct (big_sepL2_length with "His_hosts") as %Heq. iPureIntro. lia. } iDestruct (is_slice_small_sz with "Hconf_sl") as %Hconf_len. set (oldNodeId:=word.modu randId config_sl.(Slice.sz)). assert (int.nat oldNodeId < length conf) as Hlookup_conf. { rewrite Hconf_len. unfold oldNodeId. enough (int.Z randId `mod` int.Z config_sl.(Slice.sz) < int.Z config_sl.(Slice.sz))%Z. { word. } apply Z.mod_pos_bound. word. } apply lookup_lt_is_Some_2 in Hlookup_conf as [host Hlookup_conf]. wp_apply (wp_SliceGet with "[$Hconf_sl]"). { done. } iIntros "Hconf_sl". simpl. (* FIXME: how does wp_MakeClerk work here? *) iDestruct (big_sepL2_lookup_2_some with "His_hosts") as %HH. { done. } destruct HH as [γsrv_old Hconfγ_lookup]. wp_apply (wp_MakeClerk with "[]"). { iDestruct (big_sepL2_lookup_acc with "His_hosts") as "[$ _]"; done. } iIntros (oldClerk) "#HoldClerk". wp_pures. (* Get the old state *) wp_apply (wp_allocStruct). { naive_solver. } iIntros (args) "Hargs". iDestruct (struct_fields_split with "Hargs") as "HH". iNamed "HH". wp_apply (wp_Clerk__GetState with "[$HoldClerk $Epoch]"). { iApply "Hlb". iPureIntro. eapply elem_of_list_lookup_2. done. } iIntros (reply err) "Hpost". wp_pures. destruct (decide (err = _)); last first. { (* err ≠ 0; error. *) iNamed "Hpost". wp_loadField. wp_pures. rewrite bool_decide_false; last naive_solver. wp_pures. wp_loadField. simpl. iApply "HΦ". done. } (* err = 0; keep going with reconfig *) (* Got the old state now *) iDestruct "Hpost" as (???) "(%Hepoch_lb_ineq & %Hepoch_ub_ineq & #Hacc_ro & #Hprop_facts & #Hprop_lb & Hreply & %Henc & %Hlen_no_overflow)". destruct (decide (int.nat epochacc = int.nat epoch)) as [Heq|Hepochacc_ne_epoch]. { replace (epochacc) with (epoch) by word. iDestruct (own_valid_2 with "Hprop Hprop_lb") as %Hvalid. exfalso. rewrite singleton_op singleton_valid in Hvalid. rewrite auth_map.Cinl_Cinr_op in Hvalid. done. } iMod (ghost_init_primary with "Hprop_lb Hprop_facts His_conf Hacc_ro Hskip Hprop Hinit") as "(Hprop & #Hprop_facts2 & #Hinit)". { by eapply elem_of_list_lookup_2. } { word. } { word. } iNamed "Hreply". wp_loadField. simpl. wp_pures. wp_apply (wp_slice_len). wp_apply (wp_NewSlice). iIntros (clerks_sl) "Hclerks_sl". wp_pures. iDestruct (is_slice_to_small with "Hservers_sl") as "Hservers_sl". rewrite -Hservers_sz. iDestruct (is_slice_to_small with "Hclerks_sl") as "Hclerks_sl". iDestruct (is_slice_small_sz with "Hclerks_sl") as %Hclerks_sz. rewrite replicate_length in Hclerks_sz. simpl. wp_apply (wp_ref_to). { eauto. } iIntros (i_ptr) "Hi". wp_pures. 
(* weaken to loop invariant *) iAssert ( ∃ (i:u64) clerksComplete clerksLeft, "Hi" ∷ i_ptr ↦[uint64T] #i ∗ "%HcompleteLen" ∷ ⌜length clerksComplete = int.nat i⌝ ∗ "%Hlen" ∷ ⌜length (clerksComplete ++ clerksLeft) = length servers⌝ ∗ "Hclerks_sl" ∷ is_slice_small clerks_sl ptrT 1 (clerksComplete ++ clerksLeft) ∗ "Hservers_sl" ∷ is_slice_small servers_sl uint64T 1 servers ∗ "#Hclerks_is" ∷ ([∗ list] ck ; γsrv ∈ clerksComplete ; (take (length clerksComplete) server_γs), pb_definitions.is_Clerk ck γ γsrv ) )%I with "[Hclerks_sl Hservers_sl Hi]" as "HH". { iExists _, [], _. simpl. iFrame "∗#". iPureIntro. split; first word. apply replicate_length. } wp_forBreak_cond. wp_pures. iNamed "HH". wp_load. wp_apply (wp_slice_len). wp_pures. clear host Hlookup_conf. wp_if_destruct. { (* loop not finished *) wp_pures. wp_load. assert (int.nat i < length servers) as Hlookup. { word. } apply list_lookup_lt in Hlookup as [host Hlookup]. wp_apply (wp_SliceGet with "[$Hservers_sl]"). { done. } iIntros "Hserver_sl". iDestruct (big_sepL2_lookup_2_some with "Hhost") as %HH. { done. } destruct HH as [γsrv Hserver_γs_lookup]. wp_apply (wp_MakeClerk with "[]"). { iDestruct (big_sepL2_lookup_acc with "Hhost") as "[$ _]"; done. } iIntros (pbCk) "#HpbCk". wp_load. wp_apply (wp_SliceSet (V:=loc) with "[Hclerks_sl]"). { iFrame "Hclerks_sl". iPureIntro. apply list_lookup_lt. word. } iIntros "Hclerks_sl". wp_load. wp_store. iLeft. iModIntro. iSplitR; first done. iFrame "∗#". iExists _, _, _. iFrame "∗". instantiate (1:=clerksComplete ++ [pbCk]). iSplitR. { iPureIntro. rewrite app_length. simpl. word. } instantiate (2:=tail clerksLeft). destruct clerksLeft. { exfalso. rewrite app_nil_r in Hlen. word. } iSplitR. { iPureIntro. rewrite app_length. rewrite app_length. simpl. rewrite -Hlen. rewrite app_length. simpl. word. } iSplitL. { iApply to_named. iExactEq "Hclerks_sl". { f_equal. simpl. rewrite -HcompleteLen. replace (length _) with (length clerksComplete + 0) by lia. rewrite insert_app_r. simpl. rewrite -app_assoc. f_equal. } } rewrite app_length. simpl. iDestruct (big_sepL2_length with "Hhost") as %Hserver_len_eq. rewrite take_more; last first. { lia. } iApply (big_sepL2_app with "Hclerks_is []"). replace (take 1 (drop (_) server_γs)) with ([γsrv]); last first. { apply ListSolver.list_eq_bounded. { simpl. rewrite take_length. rewrite drop_length. word. } intros. rewrite list_lookup_singleton. destruct i0; last first. { exfalso. simpl in *. word. } rewrite lookup_take; last first. { word. } rewrite lookup_drop. rewrite HcompleteLen. rewrite -Hserver_γs_lookup. f_equal. word. (* TODO: list_solver. *) } iApply big_sepL2_singleton. iFrame "#". } (* done with for loop *) iRight. iSplitR; first done. iModIntro. assert (int.nat i = length servers) as Hi_done. { rewrite Hclerks_sz. rewrite app_length in Hlen. word. } wp_pures. replace (clerksLeft) with ([] : list loc) in *; last first. { (* TODO: list_solver. pure fact *) enough (length clerksLeft = 0). { symmetry. apply nil_length_inv. done. } rewrite app_length in Hlen. word. } wp_apply (wp_NewWaitGroup_free). iIntros (wg) "Hwg". wp_pures. wp_apply (wp_slice_len). wp_apply (wp_new_slice). (* XXX: untyped *) { done. } clear err e. iIntros (errs_sl) "Herrs_sl". iDestruct (slice.is_slice_sz with "Herrs_sl") as "%Herrs_sz". wp_pures. wp_store. wp_pures. rewrite app_nil_r. rename clerksComplete into clerks. iApply fupd_wp. iMod (fupd_mask_subseteq (↑adminN)) as "Hmask". { set_solver. } set (P:= (λ i, ∃ (err:u64) γsrv', ⌜server_γs !! 
int.nat i = Some γsrv'⌝ ∗ readonly ((errs_sl.(Slice.ptr) +ₗ[uint64T] int.Z i)↦[uint64T] #err) ∗ □ if (decide (err = U64 0)) then pb_ghost.is_epoch_lb γsrv' epoch else True )%I : u64 → iProp Σ). iMod (free_WaitGroup_alloc adminN _ P with "Hwg") as (γwg) "Hwg". iMod "Hmask" as "_". iModIntro. (* iMod (readonly_alloc_1 with "Hreply_epoch") as "#Hreply_epoch". *) iMod (readonly_alloc_1 with "Hreply_state") as "#Hreply_state". iMod (readonly_alloc_1 with "Hreply_next_index") as "#Hreply_next_index". iDestruct "Hreply_state_sl" as "#Hreply_state_sl". (* weaken to loop invariant *) iAssert ( ∃ (i:u64), "Hi" ∷ i_ptr ↦[uint64T] #i ∗ "%Hi_ineq" ∷ ⌜int.nat i ≤ length clerks⌝ ∗ "Herrs" ∷ (errs_sl.(Slice.ptr) +ₗ[uint64T] int.Z i)↦∗[uint64T] (replicate (int.nat clerks_sl.(Slice.sz)- int.nat i) #0) ∗ "Hwg" ∷ own_WaitGroup adminN wg γwg i P )%I with "[Herrs_sl Hi Hwg]" as "HH". { unfold is_slice. unfold slice.is_slice. unfold slice.is_slice_small. clear Hlen. iDestruct "Herrs_sl" as "[[Herrs_sl %Hlen] _]". destruct Hlen as [Hlen _]. iExists _; iFrame. iSplitR; first iPureIntro. { word. } simpl. replace (1 * int.Z _)%Z with (0%Z) by word. rewrite loc_add_0. replace (int.nat _ - int.nat 0) with (int.nat clerks_sl.(Slice.sz)) by word. iFrame "Herrs_sl". } (* FIXME: copy/pasted from pb_apply_proof *) wp_forBreak_cond. clear i HcompleteLen Heqb0 Hi_done. iNamed "HH". wp_load. wp_apply (wp_slice_len). wp_pures. iDestruct (ghost_get_propose_lb with "Hprop") as "#Hprop_lb2". wp_if_destruct. { (* loop continues *) wp_pures. wp_apply (wp_WaitGroup__Add with "[$Hwg]"). { word. } iIntros "[Hwg Hwg_tok]". wp_pures. wp_load. assert (int.nat i < int.nat clerks_sl.(Slice.sz)) as Hlookup by word. rewrite -Hclerks_sz in Hlookup. rewrite app_nil_r in Hlen. rewrite -Hlen in Hlookup. apply list_lookup_lt in Hlookup as [pbCk Hlookup]. wp_apply (wp_SliceGet with "[$Hclerks_sl]"). { done. } iIntros "Hclerks_sl". wp_pures. replace (int.nat clerks_sl.(Slice.sz) - int.nat i) with (S (int.nat clerks_sl.(Slice.sz) - (int.nat (word.add i 1)))) by word. rewrite replicate_S. iDestruct (array_cons with "Herrs") as "[Herr_ptr Herr_ptrs]". wp_load. wp_pures. iDestruct (own_WaitGroup_to_is_WaitGroup with "[Hwg]") as "#His_wg". { by iExactEq "Hwg". } wp_apply (wp_fork with "[Hwg_tok Herr_ptr]"). { iNext. wp_pures. wp_loadField. wp_loadField. wp_apply (wp_allocStruct). { repeat econstructor. done. } iIntros (args_ptr) "Hargs". iDestruct (struct_fields_split with "Hargs") as "HH". iNamed "HH". iDestruct (big_sepL2_lookup_1_some with "Hclerks_is") as %[γsrv Hlookup2]. { done. } iDestruct (big_sepL2_lookup_acc with "Hclerks_is") as "[HH _]". { done. } { done. } wp_apply (wp_Clerk__SetState with "[Epoch NextIndex State]"). { iFrame "∗#". iSplitR. { iPureIntro. done. } iSplitR. { iPureIntro. done. } iExists _. iFrame "∗#". } iIntros (err) "#Hpost". unfold SliceSet. unfold slice.ptr. wp_pures. wp_store. iMod (readonly_alloc_1 with "Herr_ptr") as "#Herr_ptr". wp_apply (wp_WaitGroup__Done with "[$Hwg_tok $His_wg]"). { rewrite lookup_take_Some in Hlookup2. destruct Hlookup2 as [Hlookup2 _]. iModIntro. unfold P. iExists _, _. iSplitL; first done. iFrame "#". } done. } wp_pures. wp_load. wp_store. (* re-establish loop invariant *) iModIntro. iLeft. iSplitR; first done. iFrame "∗". iExists _. iFrame "∗#". iSplitR. { iPureIntro. word. } iApply to_named. iExactEq "Herr_ptrs". f_equal. rewrite loc_add_assoc. f_equal. simpl. replace (int.Z (word.add i 1%Z)) with (int.Z i + 1)%Z by word. word. } (* loop completed *) iModIntro. iRight. iSplitR; first done. 
wp_pures. wp_apply (wp_WaitGroup__Wait with "[$Hwg]"). iIntros "#Hwg_post". wp_pures. replace (int.nat i) with (length clerks); last first. { rewrite app_nil_r in Hlen. word. } wp_apply (wp_ref_to). { eauto. } iIntros (err_ptr) "Herr". wp_pures. wp_store. wp_pures. (* FIXME: *) (* This was copy/pasted and modified from apply_proof *) iAssert (∃ (i err:u64), "Hj" ∷ i_ptr ↦[uint64T] #i ∗ "%Hj_ub" ∷ ⌜int.nat i ≤ length clerks⌝ ∗ "Herr" ∷ err_ptr ↦[uint64T] #err ∗ "#Hrest" ∷ □ if (decide (err = (U64 0)%Z)) then (∀ (k:u64) γsrv, ⌜int.nat k < int.nat i⌝ -∗ ⌜server_γs !! (int.nat k) = Some γsrv⌝ -∗ pb_ghost.is_epoch_lb γsrv epoch) else True )%I with "[Hi Herr]" as "Hloop". { iExists _, _. iFrame "∗". iSplitL. { iPureIntro. word. } iModIntro. destruct (decide (_)); last first. { done. } iIntros. exfalso. replace (int.nat 0%Z) with 0 in H by word. word. } iClear "Herrs". wp_forBreak_cond. iNamed "Hloop". wp_pures. wp_load. wp_apply (wp_slice_len). rewrite replicate_length in Herrs_sz. rewrite -Hclerks_sz in Herrs_sz. rewrite app_nil_r in Hlen. clear i Hi_ineq Heqb0. wp_if_destruct. { (* one loop iteration *) wp_pures. wp_load. unfold SliceGet. wp_call. iDestruct (big_sepS_elem_of_acc _ _ i0 with "Hwg_post") as "[HH _]". { set_solver. } assert (int.nat i0 < int.nat errs_sl.(Slice.sz)) by word. iDestruct "HH" as "[%Hbad|HH]". { exfalso. rewrite -Herrs_sz in H. word. } iDestruct "HH" as (??) "(%HbackupLookup & Herr2 & Hpost)". wp_apply (wp_slice_ptr). wp_pure1. iEval (simpl) in "Herr2". iMod (readonly_load with "Herr2") as (?) "Herr3". wp_load. wp_pures. destruct (bool_decide (_)) as [] eqn:Herr; wp_pures. { rewrite bool_decide_eq_true in Herr. replace (err0) with (U64 0%Z) by naive_solver. wp_pures. wp_load; wp_store. iLeft. iModIntro. iSplitL ""; first done. iFrame "∗". iExists _, _. iFrame "Hj Herr". iSplitL "". { iPureIntro. word. } iModIntro. destruct (decide (err = 0%Z)). { iIntros. assert (int.nat k < int.nat i0 ∨ int.nat k = int.nat i0) as [|]. { replace (int.nat (word.add i0 1%Z)) with (int.nat i0 + 1) in * by word. word. } { by iApply "Hrest". } { destruct (decide (_)); last by exfalso. replace (γsrv') with (γsrv); last first. { replace (int.nat i0) with (int.nat k) in * by word. naive_solver. } iDestruct "Hpost" as "#$". } } { done. } } { wp_store. wp_pures. wp_load; wp_store. iLeft. iModIntro. iSplitL ""; first done. iFrame "∗". iExists _, _. iFrame "Hj Herr". destruct (decide (err0 = _)). { exfalso. naive_solver. } iPureIntro. split; last done. word. } } iRight. iModIntro. iSplitL ""; first done. wp_pures. wp_load. wp_pures. (* FIXME: *) (* End copy/paste from apply_proof *) wp_if_destruct. { (* got some error *) wp_load. iApply "HΦ". done. } (* no errors *) replace (int.nat i0) with (length clerks); last first. { word. } destruct (decide (_)); last first. { exfalso. done. } iMod (own_update with "Hconf_unset") as "Hconf_prop". { apply singleton_update. apply cmra_update_exclusive. instantiate (1:=(to_dfrac_agree (DfracOwn 1) ((Some server_γs) : (leibnizO _)))). done. } iMod (own_update with "Hconf_prop") as "Hconf_prop". { apply singleton_update. apply dfrac_agree_persist. } iDestruct "Hconf_prop" as "#Hconf_prop". wp_bind (Clerk__WriteConfig _ _ _). iApply (wp_frame_wand with "[HΦ Hconf_sl Hprop Hclerks_sl]"). { iNamedAccu. } iDestruct (big_sepL2_length with "Hhost") as %Hserver_len_eq. assert (length servers > 0) as Hserver_nz. { assert (length servers ≠ 0 ∨ length servers = 0) as [|Hbad] by word. { word. } { exfalso. rewrite Hbad in Hservers_sz. apply u64_nat_0 in Hservers_sz. 
rewrite Hservers_sz in Heqb. done. } } wp_apply (wp_Clerk__WriteConfig2 with "Hck Hservers_sl [$Hconf_prop] Hhost"). { iPureIntro. word. } { iIntros (?) "%Hlookup". apply elem_of_list_lookup_1 in Hlookup as [i Hlookup]. iDestruct (big_sepS_elem_of_acc _ _ (U64 i) with "Hwg_post") as "[HH _]". { set_solver. } assert (i < length server_γs). { apply lookup_lt_is_Some_1. eexists. done. } replace (length clerks) with (length server_γs) in * by word. assert (int.nat i = i) as Hi. { word. } iDestruct "HH" as "[%Hbad|HP]". { exfalso. word. } unfold P. iDestruct "HP" as (??) "(%Hlookup2 & _ & Hpost)". replace (γsrv') with (γsrv); last first. { rewrite Hi in Hlookup2. rewrite Hlookup in Hlookup2. by inversion Hlookup2. } iSpecialize ("Hrest" $! i γsrv with "[%] [%]"). { word. } { rewrite Hi. done. } iFrame "Hrest". } iModIntro. iIntros (err) "Hpost Hservers_sl". iNamed 1. wp_pures. wp_if_destruct. { (* WriteConfig failed *) iApply "HΦ". done. } (* WriteConfig succeeded *) destruct (decide (_)); last first. { exfalso. done. } iDestruct "Hpost" as "#Hconf". wp_apply (wp_allocStruct). { eauto. } clear args. iIntros (args) "Hargs". assert (0 < length clerks) as Hclerk_lookup. { word. } apply list_lookup_lt in Hclerk_lookup as [primaryCk Hclerk_lookup]. wp_apply (wp_SliceGet with "[$Hclerks_sl]"). { done. } iIntros "Hclerks_sl". wp_pures. iDestruct (big_sepL2_lookup_1_some with "Hclerks_is") as %[γsrv Hlookup2]. { done. } iDestruct (big_sepL2_lookup_acc with "Hclerks_is") as "[HprimaryCk _]". { done. } { done. } iDestruct (struct_fields_split with "Hargs") as "HH". iNamed "HH". (* Get a list of γs just for backups *) destruct (server_γs). { exfalso. rewrite lookup_take /= in Hlookup2; last first. { word. } done. } replace (p) with (γsrv) in *; last first. { rewrite lookup_take /= in Hlookup2; last first. { word. } naive_solver. } iAssert (|={⊤}=> become_primary_escrow γ γsrv epoch σ)%I with "[Hprop]" as ">#Hprimary_escrow". { iMod (inv_alloc with "[Hprop]") as "$". { iNext. iLeft. iFrame "Hprop Hprop_facts2 Hinit". } done. } wp_apply (wp_Clerk__BecomePrimary with "[$HprimaryCk Hconf Hhost Epoch Replicas Hservers_sl]"). { iFrame. instantiate (1:=(pb_marshal_proof.BecomePrimaryArgs.mkC _ _)). simpl. iFrame "#". iSplitR. { iDestruct ("Hrest" with "[%] [%]") as "H". { instantiate (1:=0). word. } { rewrite lookup_take in Hlookup2. { done. } word. } iFrame "H". } iSplitR. { iApply big_sepL2_forall. instantiate (1:=servers). iSplitL; first done. iIntros. iDestruct (big_sepL2_lookup_acc with "Hhost") as "[$ HH]". { done. } { done. } iApply "Hrest"; last first. { iPureIntro. instantiate (1:=k). replace (int.nat k) with (k). { done. } assert (k < length servers). { apply lookup_lt_Some in H. done. } word. } { iPureIntro. apply lookup_lt_Some in H. replace (int.nat k) with (k). { rewrite Hlen. done. } assert (k < length servers). (* FIXME: why do I have to assert this when it's already in context? *) { done. } word. } } { iExists _. iFrame. } } iIntros. wp_pures. iApply "HΦ". done. Qed. End admin_proof.
theory ex2_11 imports Main begin datatype exp = Var | Const int | Add exp exp | Mult exp exp fun eval :: "exp \<Rightarrow> int \<Rightarrow> int" where "eval Var v = v" | "eval (Const i) _ = i" | "eval (Add e1 e2) v = eval e1 v + eval e2 v" | "eval (Mult e1 e2) v = eval e1 v * eval e2 v" fun evalp_sub :: "int list \<Rightarrow> int \<Rightarrow> nat \<Rightarrow> int" where "evalp_sub [] _ _ = 0" | "evalp_sub (p#ps) x ith = p * (x ^ ith) + evalp_sub ps x (Suc ith)" fun evalp :: "int list \<Rightarrow> int \<Rightarrow> int" where "evalp xs v = evalp_sub xs v 0" value "evalp [4,2,-1,3] 2" fun add_p :: "int list \<Rightarrow> int list \<Rightarrow> int list" where "add_p [] ys = ys" | "add_p xs [] = xs" | "add_p (x#xs) (y#ys) = (x+y)#(add_p xs ys)" fun mult_p :: "int list \<Rightarrow> int list \<Rightarrow> int list" where "mult_p [] ys = []" | "mult_p xs [] = []" | "mult_p (x#xs) ys = add_p (map (\<lambda>a. a*x) ys) (mult_p xs (0#ys))" fun coeffs :: "exp \<Rightarrow> int list" where "coeffs Var = [0,1]" | "coeffs (Const i) = [i]" | "coeffs (Add e1 e2) = add_p (coeffs e1) (coeffs e2)" | "coeffs (Mult e1 e2) = mult_p (coeffs e1) (coeffs e2)" value "evalp (coeffs (Mult Var Var)) (-2)" value "eval (Mult Var Var) (-2)" (*----------------------*) lemma add_poly: "evalp_sub (add_p p1 p2) x px = evalp_sub p1 x px + evalp_sub p2 x px" apply (induct p1 p2 arbitrary: px rule: "add_p.induct") apply (auto simp add: algebra_simps) done lemma mult_const: "evalp_sub (map (\<lambda>a. a * const) ps) x px = const * (evalp_sub ps x px)" apply (induct ps arbitrary: const x px) apply (auto simp add: algebra_simps) done lemma unfold_poly: "evalp_sub ps x (Suc px) = x * (evalp_sub ps x px)" apply (induct ps arbitrary: px) apply (auto simp add: algebra_simps) done lemma mult_poly: "evalp_sub (mult_p p1 p2) x 0 = evalp_sub p1 x 0 * evalp_sub p2 x 0" apply (induct p1 p2 rule: mult_p.induct) apply (auto simp add: add_poly mult_const) apply (simp add: algebra_simps) apply (simp add: unfold_poly) done theorem th_2_11: "evalp (coeffs e) x = eval e x" apply(induct e) apply (auto simp add: add_poly mult_poly) done end
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D5_DimensionalityReduction/student/W1D5_Tutorial1.ipynb" target="_parent">Open in Colab</a> &nbsp; <a href="https://kaggle.com/kernels/welcome?src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D5_DimensionalityReduction/student/W1D5_Tutorial1.ipynb" target="_parent">Open in Kaggle</a>

# Tutorial 1: Geometric view of data

**Week 1, Day 5: Dimensionality Reduction**

**By Neuromatch Academy**

__Content creators:__ Alex Cayco Gajic, John Murray

__Content reviewers:__ Roozbeh Farhoudi, Matt Krause, Spiros Chavlis, Richard Gao, Michael Waskom, Siddharth Suresh, Natalie Schaworonkow, Ella Batty

**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**

---
# Tutorial Objectives

*Estimated timing of tutorial: 50 minutes*

In this notebook we'll explore how multivariate data can be represented in different orthonormal bases. This will help us build intuition that will be helpful in understanding PCA in the following tutorial.

Overview:
- Generate correlated multivariate data.
- Define an arbitrary orthonormal basis.
- Project the data onto the new basis.

```python
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/kaq2x/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
```

```python
# @title Video 1: Geometric view of data
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  class BiliVideo(IFrame):
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)

  video = BiliVideo(id="BV1Af4y1R78w", width=854, height=480, fs=1)
  print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
  display(video)

out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id="THu9yHnpq9I", width=854, height=480, fs=1, rel=0)
  print('Video available at https://youtube.com/watch?v=' + video.id)
  display(video)

out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
```

---
# Setup

```python
# Imports
import numpy as np
import matplotlib.pyplot as plt
```

```python
# @title Figure Settings
import ipywidgets as widgets  # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
```

```python
# @title Plotting Functions

def plot_data(X):
  """
  Plots bivariate data. Includes a plot of each random variable, and a scatter
  plot of their joint activity. The title indicates the sample correlation
  calculated from the data.

  Args:
    X (numpy array of floats) : Data matrix each column corresponds to a
                                different random variable

  Returns:
    Nothing.
""" fig = plt.figure(figsize=[8, 4]) gs = fig.add_gridspec(2, 2) ax1 = fig.add_subplot(gs[0, 0]) ax1.plot(X[:, 0], color='k') plt.ylabel('Neuron 1') plt.title('Sample var 1: {:.1f}'.format(np.var(X[:, 0]))) ax1.set_xticklabels([]) ax2 = fig.add_subplot(gs[1, 0]) ax2.plot(X[:, 1], color='k') plt.xlabel('Sample Number') plt.ylabel('Neuron 2') plt.title('Sample var 2: {:.1f}'.format(np.var(X[:, 1]))) ax3 = fig.add_subplot(gs[:, 1]) ax3.plot(X[:, 0], X[:, 1], '.', markerfacecolor=[.5, .5, .5], markeredgewidth=0) ax3.axis('equal') plt.xlabel('Neuron 1 activity') plt.ylabel('Neuron 2 activity') plt.title('Sample corr: {:.1f}'.format(np.corrcoef(X[:, 0], X[:, 1])[0, 1])) plt.show() def plot_basis_vectors(X, W): """ Plots bivariate data as well as new basis vectors. Args: X (numpy array of floats) : Data matrix each column corresponds to a different random variable W (numpy array of floats) : Square matrix representing new orthonormal basis each column represents a basis vector Returns: Nothing. """ plt.figure(figsize=[4, 4]) plt.plot(X[:, 0], X[:, 1], '.', color=[.5, .5, .5], label='Data') plt.axis('equal') plt.xlabel('Neuron 1 activity') plt.ylabel('Neuron 2 activity') plt.plot([0, W[0, 0]], [0, W[1, 0]], color='r', linewidth=3, label='Basis vector 1') plt.plot([0, W[0, 1]], [0, W[1, 1]], color='b', linewidth=3, label='Basis vector 2') plt.legend() plt.show() def plot_data_new_basis(Y): """ Plots bivariate data after transformation to new bases. Similar to plot_data but with colors corresponding to projections onto basis 1 (red) and basis 2 (blue). The title indicates the sample correlation calculated from the data. Note that samples are re-sorted in ascending order for the first random variable. Args: Y (numpy array of floats): Data matrix in new basis each column corresponds to a different random variable Returns: Nothing. 
""" fig = plt.figure(figsize=[8, 4]) gs = fig.add_gridspec(2, 2) ax1 = fig.add_subplot(gs[0, 0]) ax1.plot(Y[:, 0], 'r') plt.xlabel plt.ylabel('Projection \n basis vector 1') plt.title('Sample var 1: {:.1f}'.format(np.var(Y[:, 0]))) ax1.set_xticklabels([]) ax2 = fig.add_subplot(gs[1, 0]) ax2.plot(Y[:, 1], 'b') plt.xlabel('Sample number') plt.ylabel('Projection \n basis vector 2') plt.title('Sample var 2: {:.1f}'.format(np.var(Y[:, 1]))) ax3 = fig.add_subplot(gs[:, 1]) ax3.plot(Y[:, 0], Y[:, 1], '.', color=[.5, .5, .5]) ax3.axis('equal') plt.xlabel('Projection basis vector 1') plt.ylabel('Projection basis vector 2') plt.title('Sample corr: {:.1f}'.format(np.corrcoef(Y[:, 0], Y[:, 1])[0, 1])) plt.show() ``` --- # Section 1: Generate correlated multivariate data ```python # @title Video 2: Multivariate data from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id="BV1xz4y1D7ES", width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id="jcTq2PgU5Vw", width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) ``` This video describes the covariance matrix and the multivariate normal distribution. <details> <summary> <font color='blue'>Click here for text recap of video </font></summary> To gain intuition, we will first use a simple model to generate multivariate data. Specifically, we will draw random samples from a *bivariate normal distribution*. This is an extension of the one-dimensional normal distribution to two dimensions, in which each $x_i$ is marginally normal with mean $\mu_i$ and variance $\sigma_i^2$: \begin{align} x_i \sim \mathcal{N}(\mu_i,\sigma_i^2). \end{align} Additionally, the joint distribution for $x_1$ and $x_2$ has a specified correlation coefficient $\rho$. Recall that the correlation coefficient is a normalized version of the covariance, and ranges between -1 and +1: \begin{align} \rho = \frac{\text{cov}(x_1,x_2)}{\sqrt{\sigma_1^2 \sigma_2^2}}. \end{align} For simplicity, we will assume that the mean of each variable has already been subtracted, so that $\mu_i=0$ for both $i=1$ and $i=2$. The remaining parameters can be summarized in the covariance matrix, which for two dimensions has the following form: \begin{align} {\bf \Sigma} = \begin{pmatrix} \text{var}(x_1) & \text{cov}(x_1,x_2) \\ \text{cov}(x_1,x_2) &\text{var}(x_2) \end{pmatrix}. \end{align} In general, $\bf \Sigma$ is a symmetric matrix with the variances $\text{var}(x_i) = \sigma_i^2$ on the diagonal, and the covariances on the off-diagonal. Later, we will see that the covariance matrix plays a key role in PCA. </details> ## Coding Exercise 1: Draw samples from a distribution We have provided code to draw random samples from a zero-mean bivariate normal distribution with a specified covariance matrix (`get_data`). Throughout this tutorial, we'll imagine these samples represent the activity (firing rates) of two recorded neurons on different trials. 
Fill in the function below to calculate the covariance matrix given the desired variances and correlation coefficient. The covariance can be found by rearranging the equation above: \begin{align} \text{cov}(x_1,x_2) = \rho \sqrt{\sigma_1^2 \sigma_2^2}. \end{align} ```python # @markdown Execute this cell to get helper function `get_data` def get_data(cov_matrix): """ Returns a matrix of 1000 samples from a bivariate, zero-mean Gaussian. Note that samples are sorted in ascending order for the first random variable Args: cov_matrix (numpy array of floats): desired covariance matrix Returns: (numpy array of floats) : samples from the bivariate Gaussian, with each column corresponding to a different random variable """ mean = np.array([0, 0]) X = np.random.multivariate_normal(mean, cov_matrix, size=1000) indices_for_sorting = np.argsort(X[:, 0]) X = X[indices_for_sorting, :] return X help(get_data) ``` ```python def calculate_cov_matrix(var_1, var_2, corr_coef): """ Calculates the covariance matrix based on the variances and correlation coefficient. Args: var_1 (scalar) : variance of the first random variable var_2 (scalar) : variance of the second random variable corr_coef (scalar) : correlation coefficient Returns: (numpy array of floats) : covariance matrix """ ################################################# ## TODO for students: calculate the covariance matrix # Fill out function and remove raise NotImplementedError("Student exercise: calculate the covariance matrix!") ################################################# # Calculate the covariance from the variances and correlation cov = ... cov_matrix = np.array([[var_1, cov], [cov, var_2]]) return cov_matrix # Set parameters np.random.seed(2020) # set random seed variance_1 = 1 variance_2 = 1 corr_coef = 0.8 # Compute covariance matrix cov_matrix = calculate_cov_matrix(variance_1, variance_2, corr_coef) # Generate data with this covariance matrix X = get_data(cov_matrix) # Visualize plot_data(X) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial1_Solution_85104841.py) *Example output:* ## Interactive Demo 1: Correlation effect on data We'll use the function you just completed, but now we can change the correlation coefficient via a slider. You should get a feel for how changing the correlation coefficient affects the geometry of the simulated data. 1. What effect do negative correlation coefficient values have? 2. What correlation coefficient results in a circular data cloud? Note that we sort the samples according to neuron 1's firing rate, meaning the plot of neuron 1 firing rate over sample number looks clean and pretty unchanging when compared to neuron 2.
```python # @markdown Execute this cell to enable widget def _calculate_cov_matrix(var_1, var_2, corr_coef): # Calculate the covariance from the variances and correlation cov = corr_coef * np.sqrt(var_1 * var_2) cov_matrix = np.array([[var_1, cov], [cov, var_2]]) return cov_matrix @widgets.interact(corr_coef = widgets.FloatSlider(value=.2, min=-1, max=1, step=0.1)) def visualize_correlated_data(corr_coef=0): variance_1 = 1 variance_2 = 1 # Compute covariance matrix cov_matrix = _calculate_cov_matrix(variance_1, variance_2, corr_coef) # Generate data with this covariance matrix X = get_data(cov_matrix) # Visualize plot_data(X) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial1_Solution_4413477e.py) --- # Section 2: Define a new orthonormal basis *Estimated timing to here from start of tutorial: 20 min* ```python # @title Video 3: Orthonormal bases from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id="BV1wT4y1E71g", width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id="PC1RZELnrIg", width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) ``` This video shows that data can be represented in many ways using different bases. It also explains how to check if your favorite basis is orthonormal. <details> <summary> <font color='blue'>Click here for text recap of video </font></summary> Next, we will define a new orthonormal basis of vectors ${\bf u} = [u_1,u_2]$ and ${\bf w} = [w_1,w_2]$. As we learned in the video, two vectors are orthonormal if: 1. They are orthogonal (i.e., their dot product is zero): \begin{align} {\bf u\cdot w} = u_1 w_1 + u_2 w_2 = 0 \end{align} 2. They have unit length: \begin{align} ||{\bf u} || = ||{\bf w} || = 1 \end{align} </details> In two dimensions, it is easy to make an arbitrary orthonormal basis. All we need is a random vector ${\bf u}$, which we first normalize. If we now define the second basis vector to be ${\bf w} = [-u_2,u_1]$, we can check that both conditions are satisfied: \begin{align} {\bf u\cdot w} = - u_1 u_2 + u_2 u_1 = 0 \end{align} and \begin{align} {|| {\bf w} ||} = \sqrt{(-u_2)^2 + u_1^2} = \sqrt{u_1^2 + u_2^2} = 1, \end{align} where we used the fact that ${\bf u}$ is normalized. So, with an arbitrary input vector, we can define an orthonormal basis, which we will write in matrix form by stacking the basis vectors horizontally: \begin{align} {{\bf W} } = \begin{pmatrix} u_1 & w_1 \\ u_2 & w_2 \end{pmatrix}. \end{align} ## Coding Exercise 2: Find an orthonormal basis In this exercise you will fill in the function below to define an orthonormal basis, given a single arbitrary 2-dimensional vector as an input. **Steps** * Modify the function `define_orthonormal_basis` to normalize the first basis vector $\bf u$. * Then complete the function by finding a basis vector $\bf w$ that is orthogonal to $\bf u$.
* Test the function using initial basis vector ${\bf u} = [3,1]$. Plot the resulting basis vectors on top of the data scatter plot using the function `plot_basis_vectors`. (For the data, use $\sigma_1^2 =1$, $\sigma_2^2 =1$, and $\rho = .8$). ```python def define_orthonormal_basis(u): """ Calculates an orthonormal basis given an arbitrary vector u. Args: u (numpy array of floats) : arbitrary 2-dimensional vector used for new basis Returns: (numpy array of floats) : new orthonormal basis columns correspond to basis vectors """ ################################################# ## TODO for students: calculate the orthonormal basis # Fill out function and remove raise NotImplementedError("Student exercise: implement the orthonormal basis function") ################################################# # Normalize vector u u = ... # Calculate vector w that is orthogonal to u w = ... # Put in matrix form W = np.column_stack([u, w]) return W # Set up parameters np.random.seed(2020) # set random seed variance_1 = 1 variance_2 = 1 corr_coef = 0.8 u = np.array([3, 1]) # Compute covariance matrix cov_matrix = calculate_cov_matrix(variance_1, variance_2, corr_coef) # Generate data X = get_data(cov_matrix) # Get orthonormal basis W = define_orthonormal_basis(u) # Visualize plot_basis_vectors(X, W) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial1_Solution_25e1d102.py) *Example output:* --- # Section 3: Project data onto new basis *Estimated timing to here from start of tutorial: 35 min* ```python # @title Video 4: Change of basis from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id="BV1LK411J7NQ", width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id="Mj6BRQPKKUc", width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) ``` Finally, we will express our data in the new basis that we have just found. Since $\bf W$ is orthonormal, we can project the data onto our new basis using simple matrix multiplication: \begin{align} {\bf Y = X W}. \end{align} We will explore the geometry of the transformed data $\bf Y$ as we vary the choice of basis. ## Coding Exercise 3: Change to orthonormal basis In this exercise you will fill in the function below to change data to an orthonormal basis. **Steps** * Complete the function `change_of_basis` to project the data onto the new basis. * Plot the projected data using the function `plot_data_new_basis`. * What happens to the correlation coefficient in the new basis? Does it increase or decrease? * What happens to variance? ```python def change_of_basis(X, W): """ Projects data onto new basis W.
Args: X (numpy array of floats) : Data matrix each column corresponding to a different random variable W (numpy array of floats) : new orthonormal basis columns correspond to basis vectors Returns: (numpy array of floats) : Data matrix expressed in new basis """ ################################################# ## TODO for students: project the data onto the new basis W # Fill out function and remove raise NotImplementedError("Student exercise: implement change of basis") ################################################# # Project data onto new basis described by W Y = ... return Y # Project data to new basis Y = change_of_basis(X, W) # Visualize plot_data_new_basis(Y) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial1_Solution_80a5f41b.py) *Example output:* ## Interactive Demo 3: Play with the basis vectors To see what happens to the correlation as we change the basis vectors, run the cell below. The parameter $\theta$ controls the angle of $\bf u$ in degrees. Use the slider to rotate the basis vectors. 1. What happens to the projected data as you rotate the basis? 2. How does the correlation coefficient change? How does the variance of the projection onto each basis vector change? 3. Are you able to find a basis in which the projected data is **uncorrelated**? ```python # @markdown Make sure you execute this cell to enable the widget! def refresh(theta=0): u = np.array([1, np.tan(theta * np.pi / 180)]) W = define_orthonormal_basis(u) Y = change_of_basis(X, W) plot_basis_vectors(X, W) plot_data_new_basis(Y) _ = widgets.interact(refresh, theta=(0, 90, 5)) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial1_Solution_993c2f00.py) --- # Summary *Estimated timing of tutorial: 50 minutes* - In this tutorial, we learned that multivariate data can be visualized as a cloud of points in a high-dimensional vector space. The geometry of this cloud is shaped by the covariance matrix. - Multivariate data can be represented in a new orthonormal basis using the dot product. These new basis vectors correspond to specific mixtures of the original variables - for example, in neuroscience, they could represent different ratios of activation across a population of neurons. - The projected data (after transforming into the new basis) will generally have a different geometry from the original data. In particular, taking basis vectors that are aligned with the spread of the cloud of points decorrelates the data. * These concepts - covariance, projections, and orthonormal bases - are key for understanding PCA, which will be our focus in the next tutorial. --- # Notation \begin{align} x_i &\quad \text{data point for dimension } i\\ \mu_i &\quad \text{mean along dimension } i\\ \sigma_i^2 &\quad \text{variance along dimension } i \\ \bf u, \bf w &\quad \text{orthonormal basis vectors}\\ \rho &\quad \text{correlation coefficient}\\ \bf \Sigma &\quad \text{covariance matrix}\\ \bf X &\quad \text{original data matrix}\\ \bf W &\quad \text{projection matrix}\\ \bf Y &\quad \text{transformed data}\\ \end{align}
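As a bridge to the next tutorial, here is an optional sketch (not part of the original exercises; it assumes `X` is the data matrix generated above) showing that choosing the eigenvectors of the sample covariance matrix as the basis $\bf W$ decorrelates the projections, which is precisely what PCA does:

```python
import numpy as np

# Columns of evecs are orthonormal eigenvectors of the sample covariance
evals, evecs = np.linalg.eigh(np.cov(X.T))

Y = X @ evecs  # project onto the eigenvector basis
print(np.corrcoef(Y[:, 0], Y[:, 1])[0, 1])  # approximately zero: decorrelated
```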
# Test basic methods @testset "Basics" begin # initialize model and point variable m = InfiniteModel() @infinite_parameter(m, a in [0, 1]) @infinite_parameter(m, b[1:2] in [0, 1], independent = true) @infinite_parameter(m, c[1:2] in [0, 1]) @variable(m, ivref, Infinite(a, b, c)) num = Float64(0) info = VariableInfo(false, num, false, num, false, num, false, num, false, false) new_info = VariableInfo(true, 0., true, 0., true, 0., true, 0., true, false) var = PointVariable(info, ivref, [0., 0., 1., 1., 0.]) object = VariableData(var, "var") idx = PointVariableIndex(1) vref = PointVariableRef(m, idx) gvref = GeneralVariableRef(m, 1, PointVariableIndex) # JuMP.owner_model @testset "JuMP.owner_model" begin @test owner_model(vref) === m @test owner_model(gvref) === m end # JuMP.index @testset "JuMP.index" begin @test index(vref) == idx @test index(gvref) == idx end # dispatch_variable_ref @testset "dispatch_variable_ref" begin @test dispatch_variable_ref(m, idx) == vref @test dispatch_variable_ref(gvref) == vref end # _add_data_object @testset "_add_data_object" begin @test InfiniteOpt._add_data_object(m, object) == idx end # _data_dictionary @testset "_data_dictionary" begin @test InfiniteOpt._data_dictionary(m, PointVariable) === m.point_vars @test InfiniteOpt._data_dictionary(vref) === m.point_vars @test InfiniteOpt._data_dictionary(gvref) === m.point_vars end # JuMP.is_valid @testset "JuMP.is_valid" begin @test is_valid(m, vref) @test is_valid(m, gvref) end # _data_object @testset "_data_object" begin @test InfiniteOpt._data_object(vref) === object @test InfiniteOpt._data_object(gvref) === object end # _core_variable_object @testset "_core_variable_object" begin @test InfiniteOpt._core_variable_object(vref) === var @test InfiniteOpt._core_variable_object(gvref) === var end # _set_core_variable_object @testset "_set_core_variable_object" begin @test InfiniteOpt._set_core_variable_object(vref, var) isa Nothing end @testset "_variable_info" begin @test InfiniteOpt._variable_info(vref) == info end # _update_variable_info @testset "_update_variable_info" begin @test isa(InfiniteOpt._update_variable_info(vref, new_info), Nothing) @test InfiniteOpt._variable_info(vref) == new_info end # _measure_dependencies @testset "_measure_dependencies" begin @test InfiniteOpt._measure_dependencies(vref) == MeasureIndex[] @test InfiniteOpt._measure_dependencies(gvref) == MeasureIndex[] end # _constraint_dependencies @testset "_constraint_dependencies" begin @test InfiniteOpt._constraint_dependencies(vref) == InfOptConstraintIndex[] @test InfiniteOpt._constraint_dependencies(gvref) == InfOptConstraintIndex[] end # JuMP.name @testset "JuMP.name" begin @test name(vref) == "var" @test name(gvref) == "var" end # infinite_variable_ref @testset "infinite_variable_ref" begin @test infinite_variable_ref(vref) == ivref @test infinite_variable_ref(gvref) == ivref end # raw_parameter_values @testset "raw_parameter_values" begin @test raw_parameter_values(vref) == Float64[0., 0., 1., 1., 0.] @test raw_parameter_values(gvref) == Float64[0., 0., 1., 1., 0.] 
end # parameter_values @testset "parameter_values" begin @test parameter_values(vref) == (Float64(0), Float64[0, 1], Float64[1, 0]) @test parameter_values(gvref) == (Float64(0), Float64[0, 1], Float64[1, 0]) end # _update_variable_param_values @testset "_update_variable_param_values" begin @test isa(InfiniteOpt._update_variable_param_values(vref, ones(5)), Nothing) @test raw_parameter_values(vref) == ones(5) @test isa(InfiniteOpt._update_variable_param_values(vref, [0., 0., 1., 1., 0.]), Nothing) end # test parameter_refs @testset "parameter_refs" begin @test parameter_refs(vref) == () @test parameter_refs(gvref) == () end # JuMP.set_name @testset "JuMP.set_name" begin # test normal @test isa(set_name(vref, "new"), Nothing) @test name(vref) == "new" # test default @test isa(set_name(gvref, "a"), Nothing) @test name(vref) == "a" end # _make_variable_ref @testset "_make_variable_ref" begin @test InfiniteOpt._make_variable_ref(m, idx) == gvref end # _var_name_dict @testset "_var_name_dict" begin @test InfiniteOpt._var_name_dict(m) isa Nothing end # _update_var_name_dict @testset "_update_var_name_dict" begin m.name_to_var = Dict{String, ObjectIndex}() dict = InfiniteOpt._data_dictionary(vref) @test InfiniteOpt._update_var_name_dict(m, dict) isa Nothing @test InfiniteOpt._var_name_dict(m) == Dict(name(vref) => idx) m.name_to_var = nothing end # JuMP.variable_by_name @testset "JuMP.variable_by_name" begin # test normal @test variable_by_name(m, "a") == gvref @test variable_by_name(m, "test") isa Nothing # prepare variable with same name idx2 = PointVariableIndex(2) @test InfiniteOpt._add_data_object(m, object) == idx2 vref2 = PointVariableRef(m, idx2) @test set_name(vref2, "a") isa Nothing # test multiple name error @test_throws ErrorException variable_by_name(m, "a") end # _delete_data_object @testset "_delete_data_object" begin @test InfiniteOpt._delete_data_object(vref) isa Nothing @test length(InfiniteOpt._data_dictionary(vref)) == 1 @test !is_valid(m, vref) end end # Test variable definition methods @testset "Definition" begin # initialize model and infinite variables m = InfiniteModel() @infinite_parameter(m, pref in [0, 1]) @infinite_parameter(m, pref2 in [0, 1]) @infinite_parameter(m, prefs[1:2] in [0, 1]) num = Float64(0) info = VariableInfo(false, num, false, num, false, num, false, num, false, false) info2 = VariableInfo(true, num, true, num, true, num, true, num, true, false) info3 = VariableInfo(true, num, true, num, true, num, true, num, false, true) @variable(m, ivref, Infinite(pref, pref2)) @variable(m, ivref2, Infinite(pref, prefs)) divref = dispatch_variable_ref(ivref) divref2 = dispatch_variable_ref(ivref2) # test Point @testset "Point{V, T}" begin @test Point(ivref, [0, 0]).infinite_variable_ref == ivref @test Point(ivref, [0, 0]).parameter_values.values == [0, 0] end # _check_tuple_shape @testset "_check_tuple_shape" begin # test normal @test isa(InfiniteOpt._check_tuple_shape(error, divref, IC.VectorTuple(0.5, 0.5)), Nothing) # prepare param value tuple tuple = IC.VectorTuple(0.5, [0.5, 0.5]) # test normal with array @test isa(InfiniteOpt._check_tuple_shape(error, divref2, tuple), Nothing) # test for errors in shape @test_throws ErrorException InfiniteOpt._check_tuple_shape(error, divref, IC.VectorTuple(0.5,)) @test_throws ErrorException InfiniteOpt._check_tuple_shape(error, divref, IC.VectorTuple(0.5, [0.5])) @test_throws ErrorException InfiniteOpt._check_tuple_shape(error, divref2, IC.VectorTuple(0.5, 0.5)) tuple = IC.VectorTuple(0.5, [0.5, 0.5, 0.5]) @test_throws
ErrorException InfiniteOpt._check_tuple_shape(error, divref2, tuple) end # _check_element_support @testset "_check_element_support (IndependentParameterRef)" begin # test normal ps = [dispatch_variable_ref(pref)] vals = Float64[0, 2] @test InfiniteOpt._check_element_support(error, ps, vals, 1) == 2 # test error ps = [dispatch_variable_ref(pref), dispatch_variable_ref(pref2)] @test_throws ErrorException InfiniteOpt._check_element_support(error, ps, vals, 1) end # _check_element_support @testset "_check_element_support (DependentParameterRef)" begin # test normal ps = dispatch_variable_ref.(prefs) vals = Float64[0, 1, 0, 1] @test InfiniteOpt._check_element_support(error, ps, vals, 3) == 5 # test error vals = Float64[0, 0, 2, 1] @test_throws ErrorException InfiniteOpt._check_element_support(error, ps, vals, 3) end # _check_tuple_values @testset "_check_tuple_values" begin # test normal @test isa(InfiniteOpt._check_tuple_values(error, divref, Float64[0.5, 0.5]), Nothing) # test normal with array vals = Float64[0, 0.5, 1] @test isa(InfiniteOpt._check_tuple_values(error, divref2, vals), Nothing) # test for out of bound errors @test_throws ErrorException InfiniteOpt._check_tuple_values(error, divref, Float64[0, 2]) vals = Float64[0, 2, 1] @test_throws ErrorException InfiniteOpt._check_tuple_values(error, divref2, vals) end # _update_point_info @testset "_update_point_info" begin basic_func = (a::Vector) -> 1 # prepare info for test new_info = VariableInfo(true, 0., true, 0., false, 0., false, basic_func, true, false) InfiniteOpt._update_variable_info(divref, new_info) expected = VariableInfo{Float64, Float64, Float64, Float64}(true, 0., true, 0., false, 0., false, 0., true, false) # test with current info @test InfiniteOpt._update_point_info(info, divref, Float64[0, 0]) == expected # prepare info for test new_info = VariableInfo(false, 0., false, 0., true, 0., true, basic_func, false, true) InfiniteOpt._update_variable_info(divref, new_info) expected = VariableInfo{Float64, Float64, Float64, Float64}(false, 0., false, 0., true, 0., true, 1, false, true) # test with current info @test InfiniteOpt._update_point_info(info, divref, Float64[0, 0]) == expected # prepare info for test curr_info = VariableInfo(true, 0., true, 0., false, 0., true, 0., true, false) # test with current info @test InfiniteOpt._update_point_info(curr_info, divref, Float64[0, 0]) == curr_info # undo info changes basic_func = (a::Vector) -> NaN old_info = VariableInfo(false, NaN, false, NaN, false, NaN, false, basic_func, false, false) InfiniteOpt._update_variable_info(divref, old_info) # test with user defined start function @test set_start_value_function(divref, (a, b) -> a + b) isa Nothing expected = VariableInfo{Float64, Float64, Float64, Float64}(false, 0., false, 0., false, 0., true, 3, false, false) @test InfiniteOpt._update_point_info(info, divref, Float64[1, 2]) == expected @test reset_start_value_function(divref) isa Nothing end # build_variable @testset "JuMP.build_variable" begin # test for all errors @test_throws ErrorException build_variable(error, info, Point(2)) @test_throws ErrorException build_variable(error, info, Point(pref, 0)) @test_throws ErrorException build_variable(error, info, Point(ivref)) @test_throws ErrorException build_variable(error, info, Point(ivref, "d")) @test_throws ErrorException build_variable(error, info, Point(ivref, 0.5, 0.5), bad = 42) # test a variety of builds @test build_variable(error, info, Point(ivref, 0.5, 0.5)).infinite_variable_ref == ivref @test build_variable(error, info, 
Point(ivref, 0.5, 0.5)).parameter_values == [0.5, 0.5] @test build_variable(error, info, Point(ivref, 0.5, 0.5)).info == info @test_throws ErrorException build_variable(error, info, Point(ivref, 0.5, 2)) @test build_variable(error, info, Point(ivref2, 0.5, [0, 0])).infinite_variable_ref == ivref2 @test build_variable(error, info, Point(ivref2, 0.5, [0, 0])).parameter_values == [0.5, 0, 0] @test_throws ErrorException build_variable(error, info, Point(ivref2, 0.5, [0, 0, 0])) end # _add_point_support @testset "_add_point_support (IndependentParameterRef)" begin # test normal ps = [dispatch_variable_ref(pref)] vals = Float64[0, 2] @test InfiniteOpt._add_point_support(ps, vals, 1) == 2 @test supports(pref, label = UserDefined) == [0] @test delete_supports(pref) isa Nothing # test other ps = [dispatch_variable_ref(pref), dispatch_variable_ref(pref2)] vals = Float64[0, 1] @test InfiniteOpt._add_point_support(ps, vals, 1) == 3 @test supports(pref, label = UserDefined) == [0] @test supports(pref2, label = UserDefined) == [1] @test delete_supports(pref) isa Nothing @test delete_supports(pref2) isa Nothing end # _add_point_support @testset "_add_point_support (DependentParameterRef)" begin # test normal ps = dispatch_variable_ref.(prefs) vals = Float64[0, 0] @test InfiniteOpt._add_point_support(ps, vals, 1) == 3 @test supports(prefs, label = UserDefined) == zeros(2, 1) @test delete_supports(prefs) isa Nothing end # _update_param_supports @testset "_update_param_supports" begin # test normal @test isa(InfiniteOpt._update_param_supports(divref, Float64[0.5, 1]), Nothing) @test supports(pref) == [0.5] @test supports(pref2) == [1] # prepare array tuple tuple = IC.VectorTuple(0.5, [0, 1]) # test normal with array @test isa(InfiniteOpt._update_param_supports(divref2, Float64[0.5, 0, 1]), Nothing) @test supports(pref) == [0.5] @test supports(prefs[1]) == [0] @test supports(prefs[2]) == [1] @test delete_supports(pref) isa Nothing @test delete_supports(prefs) isa Nothing end # _update_infinite_point_mapping @testset "_update_infinite_point_mapping" begin # test first addition idx1 = PointVariableIndex(12) pvref = PointVariableRef(m, idx1) @test isa(InfiniteOpt._update_infinite_point_mapping(pvref, divref), Nothing) @test InfiniteOpt._point_variable_dependencies(ivref) == [idx1] # test second addition idx2 = PointVariableIndex(42) pvref = PointVariableRef(m, idx2) @test isa(InfiniteOpt._update_infinite_point_mapping(pvref, divref), Nothing) @test InfiniteOpt._point_variable_dependencies(ivref) == [idx1, idx2] # undo changes empty!(InfiniteOpt._point_variable_dependencies(ivref)) end # _check_and_make_variable_ref @testset "_check_and_make_variable_ref" begin # prepare secondary model and infinite variable m2 = InfiniteModel() @infinite_parameter(m2, pref3 in [0, 1]) @variable(m2, ivref3, Infinite(pref3)) v = build_variable(error, info, Point(ivref3, 0.5)) # test for invalid variable error @test_throws VariableNotOwned{InfiniteVariableRef} InfiniteOpt._check_and_make_variable_ref(m, v, "") # test normal v = build_variable(error, info, Point(ivref3, 0)) idx = PointVariableIndex(1) vref = PointVariableRef(m2, idx) @test InfiniteOpt._check_and_make_variable_ref(m2, v, "") == vref @test supports(pref3) == [0] @test InfiniteOpt._point_variable_dependencies(ivref3) == [idx] end # add_variable @testset "JuMP.add_variable" begin # prepare secondary model and infinite variable m2 = InfiniteModel() @infinite_parameter(m2, pref3 in [0, 1]) @variable(m2, ivref3, Infinite(pref3)) v = build_variable(error, info, 
Point(ivref3, 0.5)) # test for invalid variable error @test_throws VariableNotOwned{InfiniteVariableRef} add_variable(m, v) # test normal v = build_variable(error, info, Point(ivref, 0, 1)) idx = PointVariableIndex(1) vref = PointVariableRef(m, idx) gvref = InfiniteOpt._make_variable_ref(m, idx) @test add_variable(m, v, "name") == gvref @test haskey(InfiniteOpt._data_dictionary(vref), idx) @test supports(pref) == [0] @test supports(pref2) == [1] @test name(vref) == "name" @test InfiniteOpt._point_variable_dependencies(ivref) == [idx] # prepare infinite variable with all the possible info additions v = build_variable(error, info2, Point(ivref, 0, 1)) # test info addition functions idx = PointVariableIndex(2) vref = PointVariableRef(m, idx) gvref = InfiniteOpt._make_variable_ref(m, idx) @test add_variable(m, v, "name") == gvref @test !optimizer_model_ready(m) # lower bound cindex = InfOptConstraintIndex(1) cref = InfOptConstraintRef(m, cindex) @test has_lower_bound(vref) @test InfiniteOpt._lower_bound_index(vref) == cindex @test constraint_object(cref) isa ScalarConstraint{GeneralVariableRef, MOI.GreaterThan{Float64}} @test InfiniteOpt._data_object(cref).is_info_constraint # upper bound cindex = InfOptConstraintIndex(2) cref = InfOptConstraintRef(m, cindex) @test has_upper_bound(vref) @test InfiniteOpt._upper_bound_index(vref) == cindex @test constraint_object(cref) isa ScalarConstraint{GeneralVariableRef, MOI.LessThan{Float64}} @test InfiniteOpt._data_object(cref).is_info_constraint # fix cindex = InfOptConstraintIndex(3) cref = InfOptConstraintRef(m, cindex) @test has_upper_bound(vref) @test is_fixed(vref) @test InfiniteOpt._fix_index(vref) == cindex @test constraint_object(cref) isa ScalarConstraint{GeneralVariableRef, MOI.EqualTo{Float64}} @test InfiniteOpt._data_object(cref).is_info_constraint # binary cindex = InfOptConstraintIndex(4) cref = InfOptConstraintRef(m, cindex) @test has_upper_bound(vref) @test is_binary(vref) @test InfiniteOpt._binary_index(vref) == cindex @test constraint_object(cref) isa ScalarConstraint{GeneralVariableRef, MOI.ZeroOne} @test InfiniteOpt._data_object(cref).is_info_constraint @test InfiniteOpt._constraint_dependencies(vref) == [InfOptConstraintIndex(i) for i = 1:4] # prepare infinite variable with integer info addition v = build_variable(error, info3, Point(ivref, 0, 1)) # test integer addition functions idx = PointVariableIndex(3) vref = PointVariableRef(m, idx) gvref = InfiniteOpt._make_variable_ref(m, idx) @test add_variable(m, v, "name") == gvref @test !optimizer_model_ready(m) cindex = InfOptConstraintIndex(8) cref = InfOptConstraintRef(m, cindex) @test has_upper_bound(vref) @test is_integer(vref) @test InfiniteOpt._integer_index(vref) == cindex @test constraint_object(cref) isa ScalarConstraint{GeneralVariableRef, MOI.Integer} @test InfiniteOpt._data_object(cref).is_info_constraint @test InfiniteOpt._constraint_dependencies(vref) == [InfOptConstraintIndex(i) for i = 5:8] end end # Test the point variable macro @testset "Macro Definition" begin # initialize model, parameters, and infinite variables m = InfiniteModel() @infinite_parameter(m, t in [0, 1]) @infinite_parameter(m, x[1:2] in [-1, 1]) @variable(m, 0 <= z <= 1, Infinite(t, x), Int, start = (a, x) -> a + sum(x)) @variable(m, z2[1:2] == 3, Infinite(t)) # test single variable definition @testset "Single" begin # test simple anon case idx = PointVariableIndex(1) vref = PointVariableRef(m, idx) gvref = InfiniteOpt._make_variable_ref(m, idx) @test @variable(m, variable_type = Point(z, 0, [0, 0])) 
== gvref @test infinite_variable_ref(vref) == z @test parameter_values(vref) == (0, [0, 0]) @test is_integer(vref) @test lower_bound(vref) == 0 @test start_value(vref) == 0 # test anon with changes to fixed idx = PointVariableIndex(2) vref = PointVariableRef(m, idx) gvref = InfiniteOpt._make_variable_ref(m, idx) @test @variable(m, variable_type = Point(z, 0, [0, 0]), lower_bound = -5, binary = true) == gvref @test infinite_variable_ref(vref) == z @test parameter_values(vref) == (0, [0, 0]) @test !is_integer(vref) @test is_binary(vref) @test lower_bound(vref) == -5 # test regular with alias idx = PointVariableIndex(3) vref = PointVariableRef(m, idx) gvref = InfiniteOpt._make_variable_ref(m, idx) @test @variable(m, z0, Point(z, 0, [0, 0]), Bin) == gvref @test infinite_variable_ref(vref) == z @test parameter_values(vref) == (0, [0, 0]) @test is_binary(vref) @test lower_bound(vref) == 0 @test name(vref) == "z0" # test regular with semi anon idx = PointVariableIndex(4) vref = PointVariableRef(m, idx) gvref = InfiniteOpt._make_variable_ref(m, idx) @test @variable(m, variable_type = Point(z, 0, [0, 0]), base_name = "z0", binary = true) == gvref @test infinite_variable_ref(vref) == z @test parameter_values(vref) == (0, [0, 0]) @test is_binary(vref) @test lower_bound(vref) == 0 @test name(vref) == "z0" end # test array variable definition @testset "Array" begin # test anon array with one infvar idxs = [PointVariableIndex(5), PointVariableIndex(6)] vrefs = [PointVariableRef(m, idx) for idx in idxs] gvrefs = [InfiniteOpt._make_variable_ref(m, idx) for idx in idxs] @test @variable(m, [1:2], Point(z, 0, [0, 0])) == gvrefs @test infinite_variable_ref(vrefs[1]) == z @test parameter_values(vrefs[2]) == (0, [0, 0]) @test is_integer(vrefs[1]) @test lower_bound(vrefs[2]) == 0 # test anon array with different inf vars idxs = [PointVariableIndex(7), PointVariableIndex(8)] vrefs = [PointVariableRef(m, idx) for idx in idxs] gvrefs = [InfiniteOpt._make_variable_ref(m, idx) for idx in idxs] @test @variable(m, [i = 1:2], Point(z2[i], 0)) == gvrefs @test infinite_variable_ref(vrefs[1]) == z2[1] @test infinite_variable_ref(vrefs[2]) == z2[2] @test parameter_values(vrefs[2]) == (0,) @test fix_value(vrefs[2]) == 3 @test name(vrefs[1]) == "" # test array with same infvar idxs = [PointVariableIndex(9), PointVariableIndex(10)] vrefs = [PointVariableRef(m, idx) for idx in idxs] gvrefs = [InfiniteOpt._make_variable_ref(m, idx) for idx in idxs] @test @variable(m, a[1:2], Point(z, 0, [0, 0]), Bin) == gvrefs @test infinite_variable_ref(vrefs[1]) == z @test parameter_values(vrefs[2]) == (0, [0, 0]) @test is_binary(vrefs[1]) @test lower_bound(vrefs[2]) == 0 @test name(vrefs[1]) == "a[1]" # test array with different infvars idxs = [PointVariableIndex(11), PointVariableIndex(12)] vrefs = [PointVariableRef(m, idx) for idx in idxs] gvrefs = [InfiniteOpt._make_variable_ref(m, idx) for idx in idxs] @test @variable(m, b[i = 1:2] >= -5, Point(z2[i], 0)) == gvrefs @test infinite_variable_ref(vrefs[1]) == z2[1] @test infinite_variable_ref(vrefs[2]) == z2[2] @test parameter_values(vrefs[2]) == (0,) @test lower_bound(vrefs[2]) == -5 @test name(vrefs[1]) == "b[1]" # test semi anon array idxs = [PointVariableIndex(13), PointVariableIndex(14)] vrefs = [PointVariableRef(m, idx) for idx in idxs] gvrefs = [InfiniteOpt._make_variable_ref(m, idx) for idx in idxs] @test @variable(m, [i = 1:2], Point(z2[i], 0), lower_bound = -5) == gvrefs @test infinite_variable_ref(vrefs[1]) == z2[1] @test infinite_variable_ref(vrefs[2]) == z2[2] @test
lower_bound(vrefs[2]) == -5 @test name(vrefs[1]) == "" end # test errors @testset "Errors" begin # test other syntaxes @test_macro_throws ErrorException @variable(m, w, Point(z2[1], 0), bad = 1) # test redefinition catch @test_macro_throws ErrorException @variable(m, z0, Point(z, 0, [0, 0])) end # test the deprecations @testset "@point_variable" begin @test_macro_throws ErrorException @point_variable(m, z2[1](0)) end end # test usage methods @testset "Usage" begin # initialize model and stuff m = InfiniteModel() @infinite_parameter(m, t in [0, 1]) @infinite_parameter(m, x[1:2] in [-1, 1]) @variable(m, y, Infinite(t, x)) @variable(m, y0, Point(y, 0, [0, 0])) vref = dispatch_variable_ref(y0) # test used_by_measure @testset "used_by_measure" begin @test !used_by_measure(vref) push!(InfiniteOpt._measure_dependencies(vref), MeasureIndex(1)) @test used_by_measure(y0) @test used_by_measure(vref) empty!(InfiniteOpt._measure_dependencies(vref)) end # test used_by_constraint @testset "used_by_constraint" begin @test !used_by_constraint(vref) push!(InfiniteOpt._constraint_dependencies(vref), InfOptConstraintIndex(1)) @test used_by_constraint(y0) @test used_by_constraint(vref) empty!(InfiniteOpt._constraint_dependencies(vref)) end # test used_by_objective @testset "used_by_objective" begin @test !used_by_objective(y0) @test !used_by_objective(vref) InfiniteOpt._data_object(vref).in_objective = true @test used_by_objective(vref) InfiniteOpt._data_object(vref).in_objective = false end # test is_used @testset "is_used" begin # test not used @test !is_used(vref) # test used by constraint and/or measure push!(InfiniteOpt._constraint_dependencies(vref), InfOptConstraintIndex(1)) @test is_used(y0) empty!(InfiniteOpt._constraint_dependencies(vref)) # test used by objective InfiniteOpt._data_object(vref).in_objective = true @test is_used(vref) end end
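# Usage sketch (illustrative only; it mirrors the API already exercised by the
# tests above): a point variable evaluates an infinite variable at fixed
# parameter values.
#
#   m = InfiniteModel()
#   @infinite_parameter(m, t in [0, 1])
#   @variable(m, y, Infinite(t))
#   @variable(m, y0, Point(y, 0))  # y evaluated at t = 0
#   parameter_values(y0)           # returns (0,)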
(* This Isabelle theory is produced using the TIP tool offered at the following website: https://github.com/tip-org/tools This file was originally provided as part of TIP benchmark at the following website: https://github.com/tip-org/benchmarks Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly to make it compatible with Isabelle2017.*) theory TIP_sort_nat_TSortIsSort imports "../../Test_Base" begin datatype 'a list = nil2 | cons2 "'a" "'a list" datatype Nat = Z | S "Nat" datatype Tree = TNode "Tree" "Nat" "Tree" | TNil fun le :: "Nat => Nat => bool" where "le (Z) y = True" | "le (S z) (Z) = False" | "le (S z) (S x2) = le z x2" fun insert :: "Nat => Nat list => Nat list" where "insert x (nil2) = cons2 x (nil2)" | "insert x (cons2 z xs) = (if le x z then cons2 x (cons2 z xs) else cons2 z (insert x xs))" fun isort :: "Nat list => Nat list" where "isort (nil2) = nil2" | "isort (cons2 y xs) = insert y (isort xs)" fun flatten :: "Tree => Nat list => Nat list" where "flatten (TNode q z r) y = flatten q (cons2 z (flatten r y))" | "flatten (TNil) y = y" fun add :: "Nat => Tree => Tree" where "add x (TNode q z r) = (if le x z then TNode (add x q) z r else TNode q z (add x r))" | "add x (TNil) = TNode TNil x TNil" fun toTree :: "Nat list => Tree" where "toTree (nil2) = TNil" | "toTree (cons2 y xs) = add y (toTree xs)" fun tsort :: "Nat list => Nat list" where "tsort x = flatten (toTree x) (nil2)" theorem property0 : "((tsort xs) = (isort xs))" oops end
From Coq Require Import micromega.Lia. From Coq Require Import ZArith.ZArith. From compcert Require Import common.Values. From compcert Require Import lib.Integers. From VST Require Import floyd.proofauto. Local Open Scope Z. Definition int_or_ptr__is_valid_int (x: val): Prop := if Archi.ptr64 then match x with | Vlong i => Int64.testbit i 0 = true | _ => False end else match x with | Vint i => Int.testbit i 0 = true | _ => False end. Definition int_or_ptr__is_valid_ptr (x: val): Prop := match x with | Vptr _ z => Ptrofs.testbit z 0 = false /\ Ptrofs.testbit z 1 = false | _ => False end. Definition int_or_ptr__is_valid (x: val): Prop := int_or_ptr__is_valid_int x \/ int_or_ptr__is_valid_ptr x. Definition int_or_ptr: Type := option {p: val | int_or_ptr__is_valid p}. #[global]Instance int_or_ptr__Inhabitant (X: Type) : (Inhabitant int_or_ptr) := None. Definition int_or_ptr__to_val (x: int_or_ptr): val := match x with | None => Vundef | Some v => proj1_sig v end. Lemma int_or_ptr__to_val__is_valid (x: int_or_ptr) (Hx: x <> None): int_or_ptr__is_valid (int_or_ptr__to_val x). Proof. destruct x as [x|] ; try congruence. apply (proj2_sig x). Qed. Definition int_or_ptr__is_int (x: val): bool := if Archi.ptr64 then match x with | Vlong _ => true | _ => false end else match x with | Vint _ => true | _ => false end. Lemma int_or_ptr__is_int__true (x: val): (int_or_ptr__is_valid x /\ int_or_ptr__is_int x = true) <-> int_or_ptr__is_valid_int x. Proof. split. - intros [Hx E]. destruct Hx as [Hx|Hx] ; now destruct x. - intro H. unfold int_or_ptr__is_valid. destruct x ; intuition easy. Qed. Lemma int_or_ptr__is_int__false (x: val): (int_or_ptr__is_valid x /\ int_or_ptr__is_int x = false) <-> int_or_ptr__is_valid_ptr x. Proof. split. - intros [Hx e]. destruct Hx as [Hx|Hx] ; now destruct x. - intro H. unfold int_or_ptr__is_valid. destruct x ; intuition easy. Qed. Lemma int64__odd__and_one (i: int64) (Hodd: Z.odd (Int64.unsigned i) = true): Int64.repr 1 = Int64.and i (Int64.repr 1). Proof. destruct i as [i Hi]. destruct i ; try lia ; try easy. unfold Int64.and. f_equal. simpl in *. clear -Hodd. change (Int64.unsigned (Int64.repr 1)) with 1. simpl. change 1 with (Z.of_N (1)%N). f_equal. now destruct p. Qed. Lemma int__odd__and_one (i: int) (Hodd: Z.odd (Int.unsigned i) = true): Int.repr 1 = Int.and i (Int.repr 1). Proof. destruct i as [i Hi]. destruct i ; try lia ; try easy. unfold Int.and. f_equal. simpl in *. clear -Hodd. change (Int.unsigned (Int.repr 1)) with 1. simpl. change 1 with (Z.of_N (1)%N). f_equal. now destruct p. Qed.
------------------------------------------------------------------------ -- "Equational" reasoning combinator setup ------------------------------------------------------------------------ {-# OPTIONS --sized-types #-} open import Prelude open import Labelled-transition-system module Bisimilarity.Classical.Equational-reasoning-instances {ℓ} {lts : LTS ℓ} where open import Bisimilarity.Classical lts open import Equational-reasoning instance reflexive∼ : Reflexive _∼_ reflexive∼ = is-reflexive reflexive-∼ symmetric∼ : Symmetric _∼_ symmetric∼ = is-symmetric symmetric-∼ trans∼∼ : Transitive _∼_ _∼_ trans∼∼ = is-transitive transitive-∼ convert∼∼ : Convertible _∼_ _∼_ convert∼∼ = is-convertible id
#data <- read.csv(file="ads_1.csv", header=TRUE, sep=",") #library("RColorBrewer") args = commandArgs(trailingOnly=TRUE) data <- read.csv(file=args[1], header=TRUE, sep=",",check.names=FALSE) pdf(args[2]) s <- 1.5 par(mar=c(4, 11, 4, 4)) # Resize the margin to fit the y axis labels xx <- barplot(as.numeric(data[1,]), main=args[4], horiz=TRUE, names.arg=colnames(data), las=1, font.lab=1, font.main=1, family='serif', legend=args[3], xlim=c(0,100), args.legend = list(x ="bottomright"), xlab="Precision at k (%)", cex.names=s, cex.axis=s, cex.lab=2.5, cex.main=2.5, cex.sub=s) #xx <- barplot(as.matrix(data),col=brewer.pal(n = 3, name = "RdBu"), main="Precision at K", horiz=TRUE, names.arg=colnames(data), las=1, font.lab=1, font.main=1, family='serif', xlim=c(0,100)) #font.lab and font.main defines font size #xx <- barplot(as.matrix(data),col=c("grey","#F7F7F7"), main="Precision at K", horiz=TRUE, names.arg=colnames(data), las=1, font.lab=1, font.main=1, family='serif', xlim=c(0,100), legend=c("k=10", "k=12"), args.legend = list(x ="bottomright")) #font.lab and font.main defines font size #xx <- barplot(as.numeric(data[1,]), main="Precision at K", horiz=TRUE, names.arg=colnames(data), las=1, font.lab=1, font.main=1, family='serif', xlim=c(0,100)) #font.lab and font.main defines font size mm <- max(data) text(y = xx-0.3, x = as.numeric(data[1,]) + 8, label = as.numeric(data[1,]), pos = 3, cex = s, family="serif") # show text on top of each bar dev.off()
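# Usage sketch (inferred from the args above; file names are illustrative):
#   Rscript precision_barplot.R results.csv plot.pdf "k=10" "Precision at K"
# args[1] = CSV with a single row of percentages (one column per method),
# args[2] = output PDF, args[3] = legend label, args[4] = plot title.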
{-# OPTIONS_GHC -fplugin GHC.TypeLits.KnownNat.Solver #-} {-# LANGUAGE CPP #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE KindSignatures #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeOperators #-} {-# OPTIONS_GHC -fplugin GHC.TypeLits.KnownNat.Solver #-} module Test.Hedgehog.Hmatrix where import GHC.Stack (HasCallStack, withFrozenCallStack) import Data.Singletons import Data.Singletons.TypeLits import Grenade import GHC.TypeLits import Hedgehog (Gen, MonadTest, diff) import qualified Hedgehog.Gen as Gen import qualified Hedgehog.Range as Range import Numeric.LinearAlgebra (norm_Inf) import Numeric.LinearAlgebra.Data hiding ((===)) import qualified Numeric.LinearAlgebra.Static as H import Test.Hedgehog.Compat randomVector :: forall n. ( KnownNat n ) => Gen (H.R n) randomVector = (\s -> H.randomVector s H.Uniform * 2 - 1) <$> Gen.int Range.linearBounded randomPositiveVector :: forall n. ( KnownNat n ) => Gen (H.R n) randomPositiveVector = (\s -> H.randomVector s H.Uniform) <$> Gen.int Range.linearBounded randomVectorNormalised :: forall n. ( KnownNat n ) => Gen (H.R n) randomVectorNormalised = (\s -> sigmoid ((H.randomVector s H.Uniform) * 2 - 1)) <$> Gen.int Range.linearBounded where sigmoid :: Floating a => a -> a sigmoid x = 1/(1 + exp (-x)) uniformSample :: forall m n. ( KnownNat m, KnownNat n ) => Gen (H.L m n) uniformSample = (\s -> H.uniformSample s (-1) 1 ) <$> Gen.int Range.linearBounded -- | Generate random data of the desired shape genOfShape :: forall x. ( SingI x ) => Gen (S x) genOfShape = case (sing :: Sing x) of D1Sing l -> withKnownNat l $ S1D <$> randomVector D2Sing r c -> withKnownNat r $ withKnownNat c $ S2D <$> uniformSample D3Sing r c d -> withKnownNat r $ withKnownNat c $ withKnownNat d $ S3D <$> uniformSample D4Sing n c h w -> withKnownNat n $ withKnownNat c $ withKnownNat h $ withKnownNat w $ S4D <$> uniformSample nice :: S shape -> String nice (S1D x) = show . H.extract $ x nice (S2D x) = show . H.extract $ x nice (S3D x) = show . H.extract $ x nice (S4D x) = show . 
H.extract $ x allClose :: SingI shape => S shape -> S shape -> Bool allClose xs ys = case xs - ys of (S1D x) -> H.norm_Inf x < 0.0001 (S2D x) -> H.norm_Inf x < 0.0001 (S3D x) -> H.norm_Inf x < 0.0001 (S4D x) -> H.norm_Inf x < 0.0001 allCloseP :: SingI shape => S shape -> S shape -> RealNum -> Bool allCloseP xs ys p = case xs - ys of (S1D x) -> H.norm_Inf x < p (S2D x) -> H.norm_Inf x < p (S3D x) -> H.norm_Inf x < p (S4D x) -> H.norm_Inf x < p allCloseV :: KnownNat n => H.R n -> H.R n -> Bool allCloseV xs ys = H.norm_Inf (xs - ys) < 0.0001 -- | generate a 2D list with random elements genLists :: Int -> Int -> Gen [[RealNum]] genLists height width = Gen.list (Range.singleton height) $ Gen.list (Range.singleton width) (genRealNum (Range.constant (-2.0) 2.0)) genLists3D :: Int -> Int -> Int -> Gen [[[RealNum]]] genLists3D depth height width = Gen.list (Range.singleton depth) $ Gen.list (Range.singleton height) $ Gen.list (Range.singleton width) (genRealNum (Range.constant (-2.0) 2.0)) extractVec :: KnownNat n => S ('D1 n) -> [RealNum] extractVec (S1D vec) = toList $ H.extract vec extractMat :: (KnownNat a, KnownNat b) => S ('D2 a b) -> [[RealNum]] extractMat (S2D mat) = toLists $ H.extract mat extractMat3D :: (KnownNat a, KnownNat b, KnownNat c) => S ('D3 a b c) -> [[RealNum]] extractMat3D (S3D mat) = toLists $ H.extract mat extractMat4D :: (KnownNat a, KnownNat b, KnownNat c, KnownNat d, KnownNat (a * b * c)) => S ('D4 a b c d) -> [[RealNum]] extractMat4D (S4D mat) = toLists $ H.extract mat elementsEqual :: SingI shape => S shape -> Bool elementsEqual m = case m of S1D x -> listSameElements . toList $ H.extract x S2D x -> listSameElements . concat . toLists $ H.extract x S3D x -> listSameElements . concat . toLists $ H.extract x S4D x -> listSameElements . concat . toLists $ H.extract x listSameElements :: Eq a => [a] -> Bool listSameElements [] = True listSameElements [_] = True listSameElements (x:x':xs) | x == x' = listSameElements (x':xs) | otherwise = False maxVal :: S shape -> RealNum maxVal ( S1D x ) = norm_Inf x maxVal ( S2D x ) = norm_Inf x maxVal ( S3D x ) = norm_Inf x maxVal ( S4D x ) = norm_Inf x isSimilarMatrixTo :: (MonadTest m, HasCallStack) => Matrix RealNum -> Matrix RealNum -> m () isSimilarMatrixTo x y = withFrozenCallStack $ diff x (\a b -> norm_Inf (a - b) < precision) y isSimilarVectorTo :: (MonadTest m, HasCallStack) => Vector RealNum -> Vector RealNum -> m () isSimilarVectorTo x y = withFrozenCallStack $ diff x (\a b -> norm_Inf (a - b) < precision) y
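-- Usage sketch (hypothetical property; `property`, `forAll`, and `assert`
-- come from Hedgehog and are not imported by this module):
--
--   prop_selfClose :: Property
--   prop_selfClose = property $ do
--     x <- forAll (genOfShape :: Gen (S ('D1 5)))
--     assert (allClose x x)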
(* Definition of high-level semantics *) Require Import Program Arith. (* Model *) Record HLocalState := HHonest {hhl_input : bool; hhl_decision : option bool}. Record HGlobalState := HGS {hg_n : nat; h_localstates : nat -> option HLocalState}. (* Semantics *) Definition Hstep_decide_loc (ls : HLocalState) (b : bool) : HLocalState := HHonest (hhl_input ls) (Some b). Definition Hstep_decide (gs : HGlobalState) (i : nat) (b : bool) : HGlobalState := let n := hg_n gs in let ls := h_localstates gs in HGS n (fun j => if j =? i then match ls i with | Some ls => Some (Hstep_decide_loc ls b) | None => None end else ls j). Definition Hextract_loc (ls : option HLocalState) : option bool := match ls with | Some (HHonest _ d) => d | _ => None end. Definition mergeb (l : option bool) (r : option bool) : option bool := match l with | Some b => Some b | None => r end. Definition Hextract (gs : HGlobalState) (i : nat) : option bool := match gs with | HGS n ls => if i <? n then Hextract_loc (ls i) else None end. (* TODO Hide behind a monad, so it can be written as a fun *) Inductive HStep : HGlobalState -> HGlobalState -> Prop := | NOTHING : forall gs, HStep gs gs | DECIDE : forall gs i b gs', ((forall j, Hextract gs j = None) /\ (gs' = Hstep_decide gs i b)) -> HStep gs gs' | AGREE : forall gs i b gs', ((exists j, Hextract gs j = Some b) /\ (gs' = Hstep_decide gs i b)) -> HStep gs gs'. Inductive HSteps : HGlobalState -> HGlobalState -> Prop := | HONE : forall gs gs', HStep gs gs' -> HSteps gs gs' | HMANY : forall gs gs' gs'', HSteps gs gs' -> HStep gs' gs'' -> HSteps gs gs''.
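(* Usage sketch (illustrative state, not part of the original development): a two-process global state in which both processes are honest and undecided, so [Hextract] yields [None] for every index. *) Definition example_gs : HGlobalState := HGS 2 (fun i => if i =? 0 then Some (HHonest true None) else Some (HHonest false None)). Example example_extract : Hextract example_gs 0 = None. Proof. reflexivity. Qed.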
module Core.FC import Text.PrettyPrint.Prettyprinter %default total public export FilePos : Type FilePos = (Int, Int) showPos : FilePos -> String showPos (l, c) = show (l + 1) ++ ":" ++ show (c + 1) public export FileName : Type FileName = String ||| A file context is a filename together with starting and ending positions public export data FC = MkFC FileName FilePos FilePos | EmptyFC export Eq FC where (==) (MkFC n s e) (MkFC n' s' e') = n == n' && s == s' && e == e' (==) EmptyFC EmptyFC = True (==) _ _ = False export file : FC -> FileName file (MkFC fn _ _) = fn file EmptyFC = "" export startPos : FC -> FilePos startPos (MkFC _ s _) = s startPos EmptyFC = (0, 0) export endPos : FC -> FilePos endPos (MkFC _ _ e) = e endPos EmptyFC = (0, 0) -- Return whether a given file position is within the file context (assuming we're -- in the right file) export within : FilePos -> FC -> Bool within (x, y) (MkFC _ start end) = (x, y) >= start && (x, y) <= end within _ _ = False -- Return whether a given line is on the same line as the file context (assuming -- we're in the right file) export onLine : Int -> FC -> Bool onLine x (MkFC _ start end) = x >= fst start && x <= fst end onLine _ _ = False export emptyFC : FC emptyFC = EmptyFC export toplevelFC : FC toplevelFC = MkFC "(toplevel)" (0, 0) (0, 0) %name FC fc export Show FC where show loc = file loc ++ ":" ++ showPos (startPos loc) ++ "--" ++ showPos (endPos loc) export Pretty FC where pretty loc = pretty (file loc) <+> colon <+> prettyPos (startPos loc) <+> pretty "--" <+> prettyPos (endPos loc) where prettyPos : FilePos -> Doc ann prettyPos (l, c) = pretty (l + 1) <+> colon <+> pretty (c + 1)
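-- Usage sketch (illustrative values): positions are stored as 0-based
-- (line, column) pairs and compared lexicographically, so for
-- fc = MkFC "Main.idr" (2, 0) (2, 10) we have `within (2, 5) fc = True`
-- and `onLine 2 fc = True`.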
\htmlhr \chapter{Aliasing Checker\label{aliasing-checker}} The Aliasing Checker identifies expressions that definitely have no aliases. Two expressions are aliased when they have the same non-primitive value; that is, they are references to the identical Java object in the heap. Another way of saying this is that two expressions, $\mathit{exprA}$ and $\mathit{exprB}$, are aliases of each other when $\mathit{exprA} \<==> \mathit{exprB}$ at the same program point. Assigning to a variable or field typically creates an alias. For example, after the statement \<a = b;>, the variables \<a> and \<b> are aliased. Knowing that an expression is not aliased permits more accurate reasoning about how side effects modify the expression's value. To run the Aliasing Checker, supply the \code{-processor org.checkerframework.common.aliasing.AliasingChecker} command-line option to javac. However, a user rarely runs the Aliasing Checker directly. This type system is mainly intended to be used together with other type systems. For example, the SPARTA information flow type-checker (Section~\ref{sparta-checker}) uses the Aliasing Checker to improve its type refinement --- if an expression has no aliases, a more refined type can often be inferred, otherwise the type-checker makes conservative assumptions. \section{Aliasing annotations\label{aliasing-annotations}} \begin{figure} \includeimage{aliasing}{2cm} \caption{Type hierarchy for the Aliasing type system.} \label{fig-aliasing-hierarchy} \end{figure} There are two possible types for an expression: \begin{description} \item[\refqualclass{common/aliasing/qual}{MaybeAliased}] is the type of an expression that might have an alias. This is the default, so every unannotated type is \code{@MaybeAliased}. (This includes the type of \code{null}.) \item[\refqualclass{common/aliasing/qual}{Unique}] is the type of an expression that has no aliases. The \code{@Unique} annotation is only allowed on local variables, method parameters, constructor results, and method returns. A constructor's result should be annotated with \code{@Unique} only if the constructor's body does not create an alias to the constructed object. \end{description} There are also two annotations, which are currently trusted instead of verified, that can be used on formal parameters (including the receiver parameter, \<this>): \begin{description} \item[\refqualclass{common/aliasing/qual}{NonLeaked}] identifies a formal parameter that is neither leaked nor returned by the method body. For example, the formal parameter of the String copy constructor, \code{String(String s)}, is \code{@NonLeaked} because the body of the method only makes a copy of the parameter. \item[\refqualclass{common/aliasing/qual}{LeakedToResult}] is used when the parameter may be returned, but it is not otherwise leaked. For example, the receiver parameter of \code{StringBuffer.append(StringBuffer this, String s)} is \code{@LeakedToResult}, because the method returns the updated receiver. \end{description} \section{Leaking contexts\label{aliasing-leaking-contexts}} This section lists the expressions that create aliases. These are also called ``leaking contexts''. \begin{description} \item[Assignments] After an assignment, the left-hand side and the right-hand side are typically aliased. (The only counterexample is when the right-hand side is a fresh expression; see Section~\ref{aliasing-refinement}.) \begin{Verbatim} @Unique Object u = ...; Object o = u; // (not.unique) type-checking error!
\end{Verbatim} If this example type-checked, then \<u> and \<o> would be aliased. For this example to type-check, either the \<@Unique> annotation on the type of \<u>, or the \<o = u;> assignment, must be removed. \item[Method calls and returns (pseudo-assignments)] Passing an argument to a method is a ``pseudo-assignment'' because it effectively assigns the argument to the formal parameter. Return statements are also pseudo-assignments. As with assignments, the left-hand side and right-hand side of pseudo-assignments are typically aliased. Here is an example for argument-passing: \begin{Verbatim} void foo(Object o) { ... } @Unique Object u = ...; foo(u); // type-checking error, because foo may create an alias of the passed argument \end{Verbatim} Passing a non-aliased reference to a method does not necessarily create an alias. However, the body of the method might create an alias or leak the reference. Thus, the Aliasing Checker always treats a method call as creating aliases for each argument unless the corresponding formal parameter is marked as @\refqualclass{common/aliasing/qual}{NonLeaked} or @\refqualclass{common/aliasing/qual}{LeakedToResult}. Here is an example for a return statement: \begin{Verbatim} Object id(@Unique Object p) { return p; // (not.unique) type-checking error! } \end{Verbatim} If this code type-checked, then it would be possible for clients to write code like this: \begin{Verbatim} @Unique Object u = ...; Object o = id(u); \end{Verbatim} \noindent after which there is an alias to \<u> even though it is declared as \<@Unique>. However, it is permitted to write \begin{Verbatim} Object id(@LeakedToResult Object p) { return p; } \end{Verbatim} \noindent after which the following code type-checks: \begin{Verbatim} @Unique Object u = ...; id(u); // method call result is not used Object o1 = ...; Object o2 = id(o1); // argument is not @Unique \end{Verbatim} \item[Throws] A thrown exception can be captured by a catch block, which creates an alias of the thrown exception. \begin{Verbatim} void foo() { @Unique Exception uex = new Exception(); try { throw uex; // (not.unique) type-checking error! } catch (Exception ex) { // uex and ex refer to the same object here. } } \end{Verbatim} \item[Array initializers] Array initializers assign the elements in the initializers to corresponding indexes in the array, therefore expressions in an array initializer are leaked. \begin{Verbatim} void foo() { @Unique Object o = new Object(); Object[] ar = new Object[] { o }; // (not.unique) type-checking error! // The expressions o and ar[0] are now aliased. } \end{Verbatim} %Remember to add enhanced for statement if support to type variables is added. \end{description} \section{Restrictions on where \<@Unique> may be written\label{aliasing-unique-restrictions}} The \<@Unique> qualifier may not be written on locations such as fields, array elements, and type parameters. As an example of why \<@Unique> may not be written on a field's type, consider the following code: \begin{Verbatim} class MyClass { @Unique Object field; void foo() { MyClass myClass2 = this; // this.field is now an alias of myClass2.field } } \end{Verbatim} That code must not type-check, because \<field> is declared as \<@Unique> but has an alias. The Aliasing Checker solves the problem by forbidding the \<@Unique> qualifier on subcomponents of a structure, such as fields. Other solutions might be possible; they would be more complicated but would permit more code to type-check. 
\<@Unique> may not be written on a type parameter for similar reasons. The assignment \begin{Verbatim} List<@Unique Object> l1 = ...; List<@Unique Object> l2 = l1; \end{Verbatim} \noindent must be forbidden because it would alias \<l1.get(0)> with \<l2.get(0)> even though both have type \<@Unique>. The Aliasing Checker forbids this code by rejecting the type \code{List<@Unique Object>}. \section{Aliasing type refinement\label{aliasing-refinement}} Type refinement enables a type checker to treat an expression as a subtype of its declared type. For example, even if you declare a local variable as \<@MaybeAliased> (or don't write anything, since \<@MaybeAliased> is the default), sometimes the Aliasing Checker can determine that it is actually \<@Unique>. % This prevents the type checker from issuing false positive warnings. For more details, see Section~\ref{type-refinement}. The Aliasing Checker treats type refinement in the usual way, except that at (pseudo-)assignments the right-hand-side (RHS) may lose its type refinement, before the left-hand-side (LHS) is type-refined. The RHS always loses its type refinement (it is widened to \code{@MaybeAliased}, and its declared type must have been \code{@MaybeAliased}) except in the following cases: \begin{itemize} \item The RHS is a fresh expression --- an expression that returns a different value each time it is evaluated. In practice, this is only method/constructor calls with \code{@Unique} return type. A variable/field is not fresh because it can return the same value when evaluated twice. \item The LHS is a \code{@NonLeaked} formal parameter and the RHS is an argument in a method call or constructor invocation. \item The LHS is a \code{@LeakedToResult} formal parameter, the RHS is an argument in a method call or constructor invocation, and the method's return value is discarded --- that is, the method call or constructor invocation is written syntactically as a statement rather than as a part of a larger expression or statement. \end{itemize} %(Notice that the last two rules above are restricted to pseudo-assignments.) A consequence of the above rules is that most method calls are treated conservatively. If a variable with declared type \code{@MaybeAliased} has been refined to \code{@Unique} and is used as an argument of a method call, it usually loses its \code{@Unique} refined type. Figure~\ref{fig-aliasing-refinement-example} gives an example of the Aliasing Checker's type refinement rules. \begin{figure} %BEGIN LATEX \begin{smaller} %END LATEX \begin{Verbatim} // Annotations on the StringBuffer class, used in the examples below. // class StringBuffer { // @Unique StringBuffer(); // StringBuffer append(@LeakedToResult StringBuffer this, @NonLeaked String s); // } void foo() { StringBuffer sb = new StringBuffer(); // sb is refined to @Unique. StringBuffer sb2 = sb; // sb loses its refinement. // Both sb and sb2 have aliases and because of that have type @MaybeAliased. } void bar() { StringBuffer sb = new StringBuffer(); // sb is refined to @Unique. sb.append("someString"); // sb stays @Unique, as no aliases are created. StringBuffer sb2 = sb.append("someString"); // sb is leaked and becomes @MaybeAliased. // Both sb and sb2 have aliases and because of that have type @MaybeAliased. } \end{Verbatim} %BEGIN LATEX \end{smaller} %END LATEX \caption{Example of Aliasing Checker's type refinement rules.} \label{fig-aliasing-refinement-example} \end{figure} %% LocalWords: MaybeAliased NonLeaked LeakedToResult l1 l2 RHS LHS
[GOAL] R : Type u_1 inst✝⁵ : CommSemiring R A : Type u_2 inst✝⁴ : CommSemiring A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommMonoid M inst✝¹ : Module A M inst✝ : Module R M D D1✝ D2✝ : Derivation R A M r : R a b : A D1 D2 : Derivation R A M h : (fun D => D.toFun) D1 = (fun D => D.toFun) D2 ⊢ D1 = D2 [PROOFSTEP] cases D1 [GOAL] case mk R : Type u_1 inst✝⁵ : CommSemiring R A : Type u_2 inst✝⁴ : CommSemiring A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommMonoid M inst✝¹ : Module A M inst✝ : Module R M D D1 D2✝ : Derivation R A M r : R a b : A D2 : Derivation R A M toLinearMap✝ : A →ₗ[R] M map_one_eq_zero'✝ : ↑toLinearMap✝ 1 = 0 leibniz'✝ : ∀ (a b : A), ↑toLinearMap✝ (a * b) = a • ↑toLinearMap✝ b + b • ↑toLinearMap✝ a h : (fun D => D.toFun) { toLinearMap := toLinearMap✝, map_one_eq_zero' := map_one_eq_zero'✝, leibniz' := leibniz'✝ } = (fun D => D.toFun) D2 ⊢ { toLinearMap := toLinearMap✝, map_one_eq_zero' := map_one_eq_zero'✝, leibniz' := leibniz'✝ } = D2 [PROOFSTEP] cases D2 [GOAL] case mk.mk R : Type u_1 inst✝⁵ : CommSemiring R A : Type u_2 inst✝⁴ : CommSemiring A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommMonoid M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a b : A toLinearMap✝¹ : A →ₗ[R] M map_one_eq_zero'✝¹ : ↑toLinearMap✝¹ 1 = 0 leibniz'✝¹ : ∀ (a b : A), ↑toLinearMap✝¹ (a * b) = a • ↑toLinearMap✝¹ b + b • ↑toLinearMap✝¹ a toLinearMap✝ : A →ₗ[R] M map_one_eq_zero'✝ : ↑toLinearMap✝ 1 = 0 leibniz'✝ : ∀ (a b : A), ↑toLinearMap✝ (a * b) = a • ↑toLinearMap✝ b + b • ↑toLinearMap✝ a h : (fun D => D.toFun) { toLinearMap := toLinearMap✝¹, map_one_eq_zero' := map_one_eq_zero'✝¹, leibniz' := leibniz'✝¹ } = (fun D => D.toFun) { toLinearMap := toLinearMap✝, map_one_eq_zero' := map_one_eq_zero'✝, leibniz' := leibniz'✝ } ⊢ { toLinearMap := toLinearMap✝¹, map_one_eq_zero' := map_one_eq_zero'✝¹, leibniz' := leibniz'✝¹ } = { toLinearMap := toLinearMap✝, map_one_eq_zero' := map_one_eq_zero'✝, leibniz' := leibniz'✝ } [PROOFSTEP] congr [GOAL] case mk.mk.e_toLinearMap R : Type u_1 inst✝⁵ : CommSemiring R A : Type u_2 inst✝⁴ : CommSemiring A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommMonoid M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a b : A toLinearMap✝¹ : A →ₗ[R] M map_one_eq_zero'✝¹ : ↑toLinearMap✝¹ 1 = 0 leibniz'✝¹ : ∀ (a b : A), ↑toLinearMap✝¹ (a * b) = a • ↑toLinearMap✝¹ b + b • ↑toLinearMap✝¹ a toLinearMap✝ : A →ₗ[R] M map_one_eq_zero'✝ : ↑toLinearMap✝ 1 = 0 leibniz'✝ : ∀ (a b : A), ↑toLinearMap✝ (a * b) = a • ↑toLinearMap✝ b + b • ↑toLinearMap✝ a h : (fun D => D.toFun) { toLinearMap := toLinearMap✝¹, map_one_eq_zero' := map_one_eq_zero'✝¹, leibniz' := leibniz'✝¹ } = (fun D => D.toFun) { toLinearMap := toLinearMap✝, map_one_eq_zero' := map_one_eq_zero'✝, leibniz' := leibniz'✝ } ⊢ toLinearMap✝¹ = toLinearMap✝ [PROOFSTEP] exact FunLike.coe_injective h [GOAL] R : Type u_1 inst✝⁵ : CommSemiring R A : Type u_2 inst✝⁴ : CommSemiring A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommMonoid M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a b : A ⊢ ↑D (↑(algebraMap R A) r) = 0 [PROOFSTEP] rw [← mul_one r, RingHom.map_mul, RingHom.map_one, ← smul_def, map_smul, map_one_eq_zero, smul_zero] [GOAL] R : Type u_1 inst✝⁵ : CommSemiring R A : Type u_2 inst✝⁴ : CommSemiring A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommMonoid M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a b : A n : ℕ ⊢ ↑D ↑n = 0 [PROOFSTEP] rw [← nsmul_one, D.map_smul_of_tower n, map_one_eq_zero, smul_zero] 
[GOAL] R : Type u_1 inst✝⁵ : CommSemiring R A : Type u_2 inst✝⁴ : CommSemiring A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommMonoid M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a b : A n : ℕ ⊢ ↑D (a ^ n) = n • a ^ (n - 1) • ↑D a [PROOFSTEP] induction' n with n ihn [GOAL] case zero R : Type u_1 inst✝⁵ : CommSemiring R A : Type u_2 inst✝⁴ : CommSemiring A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommMonoid M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a b : A ⊢ ↑D (a ^ Nat.zero) = Nat.zero • a ^ (Nat.zero - 1) • ↑D a [PROOFSTEP] rw [Nat.zero_eq, pow_zero, map_one_eq_zero, zero_smul] [GOAL] case succ R : Type u_1 inst✝⁵ : CommSemiring R A : Type u_2 inst✝⁴ : CommSemiring A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommMonoid M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a b : A n : ℕ ihn : ↑D (a ^ n) = n • a ^ (n - 1) • ↑D a ⊢ ↑D (a ^ Nat.succ n) = Nat.succ n • a ^ (Nat.succ n - 1) • ↑D a [PROOFSTEP] rcases(zero_le n).eq_or_lt with (rfl | hpos) [GOAL] case succ.inl R : Type u_1 inst✝⁵ : CommSemiring R A : Type u_2 inst✝⁴ : CommSemiring A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommMonoid M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a b : A ihn : ↑D (a ^ 0) = 0 • a ^ (0 - 1) • ↑D a ⊢ ↑D (a ^ Nat.succ 0) = Nat.succ 0 • a ^ (Nat.succ 0 - 1) • ↑D a [PROOFSTEP] erw [pow_one, one_smul, pow_zero, one_smul] [GOAL] case succ.inr R : Type u_1 inst✝⁵ : CommSemiring R A : Type u_2 inst✝⁴ : CommSemiring A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommMonoid M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a b : A n : ℕ ihn : ↑D (a ^ n) = n • a ^ (n - 1) • ↑D a hpos : 0 < n ⊢ ↑D (a ^ Nat.succ n) = Nat.succ n • a ^ (Nat.succ n - 1) • ↑D a [PROOFSTEP] have : a * a ^ (n - 1) = a ^ n := by rw [← pow_succ, Nat.sub_add_cancel hpos] [GOAL] R : Type u_1 inst✝⁵ : CommSemiring R A : Type u_2 inst✝⁴ : CommSemiring A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommMonoid M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a b : A n : ℕ ihn : ↑D (a ^ n) = n • a ^ (n - 1) • ↑D a hpos : 0 < n ⊢ a * a ^ (n - 1) = a ^ n [PROOFSTEP] rw [← pow_succ, Nat.sub_add_cancel hpos] [GOAL] case succ.inr R : Type u_1 inst✝⁵ : CommSemiring R A : Type u_2 inst✝⁴ : CommSemiring A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommMonoid M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a b : A n : ℕ ihn : ↑D (a ^ n) = n • a ^ (n - 1) • ↑D a hpos : 0 < n this : a * a ^ (n - 1) = a ^ n ⊢ ↑D (a ^ Nat.succ n) = Nat.succ n • a ^ (Nat.succ n - 1) • ↑D a [PROOFSTEP] simp only [pow_succ, leibniz, ihn, smul_comm a n (_ : M), smul_smul a, add_smul, this, Nat.succ_eq_add_one, Nat.add_succ_sub_one, add_zero, one_nsmul] [GOAL] R : Type u_1 inst✝⁵ : CommSemiring R A : Type u_2 inst✝⁴ : CommSemiring A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommMonoid M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a b : A s : Set A h : Set.EqOn (↑D1) (↑D2) s x✝ : A hx✝ : x✝ ∈ ↑(adjoin R s) x y : A hx : ↑D1 x = ↑D2 x hy : ↑D1 y = ↑D2 y ⊢ ↑D1 (x + y) = ↑D2 (x + y) [PROOFSTEP] simp only [map_add, *] [GOAL] R : Type u_1 inst✝⁵ : CommSemiring R A : Type u_2 inst✝⁴ : CommSemiring A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommMonoid M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a b : A s : Set A h : Set.EqOn (↑D1) (↑D2) s x✝ : A hx✝ : x✝ ∈ ↑(adjoin R s) x y : A hx : ↑D1 x = ↑D2 x hy : ↑D1 y = ↑D2 y ⊢ 
↑D1 (x * y) = ↑D2 (x * y) [PROOFSTEP] simp only [leibniz, *] [GOAL] R : Type u_1 inst✝⁵ : CommSemiring R A : Type u_2 inst✝⁴ : CommSemiring A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommMonoid M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a✝ b✝ a b : A ⊢ ↑0 (a * b) = a • ↑0 b + b • ↑0 a [PROOFSTEP] simp only [add_zero, LinearMap.zero_apply, smul_zero] [GOAL] R : Type u_1 inst✝⁵ : CommSemiring R A : Type u_2 inst✝⁴ : CommSemiring A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommMonoid M inst✝¹ : Module A M inst✝ : Module R M D D1✝ D2✝ : Derivation R A M r : R a b : A D1 D2 : Derivation R A M ⊢ ↑(↑D1 + ↑D2) 1 = 0 [PROOFSTEP] simp [GOAL] R : Type u_1 inst✝⁵ : CommSemiring R A : Type u_2 inst✝⁴ : CommSemiring A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommMonoid M inst✝¹ : Module A M inst✝ : Module R M D D1✝ D2✝ : Derivation R A M r : R a✝ b✝ : A D1 D2 : Derivation R A M a b : A ⊢ ↑(↑D1 + ↑D2) (a * b) = a • ↑(↑D1 + ↑D2) b + b • ↑(↑D1 + ↑D2) a [PROOFSTEP] simp only [leibniz, LinearMap.add_apply, coeFn_coe, smul_add, add_add_add_comm] [GOAL] R : Type u_1 inst✝¹³ : CommSemiring R A : Type u_2 inst✝¹² : CommSemiring A inst✝¹¹ : Algebra R A M : Type u_3 inst✝¹⁰ : AddCommMonoid M inst✝⁹ : Module A M inst✝⁸ : Module R M D✝ D1 D2 : Derivation R A M r✝ : R a b : A S : Type u_4 T : Type u_5 inst✝⁷ : Monoid S inst✝⁶ : DistribMulAction S M inst✝⁵ : SMulCommClass R S M inst✝⁴ : SMulCommClass S A M inst✝³ : Monoid T inst✝² : DistribMulAction T M inst✝¹ : SMulCommClass R T M inst✝ : SMulCommClass T A M r : S D : Derivation R A M ⊢ ↑(r • ↑D) 1 = 0 [PROOFSTEP] rw [LinearMap.smul_apply, coeFn_coe, D.map_one_eq_zero, smul_zero] [GOAL] R : Type u_1 inst✝¹³ : CommSemiring R A : Type u_2 inst✝¹² : CommSemiring A inst✝¹¹ : Algebra R A M : Type u_3 inst✝¹⁰ : AddCommMonoid M inst✝⁹ : Module A M inst✝⁸ : Module R M D✝ D1 D2 : Derivation R A M r✝ : R a✝ b✝ : A S : Type u_4 T : Type u_5 inst✝⁷ : Monoid S inst✝⁶ : DistribMulAction S M inst✝⁵ : SMulCommClass R S M inst✝⁴ : SMulCommClass S A M inst✝³ : Monoid T inst✝² : DistribMulAction T M inst✝¹ : SMulCommClass R T M inst✝ : SMulCommClass T A M r : S D : Derivation R A M a b : A ⊢ ↑(r • ↑D) (a * b) = a • ↑(r • ↑D) b + b • ↑(r • ↑D) a [PROOFSTEP] simp only [LinearMap.smul_apply, coeFn_coe, leibniz, smul_add, smul_comm r (_ : A) (_ : M)] [GOAL] R : Type u_1 inst✝¹⁰ : CommSemiring R A : Type u_2 inst✝⁹ : CommSemiring A inst✝⁸ : Algebra R A M : Type u_3 inst✝⁷ : AddCommMonoid M inst✝⁶ : Module A M inst✝⁵ : Module R M D✝ D1 D2 : Derivation R A M r : R a b : A N : Type u_4 inst✝⁴ : AddCommMonoid N inst✝³ : Module A N inst✝² : Module R N inst✝¹ : IsScalarTower R A M inst✝ : IsScalarTower R A N f : M →ₗ[A] N e : M ≃ₗ[A] N D : Derivation R A M ⊢ ↑(LinearMap.comp (↑R f) ↑D) 1 = 0 [PROOFSTEP] simp only [LinearMap.comp_apply, coeFn_coe, map_one_eq_zero, map_zero] [GOAL] R : Type u_1 inst✝¹⁰ : CommSemiring R A : Type u_2 inst✝⁹ : CommSemiring A inst✝⁸ : Algebra R A M : Type u_3 inst✝⁷ : AddCommMonoid M inst✝⁶ : Module A M inst✝⁵ : Module R M D✝ D1 D2 : Derivation R A M r : R a✝ b✝ : A N : Type u_4 inst✝⁴ : AddCommMonoid N inst✝³ : Module A N inst✝² : Module R N inst✝¹ : IsScalarTower R A M inst✝ : IsScalarTower R A N f : M →ₗ[A] N e : M ≃ₗ[A] N D : Derivation R A M a b : A ⊢ ↑(LinearMap.comp (↑R f) ↑D) (a * b) = a • ↑(LinearMap.comp (↑R f) ↑D) b + b • ↑(LinearMap.comp (↑R f) ↑D) a [PROOFSTEP] simp only [coeFn_coe, LinearMap.comp_apply, LinearMap.map_add, leibniz, LinearMap.coe_restrictScalars, LinearMap.map_smul] [GOAL] R : Type u_1 inst✝¹⁰ : 
CommSemiring R A : Type u_2 inst✝⁹ : CommSemiring A inst✝⁸ : Algebra R A M : Type u_3 inst✝⁷ : AddCommMonoid M inst✝⁶ : Module A M inst✝⁵ : Module R M D D1 D2 : Derivation R A M r : R a b : A N : Type u_4 inst✝⁴ : AddCommMonoid N inst✝³ : Module A N inst✝² : Module R N inst✝¹ : IsScalarTower R A M inst✝ : IsScalarTower R A N f : M →ₗ[A] N e : M ≃ₗ[A] N D₁ D₂ : Derivation R A M ⊢ (fun D => { toLinearMap := LinearMap.comp (↑R f) ↑D, map_one_eq_zero' := (_ : ↑(↑R f) (↑D 1) = 0), leibniz' := (_ : ∀ (a b : A), ↑(↑R f) (↑D (a * b)) = a • ↑f (↑D b) + b • ↑f (↑D a)) }) (D₁ + D₂) = (fun D => { toLinearMap := LinearMap.comp (↑R f) ↑D, map_one_eq_zero' := (_ : ↑(↑R f) (↑D 1) = 0), leibniz' := (_ : ∀ (a b : A), ↑(↑R f) (↑D (a * b)) = a • ↑f (↑D b) + b • ↑f (↑D a)) }) D₁ + (fun D => { toLinearMap := LinearMap.comp (↑R f) ↑D, map_one_eq_zero' := (_ : ↑(↑R f) (↑D 1) = 0), leibniz' := (_ : ∀ (a b : A), ↑(↑R f) (↑D (a * b)) = a • ↑f (↑D b) + b • ↑f (↑D a)) }) D₂ [PROOFSTEP] ext [GOAL] case H R : Type u_1 inst✝¹⁰ : CommSemiring R A : Type u_2 inst✝⁹ : CommSemiring A inst✝⁸ : Algebra R A M : Type u_3 inst✝⁷ : AddCommMonoid M inst✝⁶ : Module A M inst✝⁵ : Module R M D D1 D2 : Derivation R A M r : R a b : A N : Type u_4 inst✝⁴ : AddCommMonoid N inst✝³ : Module A N inst✝² : Module R N inst✝¹ : IsScalarTower R A M inst✝ : IsScalarTower R A N f : M →ₗ[A] N e : M ≃ₗ[A] N D₁ D₂ : Derivation R A M a✝ : A ⊢ ↑((fun D => { toLinearMap := LinearMap.comp (↑R f) ↑D, map_one_eq_zero' := (_ : ↑(↑R f) (↑D 1) = 0), leibniz' := (_ : ∀ (a b : A), ↑(↑R f) (↑D (a * b)) = a • ↑f (↑D b) + b • ↑f (↑D a)) }) (D₁ + D₂)) a✝ = ↑((fun D => { toLinearMap := LinearMap.comp (↑R f) ↑D, map_one_eq_zero' := (_ : ↑(↑R f) (↑D 1) = 0), leibniz' := (_ : ∀ (a b : A), ↑(↑R f) (↑D (a * b)) = a • ↑f (↑D b) + b • ↑f (↑D a)) }) D₁ + (fun D => { toLinearMap := LinearMap.comp (↑R f) ↑D, map_one_eq_zero' := (_ : ↑(↑R f) (↑D 1) = 0), leibniz' := (_ : ∀ (a b : A), ↑(↑R f) (↑D (a * b)) = a • ↑f (↑D b) + b • ↑f (↑D a)) }) D₂) a✝ [PROOFSTEP] exact LinearMap.map_add _ _ _ [GOAL] R : Type u_1 inst✝¹⁰ : CommSemiring R A : Type u_2 inst✝⁹ : CommSemiring A inst✝⁸ : Algebra R A M : Type u_3 inst✝⁷ : AddCommMonoid M inst✝⁶ : Module A M inst✝⁵ : Module R M D✝ D1 D2 : Derivation R A M r✝ : R a b : A N : Type u_4 inst✝⁴ : AddCommMonoid N inst✝³ : Module A N inst✝² : Module R N inst✝¹ : IsScalarTower R A M inst✝ : IsScalarTower R A N f : M →ₗ[A] N e : M ≃ₗ[A] N r : R D : Derivation R A M ⊢ AddHom.toFun { toFun := fun D => { toLinearMap := LinearMap.comp (↑R f) ↑D, map_one_eq_zero' := (_ : ↑(↑R f) (↑D 1) = 0), leibniz' := (_ : ∀ (a b : A), ↑(↑R f) (↑D (a * b)) = a • ↑f (↑D b) + b • ↑f (↑D a)) }, map_add' := (_ : ∀ (D₁ D₂ : Derivation R A M), (fun D => { toLinearMap := LinearMap.comp (↑R f) ↑D, map_one_eq_zero' := (_ : ↑(↑R f) (↑D 1) = 0), leibniz' := (_ : ∀ (a b : A), ↑(↑R f) (↑D (a * b)) = a • ↑f (↑D b) + b • ↑f (↑D a)) }) (D₁ + D₂) = (fun D => { toLinearMap := LinearMap.comp (↑R f) ↑D, map_one_eq_zero' := (_ : ↑(↑R f) (↑D 1) = 0), leibniz' := (_ : ∀ (a b : A), ↑(↑R f) (↑D (a * b)) = a • ↑f (↑D b) + b • ↑f (↑D a)) }) D₁ + (fun D => { toLinearMap := LinearMap.comp (↑R f) ↑D, map_one_eq_zero' := (_ : ↑(↑R f) (↑D 1) = 0), leibniz' := (_ : ∀ (a b : A), ↑(↑R f) (↑D (a * b)) = a • ↑f (↑D b) + b • ↑f (↑D a)) }) D₂) } (r • D) = ↑(RingHom.id R) r • AddHom.toFun { toFun := fun D => { toLinearMap := LinearMap.comp (↑R f) ↑D, map_one_eq_zero' := (_ : ↑(↑R f) (↑D 1) = 0), leibniz' := (_ : ∀ (a b : A), ↑(↑R f) (↑D (a * b)) = a • ↑f (↑D b) + b • ↑f (↑D a)) }, map_add' := (_ : ∀ (D₁ D₂ : 
Derivation R A M), (fun D => { toLinearMap := LinearMap.comp (↑R f) ↑D, map_one_eq_zero' := (_ : ↑(↑R f) (↑D 1) = 0), leibniz' := (_ : ∀ (a b : A), ↑(↑R f) (↑D (a * b)) = a • ↑f (↑D b) + b • ↑f (↑D a)) }) (D₁ + D₂) = (fun D => { toLinearMap := LinearMap.comp (↑R f) ↑D, map_one_eq_zero' := (_ : ↑(↑R f) (↑D 1) = 0), leibniz' := (_ : ∀ (a b : A), ↑(↑R f) (↑D (a * b)) = a • ↑f (↑D b) + b • ↑f (↑D a)) }) D₁ + (fun D => { toLinearMap := LinearMap.comp (↑R f) ↑D, map_one_eq_zero' := (_ : ↑(↑R f) (↑D 1) = 0), leibniz' := (_ : ∀ (a b : A), ↑(↑R f) (↑D (a * b)) = a • ↑f (↑D b) + b • ↑f (↑D a)) }) D₂) } D [PROOFSTEP] dsimp [GOAL] R : Type u_1 inst✝¹⁰ : CommSemiring R A : Type u_2 inst✝⁹ : CommSemiring A inst✝⁸ : Algebra R A M : Type u_3 inst✝⁷ : AddCommMonoid M inst✝⁶ : Module A M inst✝⁵ : Module R M D✝ D1 D2 : Derivation R A M r✝ : R a b : A N : Type u_4 inst✝⁴ : AddCommMonoid N inst✝³ : Module A N inst✝² : Module R N inst✝¹ : IsScalarTower R A M inst✝ : IsScalarTower R A N f : M →ₗ[A] N e : M ≃ₗ[A] N r : R D : Derivation R A M ⊢ { toLinearMap := LinearMap.comp (↑R f) (r • ↑D), map_one_eq_zero' := (_ : ↑f (r • ↑D 1) = 0), leibniz' := (_ : ∀ (a b : A), ↑f (r • ↑D (a * b)) = a • ↑f (r • ↑D b) + b • ↑f (r • ↑D a)) } = r • { toLinearMap := LinearMap.comp (↑R f) ↑D, map_one_eq_zero' := (_ : ↑f (↑D 1) = 0), leibniz' := (_ : ∀ (a b : A), ↑f (↑D (a * b)) = a • ↑f (↑D b) + b • ↑f (↑D a)) } [PROOFSTEP] ext [GOAL] case H R : Type u_1 inst✝¹⁰ : CommSemiring R A : Type u_2 inst✝⁹ : CommSemiring A inst✝⁸ : Algebra R A M : Type u_3 inst✝⁷ : AddCommMonoid M inst✝⁶ : Module A M inst✝⁵ : Module R M D✝ D1 D2 : Derivation R A M r✝ : R a b : A N : Type u_4 inst✝⁴ : AddCommMonoid N inst✝³ : Module A N inst✝² : Module R N inst✝¹ : IsScalarTower R A M inst✝ : IsScalarTower R A N f : M →ₗ[A] N e : M ≃ₗ[A] N r : R D : Derivation R A M a✝ : A ⊢ ↑{ toLinearMap := LinearMap.comp (↑R f) (r • ↑D), map_one_eq_zero' := (_ : ↑f (r • ↑D 1) = 0), leibniz' := (_ : ∀ (a b : A), ↑f (r • ↑D (a * b)) = a • ↑f (r • ↑D b) + b • ↑f (r • ↑D a)) } a✝ = ↑(r • { toLinearMap := LinearMap.comp (↑R f) ↑D, map_one_eq_zero' := (_ : ↑f (↑D 1) = 0), leibniz' := (_ : ∀ (a b : A), ↑f (↑D (a * b)) = a • ↑f (↑D b) + b • ↑f (↑D a)) }) a✝ [PROOFSTEP] exact LinearMap.map_smul (f : M →ₗ[R] N) _ _ [GOAL] R : Type u_1 inst✝¹⁰ : CommSemiring R A : Type u_2 inst✝⁹ : CommSemiring A inst✝⁸ : Algebra R A M : Type u_3 inst✝⁷ : AddCommMonoid M inst✝⁶ : Module A M inst✝⁵ : Module R M D D1 D2 : Derivation R A M r : R a b : A N : Type u_4 inst✝⁴ : AddCommMonoid N inst✝³ : Module A N inst✝² : Module R N inst✝¹ : IsScalarTower R A M inst✝ : IsScalarTower R A N f : M →ₗ[A] N e : M ≃ₗ[A] N f₁ f₂ : M →ₗ[A] N ⊢ (fun f => LinearMap.compDer f) (f₁ + f₂) = (fun f => LinearMap.compDer f) f₁ + (fun f => LinearMap.compDer f) f₂ [PROOFSTEP] ext [GOAL] case h.H R : Type u_1 inst✝¹⁰ : CommSemiring R A : Type u_2 inst✝⁹ : CommSemiring A inst✝⁸ : Algebra R A M : Type u_3 inst✝⁷ : AddCommMonoid M inst✝⁶ : Module A M inst✝⁵ : Module R M D D1 D2 : Derivation R A M r : R a b : A N : Type u_4 inst✝⁴ : AddCommMonoid N inst✝³ : Module A N inst✝² : Module R N inst✝¹ : IsScalarTower R A M inst✝ : IsScalarTower R A N f : M →ₗ[A] N e : M ≃ₗ[A] N f₁ f₂ : M →ₗ[A] N x✝ : Derivation R A M a✝ : A ⊢ ↑(↑((fun f => LinearMap.compDer f) (f₁ + f₂)) x✝) a✝ = ↑(↑((fun f => LinearMap.compDer f) f₁ + (fun f => LinearMap.compDer f) f₂) x✝) a✝ [PROOFSTEP] rfl [GOAL] R : Type u_1 inst✝¹⁰ : CommSemiring R A : Type u_2 inst✝⁹ : CommSemiring A inst✝⁸ : Algebra R A M : Type u_3 inst✝⁷ : AddCommMonoid M inst✝⁶ : Module A 
M inst✝⁵ : Module R M D✝ D1 D2 : Derivation R A M r✝ : R a b : A N : Type u_4 inst✝⁴ : AddCommMonoid N inst✝³ : Module A N inst✝² : Module R N inst✝¹ : IsScalarTower R A M inst✝ : IsScalarTower R A N f : M →ₗ[A] N e : M ≃ₗ[A] N r : A D : M →ₗ[A] N ⊢ AddHom.toFun { toFun := fun f => LinearMap.compDer f, map_add' := (_ : ∀ (f₁ f₂ : M →ₗ[A] N), (fun f => LinearMap.compDer f) (f₁ + f₂) = (fun f => LinearMap.compDer f) f₁ + (fun f => LinearMap.compDer f) f₂) } (r • D) = ↑(RingHom.id A) r • AddHom.toFun { toFun := fun f => LinearMap.compDer f, map_add' := (_ : ∀ (f₁ f₂ : M →ₗ[A] N), (fun f => LinearMap.compDer f) (f₁ + f₂) = (fun f => LinearMap.compDer f) f₁ + (fun f => LinearMap.compDer f) f₂) } D [PROOFSTEP] ext [GOAL] case h.H R : Type u_1 inst✝¹⁰ : CommSemiring R A : Type u_2 inst✝⁹ : CommSemiring A inst✝⁸ : Algebra R A M : Type u_3 inst✝⁷ : AddCommMonoid M inst✝⁶ : Module A M inst✝⁵ : Module R M D✝ D1 D2 : Derivation R A M r✝ : R a b : A N : Type u_4 inst✝⁴ : AddCommMonoid N inst✝³ : Module A N inst✝² : Module R N inst✝¹ : IsScalarTower R A M inst✝ : IsScalarTower R A N f : M →ₗ[A] N e : M ≃ₗ[A] N r : A D : M →ₗ[A] N x✝ : Derivation R A M a✝ : A ⊢ ↑(↑(AddHom.toFun { toFun := fun f => LinearMap.compDer f, map_add' := (_ : ∀ (f₁ f₂ : M →ₗ[A] N), (fun f => LinearMap.compDer f) (f₁ + f₂) = (fun f => LinearMap.compDer f) f₁ + (fun f => LinearMap.compDer f) f₂) } (r • D)) x✝) a✝ = ↑(↑(↑(RingHom.id A) r • AddHom.toFun { toFun := fun f => LinearMap.compDer f, map_add' := (_ : ∀ (f₁ f₂ : M →ₗ[A] N), (fun f => LinearMap.compDer f) (f₁ + f₂) = (fun f => LinearMap.compDer f) f₁ + (fun f => LinearMap.compDer f) f₂) } D) x✝) a✝ [PROOFSTEP] rfl [GOAL] R : Type u_1 inst✝¹⁰ : CommSemiring R A : Type u_2 inst✝⁹ : CommSemiring A inst✝⁸ : Algebra R A M : Type u_3 inst✝⁷ : AddCommMonoid M inst✝⁶ : Module A M inst✝⁵ : Module R M D✝ D1 D2 : Derivation R A M r : R a b : A N : Type u_4 inst✝⁴ : AddCommMonoid N inst✝³ : Module A N inst✝² : Module R N inst✝¹ : IsScalarTower R A M inst✝ : IsScalarTower R A N f : M →ₗ[A] N e : M ≃ₗ[A] N src✝ : Derivation R A M →ₗ[R] Derivation R A N := LinearMap.compDer ↑e D : Derivation R A M ⊢ ↑(LinearMap.compDer ↑(LinearEquiv.symm e)) (AddHom.toFun { toAddHom := src✝.toAddHom, map_smul' := (_ : ∀ (r : R) (x : Derivation R A M), AddHom.toFun src✝.toAddHom (r • x) = ↑(RingHom.id R) r • AddHom.toFun src✝.toAddHom x) }.toAddHom D) = D [PROOFSTEP] ext a [GOAL] case H R : Type u_1 inst✝¹⁰ : CommSemiring R A : Type u_2 inst✝⁹ : CommSemiring A inst✝⁸ : Algebra R A M : Type u_3 inst✝⁷ : AddCommMonoid M inst✝⁶ : Module A M inst✝⁵ : Module R M D✝ D1 D2 : Derivation R A M r : R a✝ b : A N : Type u_4 inst✝⁴ : AddCommMonoid N inst✝³ : Module A N inst✝² : Module R N inst✝¹ : IsScalarTower R A M inst✝ : IsScalarTower R A N f : M →ₗ[A] N e : M ≃ₗ[A] N src✝ : Derivation R A M →ₗ[R] Derivation R A N := LinearMap.compDer ↑e D : Derivation R A M a : A ⊢ ↑(↑(LinearMap.compDer ↑(LinearEquiv.symm e)) (AddHom.toFun { toAddHom := src✝.toAddHom, map_smul' := (_ : ∀ (r : R) (x : Derivation R A M), AddHom.toFun src✝.toAddHom (r • x) = ↑(RingHom.id R) r • AddHom.toFun src✝.toAddHom x) }.toAddHom D)) a = ↑D a [PROOFSTEP] exact e.symm_apply_apply (D a) [GOAL] R : Type u_1 inst✝¹⁰ : CommSemiring R A : Type u_2 inst✝⁹ : CommSemiring A inst✝⁸ : Algebra R A M : Type u_3 inst✝⁷ : AddCommMonoid M inst✝⁶ : Module A M inst✝⁵ : Module R M D✝ D1 D2 : Derivation R A M r : R a b : A N : Type u_4 inst✝⁴ : AddCommMonoid N inst✝³ : Module A N inst✝² : Module R N inst✝¹ : IsScalarTower R A M inst✝ : IsScalarTower R A N f : M 
→ₗ[A] N e : M ≃ₗ[A] N src✝ : Derivation R A M →ₗ[R] Derivation R A N := LinearMap.compDer ↑e D : Derivation R A N ⊢ AddHom.toFun { toAddHom := src✝.toAddHom, map_smul' := (_ : ∀ (r : R) (x : Derivation R A M), AddHom.toFun src✝.toAddHom (r • x) = ↑(RingHom.id R) r • AddHom.toFun src✝.toAddHom x) }.toAddHom (↑(LinearMap.compDer ↑(LinearEquiv.symm e)) D) = D [PROOFSTEP] ext a [GOAL] case H R : Type u_1 inst✝¹⁰ : CommSemiring R A : Type u_2 inst✝⁹ : CommSemiring A inst✝⁸ : Algebra R A M : Type u_3 inst✝⁷ : AddCommMonoid M inst✝⁶ : Module A M inst✝⁵ : Module R M D✝ D1 D2 : Derivation R A M r : R a✝ b : A N : Type u_4 inst✝⁴ : AddCommMonoid N inst✝³ : Module A N inst✝² : Module R N inst✝¹ : IsScalarTower R A M inst✝ : IsScalarTower R A N f : M →ₗ[A] N e : M ≃ₗ[A] N src✝ : Derivation R A M →ₗ[R] Derivation R A N := LinearMap.compDer ↑e D : Derivation R A N a : A ⊢ ↑(AddHom.toFun { toAddHom := src✝.toAddHom, map_smul' := (_ : ∀ (r : R) (x : Derivation R A M), AddHom.toFun src✝.toAddHom (r • x) = ↑(RingHom.id R) r • AddHom.toFun src✝.toAddHom x) }.toAddHom (↑(LinearMap.compDer ↑(LinearEquiv.symm e)) D)) a = ↑D a [PROOFSTEP] exact e.apply_symm_apply (D a) [GOAL] R : Type u_1 inst✝⁵ : CommSemiring R A : Type u_2 inst✝⁴ : CommSemiring A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCancelCommMonoid M inst✝¹ : Module R M inst✝ : Module A M D : A →ₗ[R] M h : ∀ (a b : A), ↑D (a * b) = a • ↑D b + b • ↑D a ⊢ ?m.473416 D h + ↑D 1 = ?m.473416 D h [PROOFSTEP] simpa only [one_smul, one_mul] using (h 1 1).symm [GOAL] R : Type u_1 inst✝⁵ : CommRing R A : Type u_2 inst✝⁴ : CommRing A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommGroup M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a b : A n : ℤ ⊢ ↑D ↑n = 0 [PROOFSTEP] rw [← zsmul_one, D.map_smul_of_tower n, map_one_eq_zero, smul_zero] [GOAL] R : Type u_1 inst✝⁵ : CommRing R A : Type u_2 inst✝⁴ : CommRing A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommGroup M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a✝ b✝ a b : A h : a * b = 1 ⊢ ↑D a = -a ^ 2 • ↑D b [PROOFSTEP] rw [neg_smul] [GOAL] R : Type u_1 inst✝⁵ : CommRing R A : Type u_2 inst✝⁴ : CommRing A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommGroup M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a✝ b✝ a b : A h : a * b = 1 ⊢ ↑D a = -(a ^ 2 • ↑D b) [PROOFSTEP] refine' eq_neg_of_add_eq_zero_left _ [GOAL] R : Type u_1 inst✝⁵ : CommRing R A : Type u_2 inst✝⁴ : CommRing A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommGroup M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a✝ b✝ a b : A h : a * b = 1 ⊢ ↑D a + a ^ 2 • ↑D b = 0 [PROOFSTEP] calc D a + a ^ 2 • D b = a • b • D a + a • a • D b := by simp only [smul_smul, h, one_smul, sq] _ = a • D (a * b) := by rw [leibniz, smul_add, add_comm] _ = 0 := by rw [h, map_one_eq_zero, smul_zero] [GOAL] R : Type u_1 inst✝⁵ : CommRing R A : Type u_2 inst✝⁴ : CommRing A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommGroup M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a✝ b✝ a b : A h : a * b = 1 ⊢ ↑D a + a ^ 2 • ↑D b = a • b • ↑D a + a • a • ↑D b [PROOFSTEP] simp only [smul_smul, h, one_smul, sq] [GOAL] R : Type u_1 inst✝⁵ : CommRing R A : Type u_2 inst✝⁴ : CommRing A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommGroup M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a✝ b✝ a b : A h : a * b = 1 ⊢ a • b • ↑D a + a • a • ↑D b = a • ↑D (a * b) [PROOFSTEP] rw [leibniz, smul_add, add_comm] [GOAL] R : Type u_1 
inst✝⁵ : CommRing R A : Type u_2 inst✝⁴ : CommRing A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommGroup M inst✝¹ : Module A M inst✝ : Module R M D D1 D2 : Derivation R A M r : R a✝ b✝ a b : A h : a * b = 1 ⊢ a • ↑D (a * b) = 0 [PROOFSTEP] rw [h, map_one_eq_zero, smul_zero] [GOAL] R : Type u_1 inst✝⁸ : CommRing R A : Type u_2 inst✝⁷ : CommRing A inst✝⁶ : Algebra R A M : Type u_3 inst✝⁵ : AddCommGroup M inst✝⁴ : Module A M inst✝³ : Module R M D✝ D1 D2 : Derivation R A M r : R a✝ b : A K : Type u_4 inst✝² : Field K inst✝¹ : Module K M inst✝ : Algebra R K D : Derivation R K M a : K ⊢ ↑D a⁻¹ = -a⁻¹ ^ 2 • ↑D a [PROOFSTEP] rcases eq_or_ne a 0 with (rfl | ha) [GOAL] case inl R : Type u_1 inst✝⁸ : CommRing R A : Type u_2 inst✝⁷ : CommRing A inst✝⁶ : Algebra R A M : Type u_3 inst✝⁵ : AddCommGroup M inst✝⁴ : Module A M inst✝³ : Module R M D✝ D1 D2 : Derivation R A M r : R a b : A K : Type u_4 inst✝² : Field K inst✝¹ : Module K M inst✝ : Algebra R K D : Derivation R K M ⊢ ↑D 0⁻¹ = -0⁻¹ ^ 2 • ↑D 0 [PROOFSTEP] simp [GOAL] case inr R : Type u_1 inst✝⁸ : CommRing R A : Type u_2 inst✝⁷ : CommRing A inst✝⁶ : Algebra R A M : Type u_3 inst✝⁵ : AddCommGroup M inst✝⁴ : Module A M inst✝³ : Module R M D✝ D1 D2 : Derivation R A M r : R a✝ b : A K : Type u_4 inst✝² : Field K inst✝¹ : Module K M inst✝ : Algebra R K D : Derivation R K M a : K ha : a ≠ 0 ⊢ ↑D a⁻¹ = -a⁻¹ ^ 2 • ↑D a [PROOFSTEP] exact D.leibniz_of_mul_eq_one (inv_mul_cancel ha) [GOAL] R : Type u_1 inst✝⁵ : CommRing R A : Type u_2 inst✝⁴ : CommRing A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommGroup M inst✝¹ : Module A M inst✝ : Module R M D✝ D1 D2 : Derivation R A M r : R a✝ b✝ : A D : Derivation R A M a b : A ⊢ ↑(-↑D) (a * b) = a • ↑(-↑D) b + b • ↑(-↑D) a [PROOFSTEP] simp only [LinearMap.neg_apply, smul_neg, neg_add_rev, leibniz, coeFn_coe, add_comm] [GOAL] R : Type u_1 inst✝⁵ : CommRing R A : Type u_2 inst✝⁴ : CommRing A inst✝³ : Algebra R A M : Type u_3 inst✝² : AddCommGroup M inst✝¹ : Module A M inst✝ : Module R M D D1✝ D2✝ : Derivation R A M r : R a✝ b✝ : A D1 D2 : Derivation R A M a b : A ⊢ ↑(↑D1 - ↑D2) (a * b) = a • ↑(↑D1 - ↑D2) b + b • ↑(↑D1 - ↑D2) a [PROOFSTEP] simp only [LinearMap.sub_apply, leibniz, coeFn_coe, smul_sub, add_sub_add_comm]
[STATEMENT] lemma Complex_gcard: "gcard (UNIV::complex set) = C_continuum" [PROOF STATE] proof (prove) goal (1 subgoal): 1. gcard UNIV = C_continuum [PROOF STEP] by (metis Complex_vcard V_of_Complex_set bij_betw_def gcard_eq_vcard gcard_image)
text\<open>
Formal Languages and Automata Theory (FLAT)
according to the book of the same name (2nd version) by Zongli Jiang

created by Yongwang Zhao ([email protected])
School of Computer Science and Engineering, Beihang University, Beijing, China
\<close>

section\<open>3. Finite Automaton\<close>

subsection\<open>3.3: nondeterministic finite automaton\<close>

theory DFA_NFA_eq
imports DFA NFAe
begin

subsubsection\<open>equivalence of DFA and NFA\<close>

definition nfa2dfa :: "('a,'s)nfa \<Rightarrow> ('a,'s set)dfa" where
"nfa2dfa A \<equiv> ({q0 A}, \<lambda>a Q. \<Union>((\<delta> A) a ` Q), {Q. \<exists>q\<in>Q. q \<in> F A})"

(*** Equivalence of NFA and DFA via the subset construction nfa2dfa ***)

lemma DFA_delta_is_lift_NFA_delta:
  "DFA.\<delta>' (nfa2dfa A) w Q = \<Union>(NFA.\<delta>' A w ` Q)"
by (induct w arbitrary:Q)(auto simp:nfa2dfa_def)

lemma NFA_DFA_equiv:
  "NFA.accepts A w = DFA.accepts (nfa2dfa A) w"
apply (simp add: DFA.accepts_def NFA.accepts_def DFA_delta_is_lift_NFA_delta)
apply (simp add: nfa2dfa_def)
done

end
(* * Copyright 2014, NICTA * * This software may be distributed and modified according to the terms of * the BSD 2-Clause license. Note that NO WARRANTY is provided. * See "LICENSE_BSD2.txt" for details. * * @TAG(NICTA_BSD) *) theory Padding imports "~~/src/HOL/Main" begin definition padup :: "nat \<Rightarrow> nat \<Rightarrow> nat" where "padup align n \<equiv> (align - n mod align) mod align" lemma padup_dvd: "0 < b \<Longrightarrow> (padup b n = 0) = (b dvd n)" apply(clarsimp simp: padup_def dvd_eq_mod_eq_0) apply(subst mod_if [where m="b - n mod b"]) apply clarsimp apply(insert mod_less_divisor [of b n]) apply arith done lemma mod_triv_le: "0 < n \<Longrightarrow> m mod (n::nat) \<le> m" apply(case_tac "m < n") apply simp apply(subgoal_tac "m mod n < n") apply arith apply(erule mod_less_divisor) done lemma dvd_padup_add: "0 < x \<Longrightarrow> x dvd y + padup x y" apply(clarsimp simp: padup_def) apply(subst mod_if [where m="x - y mod x"]) apply(clarsimp split: split_if_asm) apply(rule conjI) apply clarsimp apply(subst ac_simps) apply(subst diff_add_assoc) apply(rule mod_triv_le) apply simp apply(rule dvd_add) apply simp apply(subst mod_div_equality') apply(subst diff_diff_right) apply(subst ac_simps) apply(subst mult_div_cancel) apply simp apply simp apply(auto simp: dvd_eq_mod_eq_0) done end
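A worked numeric example of `padup` (illustrative, not part of the original theory; both facts should follow by `simp` once `padup_def` is unfolded):

(* padup 4 10 = (4 - 10 mod 4) mod 4 = (4 - 2) mod 4 = 2 *)
lemma "padup 4 10 = 2"
  by (simp add: padup_def)

(* already aligned, so no padding is needed: (4 - 0) mod 4 = 0 *)
lemma "padup 4 8 = 0"
  by (simp add: padup_def)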
#= Constructors for Str strings Copyright 2017-2018 Gandalf Software, Inc., Scott P. Jones Licensed under MIT License, see LICENSE.md =# function _encode_ascii_latin(str, len) buf, out = _allocate(UInt8, len) @inbounds for ch in str set_codeunit!(out, ch%UInt8) out += 1 end buf end function _encode_ascii_latin(pnt::Ptr{UInt8}, len) buf, out = _allocate(UInt8, len) fin = out + len @inbounds while out < fin ch8 = get_codeunit(pnt) set_codeunit!(out, (ch8 <= 0x7f ? ch8 : (((ch8 & 3) << 6) | (get_codeunit(pnt += 1) & 0x3f)))) pnt += 1 out += 1 end buf end @inline function safe_copy(::Type{Vector{T}}, ::Type{C}, str) where {T<:CodeUnitTypes,C<:CSE} @preserve str buf begin len = ncodeunits(str) buf, out = _allocate(T, len) _memcpy(out, pointer(str), len) Str(C, buf) end end @inline safe_copy(::Type{<:Str}, ::Type{C}, str) where {C<:CSE} = Str(C, str.data) @inline safe_copy(::Type{String}, ::Type{C}, str) where {C<:CSE} = Str(C, _data(str)) function _str(str::AbstractString) # handle zero length string quickly isempty(str) && return empty_ascii len, flags, num4byte, num3byte, num2byte, latin1byte = unsafe_check_string(str) if flags == 0 Str(ASCIICSE, _encode_ascii_latin(str, len)) elseif num4byte != 0 Str(_UTF32CSE, _encode_utf32(str, len)) elseif num3byte + num2byte != 0 Str(_UCS2CSE, _encode_utf16(str, len)) else Str(latin1byte == 0 ? ASCIICSE : _LatinCSE, _encode_ascii_latin(str, len)) end end function _str(str::T) where {T<:Union{Vector{UInt8}, Str{<:Binary_CSEs}, String}} # handle zero length string quickly (siz = sizeof(str)) == 0 && return empty_ascii @preserve str begin pnt = pointer(str) len, flags, num4byte, num3byte, num2byte, latin1byte = fast_check_string(pnt, siz) if flags == 0 buf, out = _allocate(UInt8, len) _memcpy(out, pnt, len) Str(ASCIICSE, buf) elseif num4byte != 0 Str(_UTF32CSE, _encode_utf32(pnt, len)) elseif num3byte + num2byte != 0 Str(_UCS2CSE, _encode_utf16(pnt, len)) else Str(latin1byte == 0 ? ASCIICSE : _LatinCSE, _encode_ascii_latin(pnt, len)) end end end function _str_cpy(::Type{T}, str, len) where {T} @preserve str begin buf, pnt = _allocate(T, len) @inbounds for ch in str set_codeunit!(pnt, ch%T) pnt += sizeof(T) end buf end end utf_length(::Type{<:CSE}, b, e) = Int(e - b) + 1 utf_length(l) = l < 0x80 ? l : ifelse(l < 0x800, l*2-0x80, l*3-0x880) # Must have either 1,2,3 byte characters, or 3,4 byte characters utf_length(::Type{UTF8CSE}, b, e) = utf_length(e) - utf_length(b) utf_length(::Type{UTF16CSE}, b, e) = e <= 0xffff ? 
(e-b+1) : ifelse(b > 0xffff, (e-b+1)*2, (e*2-b-0xfffe))

function convert(::Type{<:Str{C}}, rng::UnitRange{<:CodeUnitTypes}) where {C<:CSE}
    isempty(rng) && return empty_str(C)
    b, e = rng.start, rng.stop
    isvalid(C, b) || strerror(StrErrors.INVALID, 1, b)
    isvalid(C, e) || strerror(StrErrors.INVALID, length(rng), e)
    # Need to calculate allocation length
    Str(C, _str_cpy(C, rng, Int(e - b) + 1))
end

function convert(::Type{<:Str{C}}, rng::UnitRange{<:CodeUnitTypes}
                 ) where {C<:Union{ASCIICSE,Latin_CSEs,UCS2_CSEs,UTF32_CSEs}}
    isempty(rng) && return empty_str(C)
    b, e = rng.start, rng.stop
    isvalid(C, b) || strerror(StrErrors.INVALID, 1, b)
    isvalid(C, e) || strerror(StrErrors.INVALID, length(rng), e)
    # If contains range 0xd800-0xdfff, then also invalid
    isempty(intersect(b%UInt32:e%UInt32, 0xd800:0xdfff)) || strerror(StrErrors.INVALID, 0, rng)
    Str(C, _str_cpy(C, rng, Int(e - b) + 1))
end

function convert(::Type{<:Str{C}}, rng::UnitRange{T}) where {C<:CSE,T<:AbstractChar}
    isempty(rng) && return empty_str(C)
    b, e = rng.start, rng.stop
    isvalid(C, b) || strerror(StrErrors.INVALID, 1, b)
    isvalid(C, e) || strerror(StrErrors.INVALID, length(rng), e)
    # If contains range 0xd800-0xdfff, then also invalid
    isempty(intersect(b%UInt32:e%UInt32, 0xd800:0xdfff)) || strerror(StrErrors.INVALID, 0, rng)
    # get counts in range
    Str(C, _str_cpy(C, rng, utf_length(C, b%UInt32, e%UInt32)))
end

function convert(::Type{<:Str{C}}, rng::UnitRange{<:CodeUnitTypes}
                 ) where {C<:Union{UTF8CSE,UTF16CSE}}
    isempty(rng) && return empty_str(C)
    b, e = rng.start, rng.stop
    isvalid(C, b) || strerror(StrErrors.INVALID, 1, b)
    isvalid(C, e) || strerror(StrErrors.INVALID, length(rng), e)
    # If contains range 0xd800-0xdfff, then also invalid
    isempty(intersect(b%UInt32:e%UInt32, 0xd800:0xdfff)) || strerror(StrErrors.INVALID, 0, rng)
    len = utf_length(C, b%UInt32, e%UInt32)
    buf, out = _allocate(codeunit(C), len)
    if C === UTF8CSE
        # Emit each scalar in the range with the shortest UTF-8 sequence
        while b <= min(e, 0x7f)
            set_codeunit!(out, b)
            out += 1
            b += 0x01
        end
        while b <= min(e, 0x7ff)
            out = output_utf8_2byte!(out, b)
            b += 0x01
        end
        while b <= min(e, 0xffff)
            out = output_utf8_3byte!(out, b)
            b += 0x01
        end
        while b <= e
            out = output_utf8_4byte!(out, b)
            b += 0x01
        end
    else
        # UTF-16: BMP scalars take one code unit, the rest a surrogate pair
        while b <= min(e, 0xffff)
            set_codeunit!(out, b)
            out += 2
            b += 0x01
        end
        while b <= e
            c1, c2 = get_utf16(b)
            set_codeunit!(out, c1)
            set_codeunit!(out + 2, c2)
            out += 4
            b += 0x01
        end
    end
    Str(C, buf)
end

convert(::Type{S}, rng::UnitRange{T}) where {T<:AbstractChar,S<:Str{Union{UTF8CSE,UTF16CSE}}} =
    convert(S, (rng.start%UInt32):(rng.stop%UInt32))

convert(::Type{Str}, str::AbstractString) = _str(str)
convert(::Type{Str}, str::String) = _str(str)
convert(::Type{Str}, str::Str) = str
convert(::Type{<:Str{C}}, str::AbstractString) where {C<:CSE} = convert(Str{C}, _str(str))
convert(::Type{<:Str{C}}, str::Str{C}) where {C<:CSE} = str

convert(::Type{<:Str{RawUTF8CSE}}, str::Str{ASCIICSE}) = Str(RawUTF8CSE, str.data)
convert(::Type{<:Str{RawUTF8CSE}}, str::Str{UTF8CSE}) = Str(RawUTF8CSE, str.data)
convert(::Type{<:Str{RawUTF8CSE}}, str::String) = Str(RawUTF8CSE, str)

convert(::Type{UniStr}, str::AbstractString) = _str(str)
convert(::Type{UniStr}, str::String) = _str(str)
convert(::Type{UniStr}, str::Str{<:Union{ASCIICSE,SubSet_CSEs}}) = str

function convert(::Type{T}, vec::AbstractArray{UInt8}) where {C<:Union{UTF8CSE,ASCIICSE},T<:Str{C}}
    is_valid(T, vec) || strerror(StrErrors.INVALID)
    Str(C, _str_cpy(UInt8, vec, length(vec)))
end

convert(::Type{<:Str{C}}, vec::AbstractArray{UInt8}) where {C<:Union{BinaryCSE,Text1CSE}} =
    Str(C, _str_cpy(UInt8, vec, length(vec)))
convert(::Type{<:Str{Text2CSE}}, vec::AbstractArray{UInt16}) = Str(Text2CSE, _str_cpy(UInt16, vec, length(vec))) convert(::Type{<:Str{Text4CSE}}, vec::AbstractArray{UInt32}) = Str(Text4CSE, _str_cpy(UInt32, vec, length(vec))) (::Type{Str})(str::AbstractString) = _str(str) (::Type{Str})(str::String) = _str(str) (::Type{Str})(str::Str) = str (::Type{UniStr})(str::AbstractString) = _str(str) (::Type{UniStr})(str::String) = _str(str) (::Type{UniStr})(str::Str{<:Union{ASCIICSE,SubSet_CSEs}}) = str function convert(::Type{UniStr}, str::T) where {T<:Str} # handle zero length string quickly is_empty(str) && return empty_ascii len, flags = count_chars(T, pointer(str), ncodeunits(str)) if flags == 0 Str(ASCIICSE, codeunit(T) == UInt8 ? str.data : _str_cpy(UInt8, str, len)) elseif (flags & ~(UTF_LATIN1%UInt)) == 0 Str(_LatinCSE, codeunit(T) == UInt8 ? str.data : _str_cpy(UInt8, str, len)) elseif (flags & UTF_UNICODE4) == 0 Str(_UCS2CSE, codeunit(T) == UInt16 ? str.data : _str_cpy(UInt16, str, len)) else Str(_UTF32CSE, codeunit(T) == UInt32 ? str.data : _str_cpy(UInt32, str, len)) end end #convert(::Type{<:Str{C}}, str::String) where {C} = convert(C, _str(str)) convert(::Type{<:Str{Text1CSE}}, str::MaybeSub{String}) = Str(Text1CSE, String(str)) convert(::Type{<:Str{BinaryCSE}}, str::MaybeSub{String}) = Str(BinaryCSE, String(str)) convert(::Type{<:Str{BinaryCSE}}, str::Str{<:Union{ASCIICSE,Latin_CSEs,UTF8_CSEs}}) = Str(BinaryCSE, str.data) convert(::Type{<:Str{BinaryCSE}}, str::SubString{<:Str{<:Union{ASCIICSE,Latin_CSEs,UTF8_CSEs}}}) = Str(BinaryCSE, String(str)) #convert(::Type{<:Str{LatinCSE}}, str::Str{_LatinCSE}) = Str(LatinCSE, str.data) convert(::Type{<:Str{UCS2CSE}}, str::Str{_UCS2CSE}) = Str(UCS2CSE, str.data) convert(::Type{<:Str{UTF32CSE}}, str::Str{_UTF32CSE}) = Str(UTF32CSE, str.data) #convert(::Type{<:Str{LatinCSE}}, str::Str{ASCIICSE}) = Str(LatinCSE, str.data) convert(::Type{String}, str::Str{<:Union{ASCIICSE,Text1CSE,BinaryCSE}}) = str.data function convert(::Type{String}, vec::AbstractArray{<:Chr}) out = get_iobuffer(sizeof(vec)) @inbounds for ch in vec print(out, ch) end String(take!(out)) end function convert(::Type{String}, vec::AbstractArray{<:Union{Text1Chr,ASCIIChr}}) @preserve buf begin buf, pnt = _allocate(UInt8, length(vec)) @inbounds for byt in vec set_codeunit!(pnt, byt) pnt += 1 end buf end end function Str(vec::AbstractArray{T}) where {CS,BT,T<:Chr{CS,BT}} C = codepoint_cse(T) buf, pnt = _allocate(BT, length(vec)) @inbounds for ch in vec set_codeunit!(pnt, ch%BT) pnt += sizeof(BT) end Str(C, buf) end """Convert to a UniStr if valid Unicode, otherwise return a Text1Str""" function unsafe_str end function _unsafe_str8(::Type{T}, str; keywords...) where {T} # handle zero length string quickly (siz = sizeof(str)) == 0 && return empty_ascii @preserve str begin pnt = pointer(str)::Ptr{UInt8} len, flags, num4byte, num3byte, num2byte, latin1byte, invalids = unsafe_check_string(pnt, 1, siz; keywords...) if flags == 0 # Don't allow this to be aliased to a mutable Vector{UInt8} safe_copy(T, ASCIICSE, str) elseif invalids != 0 safe_copy(T, Text1CSE, str) elseif num4byte != 0 Str(_UTF32CSE, _encode_utf32(pnt, len)) elseif num2byte + num3byte != 0 Str(_UCS2CSE, _encode_utf16(pnt, len)) else Str(latin1byte == 0 ? ASCIICSE : _LatinCSE, _encode_ascii_latin(pnt, len)) end end end unsafe_str(str::Vector{UInt8}; accept_invalids=true, kwargs...) = _unsafe_str8(Vector{UInt8}, str; accept_invalids=accept_invalids, kwargs...) unsafe_str(str::MaybeSub{T}; accept_invalids=true, kwargs... 
) where {T<:Union{String,Text1Str,ASCIIStr,BinaryStr}} = _unsafe_str8(T, str; accept_invalids = accept_invalids, kwargs...) function unsafe_str(str::Union{MaybeSub{<:AbstractString},AbstractArray{T}}; accept_invalids = true, kwargs...) where {T<:Union{AbstractChar,UInt16,UInt32}} # handle zero length string quickly is_empty(str) && return empty_ascii len, flags, num4byte, num3byte, num2byte, latin1byte, invalids = unsafe_check_string(str; accept_invalids = accept_invalids, kwargs...) if flags == 0 Str(ASCIICSE, _cvtsize(UInt8, str, len)) elseif invalids != 0 # Todo: Make sure this handles different sorts of SubStrings effectively siz = sizeof(eltype(T)) C = siz == 4 ? Text4CSE : (siz == 2 ? Text2CSE : Text1CSE) S = codeunit(C) buf, pnt = _allocate(S, len) @inbounds for ch in str set_codeunit!(pnt, ch%S) pnt += siz end Str(C, buf) elseif num4byte != 0 Str(_UTF32CSE, _encode_utf32(str, len)) elseif num2byte + num3byte != 0 Str(_UCS2CSE, _encode_utf16(str, len)) else Str(latin1byte == 0 ? ASCIICSE : _LatinCSE, _encode_ascii_latin(str, len)) end end # Fallback constructors for Str types, from any AbstractString (::Type{T})(vec::S) where {T<:Str, S<:AbstractArray} = convert(T, vec) (::Type{T})(str::S) where {T<:Str, S<:AbstractString} = convert(T, str) (::Type{T})(str::S) where {T<:Str, S<:Str} = convert(T, str) (::Type{T})(str::T) where {T<:Str} = str
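# Illustrative usage (a sketch, not part of the original file): `unsafe_str`
# picks the narrowest Str type that can hold the input, and falls back to a
# Text*Str type instead of throwing when the input is not valid Unicode:
#
#   unsafe_str("abc")          # all ASCII -> ASCIIStr
#   unsafe_str([0x61, 0xff])   # 0xff is not valid UTF-8 -> Text1Str
#
# whereas the `Str`/`UniStr` constructors above validate the input and choose
# among ASCIIStr, _LatinStr, _UCS2Str and _UTF32Str.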
#include <stdio.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> int main (void) { int i; double x = 0, y = 0, dx, dy; const gsl_rng_type * T; gsl_rng * r; gsl_rng_env_setup(); T = gsl_rng_default; r = gsl_rng_alloc (T); printf ("%g %g\n", x, y); for (i = 0; i < 10; i++) { gsl_ran_dir_2d (r, &dx, &dy); x += dx; y += dy; printf ("%g %g\n", x, y); } gsl_rng_free (r); return 0; }
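/* Build and run (illustrative; the file name is hypothetical, the linker flags
 * are the usual GSL ones and may differ on your install):
 *
 *     gcc random_walk.c -lgsl -lgslcblas -lm && ./a.out
 *
 * The program prints 11 "x y" points of a unit-step 2-D random walk starting
 * at the origin; gsl_rng_env_setup() lets the GSL_RNG_SEED and GSL_RNG_TYPE
 * environment variables override the default generator and seed. */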
(** * HSET is a univalent_category ([is_univalent_HSET]) *)

Require Import UniMath.Foundations.PartD.
Require Import UniMath.Foundations.Sets.
Require Import UniMath.MoreFoundations.Tactics.

Require Import UniMath.Foundations.UnivalenceAxiom.
Require Import UniMath.Foundations.HLevels.

Require Import UniMath.CategoryTheory.Core.Categories.
Require Import UniMath.CategoryTheory.Core.Isos.
Require Import UniMath.CategoryTheory.Core.Univalence.
Require Import UniMath.CategoryTheory.Core.Functors.
Require Import UniMath.CategoryTheory.categories.HSET.Core.
Require Import UniMath.CategoryTheory.categories.HSET.MonoEpiIso.

Local Open Scope cat.

(** ** HSET is a univalent_category. *)

Definition hset_id_weq_z_iso (A B : ob HSET) : (A = B) ≃ (z_iso A B) :=
  weqcomp (UA_for_HLevels 2 A B) (hset_equiv_weq_z_iso A B).

(** The map [precat_paths_to_iso] for which we need to show [isweq] is actually
    equal to the carrier of the weak equivalence we constructed above. We use
    this fact to show that [precat_paths_to_iso] is an equivalence. *)

Lemma hset_id_weq_iso_is (A B : ob HSET):
  @idtoiso _ A B = pr1 (hset_id_weq_z_iso A B).
Proof.
  apply funextfun.
  intro p; elim p.
  apply z_iso_eq; simpl.
  - apply funextfun;
    intro x;
    destruct A.
    apply idpath.
Defined.

Lemma is_weq_precat_paths_to_iso_hset (A B : ob HSET):
  isweq (@idtoiso _ A B).
Proof.
  rewrite hset_id_weq_iso_is.
  apply (pr2 (hset_id_weq_z_iso A B)).
Defined.

Definition category_HSET : category := make_category HSET has_homsets_HSET.

Lemma is_univalent_HSET : is_univalent category_HSET.
Proof.
  intros a b.
  apply (is_weq_precat_paths_to_iso_hset a b).
Defined.

Definition HSET_univalent_category : univalent_category := _ ,, is_univalent_HSET.
\name{generate_ER} \alias{generate_ER} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Simulating networks from the Erdos-Renyi model} \description{ This function generates networks from the \enc{Erdős–Rényi}{Erdos-Renyi} model. In this model, the preferential attachment function is a constant function, i.e. \eqn{A_k = 1}, and node fitnesses are all equal to \eqn{1}. It is a wrapper of the more powerful function \code{\link{generate_net}}. } \usage{ generate_ER(N = 1000, num_seed = 2 , multiple_node = 1 , m = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{N}{ Integer. Total number of nodes in the network (including the nodes in the seed graph). Default value is \code{1000}. } \item{num_seed}{ Integer. The number of nodes of the seed graph (the initial state of the network). The seed graph is a cycle. Default value is \code{2}. } \item{multiple_node}{ Positive integer. The number of new nodes at each time-step. Default value is \code{1}. } \item{m}{ Positive integer. The number of edges of each new node. Default value is \code{1}. } } \value{ The output is a \code{PAFit_net} object, which is a List contains the following four fields: \item{graph}{a three-column matrix, where each row contains information of one edge, in the form of \code{(from_id, to_id, time_stamp)}. \code{from_id} is the id of the source, \code{to_id} is the id of the destination.} \item{type}{a string indicates whether the network is \code{"directed"} or \code{"undirected"}.} \item{PA}{a numeric vector contains the true PA function.} \item{fitness}{fitness values of nodes in the network. The fitnesses are all equal to \eqn{1}.} } \author{ Thong Pham \email{[email protected]} } \references{ 1. \enc{Erdös}{Erdos} P. & \enc{Rényi}{Renyi} A.. On random graphs. Publicationes Mathematicae Debrecen. 1959;6:290–297 (\url{http://snap.stanford.edu/class/cs224w-readings/erdos59random.pdf}). } \seealso{ For subsequent estimation procedures, see \code{\link{get_statistics}}. For other functions to generate networks, see \code{\link{generate_net}}, \code{\link{generate_BA}}, \code{\link{generate_BB}} and \code{\link{generate_fit_only}}. } \examples{ library("PAFit") # generate a network from the ER model with N = 1000 nodes net <- generate_ER(N = 1000) str(net) plot(net) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \concept{ER model}
-v bmu.v +ncaccess+rwc bmutb.v
# Semi-Monocoque Theory: corrective solutions

```python
from pint import UnitRegistry
import sympy
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import sys
%matplotlib inline
from IPython.display import display
```

Import **Section** class, which contains all calculations

```python
from Section import Section
```

Initialization of **sympy** symbolic tool and **pint** for dimension analysis (not really implemented right now, as it is not directly compatible with sympy)

```python
ureg = UnitRegistry()
sympy.init_printing()
```

Define **sympy** parameters used for geometric description of sections

```python
A, A0, t, t0, a, b, h, L, E, G = sympy.symbols('A A_0 t t_0 a b h L E G', positive=True)
```

We also define numerical values for each **symbol** in order to plot the scaled section and perform calculations

```python
values = [(A, 150 * ureg.millimeter**2),(A0, 250 * ureg.millimeter**2),(a, 80 * ureg.millimeter), \
          (b, 20 * ureg.millimeter),(h, 35 * ureg.millimeter),(L, 2000 * ureg.millimeter), \
          (t, 0.8 *ureg.millimeter),(E, 72e3 * ureg.MPa), (G, 27e3 * ureg.MPa)]
datav = [(v[0],v[1].magnitude) for v in values]
```

# Second example: Rectangular section with 6 nodes and 2 loops

Define graph describing the section:

1) **stringers** are **nodes** with parameters:
- **x** coordinate
- **y** coordinate
- **Area**

2) **panels** are **oriented edges** with parameters:
- **thickness**
- **length** which is automatically calculated

```python
stringers = {1:[(2*a,h),A],
             2:[(a,h),A],
             3:[(sympy.Integer(0),h),A],
             4:[(sympy.Integer(0),sympy.Integer(0)),A],
             5:[(a,sympy.Integer(0)),A],
             6:[(2*a,sympy.Integer(0)),A]}

panels = {(1,2):t,
          (2,3):t,
          (3,4):t,
          (4,5):t,
          (5,6):t,
          (6,1):t,
          (5,2):t}
```

Define section and perform first calculations

```python
S1 = Section(stringers, panels)
```

```python
S1.cycles
```

## Plot of **S1** section in original reference frame

Define a dictionary of coordinates used by **Networkx** to plot the section as a directed graph. Note that arrows are actually just thicker stubs

```python
start_pos={ii: [float(S1.g.node[ii]['ip'][i].subs(datav)) for i in range(2)] for ii in S1.g.nodes() }
```

```python
plt.figure(figsize=(12,8),dpi=300)
nx.draw(S1.g,with_labels=True, arrows= True, pos=start_pos)
plt.arrow(0,0,20,0)
plt.arrow(0,0,0,20)
#plt.text(0,0, 'CG', fontsize=24)
plt.axis('equal')
plt.title("Section in starting reference Frame",fontsize=16);
```

## Plot of **S1** section in inertial reference Frame

Section is plotted wrt **center of gravity** and rotated (if necessary) so that *x* and *y* are principal axes.
**Center of Gravity** and **Shear Center** are drawn

```python
positions={ii: [float(S1.g.node[ii]['pos'][i].subs(datav)) for i in range(2)] for ii in S1.g.nodes() }
```

```python
x_ct, y_ct = S1.ct.subs(datav)

plt.figure(figsize=(12,8),dpi=300)
nx.draw(S1.g,with_labels=True, pos=positions)
plt.plot([0],[0],'o',ms=12,label='CG')
plt.plot([x_ct],[y_ct],'^',ms=12, label='SC')
#plt.text(0,0, 'CG', fontsize=24)
#plt.text(x_ct,y_ct, 'SC', fontsize=24)
plt.legend(loc='lower right', shadow=True)
plt.axis('equal')
plt.title("Section in principal reference Frame",fontsize=16);
```

Expression of **inertial properties** in *principal reference frame*

```python
sympy.simplify(S1.Ixx), sympy.simplify(S1.Iyy), sympy.simplify(S1.Ixy), sympy.simplify(S1.θ)
```

```python
S1.symmetry
```

    [{'edges': [((1, 2), (2, 3)), ((3, 4), (6, 1)), ((4, 5), (5, 6)), (5, 2)],
      'nodes': [(1, 3), (2, 2), (4, 6), (5, 5)]},
     {'edges': [((1, 2), (5, 6)), ((2, 3), (4, 5)), (3, 4), (5, 2), (6, 1)],
      'nodes': [(1, 6), (2, 5), (3, 4)]}]

Compute **L** matrix: with 6 nodes we expect 3 **dofs**, two with _symmetric load_ and one with _antisymmetric load_

```python
S1.compute_L()
```

```python
S1.L
```

Compute **H** matrix

```python
S1.compute_H()
```

```python
S1.H.subs(datav)
```

Compute $\tilde{K}$ and $\tilde{M}$

```python
S1.compute_KM(A,h,t)
```

```python
S1.Ktilde
```

```python
S1.Mtilde.subs(datav)
```

Compute **eigenvalues** and **eigenvectors**: results are in the form:

- eigenvalue
- multiplicity
- eigenvector

```python
sol_data = (S1.Ktilde.inv()*(S1.Mtilde.subs(datav))).eigenvects()
sol_data
```

Extract eigenvalues

```python
β2 = [sol[0] for sol in sol_data]
β2
```

Extract and normalize eigenvectors

```python
X = [sol[2][0]/sol[2][0].norm() for sol in sol_data]
X
```

Compute numerical value of $\lambda$

```python
λ = [sympy.N(sympy.sqrt(E*A*h/(G*t)*βi).subs(datav)) for βi in β2]
λ
```

```python

```
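The values of $\lambda$ are the characteristic lengths with which the corrective (self-equilibrated) solutions decay along the axis, varying as $e^{-x/\lambda}$. As a quick illustrative check (reusing the symbols defined above, not part of the original notebook), compare each decay length to the overall length **L**:

```python
# ratio of each corrective-solution decay length to the overall length L (illustrative)
[sympy.N(li / L.subs(datav)) for li in λ]
```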
chapter \<open>The Framework\<close>

theory BasicDefs imports AuxLemmas begin

text \<open>
  As slicing is a program analysis that can be completely based on the
  information given in the CFG, we want to provide a framework which allows us
  to formalize and prove properties of slicing regardless of the actual
  programming language. So the starting point for the formalization is the
  definition of an abstract CFG, i.e.\ without considering features specific
  to particular languages. By doing so we ensure that our framework is as
  generic as possible since all proofs hold for every language whose CFG
  conforms to this abstract CFG.

  This abstract CFG can be used as a basis for static intraprocedural slicing
  as well as for dynamic slicing, if in the dynamic case all method calls are
  inlined (i.e., abstract CFG paths conform to traces).
\<close>

section \<open>Basic Definitions\<close>

subsection\<open>Edge kinds\<close>

datatype 'state edge_kind = Update "'state \<Rightarrow> 'state"    ("\<Up>_")
                          | Predicate "'state \<Rightarrow> bool"   ("'(_')\<^sub>\<surd>")

subsection \<open>Transfer and predicate functions\<close>

fun transfer :: "'state edge_kind \<Rightarrow> 'state \<Rightarrow> 'state"
where
  "transfer (\<Up>f) s = f s"
| "transfer (P)\<^sub>\<surd> s = s"

fun transfers :: "'state edge_kind list \<Rightarrow> 'state \<Rightarrow> 'state"
where
  "transfers [] s = s"
| "transfers (e#es) s = transfers es (transfer e s)"

fun pred :: "'state edge_kind \<Rightarrow> 'state \<Rightarrow> bool"
where
  "pred (\<Up>f) s = True"
| "pred (P)\<^sub>\<surd> s = (P s)"

fun preds :: "'state edge_kind list \<Rightarrow> 'state \<Rightarrow> bool"
where
  "preds [] s = True"
| "preds (e#es) s = (pred e s \<and> preds es (transfer e s))"

lemma transfers_split:
  "(transfers (ets@ets') s) = (transfers ets' (transfers ets s))"
by(induct ets arbitrary:s) auto

lemma preds_split:
  "(preds (ets@ets') s) = (preds ets s \<and> preds ets' (transfers ets s))"
by(induct ets arbitrary:s) auto

lemma transfers_id_no_influence:
  "transfers [et \<leftarrow> ets. et \<noteq> \<Up>id] s = transfers ets s"
by(induct ets arbitrary:s,auto)

lemma preds_True_no_influence:
  "preds [et \<leftarrow> ets. et \<noteq> (\<lambda>s. True)\<^sub>\<surd>] s = preds ets s"
by(induct ets arbitrary:s,auto)

end
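A small sanity check of `transfers` and `preds` (illustrative, not part of the original theory): update edges rewrite the state while a predicate edge only contributes a condition, so starting in state `(0::int)` we expect

lemma "transfers [\<Up>(\<lambda>s. s + 1), (\<lambda>s. 0 < s)\<^sub>\<surd>, \<Up>(\<lambda>s. 2 * s)] (0::int) = 2"
  by simp

lemma "preds [\<Up>(\<lambda>s. s + 1), (\<lambda>s. 0 < s)\<^sub>\<surd>, \<Up>(\<lambda>s. 2 * s)] (0::int)"
  by simp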
/-
Copyright (c) 2017 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Simon Hudon
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.tactic.ext
import Mathlib.tactic.lint.default
import Mathlib.PostPort

universes u v u_1 u_2 u_3 w

namespace Mathlib

/-!
# Functors

This module provides additional lemmas, definitions, and instances for `functor`s.

## Main definitions

* `const α` is the functor that sends all types to `α`.
* `add_const α` is `const α` but for when `α` has an additive structure.
* `comp F G` for functors `F` and `G` is the functor composition of `F` and `G`.
* `liftp` and `liftr` respectively lift predicates and relations on a type `α`
  to `F α`. Terms of `F α` are considered to, in some sense, contain values of type `α`.

## Tags

functor, applicative
-/

theorem functor.map_id {F : Type u → Type v} {α : Type u} [Functor F] [is_lawful_functor F] :
    Functor.map id = id :=
  funext id_map

theorem functor.map_comp_map {F : Type u → Type v} {α : Type u} {β : Type u} {γ : Type u}
    [Functor F] [is_lawful_functor F] (f : α → β) (g : β → γ) :
    Functor.map g ∘ Functor.map f = Functor.map (g ∘ f) :=
  sorry

theorem functor.ext {F : Type u_1 → Type u_2} {F1 : Functor F} {F2 : Functor F}
    [is_lawful_functor F] [is_lawful_functor F]
    (H : ∀ (α β : Type u_1) (f : α → β) (x : F α), f <$> x = f <$> x) : F1 = F2 :=
  sorry

/-- Introduce the `id` functor. Incidentally, this is `pure` for `id` as a `monad` and as an
`applicative` functor. -/
def id.mk {α : Sort u} : α → id α := id

namespace functor

/-- `const α` is the constant functor, mapping every type to `α`. When `α` has a monoid structure,
`const α` has an `applicative` instance. (If `α` has an additive monoid structure, see
`functor.add_const`.) -/
def const (α : Type u_1) (β : Type u_2) := α

/-- `const.mk` is the canonical map `α → const α β` (the identity), and it can be used as a pattern
to extract this value. -/
def const.mk {α : Type u_1} {β : Type u_2} (x : α) : const α β := x

/-- `const.mk'` is `const.mk` but specialized to map `α` to `const α punit`, where `punit` is the
terminal object in `Type*`. -/
def const.mk' {α : Type u_1} (x : α) : const α PUnit := x

/-- Extract the element of `α` from the `const` functor. -/
def const.run {α : Type u_1} {β : Type u_2} (x : const α β) : α := x

namespace const

protected theorem ext {α : Type u_1} {β : Type u_2} {x : const α β} {y : const α β}
    (h : run x = run y) : x = y :=
  h

/-- The map operation of the `const γ` functor. -/
protected def map {γ : Type u_1} {α : Type u_2} {β : Type u_3} (f : α → β) (x : const γ β) :
    const γ α :=
  x

protected instance functor {γ : Type u_1} : Functor (const γ) :=
  { map := const.map, mapConst := fun (α β : Type u_2) => const.map ∘ function.const β }

protected instance is_lawful_functor {γ : Type u_1} : is_lawful_functor (const γ) :=
  is_lawful_functor.mk (fun (α : Type u_2) (x : const γ α) => Eq.refl (id <$> x))
    fun (α β γ_1 : Type u_2) (g : α → β) (h : β → γ_1) (x : const γ α) => Eq.refl ((h ∘ g) <$> x)

protected instance inhabited {α : Type u_1} {β : Type u_2} [Inhabited α] :
    Inhabited (const α β) :=
  { default := Inhabited.default }

end const

/-- `add_const α` is a synonym for constant functor `const α`, mapping every type to `α`. When `α`
has an additive monoid structure, `add_const α` has an `applicative` instance. (If `α` has a
multiplicative monoid structure, see `functor.const`.)
-/ def add_const (α : Type u_1) (β : Type u_2) := const α /-- `add_const.mk` is the canonical map `α → add_const α β`, which is the identity, where `add_const α β = const α β`. It can be used as a pattern to extract this value. -/ def add_const.mk {α : Type u_1} {β : Type u_2} (x : α) : add_const α β := x /-- Extract the element of `α` from the constant functor. -/ def add_const.run {α : Type u_1} {β : Type u_2} : add_const α β → α := id protected instance add_const.functor {γ : Type u_1} : Functor (add_const γ) := const.functor protected instance add_const.is_lawful_functor {γ : Type u_1} : is_lawful_functor (add_const γ) := const.is_lawful_functor protected instance add_const.inhabited {α : Type u_1} {β : Type u_2} [Inhabited α] : Inhabited (add_const α β) := { default := Inhabited.default } /-- `functor.comp` is a wrapper around `function.comp` for types. It prevents Lean's type class resolution mechanism from trying a `functor (comp F id)` when `functor F` would do. -/ def comp (F : Type u → Type w) (G : Type v → Type u) (α : Type v) := F (G α) /-- Construct a term of `comp F G α` from a term of `F (G α)`, which is the same type. Can be used as a pattern to extract a term of `F (G α)`. -/ def comp.mk {F : Type u → Type w} {G : Type v → Type u} {α : Type v} (x : F (G α)) : comp F G α := x /-- Extract a term of `F (G α)` from a term of `comp F G α`, which is the same type. -/ def comp.run {F : Type u → Type w} {G : Type v → Type u} {α : Type v} (x : comp F G α) : F (G α) := x namespace comp protected theorem ext {F : Type u → Type w} {G : Type v → Type u} {α : Type v} {x : comp F G α} {y : comp F G α} : run x = run y → x = y := id protected instance inhabited {F : Type u → Type w} {G : Type v → Type u} {α : Type v} [Inhabited (F (G α))] : Inhabited (comp F G α) := { default := Inhabited.default } /-- The map operation for the composition `comp F G` of functors `F` and `G`. 
-/ protected def map {F : Type u → Type w} {G : Type v → Type u} [Functor F] [Functor G] {α : Type v} {β : Type v} (h : α → β) : comp F G α → comp F G β := sorry protected instance functor {F : Type u → Type w} {G : Type v → Type u} [Functor F] [Functor G] : Functor (comp F G) := { map := comp.map, mapConst := fun (α β : Type v) => comp.map ∘ function.const β } theorem map_mk {F : Type u → Type w} {G : Type v → Type u} [Functor F] [Functor G] {α : Type v} {β : Type v} (h : α → β) (x : F (G α)) : h <$> mk x = mk (Functor.map h <$> x) := rfl @[simp] protected theorem run_map {F : Type u → Type w} {G : Type v → Type u} [Functor F] [Functor G] {α : Type v} {β : Type v} (h : α → β) (x : comp F G α) : run (h <$> x) = Functor.map h <$> run x := rfl protected theorem id_map {F : Type u → Type w} {G : Type v → Type u} [Functor F] [Functor G] [is_lawful_functor F] [is_lawful_functor G] {α : Type v} (x : comp F G α) : comp.map id x = x := sorry protected theorem comp_map {F : Type u → Type w} {G : Type v → Type u} [Functor F] [Functor G] [is_lawful_functor F] [is_lawful_functor G] {α : Type v} {β : Type v} {γ : Type v} (g' : α → β) (h : β → γ) (x : comp F G α) : comp.map (h ∘ g') x = comp.map h (comp.map g' x) := sorry protected instance is_lawful_functor {F : Type u → Type w} {G : Type v → Type u} [Functor F] [Functor G] [is_lawful_functor F] [is_lawful_functor G] : is_lawful_functor (comp F G) := is_lawful_functor.mk comp.id_map comp.comp_map theorem functor_comp_id {F : Type u_1 → Type u_2} [AF : Functor F] [is_lawful_functor F] : comp.functor = AF := ext fun (α β : Type u_1) (f : α → β) (x : F α) => rfl theorem functor_id_comp {F : Type u_1 → Type u_2} [AF : Functor F] [is_lawful_functor F] : comp.functor = AF := ext fun (α β : Type u_1) (f : α → β) (x : F α) => rfl end comp namespace comp /-- The `<*>` operation for the composition of applicative functors. 
-/ protected def seq {F : Type u → Type w} {G : Type v → Type u} [Applicative F] [Applicative G] {α : Type v} {β : Type v} : comp F G (α → β) → comp F G α → comp F G β := sorry protected instance has_pure {F : Type u → Type w} {G : Type v → Type u} [Applicative F] [Applicative G] : Pure (comp F G) := { pure := fun (_x : Type v) (x : _x) => mk (pure (pure x)) } protected instance has_seq {F : Type u → Type w} {G : Type v → Type u} [Applicative F] [Applicative G] : Seq (comp F G) := { seq := fun (_x _x_1 : Type v) (f : comp F G (_x → _x_1)) (x : comp F G _x) => comp.seq f x } @[simp] protected theorem run_pure {F : Type u → Type w} {G : Type v → Type u} [Applicative F] [Applicative G] {α : Type v} (x : α) : run (pure x) = pure (pure x) := idRhs (run (pure x) = run (pure x)) rfl @[simp] protected theorem run_seq {F : Type u → Type w} {G : Type v → Type u} [Applicative F] [Applicative G] {α : Type v} {β : Type v} (f : comp F G (α → β)) (x : comp F G α) : run (f <*> x) = Seq.seq <$> run f <*> run x := rfl protected instance applicative {F : Type u → Type w} {G : Type v → Type u} [Applicative F] [Applicative G] : Applicative (comp F G) := { toFunctor := { map := comp.map, mapConst := fun (α β : Type v) => comp.map ∘ function.const β }, toPure := { pure := pure }, toSeq := { seq := comp.seq }, toSeqLeft := { seqLeft := fun (α β : Type v) (a : comp F G α) (b : comp F G β) => comp.seq (comp.map (function.const β) a) b }, toSeqRight := { seqRight := fun (α β : Type v) (a : comp F G α) (b : comp F G β) => comp.seq (comp.map (function.const α id) a) b } } end comp /-- If we consider `x : F α` to, in some sense, contain values of type `α`, predicate `liftp p x` holds iff every value contained by `x` satisfies `p`. -/ def liftp {F : Type u → Type u} [Functor F] {α : Type u} (p : α → Prop) (x : F α) := ∃ (u : F (Subtype p)), subtype.val <$> u = x /-- If we consider `x : F α` to, in some sense, contain values of type `α`, then `liftr r x y` relates `x` and `y` iff (1) `x` and `y` have the same shape and (2) we can pair values `a` from `x` and `b` from `y` so that `r a b` holds. -/ def liftr {F : Type u → Type u} [Functor F] {α : Type u} (r : α → α → Prop) (x : F α) (y : F α) := ∃ (u : F (Subtype fun (p : α × α) => r (prod.fst p) (prod.snd p))), (fun (t : Subtype fun (p : α × α) => r (prod.fst p) (prod.snd p)) => prod.fst (subtype.val t)) <$> u = x ∧ (fun (t : Subtype fun (p : α × α) => r (prod.fst p) (prod.snd p)) => prod.snd (subtype.val t)) <$> u = y /-- If we consider `x : F α` to, in some sense, contain values of type `α`, then `supp x` is the set of values of type `α` that `x` contains. -/ def supp {F : Type u → Type u} [Functor F] {α : Type u} (x : F α) : set α := set_of fun (y : α) => ∀ {p : α → Prop}, liftp p x → p y theorem of_mem_supp {F : Type u → Type u} [Functor F] {α : Type u} {x : F α} {p : α → Prop} (h : liftp p x) (y : α) (H : y ∈ supp x) : p y := hy h end functor namespace ulift protected instance functor : Functor ulift := { map := fun (α β : Type u_1) (f : α → β) => up ∘ f ∘ down, mapConst := fun (α β : Type u_1) => (fun (f : β → α) => up ∘ f ∘ down) ∘ function.const β } end Mathlib
#' R package: DailyHRB #' #' Some useful functions I use daily #' #' @docType package #' @name DailyHRB NULL
import numpy as np import cmath class Complejo: def __init__(self,a,b): self.imaginario = b self.real = a self.norma = (a**2 + b**2)**(1/2) def conjugado(self): self.imaginario = -self.imaginario return self def calcula_norma(self): return self.norma def pow(self,p): z = np.power((self.real + 1j*self.imaginario),p) return Complejo(z.real,z.imag)
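# --- Usage sketch (not from the original file; values are made up) ---
# A quick hedged demo of the Complejo class above. Note that `conjugado`
# mutates the instance in place (the norm is unchanged by conjugation).
z = Complejo(3, 4)
print(z.calcula_norma())      # 5.0, since sqrt(3**2 + 4**2) = 5
w = z.pow(2)                  # (3+4j)**2 = -7+24j
print(w.real, w.imaginario)   # -7.0 24.0
z.conjugado()                 # flips the sign of the imaginary part
print(z.imaginario)           # -4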
/- Copyright (c) 2022 Markus Himmel. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Markus Himmel -/ import category_theory.limits.preserves.finite /-! # Bundled exact functors > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. We say that a functor `F` is left exact if it preserves finite limits, it is right exact if it preserves finite colimits, and it is exact if it is both left exact and right exact. In this file, we define the categories of bundled left exact, right exact and exact functors. -/ universes v₁ v₂ u₁ u₂ open category_theory.limits namespace category_theory variables {C : Type u₁} [category.{v₁} C] {D : Type u₂} [category.{v₂} D] section variables (C) (D) /-- Bundled left-exact functors. -/ @[derive category, nolint has_nonempty_instance] def LeftExactFunctor := full_subcategory (λ F : C ⥤ D, nonempty (preserves_finite_limits F)) infixr ` ⥤ₗ `:26 := LeftExactFunctor /-- A left exact functor is in particular a functor. -/ @[derive full, derive faithful] def LeftExactFunctor.forget : (C ⥤ₗ D) ⥤ (C ⥤ D) := full_subcategory_inclusion _ /-- Bundled right-exact functors. -/ @[derive category, nolint has_nonempty_instance] def RightExactFunctor := full_subcategory (λ F : C ⥤ D, nonempty (preserves_finite_colimits F)) infixr ` ⥤ᵣ `:26 := RightExactFunctor /-- A right exact functor is in particular a functor. -/ @[derive full, derive faithful] def RightExactFunctor.forget : (C ⥤ᵣ D) ⥤ (C ⥤ D) := full_subcategory_inclusion _ /-- Bundled exact functors. -/ @[derive category, nolint has_nonempty_instance] def ExactFunctor := full_subcategory (λ F : C ⥤ D, nonempty (preserves_finite_limits F) ∧ nonempty (preserves_finite_colimits F)) infixr ` ⥤ₑ `:26 := ExactFunctor /-- An exact functor is in particular a functor. -/ @[derive full, derive faithful] def ExactFunctor.forget : (C ⥤ₑ D) ⥤ (C ⥤ D) := full_subcategory_inclusion _ /-- Turn an exact functor into a left exact functor. -/ @[derive full, derive faithful] def LeftExactFunctor.of_exact : (C ⥤ₑ D) ⥤ (C ⥤ₗ D) := full_subcategory.map (λ X, and.left) /-- Turn an exact functor into a left exact functor. -/ @[derive full, derive faithful] def RightExactFunctor.of_exact : (C ⥤ₑ D) ⥤ (C ⥤ᵣ D) := full_subcategory.map (λ X, and.right) variables {C D} @[simp] lemma LeftExactFunctor.of_exact_obj (F : C ⥤ₑ D) : (LeftExactFunctor.of_exact C D).obj F = ⟨F.1, F.2.1⟩ := rfl @[simp] lemma RightExactFunctor.of_exact_obj (F : C ⥤ₑ D) : (RightExactFunctor.of_exact C D).obj F = ⟨F.1, F.2.2⟩ := rfl @[simp] lemma LeftExactFunctor.of_exact_map {F G : C ⥤ₑ D} (α : F ⟶ G) : (LeftExactFunctor.of_exact C D).map α = α := rfl @[simp] lemma RightExactFunctor.of_exact_map {F G : C ⥤ₑ D} (α : F ⟶ G) : (RightExactFunctor.of_exact C D).map α = α := rfl @[simp] @[simp] lemma LeftExactFunctor.forget_map {F G : C ⥤ₗ D} (α : F ⟶ G) : (LeftExactFunctor.forget C D).map α = α := rfl @[simp] lemma RightExactFunctor.forget_map {F G : C ⥤ᵣ D} (α : F ⟶ G) : (RightExactFunctor.forget C D).map α = α := rfl @[simp] lemma ExactFunctor.forget_map {F G : C ⥤ₑ D} (α : F ⟶ G) : (ExactFunctor.forget C D).map α = α := rfl /-- Turn a left exact functor into an object of the category `LeftExactFunctor C D`. -/ def LeftExactFunctor.of (F : C ⥤ D) [preserves_finite_limits F] : C ⥤ₗ D := ⟨F, ⟨infer_instance⟩⟩ /-- Turn a right exact functor into an object of the category `RightExactFunctor C D`. 
-/ def RightExactFunctor.of (F : C ⥤ D) [preserves_finite_colimits F] : C ⥤ᵣ D := ⟨F, ⟨infer_instance⟩⟩ /-- Turn an exact functor into an object of the category `ExactFunctor C D`. -/ def ExactFunctor.of (F : C ⥤ D) [preserves_finite_limits F] [preserves_finite_colimits F] : C ⥤ₑ D := ⟨F, ⟨⟨infer_instance⟩, ⟨infer_instance⟩⟩⟩ @[simp] lemma LeftExactFunctor.of_fst (F : C ⥤ D) [preserves_finite_limits F] : (LeftExactFunctor.of F).obj = F := rfl @[simp] lemma RightExactFunctor.of_fst (F : C ⥤ D) [preserves_finite_colimits F] : (RightExactFunctor.of F).obj = F := rfl @[simp] lemma ExactFunctor.of_fst (F : C ⥤ D) [preserves_finite_limits F] [preserves_finite_colimits F] : (ExactFunctor.of F).obj = F := rfl lemma LeftExactFunctor.forget_obj_of (F : C ⥤ D) [preserves_finite_limits F] : (LeftExactFunctor.forget C D).obj (LeftExactFunctor.of F) = F := rfl lemma RightExactFunctor.forget_obj_of (F : C ⥤ D) [preserves_finite_colimits F] : (RightExactFunctor.forget C D).obj (RightExactFunctor.of F) = F := rfl lemma ExactFunctor.forget_obj_of (F : C ⥤ D) [preserves_finite_limits F] [preserves_finite_colimits F] : (ExactFunctor.forget C D).obj (ExactFunctor.of F) = F := rfl noncomputable instance (F : C ⥤ₗ D) : preserves_finite_limits F.obj := F.property.some noncomputable instance (F : C ⥤ᵣ D) : preserves_finite_colimits F.obj := F.property.some noncomputable instance (F : C ⥤ₑ D) : preserves_finite_limits F.obj := F.property.1.some noncomputable instance (F : C ⥤ₑ D) : preserves_finite_colimits F.obj := F.property.2.some end end category_theory
(* Author: Tobias Nipkow Copyright 1998 TUM *) section "From regular expressions directly to nondeterministic automata" theory RegExp2NA imports "Regular-Sets.Regular_Exp" NA begin type_synonym 'a bitsNA = "('a,bool list)na" definition "atom" :: "'a \<Rightarrow> 'a bitsNA" where "atom a = ([True], \<lambda>b s. if s=[True] \<and> b=a then {[False]} else {}, \<lambda>s. s=[False])" definition or :: "'a bitsNA \<Rightarrow> 'a bitsNA \<Rightarrow> 'a bitsNA" where "or = (\<lambda>(ql,dl,fl)(qr,dr,fr). ([], \<lambda>a s. case s of [] \<Rightarrow> (True ## dl a ql) \<union> (False ## dr a qr) | left#s \<Rightarrow> if left then True ## dl a s else False ## dr a s, \<lambda>s. case s of [] \<Rightarrow> (fl ql | fr qr) | left#s \<Rightarrow> if left then fl s else fr s))" definition conc :: "'a bitsNA \<Rightarrow> 'a bitsNA \<Rightarrow> 'a bitsNA" where "conc = (\<lambda>(ql,dl,fl)(qr,dr,fr). (True#ql, \<lambda>a s. case s of [] \<Rightarrow> {} | left#s \<Rightarrow> if left then (True ## dl a s) \<union> (if fl s then False ## dr a qr else {}) else False ## dr a s, \<lambda>s. case s of [] \<Rightarrow> False | left#s \<Rightarrow> left \<and> fl s \<and> fr qr | \<not>left \<and> fr s))" definition epsilon :: "'a bitsNA" where "epsilon = ([],\<lambda>a s. {}, \<lambda>s. s=[])" definition plus :: "'a bitsNA \<Rightarrow> 'a bitsNA" where "plus = (\<lambda>(q,d,f). (q, \<lambda>a s. d a s \<union> (if f s then d a q else {}), f))" definition star :: "'a bitsNA \<Rightarrow> 'a bitsNA" where "star A = or epsilon (plus A)" primrec rexp2na :: "'a rexp \<Rightarrow> 'a bitsNA" where "rexp2na Zero = ([], \<lambda>a s. {}, \<lambda>s. False)" | "rexp2na One = epsilon" | "rexp2na(Atom a) = atom a" | "rexp2na(Plus r s) = or (rexp2na r) (rexp2na s)" | "rexp2na(Times r s) = conc (rexp2na r) (rexp2na s)" | "rexp2na(Star r) = star (rexp2na r)" declare split_paired_all[simp] (******************************************************) (* atom *) (******************************************************) lemma fin_atom: "(fin (atom a) q) = (q = [False])" by(simp add:atom_def) lemma start_atom: "start (atom a) = [True]" by(simp add:atom_def) lemma in_step_atom_Some[simp]: "(p,q) : step (atom a) b = (p=[True] \<and> q=[False] \<and> b=a)" by (simp add: atom_def step_def) lemma False_False_in_steps_atom: "([False],[False]) : steps (atom a) w = (w = [])" apply (induct "w") apply simp apply (simp add: relcomp_unfold) done lemma start_fin_in_steps_atom: "(start (atom a), [False]) : steps (atom a) w = (w = [a])" apply (induct "w") apply (simp add: start_atom) apply (simp add: False_False_in_steps_atom relcomp_unfold start_atom) done lemma accepts_atom: "accepts (atom a) w = (w = [a])" by (simp add: accepts_conv_steps start_fin_in_steps_atom fin_atom) (******************************************************) (* or *) (******************************************************) (***** lift True/False over fin *****) lemma fin_or_True[iff]: "\<And>L R. fin (or L R) (True#p) = fin L p" by(simp add:or_def) lemma fin_or_False[iff]: "\<And>L R. fin (or L R) (False#p) = fin R p" by(simp add:or_def) (***** lift True/False over step *****) (***** lift True/False over steps *****) lemma lift_True_over_steps_or[iff]: "\<And>p. (True#p,q)\<in>steps (or L R) w = (\<exists>r. q = True # r \<and> (p,r) \<in> steps L w)" apply (induct "w") apply force apply force done lemma lift_False_over_steps_or[iff]: "\<And>p. (False#p,q)\<in>steps (or L R) w = (\<exists>r. 
q = False#r \<and> (p,r)\<in>steps R w)" apply (induct "w") apply force apply force done (** From the start **) lemma start_step_or[iff]: "\<And>L R. (start(or L R),q) : step(or L R) a = (\<exists>p. (q = True#p \<and> (start L,p) : step L a) | (q = False#p \<and> (start R,p) : step R a))" apply (simp add:or_def step_def) apply blast done lemma steps_or: "(start(or L R), q) : steps (or L R) w = ( (w = [] \<and> q = start(or L R)) | (w \<noteq> [] \<and> (\<exists>p. q = True # p \<and> (start L,p) : steps L w | q = False # p \<and> (start R,p) : steps R w)))" apply (case_tac "w") apply (simp) apply blast apply (simp) apply blast done lemma fin_start_or[iff]: "\<And>L R. fin (or L R) (start(or L R)) = (fin L (start L) | fin R (start R))" by (simp add:or_def) lemma accepts_or[iff]: "accepts (or L R) w = (accepts L w | accepts R w)" apply (simp add: accepts_conv_steps steps_or) (* get rid of case_tac: *) apply (case_tac "w = []") apply auto done (******************************************************) (* conc *) (******************************************************) (** True/False in fin **) lemma fin_conc_True[iff]: "\<And>L R. fin (conc L R) (True#p) = (fin L p \<and> fin R (start R))" by(simp add:conc_def) lemma fin_conc_False[iff]: "\<And>L R. fin (conc L R) (False#p) = fin R p" by(simp add:conc_def) (** True/False in step **) lemma True_step_conc[iff]: "\<And>L R. (True#p,q) : step (conc L R) a = ((\<exists>r. q=True#r \<and> (p,r): step L a) | (fin L p \<and> (\<exists>r. q=False#r \<and> (start R,r) : step R a)))" apply (simp add:conc_def step_def) apply blast done lemma False_step_conc[iff]: "\<And>L R. (False#p,q) : step (conc L R) a = (\<exists>r. q = False#r \<and> (p,r) : step R a)" apply (simp add:conc_def step_def) apply blast done (** False in steps **) lemma False_steps_conc[iff]: "\<And>p. (False#p,q): steps (conc L R) w = (\<exists>r. q=False#r \<and> (p,r): steps R w)" apply (induct "w") apply fastforce apply force done (** True in steps **) lemma True_True_steps_concI: "\<And>L R p. (p,q) : steps L w \<Longrightarrow> (True#p,True#q) : steps (conc L R) w" apply (induct "w") apply simp apply simp apply fast done lemma True_False_step_conc[iff]: "\<And>L R. (True#p,False#q) : step (conc L R) a = (fin L p \<and> (start R,q) : step R a)" by simp lemma True_steps_concD[rule_format]: "\<forall>p. (True#p,q) : steps (conc L R) w \<longrightarrow> ((\<exists>r. (p,r) : steps L w \<and> q = True#r) \<or> (\<exists>u a v. w = u@a#v \<and> (\<exists>r. (p,r) : steps L u \<and> fin L r \<and> (\<exists>s. (start R,s) : step R a \<and> (\<exists>t. (s,t) : steps R v \<and> q = False#t)))))" apply (induct "w") apply simp apply simp apply (clarify del:disjCI) apply (erule disjE) apply (clarify del:disjCI) apply (erule allE, erule impE, assumption) apply (erule disjE) apply blast apply (rule disjI2) apply (clarify) apply simp apply (rule_tac x = "a#u" in exI) apply simp apply blast apply (rule disjI2) apply (clarify) apply simp apply (rule_tac x = "[]" in exI) apply simp apply blast done lemma True_steps_conc: "(True#p,q) : steps (conc L R) w = ((\<exists>r. (p,r) : steps L w \<and> q = True#r) \<or> (\<exists>u a v. w = u@a#v \<and> (\<exists>r. (p,r) : steps L u \<and> fin L r \<and> (\<exists>s. (start R,s) : step R a \<and> (\<exists>t. (s,t) : steps R v \<and> q = False#t)))))" by(force dest!: True_steps_concD intro!: True_True_steps_concI) (** starting from the start **) lemma start_conc: "\<And>L R. 
start(conc L R) = True#start L" by (simp add:conc_def) lemma final_conc: "\<And>L R. fin(conc L R) p = ((fin R (start R) \<and> (\<exists>s. p = True#s \<and> fin L s)) \<or> (\<exists>s. p = False#s \<and> fin R s))" apply (simp add:conc_def split: list.split) apply blast done lemma accepts_conc: "accepts (conc L R) w = (\<exists>u v. w = u@v \<and> accepts L u \<and> accepts R v)" apply (simp add: accepts_conv_steps True_steps_conc final_conc start_conc) apply (rule iffI) apply (clarify) apply (erule disjE) apply (clarify) apply (erule disjE) apply (rule_tac x = "w" in exI) apply simp apply blast apply blast apply (erule disjE) apply blast apply (clarify) apply (rule_tac x = "u" in exI) apply simp apply blast apply (clarify) apply (case_tac "v") apply simp apply blast apply simp apply blast done (******************************************************) (* epsilon *) (******************************************************) lemma step_epsilon[simp]: "step epsilon a = {}" by(simp add:epsilon_def step_def) lemma steps_epsilon: "((p,q) : steps epsilon w) = (w=[] \<and> p=q)" by (induct "w") auto lemma accepts_epsilon[iff]: "accepts epsilon w = (w = [])" apply (simp add: steps_epsilon accepts_conv_steps) apply (simp add: epsilon_def) done (******************************************************) (* plus *) (******************************************************) lemma start_plus[simp]: "\<And>A. start (plus A) = start A" by(simp add:plus_def) lemma fin_plus[iff]: "\<And>A. fin (plus A) = fin A" by(simp add:plus_def) lemma step_plusI: "\<And>A. (p,q) : step A a \<Longrightarrow> (p,q) : step (plus A) a" by(simp add:plus_def step_def) lemma steps_plusI: "\<And>p. (p,q) : steps A w \<Longrightarrow> (p,q) \<in> steps (plus A) w" apply (induct "w") apply simp apply simp apply (blast intro: step_plusI) done lemma step_plus_conv[iff]: "\<And>A. (p,r): step (plus A) a = ( (p,r): step A a | fin A p \<and> (start A,r) : step A a )" by(simp add:plus_def step_def) lemma fin_steps_plusI: "[| (start A,q) : steps A u; u \<noteq> []; fin A p |] ==> (p,q) : steps (plus A) u" apply (case_tac "u") apply blast apply simp apply (blast intro: steps_plusI) done (* reverse list induction! Complicates matters for conc? *) lemma start_steps_plusD[rule_format]: "\<forall>r. (start A,r) \<in> steps (plus A) w \<longrightarrow> (\<exists>us v. w = concat us @ v \<and> (\<forall>u\<in>set us. accepts A u) \<and> (start A,r) \<in> steps A v)" apply (induct w rule: rev_induct) apply simp apply (rule_tac x = "[]" in exI) apply simp apply simp apply (clarify) apply (erule allE, erule impE, assumption) apply (clarify) apply (erule disjE) apply (rule_tac x = "us" in exI) apply (simp) apply blast apply (rule_tac x = "us@[v]" in exI) apply (simp add: accepts_conv_steps) apply blast done lemma steps_star_cycle[rule_format]: "us \<noteq> [] \<longrightarrow> (\<forall>u \<in> set us. accepts A u) \<longrightarrow> accepts (plus A) (concat us)" apply (simp add: accepts_conv_steps) apply (induct us rule: rev_induct) apply simp apply (rename_tac u us) apply simp apply (clarify) apply (case_tac "us = []") apply (simp) apply (blast intro: steps_plusI fin_steps_plusI) apply (clarify) apply (case_tac "u = []") apply (simp) apply (blast intro: steps_plusI fin_steps_plusI) apply (blast intro: steps_plusI fin_steps_plusI) done lemma accepts_plus[iff]: "accepts (plus A) w = (\<exists>us. us \<noteq> [] \<and> w = concat us \<and> (\<forall>u \<in> set us. 
accepts A u))" apply (rule iffI) apply (simp add: accepts_conv_steps) apply (clarify) apply (drule start_steps_plusD) apply (clarify) apply (rule_tac x = "us@[v]" in exI) apply (simp add: accepts_conv_steps) apply blast apply (blast intro: steps_star_cycle) done (******************************************************) (* star *) (******************************************************) lemma accepts_star: "accepts (star A) w = (\<exists>us. (\<forall>u \<in> set us. accepts A u) \<and> w = concat us)" apply(unfold star_def) apply (rule iffI) apply (clarify) apply (erule disjE) apply (rule_tac x = "[]" in exI) apply simp apply blast apply force done (***** Correctness of r2n *****) lemma accepts_rexp2na: "\<And>w. accepts (rexp2na r) w = (w : lang r)" apply (induct "r") apply (simp add: accepts_conv_steps) apply simp apply (simp add: accepts_atom) apply (simp) apply (simp add: accepts_conc Regular_Set.conc_def) apply (simp add: accepts_star in_star_iff_concat subset_iff Ball_def) done end
A lookout aboard Weehawken spotted Atlanta at 04:10 on the morning of 17 June. When the latter ship closed to within about 1.5 miles (2.4 km) of the two Union ships, she fired one round from her bow gun that passed over Weehawken and landed near Nahant. Shortly afterward, Atlanta ran aground on a sandbar; she was briefly able to free herself, but the pressure of the tide pushed her back onto the sandbar. This time Webb was unable to get off and the monitors closed the range. When Weehawken, the leading ship, closed to within 200–300 yards (180–270 m) she opened fire with both of her guns. The 11-inch (279 mm) shell missed, but the 15-inch (381 mm) shell struck the ironclad above the port middle gun port, penetrated her armor and broke the wooden backing behind it, spraying splinters and fragments that disabled the entire gun crew and half the crew of the bow gun, even though it failed to cleanly penetrate through the backing. The next shot from the 11-inch Dahlgren gun struck the upper hull and started a small leak even though it failed to penetrate the two-inch armor there. The next shell from the 15-inch Dahlgren glanced off the middle starboard gun shutter as it was being opened, wounding half the gun's crew with fragments. The final shell was also from the 15-inch Dahlgren and it struck the top of the pilothouse, breaking the armor there and wounding both pilots in it. By this time, Atlanta had been able to fire only seven shots, none of which hit either Union ship, and was hard aground with high tide not due for another hour and a half. Weehawken and Nahant were able to freely maneuver into positions from which the Atlanta's narrow gun ports would not allow her to reply, and the damage already inflicted by the former ship made further resistance futile. Webb surrendered his ship within 15 minutes of opening fire, before Nahant even had a chance to fire. Of the ironclad's 21 officers and 124 enlisted men, one man was killed and another sixteen were wounded badly enough to require hospitalization.
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'stockswindow.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.

from PyQt5 import QtCore, QtGui, QtWidgets
import sys
from PyQt5.QtWidgets import (
    QApplication, QDialog, QMainWindow, QMessageBox
)
from PyQt5.uic import loadUi
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QPixmap
from PyQt5 import QtGui
from PyQt5.QtCore import QDate, Qt, QSortFilterProxyModel
from datetime import datetime
import tkinter as tk
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import json
from urllib.request import urlopen, Request
import pyqtgraph as pg
import re
from pip._vendor import requests
from PIL import ImageTk, Image


class Window(QMainWindow):
    def __init__(self, parent=None, date=20):
        """Initializer."""
        super().__init__(parent)
        self.setObjectName("MainWindow")
        self.resize(1000, 1000)
        self.centralwidget = QtWidgets.QWidget(self)
        self.centralwidget.setObjectName("centralwidget")
        self.highlow = PlotWidget(self.centralwidget)
        self.highlow.setGeometry(QtCore.QRect(40, 240, 281, 231))
        self.highlow.setObjectName("highlow")
        self.volume = PlotWidget(self.centralwidget)
        self.volume.setGeometry(QtCore.QRect(40, 620, 281, 231))
        self.volume.setObjectName("volume")
        self.stockComboBox = QtWidgets.QComboBox(self.centralwidget)
        self.stockComboBox.setGeometry(QtCore.QRect(70, 60, 231, 32))
        self.stockComboBox.setMouseTracking(True)
        self.stockComboBox.setTabletTracking(True)
        self.stockComboBox.setAcceptDrops(True)
        self.stockComboBox.setEditable(True)
        self.stockComboBox.setCurrentText("")
        self.stockComboBox.setInsertPolicy(QtWidgets.QComboBox.NoInsert)
        self.stockComboBox.setMinimumContentsLength(0)
        self.stockComboBox.setObjectName("stockComboBox")
        self.monthComboBox = QtWidgets.QComboBox(self.centralwidget)
        self.monthComboBox.setGeometry(QtCore.QRect(70, 120, 231, 32))
        self.monthComboBox.setEditable(True)
        self.monthComboBox.setInsertPolicy(QtWidgets.QComboBox.NoInsert)
        self.monthComboBox.setObjectName("monthComboBox")
        self.volume_2 = PlotWidget(self.centralwidget)
        self.volume_2.setGeometry(QtCore.QRect(380, 80, 591, 581))
        self.volume_2.setObjectName("volume_2")
        self.highlowPushButton = QtWidgets.QPushButton(self.centralwidget)
        self.highlowPushButton.setGeometry(QtCore.QRect(80, 540, 201, 32))
        self.highlowPushButton.setObjectName("highlowPushButton")
        self.volumePush = QtWidgets.QPushButton(self.centralwidget)
        self.volumePush.setGeometry(QtCore.QRect(80, 900, 201, 32))
        self.volumePush.setObjectName("volumePush")
        self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
        self.tableWidget.setGeometry(QtCore.QRect(460, 740, 481, 192))
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(0)
        self.tableWidget.setRowCount(0)
        self.tableHeader = QtWidgets.QLabel(self.centralwidget)
        self.tableHeader.setGeometry(QtCore.QRect(470, 700, 371, 16))
        self.tableHeader.setObjectName("tableHeader")
        self.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(self)
        self.statusbar.setObjectName("statusbar")
        self.setStatusBar(self.statusbar)

        self.retranslateUi(self)
        self.stockComboBox.setCurrentIndex(-1)
        QtCore.QMetaObject.connectSlotsByName(self)

    def retranslateUi(self, MainWindow):
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow",
"MainWindow")) self.stockComboBox.setPlaceholderText(_translate("MainWindow", "Enter Stock Name")) self.highlowPushButton.setText(_translate("MainWindow", "Expand")) self.volumePush.setText(_translate("MainWindow", "Expand")) self.tableHeader.setText(_translate("MainWindow", "Statistics for ")) from pyqtgraph import PlotWidget if __name__ == "__main__": app = QApplication(sys.argv) win = Window() win.show() sys.exit(app.exec())
Most gods were depicted in several ways. Hathor could be a cow, cobra, lioness, or a woman with bovine horns or ears. By depicting a given god in different ways, the Egyptians expressed different aspects of its essential nature. The gods are depicted in a finite number of these symbolic forms, so that deities can often be distinguished from one another by their iconographies. These forms include men and women (anthropomorphism), animals (zoomorphism), and, more rarely, inanimate objects. Combinations of forms, such as gods with human bodies and animal heads, are common. New forms and increasingly complex combinations arose in the course of history. Some gods can only be distinguished from others if they are labeled in writing, as with Isis and Hathor. Because of the close connection between these goddesses, they could both wear the cow-horn headdress that was originally Hathor's alone.
<a href="https://colab.research.google.com/github/joaochenriques/MCTE_2020_2021/blob/main/Barrages_Turbine_Generator_Maps/Generator_constrained_least_squares_V01.ipynb" target="_parent"></a> ```python import numpy as np import pandas as pd from pylab import * from sympy import Symbol,expand %config InlineBackend.figure_formats = ['svg'] x=Symbol('x') ``` ```python import pathlib if not pathlib.Path("Generator_Data_ElisabetaTedeschi.xlsx").exists(): !curl -O https://raw.githubusercontent.com/joaochenriques/MCTE_2020_2021/main/Barrages_Turbine_Generator_Maps/Generator_Data_ElisabetaTedeschi.xlsx ``` % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 8684 100 8684 0 0 39294 0 --:--:-- --:--:-- --:--:-- 39294 ```python def LSQ_polyfit( NP, XL, XC_fun, XC_der ): ######################################################## # LINEAR LEAST SQUARES PROBLEM # ( xl, yl ) = XL # Vandermonde matrix X = xl[:, None] ** np.arange( NP, -1, -1 ) XTX = X.transpose().dot( X ) XTy = X.transpose().dot( yl ) # NO CONSTRAINTS IMPOSED => standard LSQ solution if XC_fun == None and XC_der == None: AA = XTX bb = XTy return np.linalg.solve( AA, bb ) ######################################################## # LAGRANGE MULTIPLIERS FOR CONSTRAINTS # if XC_fun != None: ( xc_fun, yc_fun ) = XC_fun # function constraints xF = np.array( xc_fun ) bF = np.array( yc_fun ) # Vandermonde matrix AF = xF[:, None] ** np.arange( NP, -1, -1 ) if XC_der != None: ( xc_der, yc_der ) = XC_der # derivative constraints xD = np.array( xc_der ) bD = np.array( yc_der ) # Derivative of the Vandermonde matrix AD = np.array( [ (i*xD**(i-1) if i > 0 else 0*xD) \ for i in range(NP,-1,-1) ] ).T if XC_fun != None and XC_der != None: Ac = np.block( [ [AF], [AD] ] ) bc = np.block( [ bF, bD ] ) elif XC_fun != None: Ac = AF bc = bF elif XC_der != None: Ac = AD bc = bD AcT = Ac.transpose() Z0 = np.zeros( ( Ac.shape[0], Ac.shape[0] ) ) ######################################################## # ASSEMBLE AND SOLVE GLOBAL SYSTEM # AA = np.block( [ [ XTX, AcT ], [ Ac, Z0 ] ] ) bb = np.block( [ XTy, bc ] ) alambda = np.linalg.solve( AA, bb ) # final polynomial coeffs without lambdas return alambda[ 0:NP+1 ] ``` ```python dfc = pd.read_excel( 'Generator_Data_ElisabetaTedeschi.xlsx', 'Sheet1' ) Load = np.array( dfc['Load'] ) Eff = np.array( dfc['Eff'] ) Load = np.array( Load[~np.isnan(Load)] ) Eff = np.array( Eff[~np.isnan(Eff)] ) ``` ```python Load_1 = Load[:6] Load_2 = Load[5:] Eff_1 = Eff[:6] Eff_2 = Eff[5:] ``` ```python plt.plot( Load_1, Eff_1, 'o' ) plt.plot( Load_2, Eff_2, 'o' ) poly_C1 = LSQ_polyfit( 4, ( Load_1, Eff_1 ), ([Load_1[-1]],[Eff_1[-1]]), None ) Load_1_plt = np.linspace( Load_1[0], Load_1[-1], 100 ) Eff_1_plt = np.polyval( poly_C1, Load_1_plt ) plot( Load_1_plt, Eff_1_plt, '-' ) # impose derivative at curved common point poly_C1_der = np.poly1d( poly_C1 ).deriv() der1 = poly_C1_der(Load_2[0]) poly_C2 = LSQ_polyfit( 4, ( Load_2, Eff_2 ), ([Load_2[0]],[Eff_2[0]]), ([Load_2[0]],[der1]) ) Load_2_plt = np.linspace( Load_2[0], 1.0, 100 ) Eff_2_plt = np.polyval( poly_C2, Load_2_plt ) plot( Load_2_plt, Eff_2_plt, '-' ) display( expand(np.poly1d( np.round( poly_C1, 5 ) )(x)) ) display( expand(np.poly1d( np.round( poly_C2, 5 ) )(x)) ) Load_2[0], poly_C1, poly_C2 ``` $\displaystyle - 6714.48631 x^{4} + 2591.59775 x^{3} - 380.83406 x^{2} + 27.04232 x + 0.00329$ $\displaystyle - 1.16857 x^{4} + 3.31173 x^{3} - 3.44296 x^{2} + 1.5416 x + 0.71041$ (0.125423729, array([-6.71448631e+03, 2.59159775e+03, 
-3.80834059e+02, 2.70423225e+01, 3.29394948e-03]), array([-1.16856952, 3.31172525, -3.44296217, 1.5416029 , 0.71040716])) ```python subplots_adjust( top=0.95, bottom=0.16, left=0.10, right=0.95 ) plot( Load_1_plt, Eff_1_plt, 'r-' ) plot( Load_2_plt, Eff_2_plt, 'g-' ) xlabel( '$\\Lambda = \\dfrac{P_\\mathrm{turb}}{P_\\mathrm{rated}}$'); ylabel( '$\\eta_\\mathrm{gen}$') savefig( 'GeneratorEff_plot.pdf' ) savefig( 'GeneratorEff_plot.svg' ) ``` ```python ```
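For reference, the block system assembled inside `LSQ_polyfit` is the standard first-order (KKT) system of equality-constrained least squares. Minimizing $\|X a - y\|^2$ subject to $A_c\, a = b_c$ via a Lagrangian (a sketch of the derivation; the factor of 2 is absorbed into $\lambda$) gives

$$
\begin{pmatrix} X^\top X & A_c^\top \\ A_c & 0 \end{pmatrix}
\begin{pmatrix} a \\ \lambda \end{pmatrix}
=
\begin{pmatrix} X^\top y \\ b_c \end{pmatrix},
$$

which is exactly the `np.block` matrix `AA` and right-hand side `bb` passed to `np.linalg.solve`; the multipliers are discarded when the function returns `alambda[0:NP+1]`.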
Load LFindLoad. From lfind Require Import LFind. From QuickChick Require Import QuickChick. From adtind Require Import goal33. Derive Show for natural. Derive Arbitrary for natural. Instance Dec_Eq_natural : Dec_Eq natural. Proof. dec_eq. Qed. Lemma conj5eqsynthconj5 : forall (lv0 : natural), (@eq natural (mult lv0 Zero) (mult Zero lv0)). Admitted. QuickChick conj5eqsynthconj5.
-- 2017-05-11, Reported by Ulf -- Implicit absurd matches should be treated in the same way as explicit ones -- when it comes to being used/unused. open import Agda.Builtin.Bool open import Agda.Builtin.Equality data ⊥ : Set where record ⊤ : Set where abort : (A : Set) {_ : ⊥} → A abort A {} test : (x y : ⊥) → abort Bool {x} ≡ abort Bool {y} test x y = refl
function p = bk_kernel_q(k, l, K, L)
% This function generates the probability for each expert.
% The default value in BK is uniform, thus p is straightforward.
% In reality, this can be much more complex.
%
% function p = bk_kernel_q(k, l, K, L)
%
% p: vector of weights for individual experts.
%
% k, l: specific parameters to tune the probability distribution.
% K, L: maximum values of k, l, respectively.
%
% Example: p = bk_kernel_q(k, l, K, L);

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This file is part of OLPS: http://OLPS.stevenhoi.org/
% Original authors: Bin LI, Steven C.H. Hoi
% Contributors:
% Change log:
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

p = 1/(K*L+1);

end
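For intuition, an assumed Python rendering (not part of the OLPS toolbox): every one of the K·L experts, plus one extra slot, receives the same weight, so the weights sum to one over K·L+1 entries.

# Assumed Python translation of bk_kernel_q: uniform weight over K*L+1 experts.
def bk_kernel_q(k, l, K, L):
    # k and l identify a particular expert but do not change the uniform value.
    return 1.0 / (K * L + 1)

# Sanity check: 5*10 experts plus the extra slot sum to 1.
total = sum(bk_kernel_q(k, l, 5, 10) for k in range(5) for l in range(10))
assert abs(total + bk_kernel_q(0, 0, 5, 10) - 1.0) < 1e-12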
[STATEMENT] lemma ind_eq_0_iff: "ind P n = 0 \<longleftrightarrow> n = 0 \<or> \<not>P n" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (ind P n = (0::'a)) = (n = 0 \<or> \<not> P n) [PROOF STEP] by (simp add: ind_def)
(* Title: HOL/Auth/n_germanSymIndex_lemma_on_inv__19.thy Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences *) header{*The n_germanSymIndex Protocol Case Study*} theory n_germanSymIndex_lemma_on_inv__19 imports n_germanSymIndex_base begin section{*All lemmas on causal relation between inv__19 and some rule r*} lemma n_SendInvAckVsinv__19: assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__19 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__19 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const Inv)) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(i~=p__Inv2)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_RecvInvAckVsinv__19: assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__19 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvInvAck i" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__19 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(i~=p__Inv2)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_SendGntSVsinv__19: assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__19 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntS i" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__19 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(i~=p__Inv2)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_SendGntEVsinv__19: assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and a2: "(\<exists> p__Inv2. 
p__Inv2\<le>N\<and>f=inv__19 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__19 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(i~=p__Inv2)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_SendReqE__part__1Vsinv__19: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__19 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_StoreVsinv__19: assumes a1: "\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__19 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_RecvGntSVsinv__19: assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvGntS i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__19 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_RecvReqEVsinv__19: assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqE N i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__19 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_RecvGntEVsinv__19: assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvGntE i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__19 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_SendInv__part__0Vsinv__19: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__19 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_SendReqE__part__0Vsinv__19: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__19 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_SendInv__part__1Vsinv__19: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__19 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_SendReqSVsinv__19: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqS i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__19 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_RecvReqSVsinv__19: assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqS N i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__19 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done end
function colorbar!(fg::FigureGrid; position=:right, vertical=default_isvertical(position), kwargs...) guide_pos = guides_position(fg.figure, position) return colorbar!(guide_pos, fg; vertical, kwargs...) end """ colorbar!(figpos, grid; kwargs...) Compute colorbar for `grid` (which should be the output of [`draw!`](@ref)) and draw it in position `figpos`. Attributes allowed in `kwargs` are the same as `MakieLayout.Colorbar`. """ function colorbar!(figpos, grid; kwargs...) colorbar = compute_colorbar(grid) return isnothing(colorbar) ? nothing : Colorbar(figpos; colorbar..., kwargs...) end compute_colorbar(fg::FigureGrid) = compute_colorbar(fg.grid) function compute_colorbar(grid::Matrix{AxisEntries}) colorscales = filter(!isnothing, [get(ae.continuousscales, :color, nothing) for ae in grid]) isempty(colorscales) && return colorscale = reduce(mergescales, colorscales) label = getlabel(colorscale) limits = colorscale.extrema colormap = current_default_theme().colormap[] # FIXME: handle separate colorbars for entry in entries(grid) colormap = to_value(get(entry, :colormap, colormap)) end return (; label, limits, colormap) end
/- Author: E.W.Ayers © 2019 -/ import .util .table open tactic /-- Member of a telescope.-/ @[derive decidable_eq] meta structure hyp := (n : name) (bi : binder_info) (type : expr) /-- A telescope keeps track of all of the names and types of the free variables in the context. -/ meta def telescope := list hyp meta def telescope.to_pis : expr → telescope → expr := list.foldl (λ e ⟨n,b,y⟩, expr.pi n b y e) meta def telescope.to_lambdas : expr → telescope → expr := list.foldl (λ e ⟨n,b,y⟩, expr.lam n b y e) private meta def telescope.of_pis_aux : telescope → expr → telescope × expr | acc (expr.pi n bi y b) := telescope.of_pis_aux (⟨n,bi,y⟩::acc) b | acc x := ⟨acc,x⟩ meta def telescope.of_pis : expr → (telescope × expr) := telescope.of_pis_aux [] meta def telescope.to_pattern_core : expr → tactic (expr × list expr) |(expr.lam n bi y b) := do un ← mk_fresh_name, let x := expr.local_const un n bi b, let b := expr.instantiate_var b x, (p, xs) ← telescope.to_pattern_core b, return (p, x::xs) |x := pure (x, []) meta def telescope.to_pattern (t : telescope) (e : expr) : tactic pattern := do (e,xs) ← telescope.to_pattern_core $ telescope.to_lambdas e t, mk_pattern [] xs e [] xs @[derive decidable_eq] meta structure rule := -- relation is always `=` for now. (id : name) -- a way of identifying the rule. (ctxt : telescope) -- arguments, local context. (lhs : expr) (rhs : expr) (type : expr) (pf : expr) -- the proof expression of the given rule. (was_flipped : option (name × expr)) -- [HACK] needed to make sure `rule.flip` doesn't keep applying `eq.symm`. namespace rule meta instance has_lt : has_lt rule := ⟨λ r1 r2, (r1.lhs,r1.rhs) < (r2.lhs,r2.rhs)⟩ meta instance has_decidable_lt : decidable_rel ((<) : rule → rule → Prop) := by apply_instance meta instance : has_to_string rule := ⟨λ r, (to_string r.lhs) ++ " = " ++ (to_string r.rhs)⟩ meta instance : has_to_tactic_format rule := ⟨λ r, do plhs ← tactic.pp r.lhs, prhs ← tactic.pp r.rhs, pure $ plhs ++ " = " ++ prhs -- infer_type r.pf >>= whnf >>= tactic_format_expr ⟩ /-- Create a `rule` from a proof term and a name. -/ meta def of_prf (id : name) : expr → tactic rule := λ pf, do t ← infer_type pf >>= whnf, -- trace t, ⟨ctxt,`(%%lhs = %%rhs)⟩ ← pure $ telescope.of_pis t | (do pft ← pp pf, ppt ← pp t, fail $ (to_fmt "rule.of_prf: supplied expression ") ++ pft ++ " : " ++ ppt ++ " is not an equality proof "), pure {id := id, ctxt := ctxt, lhs := lhs, rhs := rhs, pf := pf, type := t, was_flipped := none} /-- Swap the LHS and RHS. -/ meta def flip (r : rule) : tactic rule := match r.was_flipped with |none := do let P := r.ctxt.foldl (λ e ⟨n,b,y⟩, expr.pi n b (to_pexpr y) e) $ ```(%%r.rhs = %%r.lhs), T ← to_expr $ P, pf ← tactic.fabricate (some T) (do tactic.intros, tactic.applyc `eq.symm, tactic.apply_core r.pf {new_goals := new_goals.non_dep_only}, all_goals $ try $ prop_assumption, skip ), pure { ctxt := r.ctxt , lhs := r.rhs , rhs := r.lhs , type := r.type , pf := pf , id := r.id ++ `flipped , was_flipped := some (r.id, r.pf) } |some pf := of_prf pf.1 pf.2 end /-- Sanity check that the LHS, RHS actually correspond to what the proof says.-/ meta def is_wf (r : rule) : tactic bool := do r' ← of_prf r.id $ pf $ r, pure $ r = r' /-- Take a name `n` and try to make a rule from the lemma at the name's declaration. -/ meta def of_name (n : name) : tactic rule := resolve_name n >>= pure ∘ pexpr.mk_explicit >>= to_expr >>= rule.of_prf n /--Returns true when the left hand side is a variable or metavariable. 
-/ meta def lhs_wildcard : rule → bool := λ r, expr.is_var r.lhs || expr.is_mvar r.lhs /--Returns true when the right hand side is a variable or metavariable. -/ meta def rhs_wildcard : rule → bool := λ r, expr.is_var r.rhs || expr.is_mvar r.rhs -- private meta def specify_aux : nat → expr → expr -- |0 acc := acc -- |(n+1) acc := specify_aux n $ expr.app acc (expr.var n) -- private meta def specify_aux₂ : list (hyp × option expr) → expr → expr -- |[] e := e -- |(⟨⟨n,b,y⟩, some E⟩::rest) e := specify_aux₂ rest $ expr.instantiate_var e E -- |(⟨⟨n,b,y⟩, none⟩ :: rest) e := specify_aux₂ rest $ expr.lam n b y e -- meta def specify : list (option expr) → rule → tactic rule | vals r := do -- when (r.ctxt.length ≠ vals.length) (fail "context assignment list is a different length to the rule's context. "), -- let rctxt := list.zip r.ctxt vals, -- let pf := specify_aux r.ctxt.length r.pf, -- let pf := specify_aux₂ rctxt pf, -- infer_type pf, -- make sure it's valid -- of_prf r.id pf meta def instantiate_mvars (r : rule) : tactic rule := tactic.instantiate_mvars r.pf >>= rule.of_prf r.id meta def get_local_const_dependencies (r : rule) : tactic (list expr) := do pf ← tactic.instantiate_mvars r.pf, let lcs := expr.list_local_consts pf, pure lcs meta def is_local_hypothesis (r : rule) : tactic bool := do lcds ← r.get_local_const_dependencies >>= list.mmap infer_type >>= list.mmap is_prop , -- [HACK] I am assuming that there are no subtypings and so on which is probably a bad assumption. pure $ list.foldl bor ff lcds meta def is_commuter (r : rule) : tactic bool := match r.lhs, r.rhs with | (expr.app (expr.app f1 (expr.var n1)) (expr.var m1)) , (expr.app (expr.app f2 (expr.var n2)) (expr.var m2)) := pure $ f1 = f2 ∧ n1 = m2 ∧ n2 = m1 |_, _ := pure ff end meta def is_def_eq (r₁ r₂ : rule) : tactic bool := tactic.is_success $ (do tactic.is_def_eq r₁.lhs r₂.lhs, tactic.is_def_eq r₁.rhs r₂.rhs ) end rule
## ---- echo = FALSE, warning = FALSE, message = FALSE----------------------------------- knitr::opts_chunk$set( message = FALSE, warning = FALSE, collapse = TRUE, comment = "#>", fig.height = 4, fig.width = 8, fig.align = "center", cache = FALSE ) library(tidyverse) ## ---- eval = F------------------------------------------------------------------------- ## rval <- reactive({ ## ... ## }) ## ---- eval = F------------------------------------------------------------------------- ## rval <- eventReactive(actionbutton, { ## ... ## }) ## ----eval = F-------------------------------------------------------------------------- ## runApp("03_submission/", display.mode = "showcase") ## ---- eval = F------------------------------------------------------------------------- ## box(..., title = NULL, width = 6, height = NULL) ## ---- eval = F------------------------------------------------------------------------- ## body <- dashboardBody( ## fluidRow( ## box(title = "Box with a width of 12 columns", ## width = 12), ## box(title = "Box with a width of 6 columns", ## width = 6, height = 200), ## box(title = "Another box with a width of 6 cols", ## width = 6, height = 200), ... ## ) ## ) ## ---- eval = F------------------------------------------------------------------------- ## sidebar <- dashboardSidebar( ## sidebarMenu( ## menuItem("Dashboard", tabName = "dashboard", ## icon = icon("dashboard")), ## menuItem("Cars", icon = icon("th"), tabName = "cars", ## badgeLabel = "new", badgeColor = "green") ## ) ## ) ## ---- eval = F------------------------------------------------------------------------- ## ## sidebar <- dashboardSidebar( ## sidebarMenu( ## menuItem("Dashboard", tabName = "dashboard", icon = icon("dashboard")), ## menuItem("Cars", icon = icon("th"), tabName = "cars", ## badgeLabel = "new", badgeColor = "green") ## ) ## ) ## ## ## body <- dashboardBody( ## tabItems( ## tabItem(tabName = "dashboard", ## h2("Dashboard tab content"), ## fluidRow( ## # Boxes ## ... ## ) ## ), ## ## tabItem(tabName = "cars", ## h2("What do you want to know about Cars?"), ## plotOutput("myplot"), ## DTOutput("mytable") ## ) ## ) ## )
/- Copyright (c) 2021 Eric Wieser. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Eric Wieser ! This file was ported from Lean 3 source module algebra.module.hom ! leanprover-community/mathlib commit be24ec5de6701447e5df5ca75400ffee19d65659 ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathbin.Algebra.Module.Pi /-! # Bundled hom instances for module and multiplicative actions > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. This file defines instances for module, mul_action and related structures on bundled `_hom` types. These are analogous to the instances in `algebra.module.pi`, but for bundled instead of unbundled functions. -/ variable {R S A B : Type _} namespace AddMonoidHom section variable [Monoid R] [Monoid S] [AddMonoid A] [AddCommMonoid B] variable [DistribMulAction R B] [DistribMulAction S B] instance : DistribMulAction R (A →+ B) where smul r f := { toFun := r • f map_zero' := by simp map_add' := fun x y => by simp [smul_add] } one_smul f := by simp mul_smul r s f := by simp [mul_smul] smul_add r f g := ext fun x => by simp [smul_add] smul_zero r := ext fun x => by simp [smul_zero] /- warning: add_monoid_hom.coe_smul -> AddMonoidHom.coe_smul is a dubious translation: lean 3 declaration is forall {R : Type.{u1}} {A : Type.{u2}} {B : Type.{u3}} [_inst_1 : Monoid.{u1} R] [_inst_3 : AddMonoid.{u2} A] [_inst_4 : AddCommMonoid.{u3} B] [_inst_5 : DistribMulAction.{u1, u3} R B _inst_1 (AddCommMonoid.toAddMonoid.{u3} B _inst_4)] (r : R) (f : AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))), Eq.{succ (max u2 u3)} (A -> B) (coeFn.{succ (max u3 u2), succ (max u2 u3)} (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (fun (_x : AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) => A -> B) (AddMonoidHom.hasCoeToFun.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (SMul.smul.{u1, max u3 u2} R (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (SMulZeroClass.toHasSmul.{u1, max u3 u2} R (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (AddZeroClass.toHasZero.{max u3 u2} (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (AddMonoid.toAddZeroClass.{max u3 u2} (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (AddCommMonoid.toAddMonoid.{max u3 u2} (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (AddMonoidHom.addCommMonoid.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) _inst_4)))) (DistribSMul.toSmulZeroClass.{u1, max u3 u2} R (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) 
(AddMonoid.toAddZeroClass.{max u3 u2} (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (AddCommMonoid.toAddMonoid.{max u3 u2} (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (AddMonoidHom.addCommMonoid.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) _inst_4))) (DistribMulAction.toDistribSMul.{u1, max u3 u2} R (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) _inst_1 (AddCommMonoid.toAddMonoid.{max u3 u2} (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (AddMonoidHom.addCommMonoid.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) _inst_4)) (AddMonoidHom.distribMulAction.{u1, u2, u3} R A B _inst_1 _inst_3 _inst_4 _inst_5)))) r f)) (SMul.smul.{u1, max u2 u3} R (A -> B) (Function.hasSMul.{u2, u1, u3} A R B (SMulZeroClass.toHasSmul.{u1, u3} R B (AddZeroClass.toHasZero.{u3} B (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (DistribSMul.toSmulZeroClass.{u1, u3} R B (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4)) (DistribMulAction.toDistribSMul.{u1, u3} R B _inst_1 (AddCommMonoid.toAddMonoid.{u3} B _inst_4) _inst_5)))) r (coeFn.{max (succ u3) (succ u2), max (succ u2) (succ u3)} (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (fun (_x : AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) => A -> B) (AddMonoidHom.hasCoeToFun.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) f)) but is expected to have type forall {R : Type.{u1}} {A : Type.{u3}} {B : Type.{u2}} [_inst_1 : Monoid.{u1} R] [_inst_3 : AddMonoid.{u3} A] [_inst_4 : AddCommMonoid.{u2} B] [_inst_5 : DistribMulAction.{u1, u2} R B _inst_1 (AddCommMonoid.toAddMonoid.{u2} B _inst_4)] (r : R) (f : AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))), Eq.{max (succ u3) (succ u2)} (forall (ᾰ : A), (fun ([email protected]._hyg.403 : A) => B) ᾰ) (FunLike.coe.{max (succ u3) (succ u2), succ u3, succ u2} (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) A (fun (_x : A) => (fun ([email protected]._hyg.403 : A) => B) _x) (AddHomClass.toFunLike.{max u3 u2, u3, u2} (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) A B (AddZeroClass.toAdd.{u3} A (AddMonoid.toAddZeroClass.{u3} A _inst_3)) (AddZeroClass.toAdd.{u2} B (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (AddMonoidHomClass.toAddHomClass.{max u3 u2, u3, u2} (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4)) (AddMonoidHom.addMonoidHomClass.{u3, u2} A B 
(AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))))) (HSMul.hSMul.{u1, max u3 u2, max u3 u2} R (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (instHSMul.{u1, max u3 u2} R (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (SMulZeroClass.toSMul.{u1, max u3 u2} R (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (instZeroAddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (DistribSMul.toSMulZeroClass.{u1, max u3 u2} R (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (AddMonoid.toAddZeroClass.{max u3 u2} (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (AddCommMonoid.toAddMonoid.{max u3 u2} (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (AddMonoidHom.addCommMonoid.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) _inst_4))) (DistribMulAction.toDistribSMul.{u1, max u3 u2} R (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) _inst_1 (AddCommMonoid.toAddMonoid.{max u3 u2} (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (AddMonoidHom.addCommMonoid.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) _inst_4)) (AddMonoidHom.distribMulAction.{u1, u3, u2} R A B _inst_1 _inst_3 _inst_4 _inst_5))))) r f)) (HSMul.hSMul.{u1, max u3 u2, max u3 u2} R (forall (a : A), (fun ([email protected]._hyg.403 : A) => B) a) (forall (ᾰ : A), (fun ([email protected]._hyg.403 : A) => B) ᾰ) (instHSMul.{u1, max u3 u2} R (forall (a : A), (fun ([email protected]._hyg.403 : A) => B) a) (Pi.instSMul.{u3, u2, u1} A R (fun (a : A) => (fun ([email protected]._hyg.403 : A) => B) a) (fun (i : A) => SMulZeroClass.toSMul.{u1, u2} R ((fun ([email protected]._hyg.403 : A) => B) i) (AddMonoid.toZero.{u2} ((fun ([email protected]._hyg.403 : A) => B) i) (AddCommMonoid.toAddMonoid.{u2} ((fun ([email protected]._hyg.403 : A) => B) i) _inst_4)) (DistribSMul.toSMulZeroClass.{u1, u2} R ((fun ([email protected]._hyg.403 : A) => B) i) (AddMonoid.toAddZeroClass.{u2} ((fun ([email protected]._hyg.403 : A) => B) i) (AddCommMonoid.toAddMonoid.{u2} ((fun ([email protected]._hyg.403 : A) => B) i) _inst_4)) (DistribMulAction.toDistribSMul.{u1, u2} R ((fun ([email protected]._hyg.403 : A) => B) i) _inst_1 (AddCommMonoid.toAddMonoid.{u2} ((fun ([email protected]._hyg.403 : A) => B) i) _inst_4) _inst_5))))) r (FunLike.coe.{max (succ u3) (succ u2), succ u3, succ u2} (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) A (fun (_x : A) => (fun ([email protected]._hyg.403 : A) => B) 
_x) (AddHomClass.toFunLike.{max u3 u2, u3, u2} (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) A B (AddZeroClass.toAdd.{u3} A (AddMonoid.toAddZeroClass.{u3} A _inst_3)) (AddZeroClass.toAdd.{u2} B (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (AddMonoidHomClass.toAddHomClass.{max u3 u2, u3, u2} (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4)) (AddMonoidHom.addMonoidHomClass.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))))) f)) Case conversion may be inaccurate. Consider using '#align add_monoid_hom.coe_smul AddMonoidHom.coe_smulₓ'. -/ @[simp] theorem coe_smul (r : R) (f : A →+ B) : ⇑(r • f) = r • f := rfl #align add_monoid_hom.coe_smul AddMonoidHom.coe_smul /- warning: add_monoid_hom.smul_apply -> AddMonoidHom.smul_apply is a dubious translation: lean 3 declaration is forall {R : Type.{u1}} {A : Type.{u2}} {B : Type.{u3}} [_inst_1 : Monoid.{u1} R] [_inst_3 : AddMonoid.{u2} A] [_inst_4 : AddCommMonoid.{u3} B] [_inst_5 : DistribMulAction.{u1, u3} R B _inst_1 (AddCommMonoid.toAddMonoid.{u3} B _inst_4)] (r : R) (f : AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (x : A), Eq.{succ u3} B (coeFn.{max (succ u3) (succ u2), max (succ u2) (succ u3)} (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (fun (_x : AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) => A -> B) (AddMonoidHom.hasCoeToFun.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (SMul.smul.{u1, max u3 u2} R (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (SMulZeroClass.toHasSmul.{u1, max u3 u2} R (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (AddZeroClass.toHasZero.{max u3 u2} (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (AddMonoid.toAddZeroClass.{max u3 u2} (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (AddCommMonoid.toAddMonoid.{max u3 u2} (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (AddMonoidHom.addCommMonoid.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) _inst_4)))) (DistribSMul.toSmulZeroClass.{u1, max u3 u2} R (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (AddMonoid.toAddZeroClass.{max u3 u2} (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B 
_inst_4))) (AddCommMonoid.toAddMonoid.{max u3 u2} (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (AddMonoidHom.addCommMonoid.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) _inst_4))) (DistribMulAction.toDistribSMul.{u1, max u3 u2} R (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) _inst_1 (AddCommMonoid.toAddMonoid.{max u3 u2} (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (AddMonoidHom.addCommMonoid.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) _inst_4)) (AddMonoidHom.distribMulAction.{u1, u2, u3} R A B _inst_1 _inst_3 _inst_4 _inst_5)))) r f) x) (SMul.smul.{u1, u3} R B (SMulZeroClass.toHasSmul.{u1, u3} R B (AddZeroClass.toHasZero.{u3} B (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (DistribSMul.toSmulZeroClass.{u1, u3} R B (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4)) (DistribMulAction.toDistribSMul.{u1, u3} R B _inst_1 (AddCommMonoid.toAddMonoid.{u3} B _inst_4) _inst_5))) r (coeFn.{max (succ u3) (succ u2), max (succ u2) (succ u3)} (AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) (fun (_x : AddMonoidHom.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) => A -> B) (AddMonoidHom.hasCoeToFun.{u2, u3} A B (AddMonoid.toAddZeroClass.{u2} A _inst_3) (AddMonoid.toAddZeroClass.{u3} B (AddCommMonoid.toAddMonoid.{u3} B _inst_4))) f x)) but is expected to have type forall {R : Type.{u1}} {A : Type.{u3}} {B : Type.{u2}} [_inst_1 : Monoid.{u1} R] [_inst_3 : AddMonoid.{u3} A] [_inst_4 : AddCommMonoid.{u2} B] [_inst_5 : DistribMulAction.{u1, u2} R B _inst_1 (AddCommMonoid.toAddMonoid.{u2} B _inst_4)] (r : R) (f : AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (x : A), Eq.{succ u2} ((fun ([email protected]._hyg.403 : A) => B) x) (FunLike.coe.{max (succ u3) (succ u2), succ u3, succ u2} (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) A (fun (_x : A) => (fun ([email protected]._hyg.403 : A) => B) _x) (AddHomClass.toFunLike.{max u3 u2, u3, u2} (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) A B (AddZeroClass.toAdd.{u3} A (AddMonoid.toAddZeroClass.{u3} A _inst_3)) (AddZeroClass.toAdd.{u2} B (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (AddMonoidHomClass.toAddHomClass.{max u3 u2, u3, u2} (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4)) (AddMonoidHom.addMonoidHomClass.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))))) (HSMul.hSMul.{u1, max u3 u2, max u3 u2} R (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) 
(AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (instHSMul.{u1, max u3 u2} R (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (SMulZeroClass.toSMul.{u1, max u3 u2} R (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (instZeroAddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (DistribSMul.toSMulZeroClass.{u1, max u3 u2} R (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (AddMonoid.toAddZeroClass.{max u3 u2} (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (AddCommMonoid.toAddMonoid.{max u3 u2} (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (AddMonoidHom.addCommMonoid.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) _inst_4))) (DistribMulAction.toDistribSMul.{u1, max u3 u2} R (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) _inst_1 (AddCommMonoid.toAddMonoid.{max u3 u2} (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (AddMonoidHom.addCommMonoid.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) _inst_4)) (AddMonoidHom.distribMulAction.{u1, u3, u2} R A B _inst_1 _inst_3 _inst_4 _inst_5))))) r f) x) (HSMul.hSMul.{u1, u2, u2} R ((fun ([email protected]._hyg.403 : A) => B) x) ((fun ([email protected]._hyg.403 : A) => B) x) (instHSMul.{u1, u2} R ((fun ([email protected]._hyg.403 : A) => B) x) (SMulZeroClass.toSMul.{u1, u2} R ((fun ([email protected]._hyg.403 : A) => B) x) (AddMonoid.toZero.{u2} ((fun ([email protected]._hyg.403 : A) => B) x) (AddCommMonoid.toAddMonoid.{u2} ((fun ([email protected]._hyg.403 : A) => B) x) _inst_4)) (DistribSMul.toSMulZeroClass.{u1, u2} R ((fun ([email protected]._hyg.403 : A) => B) x) (AddMonoid.toAddZeroClass.{u2} ((fun ([email protected]._hyg.403 : A) => B) x) (AddCommMonoid.toAddMonoid.{u2} ((fun ([email protected]._hyg.403 : A) => B) x) _inst_4)) (DistribMulAction.toDistribSMul.{u1, u2} R ((fun ([email protected]._hyg.403 : A) => B) x) _inst_1 (AddCommMonoid.toAddMonoid.{u2} ((fun ([email protected]._hyg.403 : A) => B) x) _inst_4) _inst_5)))) r (FunLike.coe.{max (succ u3) (succ u2), succ u3, succ u2} (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) A (fun (_x : A) => (fun ([email protected]._hyg.403 : A) => B) _x) (AddHomClass.toFunLike.{max u3 u2, u3, u2} (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) A B (AddZeroClass.toAdd.{u3} A (AddMonoid.toAddZeroClass.{u3} A _inst_3)) (AddZeroClass.toAdd.{u2} B (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) (AddMonoidHomClass.toAddHomClass.{max 
u3 u2, u3, u2} (AddMonoidHom.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))) A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4)) (AddMonoidHom.addMonoidHomClass.{u3, u2} A B (AddMonoid.toAddZeroClass.{u3} A _inst_3) (AddMonoid.toAddZeroClass.{u2} B (AddCommMonoid.toAddMonoid.{u2} B _inst_4))))) f x)) Case conversion may be inaccurate. Consider using '#align add_monoid_hom.smul_apply AddMonoidHom.smul_applyₓ'. -/ theorem smul_apply (r : R) (f : A →+ B) (x : A) : (r • f) x = r • f x := rfl #align add_monoid_hom.smul_apply AddMonoidHom.smul_apply instance [SMulCommClass R S B] : SMulCommClass R S (A →+ B) := ⟨fun a b f => ext fun x => smul_comm _ _ _⟩ instance [SMul R S] [IsScalarTower R S B] : IsScalarTower R S (A →+ B) := ⟨fun a b f => ext fun x => smul_assoc _ _ _⟩ instance [DistribMulAction Rᵐᵒᵖ B] [IsCentralScalar R B] : IsCentralScalar R (A →+ B) := ⟨fun a b => ext fun x => op_smul_eq_smul _ _⟩ end instance [Semiring R] [AddMonoid A] [AddCommMonoid B] [Module R B] : Module R (A →+ B) := { AddMonoidHom.distribMulAction with add_smul := fun r s x => ext fun y => by simp [add_smul] zero_smul := fun x => ext fun y => by simp [zero_smul] } end AddMonoidHom
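/- A minimal usage sketch (added for illustration; not part of the original file): the
pointwise instances above make `smul_apply` directly usable on a scaled hom. -/
example {R A B : Type _} [Monoid R] [AddMonoid A] [AddCommMonoid B]
    [DistribMulAction R B] (r : R) (f : A →+ B) (x : A) :
    (r • f) x = r • f x :=
  AddMonoidHom.smul_apply r f x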
/- Copyright (c) 2021 Thomas Browning. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Thomas Browning, Jireh Loreaux -/ import group_theory.subsemigroup.center import algebra.group_with_zero.units.lemmas /-! # Centralizers of magmas and semigroups > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. ## Main definitions * `set.centralizer`: the centralizer of a subset of a magma * `subsemigroup.centralizer`: the centralizer of a subset of a semigroup * `set.add_centralizer`: the centralizer of a subset of an additive magma * `add_subsemigroup.centralizer`: the centralizer of a subset of an additive semigroup We provide `monoid.centralizer`, `add_monoid.centralizer`, `subgroup.centralizer`, and `add_subgroup.centralizer` in other files. -/ variables {M : Type*} {S T : set M} namespace set variables (S) /-- The centralizer of a subset of a magma. -/ @[to_additive add_centralizer /-" The centralizer of a subset of an additive magma. "-/] def centralizer [has_mul M] : set M := {c | ∀ m ∈ S, m * c = c * m} variables {S} @[to_additive mem_add_centralizer] lemma mem_centralizer_iff [has_mul M] {c : M} : c ∈ centralizer S ↔ ∀ m ∈ S, m * c = c * m := iff.rfl @[to_additive decidable_mem_add_centralizer] instance decidable_mem_centralizer [has_mul M] [∀ a : M, decidable $ ∀ b ∈ S, b * a = a * b] : decidable_pred (∈ centralizer S) := λ _, decidable_of_iff' _ (mem_centralizer_iff) variables (S) @[simp, to_additive zero_mem_add_centralizer] lemma one_mem_centralizer [mul_one_class M] : (1 : M) ∈ centralizer S := by simp [mem_centralizer_iff] @[simp] lemma zero_mem_centralizer [mul_zero_class M] : (0 : M) ∈ centralizer S := by simp [mem_centralizer_iff] variables {S} {a b : M} @[simp, to_additive add_mem_add_centralizer] lemma mul_mem_centralizer [semigroup M] (ha : a ∈ centralizer S) (hb : b ∈ centralizer S) : a * b ∈ centralizer S := λ g hg, by rw [mul_assoc, ←hb g hg, ← mul_assoc, ha g hg, mul_assoc] @[simp, to_additive neg_mem_add_centralizer] lemma inv_mem_centralizer [group M] (ha : a ∈ centralizer S) : a⁻¹ ∈ centralizer S := λ g hg, by rw [mul_inv_eq_iff_eq_mul, mul_assoc, eq_inv_mul_iff_mul_eq, ha g hg] @[simp] lemma add_mem_centralizer [distrib M] (ha : a ∈ centralizer S) (hb : b ∈ centralizer S) : a + b ∈ centralizer S := λ c hc, by rw [add_mul, mul_add, ha c hc, hb c hc] @[simp] lemma neg_mem_centralizer [has_mul M] [has_distrib_neg M] (ha : a ∈ centralizer S) : -a ∈ centralizer S := λ c hc, by rw [mul_neg, ha c hc, neg_mul] @[simp] lemma inv_mem_centralizer₀ [group_with_zero M] (ha : a ∈ centralizer S) : a⁻¹ ∈ centralizer S := (eq_or_ne a 0).elim (λ h, by { rw [h, inv_zero], exact zero_mem_centralizer S }) (λ ha0 c hc, by rw [mul_inv_eq_iff_eq_mul₀ ha0, mul_assoc, eq_inv_mul_iff_mul_eq₀ ha0, ha c hc]) @[simp, to_additive sub_mem_add_centralizer] lemma div_mem_centralizer [group M] (ha : a ∈ centralizer S) (hb : b ∈ centralizer S) : a / b ∈ centralizer S := begin rw [div_eq_mul_inv], exact mul_mem_centralizer ha (inv_mem_centralizer hb), end @[simp] lemma div_mem_centralizer₀ [group_with_zero M] (ha : a ∈ centralizer S) (hb : b ∈ centralizer S) : a / b ∈ centralizer S := begin rw div_eq_mul_inv, exact mul_mem_centralizer ha (inv_mem_centralizer₀ hb), end @[to_additive add_centralizer_subset] lemma centralizer_subset [has_mul M] (h : S ⊆ T) : centralizer T ⊆ centralizer S := λ t ht s hs, ht s (h hs) variables (M) @[simp, to_additive add_centralizer_univ] lemma centralizer_univ [has_mul M] 
: centralizer univ = center M := subset.antisymm (λ a ha b, ha b (set.mem_univ b)) (λ a ha b hb, ha b) variables {M} (S) @[simp, to_additive add_centralizer_eq_univ] lemma centralizer_eq_univ [comm_semigroup M] : centralizer S = univ := subset.antisymm (subset_univ _) $ λ x hx y hy, mul_comm y x end set namespace subsemigroup section variables {M} [semigroup M] (S) /-- The centralizer of a subset of a semigroup `M`. -/ @[to_additive "The centralizer of a subset of an additive semigroup."] def centralizer : subsemigroup M := { carrier := S.centralizer, mul_mem' := λ a b, set.mul_mem_centralizer } @[simp, norm_cast, to_additive] lemma coe_centralizer : ↑(centralizer S) = S.centralizer := rfl variables {S} @[to_additive] lemma mem_centralizer_iff {z : M} : z ∈ centralizer S ↔ ∀ g ∈ S, g * z = z * g := iff.rfl @[to_additive] instance decidable_mem_centralizer (a) [decidable $ ∀ b ∈ S, b * a = a * b] : decidable (a ∈ centralizer S) := decidable_of_iff' _ mem_centralizer_iff @[to_additive] lemma centralizer_le (h : S ⊆ T) : centralizer T ≤ centralizer S := set.centralizer_subset h variables (M) @[simp, to_additive] lemma centralizer_univ : centralizer set.univ = center M := set_like.ext' (set.centralizer_univ M) end end subsemigroup -- Guard against import creep assert_not_exists finset
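/- A minimal usage sketch (added for illustration; not in the original file): in a
commutative semigroup every element centralizes every subset, which is the
pointwise content of `set.centralizer_eq_univ` above. -/
example {M : Type*} [comm_semigroup M] (S : set M) (a : M) : a ∈ set.centralizer S :=
λ m hm, mul_comm m a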
theory append_lemmas imports Main "HOL-Library.FuncSet" "HOL-Algebra.Group" begin datatype ('a,'b) monoidgentype = C 'a 'b (infix "!" 63) datatype ('a,'b) groupgentype = P "('a,'b) monoidgentype" | N "('a,'b) monoidgentype" type_synonym ('a,'b) word = "(('a,'b) groupgentype) list" primrec inverse::"('a,'b) groupgentype \<Rightarrow> ('a,'b) groupgentype" where "inverse (P x) = (N x)" |"inverse (N x) = (P x)" primrec wordinverse::"('a,'b) word \<Rightarrow> ('a, 'b) word" where "wordinverse [] = []" |"wordinverse (x#xs) = (wordinverse xs)@[inverse x]" inductive_set spanset::"('a,'b) word set\<Rightarrow> ('a,'b) word set" ("\<langle>_\<rangle>") for S::"('a,'b) word set" where "x \<in> S \<Longrightarrow> x \<in> \<langle>S\<rangle>" |"x \<in> wordinverse ` S \<Longrightarrow> x \<in> \<langle>S\<rangle>" |"x \<in> S \<Longrightarrow> y \<in> \<langle>S\<rangle> \<Longrightarrow> x@y \<in> \<langle>S\<rangle>" |"x \<in> wordinverse ` S \<Longrightarrow> y \<in> \<langle>S\<rangle> \<Longrightarrow> x@y \<in> \<langle>S\<rangle>" definition setlistcross::"'a set \<Rightarrow> 'a list \<Rightarrow> 'a list set" where "setlistcross S xs = {[s]@xs | s. s \<in> S}" value "setlistcross {(1::nat), 2, 3} [(4::nat), 5, 6]" primrec lengthword::"nat \<Rightarrow> 'a set \<Rightarrow> 'a list set" where "lengthword 0 S = {[s] | s. s \<in> S}" |"lengthword (Suc n) S = \<Union> {setlistcross S xs | xs. xs \<in> (lengthword n S)}" abbreviation "ngroupword \<equiv> \<lambda> n (S::('a,'b) word set). lengthword n (S \<union> (wordinverse ` S))" datatype char = G | H value "ngroupword 1 {[P (C G (1::nat))], [N (C G (2::nat))], [P (C H (3::nat))]}" (*reduction removes cancellations next to each other*) fun reduction:: "('a,'b) word \<Rightarrow> ('a,'b) word" where "reduction [] = []" |"reduction [x] = [x]" |"reduction (g1#g2#wrd) = (if (g1 = inverse g2) then reduction wrd else (g1#(reduction (g2#wrd))))" value "reduction [P (C G (3::nat)), N (C G (3::nat)), (N (C G (2::nat)))]" fun reduced::"('a,'b) word \<Rightarrow> bool" where "reduced [] = True" |"reduced [g] = True" |"reduced (g#h#wrd) = (if (g \<noteq> inverse h) then reduced (h#wrd) else False)" primrec iter::"nat \<Rightarrow>('a \<Rightarrow> 'a) \<Rightarrow> ('a \<Rightarrow> 'a)" where "iter 0 f = (\<lambda> x. x)" |"iter (Suc n) f = (\<lambda> x.
f ((iter n f) x))" (*Prove the following*) lemma fixedpt_iteration: assumes "f x = x" shows "iter (n+1) f x = x" using assms proof(induction n) case 0 then show ?case by simp next case (Suc n) then show ?case by simp qed lemma iterative_fixed_pt: assumes "iter (n+1) f x = iter n f x" shows "iter (k+(n+1)) f x = iter (k+n) f x" using assms proof(induction k) case 0 then show ?case by force next case (Suc m) have "iter (m + (n + 1)) f x = iter (Suc m + n) f x" by simp then show ?case using Suc.IH Suc.prems by fastforce qed (*prove converse of the following too*) lemma assumes "reduced wrd" shows "reduction wrd = wrd" using assms proof(induction wrd rule: reduction.induct) case 1 then show ?case by simp next case (2 x) then show ?case by simp next case (3 g1 g2 wrd) then show ?case proof(cases "g1 = inverse g2") case True then show ?thesis using 3 by force next case False have "reduced (g2#wrd)" using False 3 by force then show ?thesis using False 3 by force qed qed lemma length_reduction: "length (reduction wrd) \<le> length wrd" proof(induction wrd rule: reduction.induct) case 1 then show ?case by simp next case (2 x) then show ?case by simp next case (3 g1 g2 wrd) then show ?case proof(cases "g1 = inverse g2") case True then show ?thesis using 3 by force next case False then show ?thesis using 3 by auto qed qed lemma decreasing_length: assumes "reduction wrd \<noteq> wrd" shows "length (reduction wrd) < length wrd" using assms proof(induction wrd rule: reduction.induct) case 1 then show ?case by simp next case (2 x) then show ?case by simp next case (3 g1 g2 wrd) then show ?case proof(cases "g1 = inverse g2") case True then have red_inv:"reduction (g1#g2#wrd) = reduction wrd" by auto then show ?thesis proof(cases "reduction wrd = wrd") case True then have "reduction (g1#g2#wrd) = wrd" using red_inv by auto then have "length (reduction (g1#g2#wrd)) = length wrd" by auto then show ?thesis by simp next case False then have "length (reduction wrd) < length wrd" using 3 True by argo then show ?thesis using red_inv by force qed next case False have prem:"reduction (g1#g2#wrd) \<noteq> (g1#g2#wrd)" using 3 by argo then have "reduction (g1#g2#wrd) = g1#reduction (g2#wrd)" using False by auto then have "reduction (g2#wrd) \<noteq> g2#wrd" using prem by fastforce then have "length (g2#wrd) > length (reduction (g2#wrd))" using 3 False by blast then have "length (g1#g2#wrd) > length (reduction (g1#g2#wrd))" using False by force then show ?thesis by fast qed qed lemma if_length_reduction_eq: assumes "length (reduction (wrd)) = length wrd" shows "reduction wrd = wrd" using assms proof(induction wrd rule: reduction.induct) case 1 then show ?case by simp next case (2 x) then show ?case by simp next case (3 g1 g2 wrd) then show ?case proof(cases "g1 = inverse g2") case True then have "reduction (g1#g2#wrd) = reduction (wrd)" by simp then have "length (reduction (g1#g2#wrd)) = length (reduction (wrd))" by auto moreover have "length (wrd) > length (reduction wrd)" using 3 by (metis \<open>reduction (g1 # g2 # wrd) = reduction wrd\<close> decreasing_length impossible_Cons le_cases) then show ?thesis using 3 by auto next case False then show ?thesis using "3.prems" decreasing_length nat_neq_iff by blast qed qed (*"reduction-reduced lemma"*) lemma reduction_fixpt: assumes "reduction wrd = wrd" shows "reduced wrd" using assms proof(induction wrd rule:reduction.induct) case 1 then show ?case by simp next case (2 x) then show ?case by simp next case (3 g1 g2 wrd) then show ?case by (metis decreasing_length 
impossible_Cons length_Cons less_Suc_eq less_or_eq_imp_le list.inject reduced.simps(3) reduction.simps(3)) qed (*Show that length decreases if and only if the word after reduction is not the same as the original word*) (*show that if a word does not change after one reduction step, subsequent reductions will be ineffective*) (*use the word length decrement argument to show that the reduced word is eventually reduced*) value "reduced [P (C G (1::nat)), N (C G (2::nat)), P (C G (1::nat))]" inductive reln::"('a,'b) word \<Rightarrow> ('a,'b) word \<Rightarrow> bool" (infixr "~" 65) where refl[intro!]: "a ~ a" | sym: "a ~ b \<Longrightarrow> b ~ a" | trans: "a ~ b \<Longrightarrow> b ~ c \<Longrightarrow> a ~ c" | base: "[g, inverse g] ~ []" | mult: "xs ~ xs' \<Longrightarrow> ys ~ ys' \<Longrightarrow> (xs@ys) ~ (xs'@ys')" lemma assumes "h = inverse g" shows "[g, h] ~ []" using assms reln.base inverse.simps by simp lemma relation: "(xs@ys) ~ xs@[g,inverse g]@ys" using reln.base reln.refl reln.mult proof- have "(xs@[g, inverse g]) ~ xs" using reln.base reln.mult reln.refl by fastforce then show ?thesis using mult[of "xs@[g, inverse g]" "xs" "ys" "ys"] reln.refl reln.sym by auto qed lemma inverse_of_inverse: assumes "g = inverse h" shows "h = inverse g" using assms inverse.simps by (metis groupgentype.exhaust) lemma rel_to_reduction:"xs ~ reduction xs" proof(induction xs rule:reduction.induct ) case 1 then show ?case using reln.refl by auto next case (2 x) then show ?case using reln.refl by auto next case (3 g1 g2 wrd) then show ?case proof(cases "g1 = inverse g2") case True have "[g1, g2] ~ []" using reln.base[of "g1"] inverse_of_inverse[of "g1" "g2"] True by blast then have 1:"([g1,g2]@wrd) ~ wrd" using reln.mult refl by fastforce with 3(1) have 2:"reduction ([g1,g2]@wrd) ~ wrd" using reln.trans True using append_Cons append_Nil reduction.simps(3) reln.sym by auto then show ?thesis using 1 reln.sym reln.trans by (metis append_Cons append_Nil) next case False then have "([g1]@(g2#wrd)) ~ ([g1]@(reduction (g2#wrd)))" using 3(2) reln.mult reln.refl by blast then have "([g1]@(g2#wrd)) ~ (reduction (g1#g2#wrd))" using False by simp then show ?thesis by simp qed qed definition wordeq::"('a,'b) word \<Rightarrow> ('a,'b) word set" ("[[_]]") where "wordeq wrd = {wrds. wrd ~ wrds}" (*This is the approach to normal forms using Newman's lemma*) definition cancel_at :: "nat \<Rightarrow> ('a,'b) word \<Rightarrow> ('a,'b) word" where "cancel_at i l = take i l @ drop (2+i) l" lemma cancel_at_leftappend: assumes "i\<ge>0" "(1+i) < length a" "cancel_at i a = b" shows "cancel_at (length c + i) (c@a) = (c@b)" proof- have "c@(take i a) = take (length c + i) (c@a)" using assms(1) assms(2) by auto moreover have "drop (i+2) a = drop (length c + (i+2)) (c@a)"using assms(1) assms(2) by simp ultimately show ?thesis unfolding cancel_at_def by (metis add.assoc add.commute append.assoc assms(3) cancel_at_def) qed lemma cancel_at_rightappend: assumes "i\<ge>0" "(1+i) < length a" "cancel_at i a = b" shows "cancel_at i (a@c) = (b@c)" proof- have "take i (a@c) = take i a" using assms(1) assms (2) by simp moreover have "(drop (2+i) a)@c = drop (2+i) (a@c)" using assms(1) assms(2) by simp ultimately show ?thesis unfolding cancel_at_def by (metis append.assoc assms(3) cancel_at_def) qed definition cancels_to_1_at :: "nat \<Rightarrow> ('a,'b) word \<Rightarrow> ('a,'b) word \<Rightarrow> bool" where "cancels_to_1_at i l1 l2 = (0\<le>i \<and> (1+i) < length l1 \<and> (inverse (l1 !
(1+i))) \<and> (l2 = cancel_at i l1))" lemma cancels_to_1_at_leftappend: assumes "i\<ge>0" "(1+i) < length a" "cancels_to_1_at i a b" shows "cancels_to_1_at (length c + i) (c@a) (c@b)" unfolding cancels_to_1_at_def proof- have 1:"0 \<le> (length c + i)" using assms(1) by simp moreover have 2: "1 + (length c + i) < length (c @ a)" using assms(2) by auto have "(inverse (a ! i)) = (a ! (i+1))" using assms(3) by (metis add.commute cancels_to_1_at_def) moreover then have 3: "inverse ((c @ a) ! (length c + i)) = (c @ a) ! (1 + (length c + i))" by (metis add.commute add.left_commute nth_append_length_plus) have "(b = cancel_at i a)" using assms(3)using cancels_to_1_at_def by auto moreover then have 4: "c @ b = cancel_at (length c + i) (c @ a)" using cancel_at_leftappend assms(1) assms(2) by metis ultimately show "0 \<le> length c + i \<and> 1 + (length c + i) < length (c @ a) \<and> inverse ((c @ a) ! (length c + i)) = (c @ a) ! (1 + (length c + i)) \<and> c @ b = cancel_at (length c + i) (c @ a)" using "2" "3" by blast qed lemma cancels_to_1_at_rightappend: assumes "i\<ge>0" "(1+i) < length a" "cancels_to_1_at i a b" shows "cancels_to_1_at i (a@c) (b@c)" unfolding cancels_to_1_at_def proof- have 1:"0 \<le> i" using assms(1) by simp moreover have 2: "1 + i < length (a@c)" using assms(2) by auto have "(inverse (a ! i)) = (a ! (i+1))" using assms(3) by (metis add.commute cancels_to_1_at_def) moreover then have 3: "inverse ((a @ c) ! i) = (a @ c) ! (1 + i)" by (metis Suc_eq_plus1 Suc_lessD add.commute assms(2) nth_append) have "(b = cancel_at i a)" using assms(3)using cancels_to_1_at_def by auto moreover then have 4: "b@c = cancel_at i (a@c)" using cancel_at_rightappend assms(1) assms(2) by metis ultimately show "0 \<le> i \<and> 1 + i < length (a @ c) \<and> inverse ((a @ c) ! i) = (a @ c) ! (1 + i) \<and> b @ c = cancel_at i (a @ c)" using "2" "3" by blast qed definition cancels_to_1 :: "('a,'b) word \<Rightarrow> ('a,'b) word \<Rightarrow> bool" where "cancels_to_1 l1 l2 = (\<exists>i. cancels_to_1_at i l1 l2)" lemma cancels_to_1_leftappend: assumes "cancels_to_1 a b" shows "cancels_to_1 (c@a) (c@b)" using assms unfolding cancels_to_1_def proof- obtain i where "cancels_to_1_at i a b" using assms cancels_to_1_def by auto then have "i\<ge>0 \<and> (1+i) < length a" using cancels_to_1_at_def by auto then have "cancels_to_1_at (length c + i) (c@a) (c@b)" using \<open>cancels_to_1_at i a b\<close> cancels_to_1_at_leftappend by auto then show "\<exists>i. cancels_to_1_at i (c @ a) (c @ b)" by auto qed lemma cancels_to_1_rightappend: assumes "cancels_to_1 a b" shows "cancels_to_1 (a@c) (b@c)" using assms unfolding cancels_to_1_def proof- obtain i where "cancels_to_1_at i a b" using assms cancels_to_1_def by auto then have "i\<ge>0 \<and> (1+i) < length a" using cancels_to_1_at_def by auto then have "cancels_to_1_at i (a@c) (b@c)" by (simp add: cancels_to_1_at_rightappend \<open>cancels_to_1_at i a b\<close>) then show "\<exists>i. 
cancels_to_1_at i (a @ c) (b @ c)" by auto qed definition cancels_to :: "('a,'b) word \<Rightarrow> ('a,'b) word \<Rightarrow> bool" where "cancels_to = (cancels_to_1)^**" lemma cancels_to_leftappend: "cancels_to a b \<longrightarrow> cancels_to (z@a) (z@b)" unfolding cancels_to_def apply(rule impI) proof(induction rule:rtranclp.induct) case (rtrancl_refl a) then show ?case by simp next case (rtrancl_into_rtrancl a b c) then have 1:"cancels_to_1 (z@b) (z@c)"by (simp add: cancels_to_1_leftappend) have "cancels_to_1\<^sup>*\<^sup>* (z @ a) (z @ b)" by (simp add: rtrancl_into_rtrancl.IH) then show "cancels_to_1\<^sup>*\<^sup>* (z @ a) (z @ c)" using 1 by auto qed lemma cancels_to_rightappend: "cancels_to a b \<longrightarrow> cancels_to (a@z) (b@z)" unfolding cancels_to_def apply(rule impI) proof(induction rule:rtranclp.induct) case (rtrancl_refl a) then show ?case by simp next case (rtrancl_into_rtrancl a b c) then have 1:"cancels_to_1 (b@z) (c@z)"by (simp add: cancels_to_1_rightappend) have "cancels_to_1\<^sup>*\<^sup>* (a@z) (b@z)" by (simp add: rtrancl_into_rtrancl.IH) then show "cancels_to_1\<^sup>*\<^sup>* (a@z) (c@z)" using 1 by auto qed lemma "cancels_to x y \<Longrightarrow> x ~ y" unfolding cancels_to_def proof(induction rule: rtranclp.induct) case (rtrancl_refl a) then show ?case by blast next case (rtrancl_into_rtrancl a b c) then have "cancels_to_1 b c" by simp then obtain i where i:"cancels_to_1_at i b c" unfolding cancels_to_1_def by meson then have c_def:"(take i b)@(drop (i + 2) b) = c" unfolding cancels_to_1_at_def cancel_at_def by force moreover have "b!i = inverse (b!(i+1))" using i unfolding cancels_to_1_at_def cancel_at_def using inverse_of_inverse by (simp add: inverse_of_inverse add.commute) then have "[b!i, b!(i+1)] ~ []" by (metis base inverse_of_inverse) then have "([b!i, b!(i+1)]@(drop (i+2) b)) ~ []@(drop (i+2) b)" using reln.refl reln.mult by fast then have "((take i b)@(([b!i, b!(i+1)]@(drop (i+2) b)))) ~ (take i b)@(drop (i+2) b)" using reln.refl reln.mult by (simp add: mult reln.refl) then have "b ~ c" using c_def by (metis Cons_nth_drop_Suc add.commute add_2_eq_Suc' append_Cons append_self_conv2 cancels_to_1_at_def i id_take_nth_drop linorder_not_less plus_1_eq_Suc trans_le_add2) then show ?case using reln.trans rtrancl_into_rtrancl(3) by fast qed definition cancels_eq::"('a,'b) word \<Rightarrow> ('a,'b) word \<Rightarrow> bool" where "cancels_eq = (\<lambda> wrd1 wrd2. cancels_to wrd1 wrd2 \<or> cancels_to wrd2 wrd1)^**" lemma cancels_eq_leftappend: "cancels_eq a b \<longrightarrow> cancels_eq (z@a) (z@b)" unfolding cancels_eq_def apply(rule impI) proof(induction rule:rtranclp.induct) case (rtrancl_refl a) then show ?case by simp next case (rtrancl_into_rtrancl a b c) then have 1: "cancels_to (z@b) (z@c) \<or> cancels_to (z@c) (z@b)" using cancels_to_leftappend by blast have "(\<lambda>wrd1 wrd2. cancels_to wrd1 wrd2 \<or> cancels_to wrd2 wrd1)\<^sup>*\<^sup>* (z @ a) (z @ b)" by (simp add: rtrancl_into_rtrancl.IH) then show "(\<lambda>wrd1 wrd2. 
cancels_to wrd1 wrd2 \<or> cancels_to wrd2 wrd1)\<^sup>*\<^sup>* (z @ a) (z @ c)" unfolding cancels_eq_def using 1 by (metis (no_types, lifting) rtranclp.simps) qed lemma cancels_eq_rightappend: "cancels_eq a b \<longrightarrow> cancels_eq (a@z) (b@z)" unfolding cancels_eq_def apply(rule impI) proof(induction rule:rtranclp.induct) case (rtrancl_refl a) then show ?case by simp next case (rtrancl_into_rtrancl a b c) then have 1: "cancels_to (b@z) (c@z) \<or> cancels_to (c@z) (b@z)" using cancels_to_rightappend by auto have "(\<lambda>wrd1 wrd2. cancels_to wrd1 wrd2 \<or> cancels_to wrd2 wrd1)\<^sup>*\<^sup>* (a@z) (b@z)" by (simp add: rtrancl_into_rtrancl.IH) then show "(\<lambda>wrd1 wrd2. cancels_to wrd1 wrd2 \<or> cancels_to wrd2 wrd1)\<^sup>*\<^sup>* (a@z) (c@z)" unfolding cancels_eq_def using 1 by (metis (no_types, lifting) rtranclp.simps) qed lemma "x ~ y \<Longrightarrow> cancels_eq x y" proof(induction rule:reln.induct) case (refl a) then show ?case unfolding cancels_eq_def cancels_to_def by simp next case (sym a b) then show ?case unfolding cancels_eq_def by (metis (no_types, lifting) sympD sympI symp_rtranclp) next case (trans a b c) then show ?case by (metis (no_types, lifting) cancels_eq_def rtranclp_trans) next case (base g) then have "cancels_to_1_at 0 [g, inverse g] []" unfolding cancels_to_1_at_def cancel_at_def by auto then have "cancels_to [g, inverse g] []" unfolding cancels_to_def using cancels_to_1_def by auto then show ?case unfolding cancels_eq_def by (simp add: r_into_rtranclp) next case (mult xs xs' ys ys') have "cancels_eq xs xs'" by (simp add: mult.IH(1)) then have 1:"cancels_eq (xs@ys) (xs'@ys)" by (simp add: cancels_eq_rightappend) have "cancels_eq ys ys'" by (simp add: mult.IH(2)) then have 2:"cancels_eq (xs'@ys) (xs'@ys')" by (simp add: cancels_eq_leftappend) then show "cancels_eq (xs@ys) (xs'@ys')" using 1 2 by (metis (no_types, lifting) cancels_eq_def rtranclp_trans) qed
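(* A small concrete check (added sketch, not in the original theory): a generator
   followed by its formal inverse cancels completely under `reduction`. The closing
   `end` is supplied here because the theory text above breaks off without one. *)
value "reduction [P (C G (1::nat)), N (C G (1::nat))]"

end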
%% Fun with remap % Maps a rectangular image into a circle using cv.remap % % To build the mapping we do the following simple geometry calculation: % % * shift the origin (0,0) coordinate to the center of the image % * convert the cartesian x-y coordinates to polar r-theta form % * determine the length of the line with the same theta that touches the % border of the rectangle % * retain the theta, but scale the r value based on how much the line has to % shrink to fit into the circle. % * convert the modified r-theta values back into x-y coordinates % % Sources: % % * <http://sidekick.windforwings.com/2012/12/opencv-fun-with-remap.html> % %% % source image fname = which('coins.png'); if isempty(fname) fname = fullfile(mexopencv.root(), 'test', 'apple.jpg'); end src = imread(fname); %% % coordinates space, shifted so that origin is at the center of the image [h,w,~] = size(src); c = [w,h] / 2; [X,Y] = meshgrid(single(1:w)-c(1), single(1:h)-c(2)); %% % convert cartesian coordinates to polar form (r,theta) R = hypot(Y,X); T = atan(Y./X); % NOTE: atan not atan2 % handle NaN case when atan(0/0) in the center T(isnan(T)) = 0; %% % scale R radius = min(h,w) / 2; D = min(abs(c(1)./cos(T)), abs(c(2)./sin(T))); R = R ./ (radius ./ D); %% % remap points map_X = c(1) + sign(X) .* R .* cos(abs(T)); map_Y = c(2) + sign(Y) .* R .* sin(abs(T)); %% % destination image dst = cv.remap(src, map_X, map_Y); %% % show result subplot(121), imshow(src), title('image') subplot(122), imshow(dst), title('remapped')
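%%
% sanity check (an added sketch, not from the original post): along the central
% row, the circle boundary should pull pixels from the source rectangle's right
% border, so map_X there should be close to the image width w
i = round(c(2)); j = round(c(1) + radius) - 1;
fprintf('map_X at the circle edge: %.1f (image width: %d)\n', map_X(i,j), w);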